Merge "mm: memblock: Add more debug logs"
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index fab641a..4834753 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2510,8 +2510,8 @@
 			http://repo.or.cz/w/linux-2.6/mini2440.git
 
 	mitigations=
-			[X86,PPC,S390] Control optional mitigations for CPU
-			vulnerabilities.  This is a set of curated,
+			[X86,PPC,S390,ARM64] Control optional mitigations for
+			CPU vulnerabilities.  This is a set of curated,
 			arch-independent options, each of which is an
 			aggregation of existing arch-specific options.
 
@@ -2520,12 +2520,14 @@
 				improves system performance, but it may also
 				expose users to several CPU vulnerabilities.
 				Equivalent to: nopti [X86,PPC]
+					       kpti=0 [ARM64]
 					       nospectre_v1 [PPC]
 					       nobp=0 [S390]
 					       nospectre_v1 [X86]
-					       nospectre_v2 [X86,PPC,S390]
+					       nospectre_v2 [X86,PPC,S390,ARM64]
 					       spectre_v2_user=off [X86]
 					       spec_store_bypass_disable=off [X86,PPC]
+					       ssbd=force-off [ARM64]
 					       l1tf=off [X86]
 					       mds=off [X86]
 
@@ -2873,10 +2875,10 @@
 			(bounds check bypass). With this option data leaks
 			are possible in the system.
 
-	nospectre_v2	[X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
-			(indirect branch prediction) vulnerability. System may
-			allow data leaks with this option, which is equivalent
-			to spectre_v2=off.
+	nospectre_v2	[X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for
+			the Spectre variant 2 (indirect branch prediction)
+			vulnerability. System may allow data leaks with this
+			option.
 
 	nospec_store_bypass_disable
 			[HW] Disable all mitigations for the Speculative Store Bypass vulnerability
@@ -3959,6 +3961,13 @@
 			Run specified binary instead of /init from the ramdisk,
 			used for early userspace startup. See initrd.
 
+	rdrand=		[X86]
+			force - Override the decision by the kernel to hide the
+				advertisement of RDRAND support (this affects
+				certain AMD processors because of buggy BIOS
+				support, specifically around the suspend/resume
+				path).
+
 	rdt=		[HW,X86,RDT]
 			Turn on/off individual RDT features. List is:
 			cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp,
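For reference, the parameters documented above are passed on the kernel command line; a hypothetical boot line combining them might read:

    mitigations=off rdrand=force

On ARM64, mitigations=off now also implies kpti=0, nospectre_v2 and ssbd=force-off per the equivalence list above, while rdrand=force only affects the X86 processors described in its entry.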
diff --git a/Documentation/arm64/elf_hwcaps.txt b/Documentation/arm64/elf_hwcaps.txt
index d6aff2c..6feaffe 100644
--- a/Documentation/arm64/elf_hwcaps.txt
+++ b/Documentation/arm64/elf_hwcaps.txt
@@ -178,3 +178,7 @@
 HWCAP_FLAGM
 
     Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0001.
+
+HWCAP_SSBS
+
+    Functionality implied by ID_AA64PFR1_EL1.SSBS == 0b0010.
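As a quick userspace sketch (not part of this patch; it assumes an arm64 toolchain whose uapi <asm/hwcap.h> defines HWCAP_SSBS), the new hwcap can be probed with getauxval():

    #include <stdio.h>
    #include <sys/auxv.h>     /* getauxval(), AT_HWCAP */
    #include <asm/hwcap.h>    /* HWCAP_SSBS (arm64 uapi header) */

    int main(void)
    {
            unsigned long hwcaps = getauxval(AT_HWCAP);

            printf("SSBS: %s\n",
                   (hwcaps & HWCAP_SSBS) ? "present" : "absent");
            return 0;
    }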
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index f205898..99ee7dd 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -325,7 +325,7 @@
 
 Using an upper layer path and/or a workdir path that are already used by
 another overlay mount is not allowed and may fail with EBUSY.  Using
-partially overlapping paths is not allowed but will not fail with EBUSY.
+partially overlapping paths is not allowed and may fail with EBUSY.
 If files are accessed from two overlayfs mounts which share or overlap the
 upper layer and/or workdir path the behavior of the overlay is undefined,
 though it will not result in a crash or deadlock.
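A minimal sketch of the behavior documented above (paths are placeholders; assumes an overlayfs-enabled kernel): a second overlay mount whose upperdir partially overlaps the first mount's upperdir may now be refused with EBUSY rather than silently accepted:

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* First overlay: upperdir=/u, workdir=/w (placeholders). */
            if (mount("overlay", "/mnt/a", "overlay", 0,
                      "lowerdir=/l,upperdir=/u,workdir=/w"))
                    perror("mount a");

            /* Second overlay whose upperdir lies inside the first one's
             * upperdir: partially overlapping, may now fail with EBUSY. */
            if (mount("overlay", "/mnt/b", "overlay", 0,
                      "lowerdir=/l2,upperdir=/u/sub,workdir=/w2"))
                    perror("mount b");    /* expect EBUSY */
            return 0;
    }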
diff --git a/Documentation/usb/rio.txt b/Documentation/usb/rio.txt
deleted file mode 100644
index aee715a..0000000
--- a/Documentation/usb/rio.txt
+++ /dev/null
@@ -1,138 +0,0 @@
-Copyright (C) 1999, 2000 Bruce Tenison
-Portions Copyright (C) 1999, 2000 David Nelson
-Thanks to David Nelson for guidance and the usage of the scanner.txt
-and scanner.c files to model our driver and this informative file.
-
-Mar. 2, 2000
-
-CHANGES
-
-- Initial Revision
-
-
-OVERVIEW
-
-This README will address issues regarding how to configure the kernel
-to access a RIO 500 mp3 player.  
-Before I explain how to use this to access the Rio500 please be warned:
-
-W A R N I N G:
---------------
-
-Please note that this software is still under development.  The authors
-are in no way responsible for any damage that may occur, no matter how
-inconsequential.
-
-It seems that the Rio has a problem when sending .mp3 with low batteries.
-I suggest when the batteries are low and you want to transfer stuff that you
-replace it with a fresh one. In my case, what happened is I lost two 16kb
-blocks (they are no longer usable to store information to it). But I don't
-know if that's normal or not; it could simply be a problem with the flash 
-memory.
-
-In an extreme case, I left my Rio playing overnight and the batteries wore 
-down to nothing and appear to have corrupted the flash memory. My RIO 
-needed to be replaced as a result.  Diamond tech support is aware of the 
-problem.  Do NOT allow your batteries to wear down to nothing before 
-changing them.  It appears RIO 500 firmware does not handle low battery 
-power well at all. 
-
-On systems with OHCI controllers, the kernel OHCI code appears to have 
-power on problems with some chipsets.  If you are having problems 
-connecting to your RIO 500, try turning it on first and then plugging it 
-into the USB cable.  
-
-Contact information:
---------------------
-
-   The main page for the project is hosted at sourceforge.net in the following
-   URL: <http://rio500.sourceforge.net>. You can also go to the project's
-   sourceforge home page at: <http://sourceforge.net/projects/rio500/>.
-   There is also a mailing list: rio500-users@lists.sourceforge.net
-
-Authors:
--------
-
-Most of the code was written by Cesar Miquel <miquel@df.uba.ar>. Keith 
-Clayton <kclayton@jps.net> is incharge of the PPC port and making sure
-things work there. Bruce Tenison <btenison@dibbs.net> is adding support
-for .fon files and also does testing. The program will mostly sure be
-re-written and Pete Ikusz along with the rest will re-design it. I would
-also like to thank Tri Nguyen <tmn_3022000@hotmail.com> who provided use 
-with some important information regarding the communication with the Rio.
-
-ADDITIONAL INFORMATION and Userspace tools
-
-http://rio500.sourceforge.net/
-
-
-REQUIREMENTS
-
-A host with a USB port.  Ideally, either a UHCI (Intel) or OHCI
-(Compaq and others) hardware port should work.
-
-A Linux development kernel (2.3.x) with USB support enabled or a
-backported version to linux-2.2.x.  See http://www.linux-usb.org for
-more information on accomplishing this.
-
-A Linux kernel with RIO 500 support enabled.
-
-'lspci' which is only needed to determine the type of USB hardware
-available in your machine.
-
-CONFIGURATION
-
-Using `lspci -v`, determine the type of USB hardware available.
-
-  If you see something like:
-
-    USB Controller: ......
-    Flags: .....
-    I/O ports at ....
-
-  Then you have a UHCI based controller.
-
-  If you see something like:
-
-     USB Controller: .....
-     Flags: ....
-     Memory at .....
-
-  Then you have a OHCI based controller.
-
-Using `make menuconfig` or your preferred method for configuring the
-kernel, select 'Support for USB', 'OHCI/UHCI' depending on your
-hardware (determined from the steps above), 'USB Diamond Rio500 support', and
-'Preliminary USB device filesystem'.  Compile and install the modules
-(you may need to execute `depmod -a` to update the module
-dependencies).
-
-Add a device for the USB rio500:
-  `mknod /dev/usb/rio500 c 180 64`
-
-Set appropriate permissions for /dev/usb/rio500 (don't forget about
-group and world permissions).  Both read and write permissions are
-required for proper operation.
-
-Load the appropriate modules (if compiled as modules):
-
-  OHCI:
-    modprobe usbcore
-    modprobe usb-ohci
-    modprobe rio500
-
-  UHCI:
-    modprobe usbcore
-    modprobe usb-uhci  (or uhci)
-    modprobe rio500
-
-That's it.  The Rio500 Utils at: http://rio500.sourceforge.net should
-be able to access the rio500.
-
-BUGS
-
-If you encounter any problems feel free to drop me an email.
-
-Bruce Tenison
-btenison@dibbs.net
-
diff --git a/MAINTAINERS b/MAINTAINERS
index 831aeac..923cd02 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -15125,13 +15125,6 @@
 S:	Maintained
 F:	drivers/net/usb/dm9601.c
 
-USB DIAMOND RIO500 DRIVER
-M:	Cesar Miquel <miquel@df.uba.ar>
-L:	rio500-users@lists.sourceforge.net
-W:	http://rio500.sourceforge.net
-S:	Maintained
-F:	drivers/usb/misc/rio500*
-
 USB EHCI DRIVER
 M:	Alan Stern <stern@rowland.harvard.edu>
 L:	linux-usb@vger.kernel.org
diff --git a/Makefile b/Makefile
index d3d53eb..37589dd 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 67
+SUBLEVEL = 81
 EXTRAVERSION =
 NAME = "People's Front"
 
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 5c66633..215f515 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -179,6 +179,12 @@ void show_regs(struct pt_regs *regs)
 	struct task_struct *tsk = current;
 	struct callee_regs *cregs;
 
+	/*
+	 * generic code calls us with preemption disabled, but some calls
+	 * here could sleep, so re-enable to avoid lockdep splat
+	 */
+	preempt_enable();
+
 	print_task_path_n_nm(tsk);
 	show_regs_print_info(KERN_INFO);
 
@@ -221,6 +227,8 @@ void show_regs(struct pt_regs *regs)
 	cregs = (struct callee_regs *)current->thread.callee_reg;
 	if (cregs)
 		show_callee_regs(cregs);
+
+	preempt_disable();
 }
 
 void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
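The bracketing added to show_regs() above is a general pattern: a function entered with preemption disabled temporarily re-enables it around calls that may sleep, then restores the count before returning. A sketch of the shape (names are illustrative, not from this patch):

    #include <linux/preempt.h>

    /* Hypothetical helper; stands in for the sleeping calls in show_regs(). */
    void do_work_that_may_sleep(void);

    void entered_with_preemption_disabled(void)
    {
            preempt_enable();          /* allow the sleeping calls below */
            do_work_that_may_sleep();
            preempt_disable();         /* restore caller's preempt count */
    }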
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index db69130..4e8143d 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -66,14 +66,12 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 	struct vm_area_struct *vma = NULL;
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
-	siginfo_t info;
+	int si_code = SEGV_MAPERR;
 	int ret;
 	vm_fault_t fault;
 	int write = regs->ecr_cause & ECR_C_PROTV_STORE;  /* ST/EX */
 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
-	clear_siginfo(&info);
-
 	/*
 	 * We fault-in kernel-space virtual memory on-demand. The
 	 * 'reference' page table is init_mm.pgd.
@@ -83,16 +81,14 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 	 * only copy the information from the master page table,
 	 * nothing more.
 	 */
-	if (address >= VMALLOC_START) {
+	if (address >= VMALLOC_START && !user_mode(regs)) {
 		ret = handle_kernel_vaddr_fault(address);
 		if (unlikely(ret))
-			goto bad_area_nosemaphore;
+			goto no_context;
 		else
 			return;
 	}
 
-	info.si_code = SEGV_MAPERR;
-
 	/*
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
@@ -119,7 +115,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 	 * we can handle it..
 	 */
 good_area:
-	info.si_code = SEGV_ACCERR;
+	si_code = SEGV_ACCERR;
 
 	/* Handle protection violation, execute on heap or stack */
 
@@ -143,12 +139,17 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 	 */
 	fault = handle_mm_fault(vma, address, flags);
 
-	/* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
 	if (unlikely(fatal_signal_pending(current))) {
-		if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
-			up_read(&mm->mmap_sem);
-		if (user_mode(regs))
+
+		/*
+		 * if fault retry, mmap_sem already relinquished by core mm
+		 * so OK to return to user mode (with signal handled first)
+		 */
+		if (fault & VM_FAULT_RETRY) {
+			if (!user_mode(regs))
+				goto no_context;
 			return;
+		}
 	}
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
@@ -195,15 +196,10 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 bad_area:
 	up_read(&mm->mmap_sem);
 
-bad_area_nosemaphore:
 	/* User mode accesses just cause a SIGSEGV */
 	if (user_mode(regs)) {
 		tsk->thread.fault_address = address;
-		info.si_signo = SIGSEGV;
-		info.si_errno = 0;
-		/* info.si_code has been set above */
-		info.si_addr = (void __user *)address;
-		force_sig_info(SIGSEGV, &info, tsk);
+		force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk);
 		return;
 	}
 
@@ -238,9 +234,5 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 		goto no_context;
 
 	tsk->thread.fault_address = address;
-	info.si_signo = SIGBUS;
-	info.si_errno = 0;
-	info.si_code = BUS_ADRERR;
-	info.si_addr = (void __user *)address;
-	force_sig_info(SIGBUS, &info, tsk);
+	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk);
 }
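The conversion above replaces open-coded siginfo setup with force_sig_fault(), which fills si_signo, si_errno, si_code and si_addr internally. Simplified, the 4.19-era helper used here has this shape (the real declaration carries extra arch-conditional parameters on some architectures):

    int force_sig_fault(int sig, int code, void __user *addr,
                        struct task_struct *t);

so each signal path collapses to a single call, as in the hunk above.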
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index f201cf3..9ceb0b2 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1586,8 +1586,9 @@
 	  code to do integer division.
 
 config AEABI
-	bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && !CPU_V7M && !CPU_V6 && !CPU_V6K
-	default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K
+	bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && \
+		!CPU_V7M && !CPU_V6 && !CPU_V6K && !CC_IS_CLANG
+	default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K || CC_IS_CLANG
 	help
 	  This option allows for the kernel to be compiled using the latest
 	  ARM ABI (aka EABI).  This is only useful if you are using a user
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 7574964..ded1724 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -10,6 +10,10 @@
 #
 # Copyright (C) 1995-2001 by Russell King
 
+ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
+export DTC_FLAGS := -@
+endif
+
 LDFLAGS_vmlinux	:=-p --no-undefined -X --pic-veneer
 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux	+= --be8
@@ -36,7 +40,7 @@
 endif
 
 ifeq ($(CONFIG_FRAME_POINTER),y)
-KBUILD_CFLAGS	+=-fno-omit-frame-pointer -mapcs -mno-sched-prolog
+KBUILD_CFLAGS  +=-fno-omit-frame-pointer $(call cc-option,-mapcs,) $(call cc-option,-mno-sched-prolog,)
 endif
 
 ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
@@ -314,6 +318,8 @@
 KBUILD_DTBS := dtbs
 endif
 
+DTSSUBDIR       := vendor/qcom
+
 all:	$(notdir $(KBUILD_IMAGE)) $(KBUILD_DTBS)
 
 
@@ -348,6 +354,7 @@
 
 dtbs: prepare scripts
 	$(Q)$(MAKE) $(build)=$(boot)/dts
+	$(foreach DIR, $(DTSSUBDIR), $(Q)$(MAKE) $(build)=$(boot)/dts/$(DIR) MACHINE=$(MACHINE) dtbs)
 
 dtbs_install:
 	$(Q)$(MAKE) $(dtbinst)=$(boot)/dts
@@ -359,7 +366,10 @@
 endif
 
 zImage-dtb: vmlinux scripts dtbs
-	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) DTSSUBDIR=$(DTSSUBDIR) $(boot)/$@
+
+Image-dtb: vmlinux scripts dtbs
+	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) DTSSUBDIR=$(DTSSUBDIR) $(boot)/$@
 
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 3e3199a..c7f62a8 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -33,10 +33,20 @@
 DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES))
 ifneq ($(DTB_NAMES),)
 DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
-else
-DTB_LIST := $(dtb-y)
-endif
 DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST))
+else
+# Follow the same approach as ARM64 to have dtb files
+# appended to the kernel image.
+# For dt overlay support there is currently no uniform
+# list of dtb files. The only uniform list is for the
+# overlay's dtbo files, managed by dtbo-y, and the dtb
+# files are derived from each dtbo file's dtbo-base. So
+# simply find all dtb files generated during the build.
+# Note that the dtb obj directory is always cleaned at
+# the beginning of the kernel build.
+DTB_OBJS := $(shell find $(obj)/dts/ -name \*.dtb)
+endif
 
 ifeq ($(CONFIG_XIP_KERNEL),y)
 
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index d4b7c59..cf1e4f7 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -1142,6 +1142,8 @@
 				ti,hwmods = "dss_dispc";
 				clocks = <&disp_clk>;
 				clock-names = "fck";
+
+				max-memory-bandwidth = <230000000>;
 			};
 
 			rfbi: rfbi@4832a800 {
diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts
index d9a2049..6bebedf 100644
--- a/arch/arm/boot/dts/am571x-idk.dts
+++ b/arch/arm/boot/dts/am571x-idk.dts
@@ -98,14 +98,9 @@
 };
 
 &mmc1 {
-	pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+	pinctrl-names = "default", "hs";
 	pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
 	pinctrl-1 = <&mmc1_pins_hs>;
-	pinctrl-2 = <&mmc1_pins_sdr12>;
-	pinctrl-3 = <&mmc1_pins_sdr25>;
-	pinctrl-4 = <&mmc1_pins_sdr50>;
-	pinctrl-5 = <&mmc1_pins_ddr50_rev20 &mmc1_iodelay_ddr50_conf>;
-	pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
 };
 
 &mmc2 {
diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts
index 3ef9111..9235173 100644
--- a/arch/arm/boot/dts/am572x-idk.dts
+++ b/arch/arm/boot/dts/am572x-idk.dts
@@ -20,14 +20,9 @@
 };
 
 &mmc1 {
-	pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+	pinctrl-names = "default", "hs";
 	pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
 	pinctrl-1 = <&mmc1_pins_hs>;
-	pinctrl-2 = <&mmc1_pins_sdr12>;
-	pinctrl-3 = <&mmc1_pins_sdr25>;
-	pinctrl-4 = <&mmc1_pins_sdr50>;
-	pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>;
-	pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
 };
 
 &mmc2 {
diff --git a/arch/arm/boot/dts/am574x-idk.dts b/arch/arm/boot/dts/am574x-idk.dts
index 378dfa7..ae43de3 100644
--- a/arch/arm/boot/dts/am574x-idk.dts
+++ b/arch/arm/boot/dts/am574x-idk.dts
@@ -24,14 +24,9 @@
 };
 
 &mmc1 {
-	pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+	pinctrl-names = "default", "hs";
 	pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
 	pinctrl-1 = <&mmc1_pins_hs>;
-	pinctrl-2 = <&mmc1_pins_default>;
-	pinctrl-3 = <&mmc1_pins_hs>;
-	pinctrl-4 = <&mmc1_pins_sdr50>;
-	pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_conf>;
-	pinctrl-6 = <&mmc1_pins_ddr50 &mmc1_iodelay_sdr104_conf>;
 };
 
 &mmc2 {
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
index ad95311..d53532b 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
@@ -433,6 +433,7 @@
 
 	bus-width = <4>;
 	cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */
+	no-1-8-v;
 };
 
 &mmc2 {
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
index 5a77b33..34c699658 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
@@ -19,14 +19,9 @@
 };
 
 &mmc1 {
-	pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+	pinctrl-names = "default", "hs";
 	pinctrl-0 = <&mmc1_pins_default>;
 	pinctrl-1 = <&mmc1_pins_hs>;
-	pinctrl-2 = <&mmc1_pins_sdr12>;
-	pinctrl-3 = <&mmc1_pins_sdr25>;
-	pinctrl-4 = <&mmc1_pins_sdr50>;
-	pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev11_conf>;
-	pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev11_conf>;
 	vmmc-supply = <&vdd_3v3>;
 	vqmmc-supply = <&ldo1_reg>;
 };
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
index 17c41da..ccd9916 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
@@ -19,14 +19,9 @@
 };
 
 &mmc1 {
-	pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+	pinctrl-names = "default", "hs";
 	pinctrl-0 = <&mmc1_pins_default>;
 	pinctrl-1 = <&mmc1_pins_hs>;
-	pinctrl-2 = <&mmc1_pins_sdr12>;
-	pinctrl-3 = <&mmc1_pins_sdr25>;
-	pinctrl-4 = <&mmc1_pins_sdr50>;
-	pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>;
-	pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
 	vmmc-supply = <&vdd_3v3>;
 	vqmmc-supply = <&ldo1_reg>;
 };
diff --git a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
index 28ebb4e..214b9e6 100644
--- a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
+++ b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
@@ -32,7 +32,7 @@
  *
  * Datamanual Revisions:
  *
- * AM572x Silicon Revision 2.0: SPRS953B, Revised November 2016
+ * AM572x Silicon Revision 2.0: SPRS953F, Revised May 2019
  * AM572x Silicon Revision 1.1: SPRS915R, Revised November 2016
  *
  */
@@ -229,45 +229,45 @@
 
 	mmc3_pins_default: mmc3_pins_default {
 		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
-			DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
-			DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
-			DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
-			DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
-			DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+			DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+			DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+			DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+			DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+			DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+			DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
 		>;
 	};
 
 	mmc3_pins_hs: mmc3_pins_hs {
 		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
-			DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
-			DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
-			DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
-			DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
-			DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+			DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+			DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+			DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+			DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+			DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+			DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
 		>;
 	};
 
 	mmc3_pins_sdr12: mmc3_pins_sdr12 {
 		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
-			DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
-			DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
-			DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
-			DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
-			DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+			DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+			DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+			DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+			DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+			DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+			DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
 		>;
 	};
 
 	mmc3_pins_sdr25: mmc3_pins_sdr25 {
 		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
-			DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
-			DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
-			DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
-			DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
-			DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+			DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+			DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+			DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+			DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+			DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+			DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
 		>;
 	};
 
diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
index 57c2332..25bdc9d 100644
--- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
+++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
@@ -437,6 +437,7 @@
 				regulator-name = "vdd_ldo10";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
 				regulator-state-mem {
 					regulator-off-in-suspend;
 				};
diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
index d80ab90..7989631 100644
--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
+++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
@@ -437,6 +437,7 @@
 				regulator-name = "vdd_ldo10";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
 				regulator-state-mem {
 					regulator-off-in-suspend;
 				};
diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
index 502a361..15d6157b 100644
--- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
+++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
@@ -65,7 +65,7 @@
 		gpio-miso = <&gpio1 8 GPIO_ACTIVE_HIGH>;
 		gpio-mosi = <&gpio1 7 GPIO_ACTIVE_HIGH>;
 		/* Collides with pflash CE1, not so cool */
-		cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
+		cs-gpios = <&gpio0 20 GPIO_ACTIVE_LOW>;
 		num-chipselects = <1>;
 
 		panel: display@0 {
diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi
index 895fbde..c1ed831 100644
--- a/arch/arm/boot/dts/imx7-colibri.dtsi
+++ b/arch/arm/boot/dts/imx7-colibri.dtsi
@@ -323,6 +323,7 @@
 	vmmc-supply = <&reg_module_3v3>;
 	vqmmc-supply = <&reg_DCDC3>;
 	non-removable;
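+	/* mask out bit 63 of the SDHCI capability register */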
+	sdhci-caps-mask = <0x80000000 0x0>;
 };
 
 &iomuxc {
diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
index 8bf365d..584418f 100644
--- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
+++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
@@ -43,7 +43,7 @@
 			  <&clks IMX7D_ENET1_TIME_ROOT_CLK>;
 	assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
 	assigned-clock-rates = <0>, <100000000>;
-	phy-mode = "rgmii";
+	phy-mode = "rgmii-id";
 	phy-handle = <&ethphy0>;
 	fsl,magic-packet;
 	status = "okay";
@@ -69,7 +69,7 @@
 			  <&clks IMX7D_ENET2_TIME_ROOT_CLK>;
 	assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
 	assigned-clock-rates = <0>, <100000000>;
-	phy-mode = "rgmii";
+	phy-mode = "rgmii-id";
 	phy-handle = <&ethphy1>;
 	fsl,magic-packet;
 	status = "okay";
diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
index 78db673..54d056b 100644
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
@@ -386,10 +386,10 @@
 			#address-cells = <3>;
 			#size-cells = <2>;
 
-			ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000
-				  0x82000000 0 0x48000000 0x48000000 0 0x10000000>;
+			ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000>,
+				 <0x82000000 0 0x40300000 0x40300000 0 0x00d00000>;
 
-			interrupts = <GIC_SPI 141 IRQ_TYPE_EDGE_RISING>;
+			interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "msi";
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0x7>;
diff --git a/arch/arm/boot/dts/vendor/qcom/Makefile b/arch/arm/boot/dts/vendor/qcom/Makefile
new file mode 100644
index 0000000..541a6d1
--- /dev/null
+++ b/arch/arm/boot/dts/vendor/qcom/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+include $(srctree)/arch/arm64/boot/dts/vendor/qcom/Makefile
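+# Build 32-bit dtbs directly from the shared arm64 dts sources.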
+$(obj)/%.dtb: $(src)/../../../../../arm64/boot/dts/vendor/qcom/%.dts FORCE
+	$(call if_changed_dep,dtc)
+
+ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
+$(obj)/%.dtbo: $(src)/../../../../../arm64/boot/dts/vendor/qcom/%.dts FORCE
+	$(call if_changed_dep,dtc)
+	$(call if_changed,dtbo_verify)
+
+dtbs: $(addprefix $(obj)/,$(dtb-y)) $(addprefix $(obj)/,$(dtbo-y))
+else
+dtbs: $(addprefix $(obj)/,$(dtb-y))
+endif
+clean-files := *.dtb
diff --git a/arch/arm/configs/badge4_defconfig b/arch/arm/configs/badge4_defconfig
index 5ae5b52..ef484c4 100644
--- a/arch/arm/configs/badge4_defconfig
+++ b/arch/arm/configs/badge4_defconfig
@@ -91,7 +91,6 @@
 CONFIG_USB_SERIAL_CYBERJACK=m
 CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OMNINET=m
-CONFIG_USB_RIO500=m
 CONFIG_EXT2_FS=m
 CONFIG_EXT3_FS=m
 CONFIG_MSDOS_FS=y
diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig
index 09e1672..0ba8df0 100644
--- a/arch/arm/configs/corgi_defconfig
+++ b/arch/arm/configs/corgi_defconfig
@@ -197,7 +197,6 @@
 CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 6bb506e..cc63d09 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -588,7 +588,6 @@
 CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 2afb359..bd71d5b 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -334,7 +334,6 @@
 CONFIG_USB_EMI26=m
 CONFIG_USB_ADUTUX=m
 CONFIG_USB_SEVSEG=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYPRESS_CY7C63=m
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index 9ea82c1..3aff4ca 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -191,7 +191,6 @@
 CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
diff --git a/arch/arm/configs/vendor/bengal-perf_defconfig b/arch/arm/configs/vendor/bengal-perf_defconfig
new file mode 100644
index 0000000..44f2dd8
--- /dev/null
+++ b/arch/arm/configs/vendor/bengal-perf_defconfig
@@ -0,0 +1,633 @@
+CONFIG_LOCALVERSION="-perf"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_FHANDLE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_PROFILING=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_BENGAL=y
+# CONFIG_VDSO is not set
+CONFIG_PCI=y
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_ARM_PSCI=y
+CONFIG_HIGHMEM=y
+CONFIG_SECCOMP=y
+CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
+CONFIG_EFI=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_ARM_QCOM_CPUFREQ_HW=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_KERNEL_MODE_NEON=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_ENERGY_MODEL=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_ARM_CRYPTO=y
+CONFIG_CRYPTO_SHA2_ARM_CE=y
+CONFIG_CRYPTO_GHASH_ARM_CE=y
+CONFIG_PANIC_ON_REFCOUNT_ERROR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+# CONFIG_BRIDGE_NETFILTER is not set
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_BRIDGE_EBT_T_FILTER=y
+CONFIG_BRIDGE_EBT_T_NAT=y
+CONFIG_BRIDGE_EBT_ARPREPLY=y
+CONFIG_BRIDGE_EBT_DNAT=y
+CONFIG_BRIDGE_EBT_SNAT=y
+CONFIG_IP_SCTP=y
+CONFIG_L2TP=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_NETEM=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_QRTR=y
+CONFIG_QRTR_SMD=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+CONFIG_RFKILL=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_WCD_IRQ=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_DMA_CMA=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_UBI=y
+CONFIG_ZRAM=y
+CONFIG_ZRAM_DEDUP=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_RMNET=y
+CONFIG_PHYLIB=y
+CONFIG_AT803X_PHY=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_INPUT_TABLET=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_SERIAL_MSM_GENI_HALF_SAMPLING=y
+CONFIG_HVC_DCC=y
+CONFIG_HVC_DCC_SERIALIZE_SMP=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_DIAG_CHAR=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_SPI_DEBUG=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_BENGAL=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_QPNP_SMB5=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
+CONFIG_QPNP_QG=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_STATISTICS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_QTI_ADC_TM=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_SENSOR=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y
+CONFIG_QTI_LMH_CPU_VDD_COOLING_DEVICE=y
+CONFIG_QTI_CX_IPEAK_COOLING_DEVICE=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_REGULATOR_PM8008=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_RADIO_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SOC_CAMERA=y
+CONFIG_SOC_CAMERA_PLATFORM=y
+CONFIG_V4L_TEST_DRIVERS=y
+CONFIG_VIDEO_VIM2M=y
+CONFIG_VIDEO_VICODEC=y
+CONFIG_DRM=y
+# CONFIG_DRM_MSM is not set
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_QCOM_EMU_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=900
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
+CONFIG_MMC_IPC_LOGGING=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_QTI_TRI_LED=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PM8XXX=y
+CONFIG_DMADEVICES=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ION=y
+CONFIG_QPNP_REVID=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_IPA3=y
+CONFIG_IPA_WDI_UNIFIED_API=y
+CONFIG_RMNET_IPA3=y
+CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_SPMI_PMIC_CLKDIV=y
+CONFIG_SM_GPUCC_BENGAL=y
+CONFIG_SM_DISPCC_BENGAL=y
+CONFIG_SM_DEBUGCC_BENGAL=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_MAILBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_MSM_QMP=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_MDT_LOADER=y
+CONFIG_QPNP_PBS=y
+CONFIG_QCOM_QMI_HELPERS=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
+CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
+CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
+CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_QCOM_EUD=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_GLINK=y
+CONFIG_QCOM_GLINK_PKT=y
+CONFIG_QCOM_SMP2P_SLEEPSTATE=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_QTEE_SHM_BRIDGE=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
+CONFIG_ICNSS=y
+CONFIG_ICNSS_QMI=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_ARM_QCOM_DEVFREQ_FW=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_IIO=y
+CONFIG_QCOM_SPMI_ADC5=y
+CONFIG_PWM=y
+CONFIG_PWM_QTI_LPG=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_QCOM_MPM=y
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_QCOM_QFPROM=y
+CONFIG_NVMEM_SPMI_SDAM=y
+CONFIG_SLIMBUS=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_QCOM_KGSL=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_LSM_MMAP_MIN_ADDR=4096
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_HARDENED_USERCOPY_PAGESPAN=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_XZ_DEC=y
+CONFIG_STACK_HASH_ORDER_SHIFT=12
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_FRAME_WARN=2048
+CONFIG_PAGE_OWNER=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=-1
+CONFIG_SCHEDSTATS=y
+CONFIG_IPC_LOGGING=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_TGU=y
diff --git a/arch/arm/configs/vendor/bengal_defconfig b/arch/arm/configs/vendor/bengal_defconfig
new file mode 100644
index 0000000..71b910b
--- /dev/null
+++ b/arch/arm/configs/vendor/bengal_defconfig
@@ -0,0 +1,694 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_DEBUG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_FHANDLE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_PROFILING=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_BENGAL=y
+# CONFIG_VDSO is not set
+CONFIG_PCI=y
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_ARM_PSCI=y
+CONFIG_HIGHMEM=y
+CONFIG_SECCOMP=y
+CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
+CONFIG_EFI=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_ARM_QCOM_CPUFREQ_HW=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_KERNEL_MODE_NEON=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_ENERGY_MODEL=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_ARM_CRYPTO=y
+CONFIG_CRYPTO_SHA2_ARM_CE=y
+CONFIG_CRYPTO_GHASH_ARM_CE=y
+CONFIG_PANIC_ON_REFCOUNT_ERROR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_IOSCHED_BFQ=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_CMA_ALLOW_WRITE_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_BRIDGE_EBT_T_FILTER=y
+CONFIG_BRIDGE_EBT_T_NAT=y
+CONFIG_BRIDGE_EBT_ARPREPLY=y
+CONFIG_BRIDGE_EBT_DNAT=y
+CONFIG_BRIDGE_EBT_SNAT=y
+CONFIG_IP_SCTP=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_NETEM=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_QRTR=y
+CONFIG_QRTR_SMD=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+CONFIG_RFKILL=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_WCD_IRQ=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_DMA_CMA=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_UBI=y
+CONFIG_ZRAM=y
+CONFIG_ZRAM_DEDUP=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_RMNET=y
+CONFIG_PHYLIB=y
+CONFIG_AT803X_PHY=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=m
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_INPUT_TABLET=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_SERIAL_MSM_GENI_CONSOLE=y
+CONFIG_SERIAL_MSM_GENI_HALF_SAMPLING=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_TTY_PRINTK=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_DIAG_CHAR=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_SPI_DEBUG=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_BENGAL=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_QPNP_SMB5=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
+CONFIG_QPNP_QG=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_STATISTICS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_QTI_ADC_TM=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_SENSOR=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y
+CONFIG_QTI_LMH_CPU_VDD_COOLING_DEVICE=y
+CONFIG_QTI_CX_IPEAK_COOLING_DEVICE=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_REGULATOR_PM8008=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_RADIO_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SOC_CAMERA=y
+CONFIG_SOC_CAMERA_PLATFORM=y
+CONFIG_V4L_TEST_DRIVERS=y
+CONFIG_VIDEO_VIM2M=y
+CONFIG_VIDEO_VICODEC=y
+CONFIG_DRM=y
+# CONFIG_DRM_MSM is not set
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_QCOM_EMU_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=900
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
+CONFIG_MMC_IPC_LOGGING=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_QTI_TRI_LED=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PM8XXX=y
+CONFIG_DMADEVICES=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ION=y
+CONFIG_QPNP_REVID=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_IPA3=y
+CONFIG_IPA_WDI_UNIFIED_API=y
+CONFIG_RMNET_IPA3=y
+CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_SPMI_PMIC_CLKDIV=y
+CONFIG_SM_GPUCC_BENGAL=y
+CONFIG_SM_DISPCC_BENGAL=y
+CONFIG_SM_DEBUGCC_BENGAL=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_MAILBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_MSM_QMP=y
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_SMMU_TESTBUS_DUMP=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_MDT_LOADER=y
+CONFIG_QPNP_PBS=y
+CONFIG_QCOM_QMI_HELPERS=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
+CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
+CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
+CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_QCOM_EUD=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_ERP=y
+CONFIG_PANIC_ON_GLADIATOR_ERROR=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_GLINK=y
+CONFIG_QCOM_GLINK_PKT=y
+CONFIG_QCOM_SMP2P_SLEEPSTATE=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_MSM_EVENT_TIMER=y
+# CONFIG_MSM_JTAGV8 is not set
+CONFIG_QTEE_SHM_BRIDGE=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
+CONFIG_ICNSS=y
+CONFIG_ICNSS_DEBUG=y
+CONFIG_ICNSS_QMI=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_ARM_QCOM_DEVFREQ_FW=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_IIO=y
+CONFIG_QCOM_SPMI_ADC5=y
+CONFIG_PWM=y
+CONFIG_PWM_QTI_LPG=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_QCOM_MPM=y
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_QCOM_QFPROM=y
+CONFIG_NVMEM_SPMI_SDAM=y
+CONFIG_SLIMBUS=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_QCOM_KGSL=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_LSM_MMAP_MIN_ADDR=4096
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_HARDENED_USERCOPY_PAGESPAN=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_XZ_DEC=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_MODULE_LOAD_INFO=y
+CONFIG_DEBUG_INFO=y
+CONFIG_FRAME_WARN=2048
+CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SLUB_DEBUG_PANIC_ON=y
+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=-1
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_LOCK_TORTURE_TEST=m
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_UFS_FAULT_INJECTION=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_QCOM_RTB=y
+CONFIG_QCOM_RTB_SEPARATE_CPUS=y
+CONFIG_PREEMPTIRQ_EVENTS=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_LKDTM=m
+CONFIG_ATOMIC64_SELFTEST=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_MEMTEST=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
+CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_TGU=y
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index ec1a5fd..c19d2f2 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -94,6 +94,21 @@
  *	DMA Cache Coherency
  *	===================
  *
+ *	dma_inv_range(start, end)
+ *
+ *		Invalidate (discard) the specified virtual address range.
+ *		Must not write back entries that lie wholly inside the
+ *		range.  If 'start' or 'end' are not cache line aligned,
+ *		the partial lines at the boundaries must be written back.
+ *		- start  - virtual start address
+ *		- end    - virtual end address
+ *
+ *	dma_clean_range(start, end)
+ *
+ *		Clean (write back) the specified virtual address range.
+ *		- start  - virtual start address
+ *		- end    - virtual end address
+ *
  *	dma_flush_range(start, end)
  *
  *		Clean and invalidate the specified virtual address range.
@@ -115,6 +130,8 @@ struct cpu_cache_fns {
 	void (*dma_map_area)(const void *, size_t, int);
 	void (*dma_unmap_area)(const void *, size_t, int);
 
+	void (*dma_inv_range)(const void *, const void *);
+	void (*dma_clean_range)(const void *, const void *);
 	void (*dma_flush_range)(const void *, const void *);
 } __no_randomize_layout;
 
@@ -140,6 +157,8 @@ extern struct cpu_cache_fns cpu_cache;
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
+#define dmac_inv_range			cpu_cache.dma_inv_range
+#define dmac_clean_range		cpu_cache.dma_clean_range
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
@@ -159,6 +178,8 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
+extern void dmac_inv_range(const void *start, const void *end);
+extern void dmac_clean_range(const void *start, const void *end);
 extern void dmac_flush_range(const void *, const void *);
 
 #endif
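The pair of hooks documented above split dmac_flush_range into its two halves. A minimal userspace sketch of the boundary handling the dma_inv_range contract implies (the 64-byte line size is an assumed example value, not read from the hardware):

#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE 64UL	/* assumed example line size */

int main(void)
{
	uintptr_t start = 0x1010, end = 0x10f0;	/* deliberately unaligned */

	/* Partial lines at either boundary must be written back, not
	 * just discarded, or adjacent data sharing the line is lost. */
	if (start & (CACHE_LINE - 1))
		printf("clean+invalidate line at 0x%lx\n",
		       (unsigned long)(start & ~(CACHE_LINE - 1)));
	if (end & (CACHE_LINE - 1))
		printf("clean+invalidate line at 0x%lx\n",
		       (unsigned long)((end - 1) & ~(CACHE_LINE - 1)));
	printf("invalidate the whole lines in between\n");
	return 0;
}

This mirrors what the v7 assembly does with its tst/mcrne sequence before the main invalidate loop.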
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 6821f12..8f36119 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -7,6 +7,7 @@
 #include <linux/mm_types.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-debug.h>
+#include <linux/dma-mapping-fast.h>
 #include <linux/kref.h>
 
 #define ARM_MAPPING_ERROR		(~(dma_addr_t)0x0)
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index 8d1f498..073ce84 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -159,6 +159,8 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
 #define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
 #define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
+#define dmac_inv_range			__glue(_CACHE, _dma_inv_range)
+#define dmac_clean_range		__glue(_CACHE, _dma_clean_range)
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 2cfbc53..0fce96c 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -28,6 +28,7 @@
 #include <asm/byteorder.h>
 #include <asm/memory.h>
 #include <asm-generic/pci_iomap.h>
+#include <linux/msm_rtb.h>
 #include <xen/xen.h>
 
 /*
@@ -61,23 +62,24 @@ void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen);
  * the bus. Rather than special-case the machine, just let the compiler
  * generate the access for CPUs prior to ARMv6.
  */
-#define __raw_readw(a)         (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
-#define __raw_writew(v,a)      ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
+#define __raw_readw_no_log(a)		(__chk_io_ptr(a), \
+					*(volatile unsigned short __force *)(a))
+#define __raw_writew_no_log(v, a)      ((void)(__chk_io_ptr(a), \
+					*(volatile unsigned short __force *)\
+					(a) = (v)))
 #else
 /*
  * When running under a hypervisor, we want to avoid I/O accesses with
  * writeback addressing modes as these incur a significant performance
  * overhead (the address generation must be emulated in software).
  */
-#define __raw_writew __raw_writew
-static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+static inline void __raw_writew_no_log(u16 val, volatile void __iomem *addr)
 {
 	asm volatile("strh %1, %0"
 		     : : "Q" (*(volatile u16 __force *)addr), "r" (val));
 }
 
-#define __raw_readw __raw_readw
-static inline u16 __raw_readw(const volatile void __iomem *addr)
+static inline u16 __raw_readw_no_log(const volatile void __iomem *addr)
 {
 	u16 val;
 	asm volatile("ldrh %0, %1"
@@ -87,22 +89,29 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
 }
 #endif
 
-#define __raw_writeb __raw_writeb
-static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+static inline void __raw_writeb_no_log(u8 val, volatile void __iomem *addr)
 {
 	asm volatile("strb %1, %0"
 		     : : "Qo" (*(volatile u8 __force *)addr), "r" (val));
 }
 
-#define __raw_writel __raw_writel
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+static inline void __raw_writel_no_log(u32 val, volatile void __iomem *addr)
 {
 	asm volatile("str %1, %0"
 		     : : "Qo" (*(volatile u32 __force *)addr), "r" (val));
 }
 
-#define __raw_readb __raw_readb
-static inline u8 __raw_readb(const volatile void __iomem *addr)
+static inline void __raw_writeq_no_log(u64 val, volatile void __iomem *addr)
+{
+	register u64 v asm("r2");	/* strd needs an even/odd register pair */
+
+	v = val;
+
+	asm volatile("strd %1, %0"
+		     : : "Qo" (*(volatile u64 __force *)addr), "r" (v));
+}
+
+static inline u8 __raw_readb_no_log(const volatile void __iomem *addr)
 {
 	u8 val;
 	asm volatile("ldrb %0, %1"
@@ -111,8 +120,7 @@ static inline u8 __raw_readb(const volatile void __iomem *addr)
 	return val;
 }
 
-#define __raw_readl __raw_readl
-static inline u32 __raw_readl(const volatile void __iomem *addr)
+static inline u32 __raw_readl_no_log(const volatile void __iomem *addr)
 {
 	u32 val;
 	asm volatile("ldr %0, %1"
@@ -121,6 +129,58 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
 	return val;
 }
 
+static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
+{
+	register u64 val asm("r2");	/* ldrd needs an even/odd register pair */
+
+	asm volatile("ldrd %1, %0"
+		     : "+Qo" (*(volatile u64 __force *)addr),
+		       "=r" (val));
+	return val;
+}
+
+/*
+ * Some clients cannot, or do not want to, tolerate the overhead of this
+ * logging.  They may use the *_no_log variants directly, but should
+ * carefully consider why logging is not an option before doing so.
+ */
+
+#define __raw_write_logged(v, a, _t)	({ \
+	int _ret; \
+	volatile void __iomem *_a = (a); \
+	void *_addr = (void __force *)(_a); \
+	_ret = uncached_logk(LOGK_WRITEL, _addr); \
+	ETB_WAYPOINT; \
+	__raw_write##_t##_no_log((v), _a); \
+	if (_ret) \
+		LOG_BARRIER; \
+	})
+
+
+#define __raw_writeb(v, a)	__raw_write_logged((v), (a), b)
+#define __raw_writew(v, a)	__raw_write_logged((v), (a), w)
+#define __raw_writel(v, a)	__raw_write_logged((v), (a), l)
+#define __raw_writeq(v, a)	__raw_write_logged((v), (a), q)
+
+#define __raw_read_logged(a, _l, _t)		({ \
+	unsigned _t __a; \
+	const volatile void __iomem *_a = (a); \
+	void *_addr = (void __force *)(_a); \
+	int _ret; \
+	_ret = uncached_logk(LOGK_READL, _addr); \
+	ETB_WAYPOINT; \
+	__a = __raw_read##_l##_no_log(_a);\
+	if (_ret) \
+		LOG_BARRIER; \
+	__a; \
+	})
+
+
+#define __raw_readb(a)		__raw_read_logged((a), b, char)
+#define __raw_readw(a)		__raw_read_logged((a), w, short)
+#define __raw_readl(a)		__raw_read_logged((a), l, int)
+#define __raw_readq(a)		__raw_read_logged((a), q, long long)
+
 /*
  * Architecture ioremap implementation.
  */
@@ -300,18 +360,36 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 					__raw_readw(c)); __r; })
 #define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
 					__raw_readl(c)); __r; })
+#define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64) \
+					__raw_readq(c)); __r; })
+#define readl_relaxed_no_log(c) ({ u32 __r = le32_to_cpu((__force __le32) \
+					__raw_readl_no_log(c)); __r; })
+#define readq_relaxed_no_log(c) ({ u64 __r = le64_to_cpu((__force __le64) \
+					__raw_readq_no_log(c)); __r; })
 
-#define writeb_relaxed(v,c)	__raw_writeb(v,c)
-#define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
-#define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)
+
+#define writeb_relaxed(v, c)	__raw_writeb(v, c)
+#define writew_relaxed(v, c)	__raw_writew((__force u16) cpu_to_le16(v), c)
+#define writel_relaxed(v, c)	__raw_writel((__force u32) cpu_to_le32(v), c)
+#define writeq_relaxed(v, c)    __raw_writeq((__force u64) cpu_to_le64(v), c)
+#define writeb_relaxed_no_log(v, c)    ((void)__raw_writeb_no_log((v), (c)))
+#define writew_relaxed_no_log(v, c) __raw_writew_no_log((__force u16) \
+					cpu_to_le16(v), c)
+#define writel_relaxed_no_log(v, c) __raw_writel_no_log((__force u32) \
+					cpu_to_le32(v), c)
+#define writeq_relaxed_no_log(v, c) __raw_writeq_no_log((__force u64) \
+					cpu_to_le64(v), c)
 
 #define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
 #define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
 #define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+#define readq(c)		({ u64 __v = readq_relaxed(c); \
+					__iormb(); __v; })
 
 #define writeb(v,c)		({ __iowmb(); writeb_relaxed(v,c); })
 #define writew(v,c)		({ __iowmb(); writew_relaxed(v,c); })
 #define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
+#define writeq(v, c)		({ __iowmb(); writeq_relaxed(v, c); })
 
 #define readsb(p,d,l)		__raw_readsb(p,d,l)
 #define readsw(p,d,l)		__raw_readsw(p,d,l)
@@ -419,6 +497,31 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
 
 void iounmap(volatile void __iomem *iomem_cookie);
 #define iounmap iounmap
+/*
+ * io{read,write}{8,16,32,64} macros
+ */
+#ifndef ioread8
+#define ioread8(p)	({ unsigned int __v = __raw_readb(p); __iormb(); __v; })
+#define ioread16(p)	({ unsigned int __v = le16_to_cpu((__force __le16)\
+				__raw_readw(p)); __iormb(); __v; })
+#define ioread32(p)	({ unsigned int __v = le32_to_cpu((__force __le32)\
+				__raw_readl(p)); __iormb(); __v; })
+#define ioread64(p)	({ u64 __v = le64_to_cpu((__force __le64)\
+				__raw_readq(p)); __iormb(); __v; })
+
+#define ioread64be(p)	({ u64 __v = be64_to_cpu((__force __be64)\
+				__raw_readq(p)); __iormb(); __v; })
+
+#define iowrite8(v, p)	({ __iowmb(); __raw_writeb(v, p); })
+#define iowrite16(v, p)	({ __iowmb(); __raw_writew((__force __u16)\
+				cpu_to_le16(v), p); })
+#define iowrite32(v, p)	({ __iowmb(); __raw_writel((__force __u32)\
+				cpu_to_le32(v), p); })
+#define iowrite64(v, p)	({ __iowmb(); __raw_writeq((__force __u64)\
+				cpu_to_le64(v), p); })
+
+#define iowrite64be(v, p) ({ __iowmb(); __raw_writeq((__force __u64)\
+				cpu_to_be64(v), p); })
 
 void *arch_memremap_wb(phys_addr_t phys_addr, size_t size);
 #define arch_memremap_wb arch_memremap_wb
@@ -440,6 +543,7 @@ extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
 #define ioport_unmap ioport_unmap
 extern void ioport_unmap(void __iomem *addr);
 #endif
+#endif
 
 struct pci_dev;
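The logged accessors added above all follow the same shape: record the address via uncached_logk(), then perform the raw access. A self-contained userspace analogue of that wrapper pattern (log_access() is a hypothetical stand-in for the RTB logger, not a kernel API):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for uncached_logk()/RTB logging. */
static void log_access(const char *kind, volatile void *addr)
{
	printf("%s %p\n", kind, (void *)addr);
}

static inline uint32_t readl_logged(volatile uint32_t *addr)
{
	log_access("R", addr);	/* log first, like __raw_read_logged */
	return *addr;
}

static inline void writel_logged(uint32_t val, volatile uint32_t *addr)
{
	log_access("W", addr);	/* log first, like __raw_write_logged */
	*addr = val;
}

int main(void)
{
	volatile uint32_t reg = 0;

	writel_logged(0xdead, &reg);
	printf("read back 0x%x\n", readl_logged(&reg));
	return 0;
}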
 
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index fd6cde2..871fa50 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -658,13 +658,22 @@ int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 }
 
 static void reset_coproc_regs(struct kvm_vcpu *vcpu,
-			      const struct coproc_reg *table, size_t num)
+			      const struct coproc_reg *table, size_t num,
+			      unsigned long *bmap)
 {
 	unsigned long i;
 
 	for (i = 0; i < num; i++)
-		if (table[i].reset)
+		if (table[i].reset) {
+			int reg = table[i].reg;
+
 			table[i].reset(vcpu, &table[i]);
+			if (reg > 0 && reg < NR_CP15_REGS) {
+				set_bit(reg, bmap);
+				if (table[i].is_64bit)
+					set_bit(reg + 1, bmap);
+			}
+		}
 }
 
 static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
@@ -1439,17 +1448,15 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
 {
 	size_t num;
 	const struct coproc_reg *table;
-
-	/* Catch someone adding a register without putting in reset entry. */
-	memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));
+	DECLARE_BITMAP(bmap, NR_CP15_REGS) = { 0, };
 
 	/* Generic chip reset first (so target could override). */
-	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
+	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap);
 
 	table = get_target_table(vcpu->arch.target, &num);
-	reset_coproc_regs(vcpu, table, num);
+	reset_coproc_regs(vcpu, table, num, bmap);
 
 	for (num = 1; num < NR_CP15_REGS; num++)
-		WARN(vcpu_cp15(vcpu, num) == 0x42424242,
+		WARN(!test_bit(num, bmap),
 		     "Didn't reset vcpu_cp15(vcpu, %zi)", num);
 }
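The hunk above replaces the 0x42 poison-and-check scheme with a bitmap of registers whose reset hooks actually ran. A compact userspace sketch of that bookkeeping (NR_REGS and the bit helpers are simplified stand-ins for the kernel's bitmap API):

#include <stdio.h>

#define NR_REGS 16
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define DECLARE_BITMAP(name, bits) \
	unsigned long name[((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG]

static void set_bit(int n, unsigned long *map)
{
	map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
}

static int test_bit(int n, const unsigned long *map)
{
	return !!(map[n / BITS_PER_LONG] & (1UL << (n % BITS_PER_LONG)));
}

int main(void)
{
	DECLARE_BITMAP(bmap, NR_REGS) = { 0 };
	int reg;

	set_bit(3, bmap);	/* pretend register 3 had a reset hook */
	for (reg = 1; reg < NR_REGS; reg++)
		if (!test_bit(reg, bmap))
			printf("Didn't reset reg %d\n", reg);
	return 0;
}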
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 3c42bf9..708931b4 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -704,6 +704,46 @@ static struct resource da8xx_gpio_resources[] = {
 	},
 	{ /* interrupt */
 		.start	= IRQ_DA8XX_GPIO0,
+		.end	= IRQ_DA8XX_GPIO0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DA8XX_GPIO1,
+		.end	= IRQ_DA8XX_GPIO1,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DA8XX_GPIO2,
+		.end	= IRQ_DA8XX_GPIO2,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DA8XX_GPIO3,
+		.end	= IRQ_DA8XX_GPIO3,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DA8XX_GPIO4,
+		.end	= IRQ_DA8XX_GPIO4,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DA8XX_GPIO5,
+		.end	= IRQ_DA8XX_GPIO5,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DA8XX_GPIO6,
+		.end	= IRQ_DA8XX_GPIO6,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DA8XX_GPIO7,
+		.end	= IRQ_DA8XX_GPIO7,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DA8XX_GPIO8,
 		.end	= IRQ_DA8XX_GPIO8,
 		.flags	= IORESOURCE_IRQ,
 	},
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 9f7d38d..2b0f5d9 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -548,6 +548,36 @@ static struct resource dm355_gpio_resources[] = {
 	},
 	{	/* interrupt */
 		.start	= IRQ_DM355_GPIOBNK0,
+		.end	= IRQ_DM355_GPIOBNK0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DM355_GPIOBNK1,
+		.end	= IRQ_DM355_GPIOBNK1,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DM355_GPIOBNK2,
+		.end	= IRQ_DM355_GPIOBNK2,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DM355_GPIOBNK3,
+		.end	= IRQ_DM355_GPIOBNK3,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DM355_GPIOBNK4,
+		.end	= IRQ_DM355_GPIOBNK4,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DM355_GPIOBNK5,
+		.end	= IRQ_DM355_GPIOBNK5,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DM355_GPIOBNK6,
 		.end	= IRQ_DM355_GPIOBNK6,
 		.flags	= IORESOURCE_IRQ,
 	},
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index abcf2a5..4266591 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -267,6 +267,41 @@ static struct resource dm365_gpio_resources[] = {
 	},
 	{	/* interrupt */
 		.start	= IRQ_DM365_GPIO0,
+		.end	= IRQ_DM365_GPIO0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DM365_GPIO1,
+		.end	= IRQ_DM365_GPIO1,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DM365_GPIO2,
+		.end	= IRQ_DM365_GPIO2,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DM365_GPIO3,
+		.end	= IRQ_DM365_GPIO3,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DM365_GPIO4,
+		.end	= IRQ_DM365_GPIO4,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DM365_GPIO5,
+		.end	= IRQ_DM365_GPIO5,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DM365_GPIO6,
+		.end	= IRQ_DM365_GPIO6,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DM365_GPIO7,
 		.end	= IRQ_DM365_GPIO7,
 		.flags	= IORESOURCE_IRQ,
 	},
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 0720da7..de1ec6d 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -492,6 +492,26 @@ static struct resource dm644_gpio_resources[] = {
 	},
 	{	/* interrupt */
 		.start	= IRQ_GPIOBNK0,
+		.end	= IRQ_GPIOBNK0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_GPIOBNK1,
+		.end	= IRQ_GPIOBNK1,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_GPIOBNK2,
+		.end	= IRQ_GPIOBNK2,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_GPIOBNK3,
+		.end	= IRQ_GPIOBNK3,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_GPIOBNK4,
 		.end	= IRQ_GPIOBNK4,
 		.flags	= IORESOURCE_IRQ,
 	},
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 6bd2ed0..d9b93e2 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -442,6 +442,16 @@ static struct resource dm646x_gpio_resources[] = {
 	},
 	{	/* interrupt */
 		.start	= IRQ_DM646X_GPIOBNK0,
+		.end	= IRQ_DM646X_GPIOBNK0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DM646X_GPIOBNK1,
+		.end	= IRQ_DM646X_GPIOBNK1,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DM646X_GPIOBNK2,
 		.end	= IRQ_DM646X_GPIOBNK2,
 		.flags	= IORESOURCE_IRQ,
 	},
diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
index ddc2763..017c792 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
@@ -135,6 +135,8 @@
 	orr r11, r11, r13			@ mask all requested interrupts
 	str r11, [r12, #OMAP1510_GPIO_INT_MASK]
 
+	str r13, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack all requested interrupts
+
 	ands r10, r13, #KEYBRD_CLK_MASK		@ extract keyboard status - set?
 	beq hksw				@ no - try next source
 
@@ -142,7 +144,6 @@
 	@@@@@@@@@@@@@@@@@@@@@@
 	@ Keyboard clock FIQ mode interrupt handler
 	@ r10 now contains KEYBRD_CLK_MASK, use it
-	str r10, [r12, #OMAP1510_GPIO_INT_STATUS]	@ ack the interrupt
 	bic r11, r11, r10				@ unmask it
 	str r11, [r12, #OMAP1510_GPIO_INT_MASK]
 
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
index b0dc7dd..b8ba763 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq.c
+++ b/arch/arm/mach-omap1/ams-delta-fiq.c
@@ -73,9 +73,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id)
 			 * interrupts default to since commit 80ac93c27441
 			 * requires interrupt already acked and unmasked.
 			 */
-			if (irq_chip->irq_ack)
-				irq_chip->irq_ack(d);
-			if (irq_chip->irq_unmask)
+			if (!WARN_ON_ONCE(!irq_chip->irq_unmask))
 				irq_chip->irq_unmask(d);
 		}
 		for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++)
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index b226c8a..7074cfd 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -131,6 +131,9 @@ static int __init omap4_sram_init(void)
 	struct device_node *np;
 	struct gen_pool *sram_pool;
 
+	if (!soc_is_omap44xx() && !soc_is_omap54xx())
+		return 0;
+
 	np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
 	if (!np)
 		pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
index 9ded7bf..3b8fe01 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
@@ -946,7 +946,8 @@ static struct omap_hwmod_class_sysconfig am33xx_timer_sysc = {
 	.rev_offs	= 0x0000,
 	.sysc_offs	= 0x0010,
 	.syss_offs	= 0x0014,
-	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+	.sysc_flags	= SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+			  SYSC_HAS_RESET_STATUS,
 	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
 			  SIDLE_SMART_WKUP),
 	.sysc_fields	= &omap_hwmod_sysc_type2,
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index e6c7061..3547f32 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -385,7 +385,8 @@ static struct omap_hwmod dra7xx_dcan2_hwmod = {
 static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = {
 	.rev_offs	= 0x0,
 	.sysc_offs	= 0x4,
-	.sysc_flags	= SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET,
+	.sysc_flags	= SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+			  SYSC_HAS_RESET_STATUS,
 	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
 	.sysc_fields	= &omap_hwmod_sysc_type2,
 };
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index ca03af8..ddf96ad 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -77,83 +77,6 @@ int omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused)
 	return 0;
 }
 
-/*
- * This API is to be called during init to set the various voltage
- * domains to the voltage as per the opp table. Typically we boot up
- * at the nominal voltage. So this function finds out the rate of
- * the clock associated with the voltage domain, finds out the correct
- * opp entry and sets the voltage domain to the voltage specified
- * in the opp entry
- */
-static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
-					 const char *oh_name)
-{
-	struct voltagedomain *voltdm;
-	struct clk *clk;
-	struct dev_pm_opp *opp;
-	unsigned long freq, bootup_volt;
-	struct device *dev;
-
-	if (!vdd_name || !clk_name || !oh_name) {
-		pr_err("%s: invalid parameters\n", __func__);
-		goto exit;
-	}
-
-	if (!strncmp(oh_name, "mpu", 3))
-		/* 
-		 * All current OMAPs share voltage rail and clock
-		 * source, so CPU0 is used to represent the MPU-SS.
-		 */
-		dev = get_cpu_device(0);
-	else
-		dev = omap_device_get_by_hwmod_name(oh_name);
-
-	if (IS_ERR(dev)) {
-		pr_err("%s: Unable to get dev pointer for hwmod %s\n",
-			__func__, oh_name);
-		goto exit;
-	}
-
-	voltdm = voltdm_lookup(vdd_name);
-	if (!voltdm) {
-		pr_err("%s: unable to get vdd pointer for vdd_%s\n",
-			__func__, vdd_name);
-		goto exit;
-	}
-
-	clk =  clk_get(NULL, clk_name);
-	if (IS_ERR(clk)) {
-		pr_err("%s: unable to get clk %s\n", __func__, clk_name);
-		goto exit;
-	}
-
-	freq = clk_get_rate(clk);
-	clk_put(clk);
-
-	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
-	if (IS_ERR(opp)) {
-		pr_err("%s: unable to find boot up OPP for vdd_%s\n",
-			__func__, vdd_name);
-		goto exit;
-	}
-
-	bootup_volt = dev_pm_opp_get_voltage(opp);
-	dev_pm_opp_put(opp);
-
-	if (!bootup_volt) {
-		pr_err("%s: unable to find voltage corresponding to the bootup OPP for vdd_%s\n",
-		       __func__, vdd_name);
-		goto exit;
-	}
-
-	voltdm_scale(voltdm, bootup_volt);
-	return 0;
-
-exit:
-	pr_err("%s: unable to set vdd_%s\n", __func__, vdd_name);
-	return -EINVAL;
-}
-
 #ifdef CONFIG_SUSPEND
 static int omap_pm_enter(suspend_state_t suspend_state)
 {
@@ -211,25 +134,6 @@ void omap_common_suspend_init(void *pm_suspend)
 }
 #endif /* CONFIG_SUSPEND */
 
-static void __init omap3_init_voltages(void)
-{
-	if (!soc_is_omap34xx())
-		return;
-
-	omap2_set_init_voltage("mpu_iva", "dpll1_ck", "mpu");
-	omap2_set_init_voltage("core", "l3_ick", "l3_main");
-}
-
-static void __init omap4_init_voltages(void)
-{
-	if (!soc_is_omap44xx())
-		return;
-
-	omap2_set_init_voltage("mpu", "dpll_mpu_ck", "mpu");
-	omap2_set_init_voltage("core", "l3_div_ck", "l3_main_1");
-	omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", "iva");
-}
-
 int __maybe_unused omap_pm_nop_init(void)
 {
 	return 0;
@@ -249,10 +153,6 @@ int __init omap2_common_pm_late_init(void)
 	omap4_twl_init();
 	omap_voltage_late_init();
 
-	/* Initialize the voltages */
-	omap3_init_voltages();
-	omap4_init_voltages();
-
 	/* Smartreflex device init */
 	omap_devinit_smartreflex();
 
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index 46ed10a..a66a9ad1 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -27,4 +27,27 @@
 	bool "Enable support for MDM9615"
 	select CLKSRC_QCOM
 
+config ARCH_BENGAL
+	bool "Enable Support for Qualcomm Technologies, Inc. BENGAL"
+	select COMMON_CLK_QCOM
+	select CPU_V7
+	select HAVE_CLK
+	select HAVE_CLK_PREPARE
+	select PM_OPP
+	select SOC_BUS
+	select THERMAL_WRITABLE_TRIPS
+	select ARM_GIC_V3
+	select ARM_AMBA
+	select SPARSE_IRQ
+	select MULTI_IRQ_HANDLER
+	select HAVE_ARM_ARCH_TIMER
+	select COMMON_CLK
+	select PINCTRL_MSM
+	select MSM_PM if PM
+	select CPU_FREQ
+	select PM_DEVFREQ
+	help
+	  This enables support for the BENGAL chipset. If you do not
+	  wish to build a kernel that runs on this chipset, say 'N' here.
+
 endif
diff --git a/arch/arm/mach-qcom/Makefile b/arch/arm/mach-qcom/Makefile
index 12878e9..685e7b8 100644
--- a/arch/arm/mach-qcom/Makefile
+++ b/arch/arm/mach-qcom/Makefile
@@ -1 +1,3 @@
+obj-$(CONFIG_USE_OF) += board-dt.o
 obj-$(CONFIG_SMP)	+= platsmp.o
+obj-$(CONFIG_ARCH_BENGAL) += board-bengal.o
diff --git a/arch/arm/mach-qcom/board-bengal.c b/arch/arm/mach-qcom/board-bengal.c
new file mode 100644
index 0000000..202994c
--- /dev/null
+++ b/arch/arm/mach-qcom/board-bengal.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <asm/mach/map.h>
+#include <asm/mach/arch.h>
+#include "board-dt.h"
+
+static const char * const bengal_dt_match[] __initconst = {
+	"qcom,bengal",
+	NULL
+};
+
+static void __init bengal_init(void)
+{
+	board_dt_populate(NULL);
+}
+
+DT_MACHINE_START(BENGAL,
+	"Qualcomm Technologies, Inc. BENGAL (Flattened Device Tree)")
+	.init_machine		= bengal_init,
+	.dt_compat		= bengal_dt_match,
+MACHINE_END
diff --git a/arch/arm/mach-qcom/board-dt.c b/arch/arm/mach-qcom/board-dt.c
new file mode 100644
index 0000000..866cb74
--- /dev/null
+++ b/arch/arm/mach-qcom/board-dt.c
@@ -0,0 +1,28 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+
+#include "board-dt.h"
+
+void __init board_dt_populate(struct of_dev_auxdata *adata)
+{
+	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+
+	/* Explicitly parent the /soc devices to the root node to preserve
+	 * the kernel ABI (sysfs structure, etc.) until userspace is updated.
+	 */
+	of_platform_populate(of_find_node_by_path("/soc"),
+			     of_default_bus_match_table, adata, NULL);
+}
diff --git a/arch/arm/mach-qcom/board-dt.h b/arch/arm/mach-qcom/board-dt.h
new file mode 100644
index 0000000..0f36e04
--- /dev/null
+++ b/arch/arm/mach-qcom/board-dt.h
@@ -0,0 +1,15 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of_platform.h>
+
+void __init board_dt_populate(struct of_dev_auxdata *adata);
diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
index caa6d5f..b296ada 100644
--- a/arch/arm/mach-zynq/platsmp.c
+++ b/arch/arm/mach-zynq/platsmp.c
@@ -65,7 +65,7 @@ int zynq_cpun_start(u32 address, int cpu)
 			* 0x4: Jump by mov instruction
 			* 0x8: Jumping address
 			*/
-			memcpy((__force void *)zero, &zynq_secondary_trampoline,
+			memcpy_toio(zero, &zynq_secondary_trampoline,
 							trampoline_size);
 			writel(address, zero + trampoline_size);
 
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 2149b47..dafaf03 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -350,7 +350,7 @@
  *	- start   - virtual start address of region
  *	- end     - virtual end address of region
  */
-v7_dma_inv_range:
+ENTRY(v7_dma_inv_range)
 	dcache_line_size r2, r3
 	sub	r3, r2, #1
 	tst	r0, r3
@@ -380,7 +380,7 @@
  *	- start   - virtual start address of region
  *	- end     - virtual end address of region
  */
-v7_dma_clean_range:
+ENTRY(v7_dma_clean_range)
 	dcache_line_size r2, r3
 	sub	r3, r2, #1
 	bic	r0, r0, r3
@@ -466,6 +466,8 @@
 
 	globl_equ	b15_dma_map_area,		v7_dma_map_area
 	globl_equ	b15_dma_unmap_area,		v7_dma_unmap_area
+	globl_equ	b15_dma_inv_range,		v7_dma_inv_range
+	globl_equ	b15_dma_clean_range,		v7_dma_clean_range
 	globl_equ	b15_dma_flush_range,		v7_dma_flush_range
 
 	define_cache_functions b15
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 8211cf4..e6f0191 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -375,10 +375,10 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
 			prot, caller);
 }
 
-static void __dma_free_remap(void *cpu_addr, size_t size)
+static void __dma_free_remap(void *cpu_addr, size_t size, bool no_warn)
 {
 	dma_common_free_remap(cpu_addr, size,
-			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
+			VM_ARM_DMA_CONSISTENT | VM_USERMAP, no_warn);
 }
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
@@ -624,7 +624,7 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
 {
 	if (want_vaddr) {
 		if (PageHighMem(page))
-			__dma_free_remap(cpu_addr, size);
+			__dma_free_remap(cpu_addr, size, true);
 		else
 			__dma_remap(page, size, PAGE_KERNEL);
 	}
@@ -716,7 +716,7 @@ static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
 static void remap_allocator_free(struct arm_dma_free_args *args)
 {
 	if (args->want_vaddr)
-		__dma_free_remap(args->cpu_addr, args->size);
+		__dma_free_remap(args->cpu_addr, args->size, false);
 
 	__dma_free_buffer(args->page, args->size);
 }
@@ -1648,7 +1648,7 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) {
 		dma_common_free_remap(cpu_addr, size,
-			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
+			VM_ARM_DMA_CONSISTENT | VM_USERMAP, true);
 	}
 
 	__iommu_remove_mapping(dev, handle, size);
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 3232afb..a9ee0d9 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -216,7 +216,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 {
 	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
 
-	if (fsr & FSR_WRITE)
+	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
 		mask = VM_WRITE;
 	if (fsr & FSR_LNX_PF)
 		mask = VM_EXEC;
@@ -287,7 +287,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
-	if (fsr & FSR_WRITE)
+	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
 		flags |= FAULT_FLAG_WRITE;
 
 	/*
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index c063708..9ecc209 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -6,6 +6,7 @@
  * Fault status register encodings.  We steal bit 31 for our own purposes.
  */
 #define FSR_LNX_PF		(1 << 31)
+#define FSR_CM			(1 << 13)
 #define FSR_WRITE		(1 << 11)
 #define FSR_FS4			(1 << 10)
 #define FSR_FS3_0		(15)
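FSR_CM is the DFSR "cache maintenance" bit: a fault raised by a cache maintenance operation is reported as a write, but must not demand write permission on the VMA. A tiny runnable demo of the predicate the two fault.c hunks now use:

#include <stdio.h>

#define FSR_WRITE (1 << 11)
#define FSR_CM    (1 << 13)

static int is_write_fault(unsigned int fsr)
{
	/* cache maintenance faults look like writes but need only read */
	return (fsr & FSR_WRITE) && !(fsr & FSR_CM);
}

int main(void)
{
	printf("%d\n", is_write_fault(FSR_WRITE));		/* 1 */
	printf("%d\n", is_write_fault(FSR_WRITE | FSR_CM));	/* 0 */
	return 0;
}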
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 0cc8e04..e1d330a2 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -196,6 +196,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
+	phys_addr_t addr = __pfn_to_phys(pfn);
+
+	if (__phys_to_pfn(addr) != pfn)
+		return 0;
+
 	return memblock_is_map_memory(__pfn_to_phys(pfn));
 }
 EXPORT_SYMBOL(pfn_valid);
@@ -713,7 +718,8 @@ static void update_sections_early(struct section_perm perms[], int n)
 		if (t->flags & PF_KTHREAD)
 			continue;
 		for_each_thread(t, s)
-			set_section_perms(perms, n, true, s->mm);
+			if (s->mm)
+				set_section_perms(perms, n, true, s->mm);
 	}
 	set_section_perms(perms, n, true, current->active_mm);
 	set_section_perms(perms, n, true, &init_mm);
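The pfn_valid() change rejects pfns whose physical address does not survive a round trip through __pfn_to_phys(), which catches values that overflow phys_addr_t. A userspace demonstration, assuming a 32-bit phys_addr_t (the !LPAE case):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t phys_addr_t;	/* assumed: 32-bit physical addresses */
#define PAGE_SHIFT 12

static phys_addr_t pfn_to_phys(unsigned long pfn)
{
	return (phys_addr_t)pfn << PAGE_SHIFT;	/* truncates large pfns */
}

static unsigned long phys_to_pfn(phys_addr_t addr)
{
	return addr >> PAGE_SHIFT;
}

int main(void)
{
	unsigned long bogus = 0x500000;	/* beyond the 32-bit phys range */

	if (phys_to_pfn(pfn_to_phys(bogus)) != bogus)
		printf("pfn 0x%lx rejected\n", bogus);
	return 0;
}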
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index f866870..0b94b67 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -18,8 +18,9 @@
 	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
 
 /* gap between mmap and stack */
-#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((TASK_SIZE)/6*5)
+#define MIN_GAP		(128*1024*1024UL)
+#define MAX_GAP		((STACK_TOP)/6*5)
+#define STACK_RND_MASK	(0x7ff >> (PAGE_SHIFT - 12))
 
 static int mmap_is_legacy(struct rlimit *rlim_stack)
 {
@@ -35,13 +36,22 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
 static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
 {
 	unsigned long gap = rlim_stack->rlim_cur;
+	unsigned long pad = stack_guard_gap;
+
+	/* Account for stack randomization if necessary */
+	if (current->flags & PF_RANDOMIZE)
+		pad += (STACK_RND_MASK << PAGE_SHIFT);
+
+	/* Values close to RLIM_INFINITY can overflow. */
+	if (gap + pad > gap)
+		gap += pad;
 
 	if (gap < MIN_GAP)
 		gap = MIN_GAP;
 	else if (gap > MAX_GAP)
 		gap = MAX_GAP;
 
-	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
+	return PAGE_ALIGN(STACK_TOP - gap - rnd);
 }
 
 /*
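mmap_base() now reserves headroom for stack randomization and skips the pad when the rlimit is close enough to RLIM_INFINITY that adding it would wrap. A simplified userspace sketch of that computation (stack_guard_gap and the MAX_GAP clamp are omitted for brevity):

#include <stdio.h>

#define PAGE_SHIFT	12
#define MIN_GAP		(128UL * 1024 * 1024)
#define STACK_RND_MASK	(0x7ffUL >> (PAGE_SHIFT - 12))
#define RLIM_INFINITY	(~0UL)

static unsigned long stack_gap(unsigned long rlim_cur, int randomize)
{
	unsigned long gap = rlim_cur;
	unsigned long pad = 0;

	if (randomize)
		pad += STACK_RND_MASK << PAGE_SHIFT;
	if (gap + pad > gap)	/* values near RLIM_INFINITY would wrap */
		gap += pad;
	return gap < MIN_GAP ? MIN_GAP : gap;
}

int main(void)
{
	printf("0x%lx\n", stack_gap(8UL << 20, 1));	/* clamped to MIN_GAP */
	printf("0x%lx\n", stack_gap(RLIM_INFINITY, 1));	/* pad skipped */
	return 0;
}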
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e46a6a4..70e560c 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1175,6 +1175,22 @@ void __init adjust_lowmem_bounds(void)
 	 */
 	vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
 
+	/*
+	 * The first usable region must be PMD aligned. Mark its start
+	 * as MEMBLOCK_NOMAP if it isn't
+	 */
+	for_each_memblock(memory, reg) {
+		if (!memblock_is_nomap(reg)) {
+			if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
+				phys_addr_t len;
+
+				len = round_up(reg->base, PMD_SIZE) - reg->base;
+				memblock_mark_nomap(reg->base, len);
+			}
+			break;
+		}
+	}
+
 	for_each_memblock(memory, reg) {
 		phys_addr_t block_start = reg->base;
 		phys_addr_t block_end = reg->base + reg->size;
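The new loop in adjust_lowmem_bounds() marks the unaligned head of the first usable memblock as NOMAP so the lowmem mapping starts on a PMD boundary. The arithmetic, demonstrated in userspace with the usual 2 MiB section size:

#include <stdio.h>

#define PMD_SIZE (2UL << 20)	/* 2 MiB sections */

static unsigned long round_up_pmd(unsigned long base)
{
	return (base + PMD_SIZE - 1) & ~(PMD_SIZE - 1);
}

int main(void)
{
	unsigned long base = 0x80080000;	/* not 2 MiB aligned */
	unsigned long len = round_up_pmd(base) - base;

	/* these bytes would be memblock_mark_nomap()'d */
	printf("mark NOMAP: base=0x%lx len=0x%lx\n", base, len);
	return 0;
}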
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 5461d58..53d59cd 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -335,6 +335,8 @@
 	.long	\name\()_flush_kern_dcache_area
 	.long	\name\()_dma_map_area
 	.long	\name\()_dma_unmap_area
+	.long   \name\()_dma_inv_range
+	.long   \name\()_dma_clean_range
 	.long	\name\()_dma_flush_range
 	.size	\name\()_cache_fns, . - \name\()_cache_fns
 .endm
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 054b491..70e8b7d 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -30,6 +30,9 @@ EXPORT_SYMBOL(__cpuc_flush_user_all);
 EXPORT_SYMBOL(__cpuc_flush_user_range);
 EXPORT_SYMBOL(__cpuc_coherent_kern_range);
 EXPORT_SYMBOL(__cpuc_flush_dcache_area);
+EXPORT_SYMBOL(dmac_inv_range);
+EXPORT_SYMBOL(dmac_clean_range);
+EXPORT_SYMBOL(dmac_flush_range);
 #else
 EXPORT_SYMBOL(cpu_cache);
 #endif
diff --git a/arch/arm/plat-samsung/watchdog-reset.c b/arch/arm/plat-samsung/watchdog-reset.c
index ce42cc6..71d85ff 100644
--- a/arch/arm/plat-samsung/watchdog-reset.c
+++ b/arch/arm/plat-samsung/watchdog-reset.c
@@ -62,6 +62,7 @@ void samsung_wdt_reset(void)
 #ifdef CONFIG_OF
 static const struct of_device_id s3c2410_wdt_match[] = {
 	{ .compatible = "samsung,s3c2410-wdt" },
+	{ .compatible = "samsung,s3c6410-wdt" },
 	{},
 };
 
diff --git a/arch/arm/xen/efi.c b/arch/arm/xen/efi.c
index b4d7895..bc9a37b 100644
--- a/arch/arm/xen/efi.c
+++ b/arch/arm/xen/efi.c
@@ -31,7 +31,9 @@ void __init xen_efi_runtime_setup(void)
 	efi.get_variable             = xen_efi_get_variable;
 	efi.get_next_variable        = xen_efi_get_next_variable;
 	efi.set_variable             = xen_efi_set_variable;
+	efi.set_variable_nonblocking = xen_efi_set_variable;
 	efi.query_variable_info      = xen_efi_query_variable_info;
+	efi.query_variable_info_nonblocking = xen_efi_query_variable_info;
 	efi.update_capsule           = xen_efi_update_capsule;
 	efi.query_capsule_caps       = xen_efi_query_capsule_caps;
 	efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index bf0109c..a465396 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -85,6 +85,7 @@
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CLOCKEVENTS_BROADCAST
 	select GENERIC_CPU_AUTOPROBE
+	select GENERIC_CPU_VULNERABILITIES
 	select GENERIC_EARLY_IOREMAP
 	select GENERIC_IDLE_POLL_SETUP
 	select GENERIC_IRQ_MULTI_HANDLER
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 8e65950..f91b7d1 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -166,6 +166,14 @@
 	  This enables support for the LITO chipset. If you do not
 	  wish to build a kernel that runs on this chipset, say 'N' here.
 
+config ARCH_LAGOON
+	bool "Enable Support for Qualcomm Technologies, Inc. LAGOON"
+	depends on ARCH_QCOM
+	select COMMON_CLK_QCOM
+	help
+	  This enables support for the LAGOON chipset. If you do not
+	  wish to build a kernel that runs on this chipset, say 'N' here.
+
 config ARCH_BENGAL
 	bool "Enable Support for Qualcomm Technologies, Inc. BENGAL"
 	depends on ARCH_QCOM
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index 5089aa6..9a1ea8a 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -140,6 +140,7 @@
 			tx-fifo-depth = <16384>;
 			rx-fifo-depth = <16384>;
 			snps,multicast-filter-bins = <256>;
+			altr,sysmgr-syscon = <&sysmgr 0x44 0>;
 			status = "disabled";
 		};
 
@@ -156,6 +157,7 @@
 			tx-fifo-depth = <16384>;
 			rx-fifo-depth = <16384>;
 			snps,multicast-filter-bins = <256>;
+			altr,sysmgr-syscon = <&sysmgr 0x48 0>;
 			status = "disabled";
 		};
 
@@ -172,6 +174,7 @@
 			tx-fifo-depth = <16384>;
 			rx-fifo-depth = <16384>;
 			snps,multicast-filter-bins = <256>;
+			altr,sysmgr-syscon = <&sysmgr 0x4c 0>;
 			status = "disabled";
 		};
 
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index c142169..e9147e3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -40,6 +40,7 @@
 		pinctrl-0 = <&usb30_host_drv>;
 		regulator-name = "vcc_host_5v";
 		regulator-always-on;
+		regulator-boot-on;
 		vin-supply = <&vcc_sys>;
 	};
 
@@ -50,6 +51,7 @@
 		pinctrl-0 = <&usb20_host_drv>;
 		regulator-name = "vcc_host1_5v";
 		regulator-always-on;
+		regulator-boot-on;
 		vin-supply = <&vcc_sys>;
 	};
 
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index e065394..92186ed 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -708,6 +708,7 @@
 			 <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
 		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
 		fifo-depth = <0x100>;
+		max-frequency = <150000000>;
 		status = "disabled";
 	};
 
@@ -719,6 +720,7 @@
 			 <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
 		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
 		fifo-depth = <0x100>;
+		max-frequency = <150000000>;
 		status = "disabled";
 	};
 
@@ -730,6 +732,7 @@
 			 <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
 		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
 		fifo-depth = <0x100>;
+		max-frequency = <150000000>;
 		status = "disabled";
 	};
 
diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig
index 943f52c..713be11 100644
--- a/arch/arm64/configs/cuttlefish_defconfig
+++ b/arch/arm64/configs/cuttlefish_defconfig
@@ -30,6 +30,7 @@
 # CONFIG_FHANDLE is not set
 CONFIG_KALLSYMS_ALL=y
 CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
 # CONFIG_RSEQ is not set
 CONFIG_EMBEDDED=y
 # CONFIG_VM_EVENT_COUNTERS is not set
@@ -191,6 +192,7 @@
 CONFIG_NET_CLS_ACT=y
 CONFIG_VSOCKETS=y
 CONFIG_VIRTIO_VSOCKETS=y
+CONFIG_BPF_JIT=y
 CONFIG_CFG80211=y
 # CONFIG_CFG80211_DEFAULT_PS is not set
 # CONFIG_CFG80211_CRDA_SUPPORT is not set
diff --git a/arch/arm64/configs/vendor/bengal-perf_defconfig b/arch/arm64/configs/vendor/bengal-perf_defconfig
new file mode 100644
index 0000000..463d4e7
--- /dev/null
+++ b/arch/arm64/configs/vendor/bengal-perf_defconfig
@@ -0,0 +1,600 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+# CONFIG_FHANDLE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_PROFILING=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_BENGAL=y
+CONFIG_PCI=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_SECCOMP=y
+# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
+CONFIG_HARDEN_BRANCH_PREDICTOR=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_ENERGY_MODEL=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TIMES=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_ARM_QCOM_CPUFREQ_HW=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_PANIC_ON_REFCOUNT_ERROR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_IP_SCTP=y
+CONFIG_L2TP=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_NETEM=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_QRTR=y
+CONFIG_QRTR_SMD=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_CFG80211=y
+CONFIG_RFKILL=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_WCD_IRQ=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_ZRAM_DEDUP=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_RMNET=y
+CONFIG_PHYLIB=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_SERIAL_MSM_GENI_HALF_SAMPLING=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_DIAG_CHAR=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QCOM_GENI=y
+CONFIG_SPI=y
+CONFIG_SPI_QCOM_GENI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_BENGAL=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_SMB5=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
+CONFIG_QPNP_QG=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_STATISTICS=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_QTI_ADC_TM=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_SENSOR=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y
+CONFIG_QTI_LMH_CPU_VDD_COOLING_DEVICE=y
+CONFIG_QTI_CX_IPEAK_COOLING_DEVICE=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_REGULATOR_PM8008=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_V4L_TEST_DRIVERS=y
+CONFIG_VIDEO_VIM2M=y
+CONFIG_VIDEO_VICODEC=y
+CONFIG_DRM=y
+# CONFIG_DRM_MSM is not set
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_QCOM_EMU_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=900
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
+CONFIG_MMC_IPC_LOGGING=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_QTI_TRI_LED=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PM8XXX=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ION=y
+CONFIG_QPNP_REVID=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_IPA3=y
+CONFIG_IPA_WDI_UNIFIED_API=y
+CONFIG_RMNET_IPA3=y
+CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_SPMI_PMIC_CLKDIV=y
+CONFIG_SM_GPUCC_BENGAL=y
+CONFIG_SM_DISPCC_BENGAL=y
+CONFIG_SM_DEBUGCC_BENGAL=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_MAILBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QCOM_MDT_LOADER=y
+CONFIG_QPNP_PBS=y
+CONFIG_QCOM_QMI_HELPERS=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
+CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
+CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
+CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_QCOM_EUD=y
+CONFIG_QCOM_MINIDUMP=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_GLINK=y
+CONFIG_QCOM_GLINK_PKT=y
+CONFIG_QCOM_SMP2P_SLEEPSTATE=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_QTEE_SHM_BRIDGE=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
+CONFIG_ICNSS=y
+CONFIG_ICNSS_QMI=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_ARM_QCOM_DEVFREQ_FW=y
+CONFIG_IIO=y
+CONFIG_QCOM_SPMI_ADC5=y
+CONFIG_PWM=y
+CONFIG_PWM_QTI_LPG=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_QCOM_MPM=y
+CONFIG_PHY_XGENE=y
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_QCOM_QFPROM=y
+CONFIG_NVMEM_SPMI_SDAM=y
+CONFIG_SLIMBUS=y
+CONFIG_QCOM_KGSL=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PFK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_HARDENED_USERCOPY_PAGESPAN=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_XZ_DEC=y
+CONFIG_STACK_HASH_ORDER_SHIFT=12
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=-1
+CONFIG_SCHEDSTATS=y
+CONFIG_IPC_LOGGING=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_TGU=y
diff --git a/arch/arm64/configs/vendor/bengal_defconfig b/arch/arm64/configs/vendor/bengal_defconfig
index 38f024f..e60f8fd 100644
--- a/arch/arm64/configs/vendor/bengal_defconfig
+++ b/arch/arm64/configs/vendor/bengal_defconfig
@@ -9,6 +9,7 @@
 CONFIG_TASKSTATS=y
 CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
 CONFIG_RCU_EXPERT=y
 CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_RCU_NOCB_CPU=y
@@ -18,6 +19,7 @@
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
+CONFIG_DEBUG_BLK_CGROUP=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CPUSETS=y
@@ -50,6 +52,7 @@
 CONFIG_HZ_100=y
 CONFIG_SECCOMP=y
 # CONFIG_UNMAP_KERNEL_AT_EL0 is not set
+CONFIG_HARDEN_BRANCH_PREDICTOR=y
 CONFIG_PRINT_VMEMLAYOUT=y
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_SWP_EMULATION=y
@@ -68,11 +71,13 @@
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_CPUIDLE=y
 CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TIMES=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_ARM_QCOM_CPUFREQ_HW=y
 CONFIG_MSM_TZ_LOG=y
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
@@ -93,10 +98,16 @@
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_CLEANCACHE=y
 CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_CMA_ALLOW_WRITE_DEBUGFS=y
 CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -178,6 +189,7 @@
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
 # CONFIG_NETFILTER_XT_MATCH_SCTP is not set
 CONFIG_NETFILTER_XT_MATCH_SOCKET=y
 CONFIG_NETFILTER_XT_MATCH_STATE=y
@@ -247,11 +259,16 @@
 CONFIG_RFKILL=y
 CONFIG_FW_LOADER_USER_HELPER=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_WCD_IRQ=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
 CONFIG_DMA_CMA=y
 CONFIG_ZRAM=y
+CONFIG_ZRAM_DEDUP=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -263,6 +280,8 @@
 CONFIG_SCSI_UFSHCD=y
 CONFIG_SCSI_UFSHCD_PLATFORM=y
 CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
@@ -296,6 +315,7 @@
 CONFIG_JOYSTICK_XPAD=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
 CONFIG_INPUT_UINPUT=y
 # CONFIG_SERIO_SERPORT is not set
 # CONFIG_VT is not set
@@ -303,9 +323,10 @@
 # CONFIG_DEVMEM is not set
 CONFIG_SERIAL_MSM_GENI=y
 CONFIG_SERIAL_MSM_GENI_CONSOLE=y
-CONFIG_SERIAL_DEV_BUS=y
+CONFIG_SERIAL_MSM_GENI_HALF_SAMPLING=y
 CONFIG_TTY_PRINTK=y
 CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
 CONFIG_DIAG_CHAR=y
 CONFIG_MSM_ADSPRPC=y
 CONFIG_I2C_CHARDEV=y
@@ -320,17 +341,37 @@
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_POWER_RESET_XGENE=y
 CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_SMB5=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
+CONFIG_QPNP_QG=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_STATISTICS=y
 CONFIG_THERMAL_WRITABLE_TRIPS=y
 CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
 CONFIG_DEVFREQ_THERMAL=y
 CONFIG_QCOM_SPMI_TEMP_ALARM=y
 CONFIG_THERMAL_TSENS=y
 CONFIG_QTI_ADC_TM=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_SENSOR=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y
+CONFIG_QTI_LMH_CPU_VDD_COOLING_DEVICE=y
+CONFIG_QTI_CX_IPEAK_COOLING_DEVICE=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_RPM_SMD=y
 CONFIG_REGULATOR_STUB=y
+CONFIG_REGULATOR_PM8008=y
 CONFIG_MEDIA_SUPPORT=y
 CONFIG_MEDIA_CAMERA_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
@@ -353,6 +394,7 @@
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
 CONFIG_SND_SOC=y
 CONFIG_UHID=y
 CONFIG_HID_APPLE=y
@@ -361,6 +403,8 @@
 CONFIG_HID_MICROSOFT=y
 CONFIG_HID_MULTITOUCH=y
 CONFIG_HID_PLANTRONICS=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
@@ -374,13 +418,19 @@
 CONFIG_USB_EHSET_TEST_FIXTURE=y
 CONFIG_USB_LINK_LAYER_TEST=y
 CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
 CONFIG_USB_QCOM_EMU_PHY=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_VBUS_DRAW=900
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_RNDIS=y
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
 CONFIG_USB_CONFIGFS_F_MIDI=y
 CONFIG_USB_CONFIGFS_F_HID=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
@@ -388,6 +438,8 @@
 CONFIG_USB_CONFIGFS_F_CCID=y
 CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
@@ -395,9 +447,19 @@
 CONFIG_MMC_IPC_LOGGING=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
 CONFIG_MMC_SDHCI_MSM=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_QTI_TRI_LED=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
 CONFIG_EDAC=y
+CONFIG_EDAC_CORTEX_ARM64=y
+CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY=y
+CONFIG_EDAC_CORTEX_ARM64_PANIC_ON_UE=y
 CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PM8XXX=y
 CONFIG_DMADEVICES=y
 CONFIG_QCOM_GPI_DMA=y
 CONFIG_QCOM_GPI_DMA_DEBUG=y
@@ -406,6 +468,7 @@
 CONFIG_STAGING=y
 CONFIG_ASHMEM=y
 CONFIG_ION=y
+CONFIG_QPNP_REVID=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_IPA3=y
@@ -414,21 +477,28 @@
 CONFIG_RNDIS_IPA=y
 CONFIG_IPA_UT=y
 CONFIG_QCOM_GENI_SE=y
-CONFIG_SM_GCC_BENGAL=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_SPMI_PMIC_CLKDIV=y
+CONFIG_SM_GPUCC_BENGAL=y
+CONFIG_SM_DISPCC_BENGAL=y
+CONFIG_SM_DEBUGCC_BENGAL=y
 CONFIG_HWSPINLOCK=y
 CONFIG_HWSPINLOCK_QCOM=y
 CONFIG_MAILBOX=y
+CONFIG_QCOM_APCS_IPC=y
 CONFIG_MSM_QMP=y
 CONFIG_IOMMU_IO_PGTABLE_FAST=y
 CONFIG_ARM_SMMU=y
+CONFIG_ARM_SMMU_TESTBUS_DUMP=y
 CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_IOMMU_DEBUG=y
 CONFIG_IOMMU_DEBUG_TRACKING=y
 CONFIG_IOMMU_TESTS=y
 CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
 CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_MSM_RPM_SMD=y
 CONFIG_QCOM_COMMAND_DB=y
-CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_QCOM_MDT_LOADER=y
 CONFIG_QPNP_PBS=y
@@ -455,16 +525,22 @@
 CONFIG_QCOM_MINIDUMP=y
 CONFIG_MSM_CORE_HANG_DETECT=y
 CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_ERP=y
+CONFIG_PANIC_ON_GLADIATOR_ERROR=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_BUS_SCALING=y
 CONFIG_QCOM_GLINK=y
 CONFIG_QCOM_GLINK_PKT=y
+CONFIG_QCOM_SMP2P_SLEEPSTATE=y
 CONFIG_MSM_CDSP_LOADER=y
+CONFIG_QCOM_SMCINVOKE=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
 CONFIG_QTEE_SHM_BRIDGE=y
 CONFIG_MEM_SHARE_QMI_SERVICE=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
 CONFIG_ICNSS=y
 CONFIG_ICNSS_DEBUG=y
 CONFIG_ICNSS_QMI=y
@@ -473,13 +549,17 @@
 CONFIG_IIO=y
 CONFIG_QCOM_SPMI_ADC5=y
 CONFIG_PWM=y
+CONFIG_PWM_QTI_LPG=y
 CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_QCOM_MPM=y
 CONFIG_PHY_XGENE=y
 CONFIG_RAS=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
 CONFIG_QCOM_QFPROM=y
+CONFIG_NVMEM_SPMI_SDAM=y
 CONFIG_SLIMBUS=y
+CONFIG_QCOM_KGSL=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_F2FS_FS=y
@@ -501,6 +581,7 @@
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_PFK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
@@ -514,11 +595,18 @@
 CONFIG_CRYPTO_TWOFISH=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
 CONFIG_XZ_DEC=y
+CONFIG_STACK_HASH_ORDER_SHIFT=12
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_MODULE_LOAD_INFO=y
 CONFIG_DEBUG_INFO=y
 CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index ad2767a..9c1f97a 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -56,6 +56,7 @@
 CONFIG_SECCOMP=y
 CONFIG_OKL4_GUEST=y
 # CONFIG_UNMAP_KERNEL_AT_EL0 is not set
+CONFIG_ARM64_SSBD=y
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_SWP_EMULATION=y
 CONFIG_CP15_BARRIER_EMULATION=y
@@ -66,7 +67,6 @@
 CONFIG_BUILD_ARM64_UNCOMPRESSED_KERNEL=y
 CONFIG_KRYO_PMU_WORKAROUND=y
 CONFIG_COMPAT=y
-CONFIG_PM_AUTOSLEEP=y
 CONFIG_PM_WAKELOCKS=y
 CONFIG_PM_WAKELOCKS_LIMIT=0
 # CONFIG_PM_WAKELOCKS_GC is not set
@@ -126,6 +126,7 @@
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_SYN_COOKIES=y
 CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
@@ -192,6 +193,7 @@
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
 CONFIG_NETFILTER_XT_MATCH_SOCKET=y
 CONFIG_NETFILTER_XT_MATCH_STATE=y
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
@@ -223,6 +225,7 @@
 CONFIG_IP6_NF_RAW=y
 CONFIG_BRIDGE_NF_EBTABLES=y
 CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_IP_SCTP=y
 CONFIG_L2TP=y
 CONFIG_L2TP_V3=y
 CONFIG_L2TP_IP=y
@@ -272,12 +275,14 @@
 CONFIG_MHI_SATELLITE=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
 CONFIG_OKL4_USER_VIRQ=y
+CONFIG_WIGIG_SENSING_SPI=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -309,6 +314,7 @@
 CONFIG_PPP_MPPE=y
 CONFIG_PPTP=y
 CONFIG_PPPOL2TP=y
+CONFIG_USB_RTL8152=y
 CONFIG_USB_LAN78XX=y
 CONFIG_USB_USBNET=y
 CONFIG_WIL6210=m
@@ -388,6 +394,7 @@
 CONFIG_QTI_QMI_COOLING_DEVICE=y
 CONFIG_QTI_THERMAL_LIMITS_DCVS=y
 CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y
+CONFIG_QTI_LIMITS_ISENSE_CDSP=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
@@ -419,6 +426,7 @@
 CONFIG_I2C_RTC6226_QCA=y
 CONFIG_DRM=y
 # CONFIG_DRM_MSM is not set
+CONFIG_DRM_LONTIUM_LT9611UXC=y
 CONFIG_FB_ARMCLCD=y
 CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y
 CONFIG_LOGO=y
@@ -545,7 +553,6 @@
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_QCOM_MEM_OFFLINE=y
 CONFIG_OVERRIDE_MEMORY_LIMIT=y
-CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_MSM_QBT_HANDLER=y
 CONFIG_QCOM_IPCC=y
@@ -557,6 +564,7 @@
 CONFIG_QCOM_QMI_HELPERS=y
 CONFIG_QCOM_QMI_RMNET=y
 CONFIG_QCOM_QMI_DFC=y
+CONFIG_RMNET_CTL=y
 CONFIG_QCOM_QMI_POWER_COLLAPSE=y
 CONFIG_QCOM_RPMH=y
 CONFIG_QCOM_SMEM=y
@@ -613,6 +621,7 @@
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_ARM_QCOM_DEVFREQ_QOSLAT=y
+CONFIG_DEVFREQ_GOV_STATICMAP=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_QCOM_SPMI_ADC5=y
@@ -623,6 +632,7 @@
 CONFIG_RAS=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_QCOM_QFPROM=y
 CONFIG_NVMEM_SPMI_SDAM=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_ESOC=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index 5e3ed0d..59205612 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -56,6 +56,7 @@
 CONFIG_SECCOMP=y
 CONFIG_OKL4_GUEST=y
 # CONFIG_UNMAP_KERNEL_AT_EL0 is not set
+CONFIG_ARM64_SSBD=y
 CONFIG_PRINT_VMEMLAYOUT=y
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_SWP_EMULATION=y
@@ -66,7 +67,6 @@
 CONFIG_BUILD_ARM64_UNCOMPRESSED_KERNEL=y
 CONFIG_KRYO_PMU_WORKAROUND=y
 CONFIG_COMPAT=y
-CONFIG_PM_AUTOSLEEP=y
 CONFIG_PM_WAKELOCKS=y
 CONFIG_PM_WAKELOCKS_LIMIT=0
 # CONFIG_PM_WAKELOCKS_GC is not set
@@ -133,6 +133,7 @@
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_SYN_COOKIES=y
 CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
@@ -199,6 +200,7 @@
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
 CONFIG_NETFILTER_XT_MATCH_SOCKET=y
 CONFIG_NETFILTER_XT_MATCH_STATE=y
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
@@ -230,6 +232,7 @@
 CONFIG_IP6_NF_RAW=y
 CONFIG_BRIDGE_NF_EBTABLES=y
 CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_IP_SCTP=y
 CONFIG_L2TP=y
 CONFIG_L2TP_DEBUGFS=y
 CONFIG_L2TP_V3=y
@@ -282,12 +285,14 @@
 CONFIG_MHI_SATELLITE=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
 CONFIG_OKL4_USER_VIRQ=y
+CONFIG_WIGIG_SENSING_SPI=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -318,6 +323,7 @@
 CONFIG_PPP_MPPE=y
 CONFIG_PPTP=y
 CONFIG_PPPOL2TP=y
+CONFIG_USB_RTL8152=y
 CONFIG_USB_LAN78XX=y
 CONFIG_USB_USBNET=y
 CONFIG_WIL6210=m
@@ -355,7 +361,6 @@
 CONFIG_SERIAL_MSM_GENI=y
 CONFIG_SERIAL_MSM_GENI_CONSOLE=y
 CONFIG_SERIAL_MSM_GENI_HALF_SAMPLING=y
-CONFIG_SERIAL_DEV_BUS=y
 CONFIG_TTY_PRINTK=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
@@ -401,6 +406,7 @@
 CONFIG_QTI_QMI_COOLING_DEVICE=y
 CONFIG_QTI_THERMAL_LIMITS_DCVS=y
 CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y
+CONFIG_QTI_LIMITS_ISENSE_CDSP=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
@@ -432,6 +438,7 @@
 CONFIG_I2C_RTC6226_QCA=y
 CONFIG_DRM=y
 # CONFIG_DRM_MSM is not set
+CONFIG_DRM_LONTIUM_LT9611UXC=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y
@@ -568,7 +575,6 @@
 CONFIG_QCOM_MEM_OFFLINE=y
 CONFIG_BUG_ON_HW_MEM_ONLINE_FAIL=y
 CONFIG_OVERRIDE_MEMORY_LIMIT=y
-CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_MSM_QBT_HANDLER=y
 CONFIG_QCOM_IPCC=y
@@ -580,6 +586,8 @@
 CONFIG_QCOM_QMI_HELPERS=y
 CONFIG_QCOM_QMI_RMNET=y
 CONFIG_QCOM_QMI_DFC=y
+CONFIG_RMNET_CTL=y
+CONFIG_RMNET_CTL_DEBUG=y
 CONFIG_QCOM_QMI_POWER_COLLAPSE=y
 CONFIG_QCOM_RPMH=y
 CONFIG_QCOM_SMEM=y
@@ -606,6 +614,7 @@
 CONFIG_QCOM_FSA4480_I2C=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_WDOG_IPI_ENABLE=y
 CONFIG_QCOM_BUS_SCALING=y
 CONFIG_QCOM_BUS_CONFIG_RPMH=y
 CONFIG_MSM_SPCOM=y
@@ -637,6 +646,7 @@
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_ARM_QCOM_DEVFREQ_QOSLAT=y
+CONFIG_DEVFREQ_GOV_STATICMAP=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_QCOM_SPMI_ADC5=y
@@ -648,6 +658,7 @@
 CONFIG_RAS=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_QCOM_QFPROM=y
 CONFIG_NVMEM_SPMI_SDAM=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_ESOC=y
@@ -702,6 +713,7 @@
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_CONSOLE_UNHASHED_POINTERS=y
+CONFIG_DEBUG_MODULE_LOAD_INFO=y
 CONFIG_DEBUG_INFO=y
 CONFIG_PAGE_OWNER=y
 CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
diff --git a/arch/arm64/configs/vendor/lito-perf_defconfig b/arch/arm64/configs/vendor/lito-perf_defconfig
index 5e47b38..91147d7 100644
--- a/arch/arm64/configs/vendor/lito-perf_defconfig
+++ b/arch/arm64/configs/vendor/lito-perf_defconfig
@@ -47,6 +47,7 @@
 CONFIG_HOTPLUG_SIZE_BITS=29
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_LITO=y
+CONFIG_ARCH_LAGOON=y
 CONFIG_PCI=y
 CONFIG_SCHED_MC=y
 CONFIG_NR_CPUS=8
@@ -70,6 +71,7 @@
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_CPUIDLE=y
 CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TIMES=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
@@ -190,6 +192,7 @@
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
 # CONFIG_NETFILTER_XT_MATCH_SCTP is not set
 CONFIG_NETFILTER_XT_MATCH_SOCKET=y
 CONFIG_NETFILTER_XT_MATCH_STATE=y
@@ -326,6 +329,7 @@
 CONFIG_TABLET_USB_HANWANG=y
 CONFIG_TABLET_USB_KBTAB=y
 CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_QPNP_POWER_ON=y
 CONFIG_INPUT_UINPUT=y
@@ -351,6 +355,7 @@
 CONFIG_SPMI=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_PINCTRL_LITO=y
+CONFIG_PINCTRL_LAGOON=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_POWER_RESET_XGENE=y
@@ -378,6 +383,7 @@
 CONFIG_QTI_AOP_REG_COOLING_DEVICE=y
 CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y
 CONFIG_QTI_LMH_CPU_VDD_COOLING_DEVICE=y
+CONFIG_QTI_LIMITS_ISENSE_CDSP=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
@@ -393,12 +399,16 @@
 CONFIG_REGULATOR_PM8008=y
 CONFIG_MEDIA_SUPPORT=y
 CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_VIDEO_ADV_DEBUG=y
 CONFIG_VIDEO_FIXED_MINOR_RANGES=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_MSM_NPU=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_SW=y
 CONFIG_V4L_TEST_DRIVERS=y
 CONFIG_VIDEO_VIM2M=m
 CONFIG_VIDEO_VICODEC=y
@@ -644,8 +654,10 @@
 CONFIG_CRYPTO_DEV_QCRYPTO=y
 CONFIG_CRYPTO_DEV_QCEDEV=y
 CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_STACK_HASH_ORDER_SHIFT=12
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_PANIC_TIMEOUT=-1
@@ -665,4 +677,3 @@
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_TGU=y
-CONFIG_CORESIGHT_LINK_LATE_DISABLE=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index ffb5530..2d828d0 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -47,6 +47,7 @@
 CONFIG_HOTPLUG_SIZE_BITS=29
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_LITO=y
+CONFIG_ARCH_LAGOON=y
 CONFIG_PCI=y
 CONFIG_SCHED_MC=y
 CONFIG_NR_CPUS=8
@@ -71,6 +72,7 @@
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_CPUIDLE=y
 CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TIMES=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
@@ -195,6 +197,7 @@
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
 # CONFIG_NETFILTER_XT_MATCH_SCTP is not set
 CONFIG_NETFILTER_XT_MATCH_SOCKET=y
 CONFIG_NETFILTER_XT_MATCH_STATE=y
@@ -333,6 +336,7 @@
 CONFIG_TABLET_USB_HANWANG=y
 CONFIG_TABLET_USB_KBTAB=y
 CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_QPNP_POWER_ON=y
 CONFIG_INPUT_UINPUT=y
@@ -343,7 +347,6 @@
 CONFIG_SERIAL_MSM_GENI=y
 CONFIG_SERIAL_MSM_GENI_CONSOLE=y
 CONFIG_SERIAL_MSM_GENI_HALF_SAMPLING=y
-CONFIG_SERIAL_DEV_BUS=y
 CONFIG_TTY_PRINTK=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
@@ -360,6 +363,7 @@
 CONFIG_SPMI=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_PINCTRL_LITO=y
+CONFIG_PINCTRL_LAGOON=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_POWER_RESET_XGENE=y
@@ -387,6 +391,7 @@
 CONFIG_QTI_AOP_REG_COOLING_DEVICE=y
 CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y
 CONFIG_QTI_LMH_CPU_VDD_COOLING_DEVICE=y
+CONFIG_QTI_LIMITS_ISENSE_CDSP=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
@@ -402,12 +407,16 @@
 CONFIG_REGULATOR_PM8008=y
 CONFIG_MEDIA_SUPPORT=y
 CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_VIDEO_ADV_DEBUG=y
 CONFIG_VIDEO_FIXED_MINOR_RANGES=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_MSM_NPU=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_SW=y
 CONFIG_V4L_TEST_DRIVERS=y
 CONFIG_VIDEO_VIM2M=y
 CONFIG_VIDEO_VICODEC=y
@@ -535,6 +544,7 @@
 CONFIG_IOMMU_IO_PGTABLE_FAST=y
 CONFIG_ARM_SMMU=y
 CONFIG_IOMMU_TLBSYNC_DEBUG=y
+CONFIG_ARM_SMMU_TESTBUS_DUMP=y
 CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_IOMMU_DEBUG=y
 CONFIG_IOMMU_DEBUG_TRACKING=y
@@ -544,6 +554,7 @@
 CONFIG_RPMSG_QCOM_GLINK_SPSS=y
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_QCOM_MEM_OFFLINE=y
+CONFIG_BUG_ON_HW_MEM_ONLINE_FAIL=y
 CONFIG_OVERRIDE_MEMORY_LIMIT=y
 CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_RUN_QUEUE_STATS=y
@@ -668,6 +679,7 @@
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_CONSOLE_UNHASHED_POINTERS=y
+CONFIG_DEBUG_MODULE_LOAD_INFO=y
 CONFIG_DEBUG_INFO=y
 CONFIG_PAGE_OWNER=y
 CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
@@ -732,4 +744,3 @@
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_TGU=y
-CONFIG_CORESIGHT_LINK_LATE_DISABLE=y
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 3b09382..d8b01c7 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -74,7 +74,7 @@ __XCHG_CASE( ,  ,  mb_8, dmb ish, nop,  , a, l, "memory")
 #undef __XCHG_CASE
 
 #define __XCHG_GEN(sfx)							\
-static inline unsigned long __xchg##sfx(unsigned long x,		\
+static __always_inline unsigned long __xchg##sfx(unsigned long x,	\
 					volatile void *ptr,		\
 					int size)			\
 {									\
@@ -116,7 +116,7 @@ __XCHG_GEN(_mb)
 #define xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
 
 #define __CMPXCHG_GEN(sfx)						\
-static inline unsigned long __cmpxchg##sfx(volatile void *ptr,		\
+static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr,	\
 					   unsigned long old,		\
 					   unsigned long new,		\
 					   int size)			\
@@ -223,7 +223,7 @@ __CMPWAIT_CASE( ,  , 8);
 #undef __CMPWAIT_CASE
 
 #define __CMPWAIT_GEN(sfx)						\
-static inline void __cmpwait##sfx(volatile void *ptr,			\
+static __always_inline void __cmpwait##sfx(volatile void *ptr,		\
 				  unsigned long val,			\
 				  int size)				\
 {									\
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 510f687..dda6e50 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -525,11 +525,7 @@ static inline int arm64_get_ssbd_state(void)
 #endif
 }
 
-#ifdef CONFIG_ARM64_SSBD
 void arm64_set_ssbd_mitigation(bool state);
-#else
-static inline void arm64_set_ssbd_mitigation(bool state) {}
-#endif
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 518882f..0811e7c 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -62,14 +62,6 @@
 #define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
 			     MIDR_ARCHITECTURE_MASK)
 
-#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max)		\
-({									\
-	u32 _model = (midr) & MIDR_CPU_MODEL_MASK;			\
-	u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);	\
-									\
-	_model == (model) && rv >= (rv_min) && rv <= (rv_max);		\
- })
-
 #define ARM_CPU_IMP_ARM			0x41
 #define ARM_CPU_IMP_APM			0x50
 #define ARM_CPU_IMP_CAVIUM		0x43
@@ -89,6 +81,8 @@
 #define ARM_CPU_PART_CORTEX_A76		0xD0B
 #define ARM_CPU_PART_KRYO4G		0x804
 #define ARM_CPU_PART_KRYO5S		0x805
+#define ARM_CPU_PART_KRYO2XX_GOLD	0x800
+#define ARM_CPU_PART_KRYO2XX_SILVER	0x801
 
 #define APM_CPU_PART_POTENZA		0x000
 
@@ -126,6 +120,10 @@
 #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
 #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
 #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
+#define MIDR_KRYO2XX_GOLD \
+	MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO2XX_GOLD)
+#define MIDR_KRYO2XX_SILVER \
+	MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO2XX_SILVER)
 
 #ifndef __ASSEMBLY__
 
@@ -157,10 +155,19 @@ struct midr_range {
 
 #define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
 
+static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
+					   u32 rv_max)
+{
+	u32 _model = midr & MIDR_CPU_MODEL_MASK;
+	u32 rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);
+
+	return _model == model && rv >= rv_min && rv <= rv_max;
+}
+
 static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
 {
-	return MIDR_IS_CPU_MODEL_RANGE(midr, range->model,
-				 range->rv_min, range->rv_max);
+	return midr_is_cpu_model_range(midr, range->model,
+				       range->rv_min, range->rv_max);
 }
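
For illustration only, not part of the patch: a sketch of how a caller might use
the new type-checked helper. The wrapper name and revision bounds below are
hypothetical; MIDR_CPU_VAR_REV() is the existing cputype.h macro.

static bool is_early_a53(u32 midr)
{
	/* hypothetical bounds: Cortex-A53 r0p0 through r0p4 */
	return midr_is_cpu_model_range(midr, MIDR_CORTEX_A53,
				       MIDR_CPU_VAR_REV(0, 0),
				       MIDR_CPU_VAR_REV(0, 4));
}

Unlike the old MIDR_IS_CPU_MODEL_RANGE() statement-expression macro, the inline
function type-checks its arguments and evaluates each of them exactly once.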
 
 static inline bool
diff --git a/arch/arm64/include/asm/edac.h b/arch/arm64/include/asm/edac.h
new file mode 100644
index 0000000..0a88d83
--- /dev/null
+++ b/arch/arm64/include/asm/edac.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+
+#if defined(CONFIG_EDAC_CORTEX_ARM64) && \
+	!defined(CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY)
+void arm64_check_cache_ecc(void *info);
+#else
+static inline void arm64_check_cache_ecc(void *info) { }
+#endif
+
+static inline void atomic_scrub(void *addr, int size) { }
+
+#endif
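
A hedged call-site sketch (the caller below is hypothetical): because the header
always supplies a definition, either the real checker or an empty inline, code
using it needs no #ifdef guards and the call compiles away when EDAC is off.

#include <linux/smp.h>
#include <asm/edac.h>

static void poll_cache_ecc_on_all_cpus(void)
{
	/* broadcast the check; a no-op when the stub is in effect */
	on_each_cpu(arm64_check_cache_ecc, NULL, 1);
}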
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 7ed3208..f52a296 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -94,7 +94,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
 	((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
 
 #define alloc_screen_info(x...)		&screen_info
-#define free_screen_info(x...)
+
+static inline void free_screen_info(efi_system_table_t *sys_table_arg,
+				    struct screen_info *si)
+{
+}
 
 /* redeclare as 'hidden' so the compiler will generate relative references */
 extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 6abe400..367b2e0 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -398,6 +398,8 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
 DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
 
+void __kvm_enable_ssbs(void);
+
 static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 				       unsigned long hyp_stack_ptr,
 				       unsigned long vector_ptr)
@@ -418,6 +420,15 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 	 */
 	BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
 	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
+
+	/*
+	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
+	 * at EL2.
+	 */
+	if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
+	    arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
+		kvm_call_hyp(__kvm_enable_ssbs);
+	}
 }
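
The hyp-side __kvm_enable_ssbs() is declared above but its body falls outside
this hunk; it is expected to set SCTLR_EL2.DSSBS so that speculative store
bypass also stays enabled at EL2. A sketch of the likely implementation,
hedged since the exact body is not shown here:

void __hyp_text __kvm_enable_ssbs(void)
{
	u64 tmp;

	/* set SCTLR_EL2.DSSBS: leave store bypass enabled at EL2 */
	asm volatile(
	"mrs	%0, sctlr_el2\n"
	"orr	%0, %0, %1\n"
	"msr	sctlr_el2, %0"
	: "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
}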
 
 static inline bool kvm_arch_check_sve_has_vhe(void)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index f64d4e3..f2aa655 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -253,8 +253,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
 	 * or update_mmu_cache() have the necessary barriers.
 	 */
-	if (pte_valid_not_user(pte))
+	if (pte_valid_not_user(pte)) {
 		dsb(ishst);
+		isb();
+	}
 }
 
 extern void __sync_icache_dcache(pte_t pteval);
@@ -448,8 +450,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				 PMD_TYPE_SECT)
 
 #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
-#define pud_sect(pud)		(0)
-#define pud_table(pud)		(1)
+static inline bool pud_sect(pud_t pud) { return false; }
+static inline bool pud_table(pud_t pud) { return true; }
 #else
 #define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
 				 PUD_TYPE_SECT)
@@ -461,6 +463,7 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
 	WRITE_ONCE(*pmdp, pmd);
 	dsb(ishst);
+	isb();
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
@@ -517,6 +520,7 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
 {
 	WRITE_ONCE(*pudp, pud);
 	dsb(ishst);
+	isb();
 }
 
 static inline void pud_clear(pud_t *pudp)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index a4a1901..fc247b9 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -224,6 +224,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
 
 	__tlbi(vaae1is, addr);
 	dsb(ish);
+	isb();
 }
 #endif
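
The isb() additions in the pgtable.h and tlbflush.h hunks above all serve the
same pattern: dsb(ishst) publishes the new entry to the table walker, but the
local CPU can still run ahead on a translation it speculated earlier. A minimal
illustrative sketch (the helper name and open-coded barriers are for exposition
only):

static inline void publish_page_table_entry(u64 *slot, u64 entry)
{
	WRITE_ONCE(*slot, entry);		/* single-copy-atomic update */
	asm volatile("dsb ishst" ::: "memory");	/* make it visible to the MMU walker */
	asm volatile("isb" ::: "memory");	/* resynchronize the local pipeline */
}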
 
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 774e828..cd250ec 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -19,9 +19,11 @@
 #include <linux/arm-smccc.h>
 #include <linux/psci.h>
 #include <linux/types.h>
+#include <linux/cpu.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
+#include <asm/smp_plat.h>
 
 static bool __maybe_unused
 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
@@ -87,7 +89,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
 
 atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
@@ -109,9 +110,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
 	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
 }
 
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
-				      const char *hyp_vecs_start,
-				      const char *hyp_vecs_end)
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
+				    const char *hyp_vecs_start,
+				    const char *hyp_vecs_end)
 {
 	static DEFINE_SPINLOCK(bp_lock);
 	int cpu, slot = -1;
@@ -138,7 +139,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 #define __smccc_workaround_1_smc_start		NULL
 #define __smccc_workaround_1_smc_end		NULL
 
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
 				      const char *hyp_vecs_start,
 				      const char *hyp_vecs_end)
 {
@@ -146,23 +147,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 }
 #endif	/* CONFIG_KVM_INDIRECT_VECTORS */
 
-static void  install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
-				     bp_hardening_cb_t fn,
-				     const char *hyp_vecs_start,
-				     const char *hyp_vecs_end)
-{
-	u64 pfr0;
-
-	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
-		return;
-
-	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
-	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
-		return;
-
-	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
-}
-
 #include <uapi/linux/psci.h>
 #include <linux/arm-smccc.h>
 #include <linux/psci.h>
@@ -189,60 +173,83 @@ static void qcom_link_stack_sanitization(void)
 		     : "=&r" (tmp));
 }
 
-static void
-enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
+static bool __nospectre_v2;
+static int __init parse_nospectre_v2(char *str)
+{
+	__nospectre_v2 = true;
+	return 0;
+}
+early_param("nospectre_v2", parse_nospectre_v2);
+
+/*
+ * -1: No workaround
+ *  0: No workaround required
+ *  1: Workaround installed
+ */
+static int detect_harden_bp_fw(void)
 {
 	bp_hardening_cb_t cb;
 	void *smccc_start, *smccc_end;
 	struct arm_smccc_res res;
 	u32 midr = read_cpuid_id();
 
-	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
-		return;
-
 	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
-		return;
+		return -1;
 
 	switch (psci_ops.conduit) {
 	case PSCI_CONDUIT_HVC:
 		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-		if ((int)res.a0 < 0)
-			return;
-		cb = call_hvc_arch_workaround_1;
-		/* This is a guest, no need to patch KVM vectors */
-		smccc_start = NULL;
-		smccc_end = NULL;
+		switch ((int)res.a0) {
+		case 1:
+			/* Firmware says we're just fine */
+			return 0;
+		case 0:
+			cb = call_hvc_arch_workaround_1;
+			/* This is a guest, no need to patch KVM vectors */
+			smccc_start = NULL;
+			smccc_end = NULL;
+			break;
+		default:
+			return -1;
+		}
 		break;
 
 	case PSCI_CONDUIT_SMC:
 		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-		if ((int)res.a0 < 0)
-			return;
-		cb = call_smc_arch_workaround_1;
-		smccc_start = __smccc_workaround_1_smc_start;
-		smccc_end = __smccc_workaround_1_smc_end;
+		switch ((int)res.a0) {
+		case 1:
+			/* Firmware says we're just fine */
+			return 0;
+		case 0:
+			cb = call_smc_arch_workaround_1;
+			smccc_start = __smccc_workaround_1_smc_start;
+			smccc_end = __smccc_workaround_1_smc_end;
+			break;
+		default:
+			return -1;
+		}
 		break;
 
 	default:
-		return;
+		return -1;
 	}
 
 	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
 	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
 		cb = qcom_link_stack_sanitization;
 
-	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
+	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
+		install_bp_hardening_cb(cb, smccc_start, smccc_end);
 
-	return;
+	return 1;
 }
-#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
-#ifdef CONFIG_ARM64_SSBD
 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 
 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
+static bool __ssb_safe = true;
 
 static const struct ssbd_options {
 	const char	*str;
@@ -312,6 +319,11 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
 
 void arm64_set_ssbd_mitigation(bool state)
 {
+	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
+		pr_info_once("SSBD disabled by kernel configuration\n");
+		return;
+	}
+
 	if (this_cpu_has_cap(ARM64_SSBS)) {
 		if (state)
 			asm volatile(SET_PSTATE_SSBS(0));
@@ -341,16 +353,28 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 	struct arm_smccc_res res;
 	bool required = true;
 	s32 val;
+	bool this_cpu_safe = false;
 
 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 
+	if (cpu_mitigations_off())
+		ssbd_state = ARM64_SSBD_FORCE_DISABLE;
+
+	/* delay setting __ssb_safe until we get a firmware response */
+	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
+		this_cpu_safe = true;
+
 	if (this_cpu_has_cap(ARM64_SSBS)) {
+		if (!this_cpu_safe)
+			__ssb_safe = false;
 		required = false;
 		goto out_printmsg;
 	}
 
 	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
 		ssbd_state = ARM64_SSBD_UNKNOWN;
+		if (!this_cpu_safe)
+			__ssb_safe = false;
 		return false;
 	}
 
@@ -367,6 +391,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 
 	default:
 		ssbd_state = ARM64_SSBD_UNKNOWN;
+		if (!this_cpu_safe)
+			__ssb_safe = false;
 		return false;
 	}
 
@@ -375,14 +401,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 	switch (val) {
 	case SMCCC_RET_NOT_SUPPORTED:
 		ssbd_state = ARM64_SSBD_UNKNOWN;
+		if (!this_cpu_safe)
+			__ssb_safe = false;
 		return false;
 
+	/* machines with mixed mitigation requirements must not return this */
 	case SMCCC_RET_NOT_REQUIRED:
 		pr_info_once("%s mitigation not required\n", entry->desc);
 		ssbd_state = ARM64_SSBD_MITIGATED;
 		return false;
 
 	case SMCCC_RET_SUCCESS:
+		__ssb_safe = false;
 		required = true;
 		break;
 
@@ -392,6 +422,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 
 	default:
 		WARN_ON(1);
+		if (!this_cpu_safe)
+			__ssb_safe = false;
 		return false;
 	}
 
@@ -431,7 +463,14 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 
 	return required;
 }
-#endif	/* CONFIG_ARM64_SSBD */
+
+/* known invulnerable cores */
+static const struct midr_range arm64_ssb_cpus[] = {
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+	{},
+};
 
 #ifdef CONFIG_ARM64_ERRATUM_1463225
 DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -486,6 +525,10 @@ has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
 	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
 	CAP_MIDR_RANGE_LIST(midr_list)
 
+/* Track overall mitigation state. We are only mitigated if all cores are ok */
+static bool __hardenbp_enab = true;
+static bool __spectrev2_safe = true;
+
 /*
  * Generic helper for handling capabilities with multiple (match,enable) pairs
  * of call backs, sharing the same capability bit.
@@ -518,26 +561,87 @@ multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
 			caps->cpu_enable(caps);
 }
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+/*
+ * List of CPUs that do not need any Spectre-v2 mitigation at all.
+ */
+static const struct midr_range spectre_v2_safe_list[] = {
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+	{ /* sentinel */ }
+};
 
 /*
- * List of CPUs where we need to issue a psci call to
- * harden the branch predictor.
+ * Track overall bp hardening for all heterogeneous cores in the machine.
+ * We are only considered "safe" if all booted cores are known safe.
  */
-static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+static bool __maybe_unused
+check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	int need_wa;
+
+	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+	/* If the CPU has CSV2 set, we're safe */
+	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
+						 ID_AA64PFR0_CSV2_SHIFT))
+		return false;
+
+	/* Alternatively, we have a list of unaffected CPUs */
+	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+		return false;
+
+	/* Fallback to firmware detection */
+	need_wa = detect_harden_bp_fw();
+	if (!need_wa)
+		return false;
+
+	__spectrev2_safe = false;
+
+	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
+		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
+		__hardenbp_enab = false;
+		return false;
+	}
+
+	/* forced off */
+	if (__nospectre_v2 || cpu_mitigations_off()) {
+		pr_info_once("spectrev2 mitigation disabled by command line option\n");
+		__hardenbp_enab = false;
+		return false;
+	}
+
+	if (need_wa < 0) {
+		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
+		__hardenbp_enab = false;
+	}
+
+	return (need_wa > 0);
+}
+
+static const __maybe_unused struct midr_range tx2_family_cpus[] = {
 	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
 	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
-	MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
 	{},
 };
 
-#endif
+static bool __maybe_unused
+needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
+			 int scope)
+{
+	int i;
+
+	if (!is_affected_midr_range_list(entry, scope) ||
+	    !is_hyp_mode_available())
+		return false;
+
+	for_each_possible_cpu(i) {
+		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
+			return true;
+	}
+
+	return false;
+}
 
 #ifdef CONFIG_HARDEN_EL2_VECTORS
 
@@ -549,6 +653,18 @@ static const struct midr_range arm64_harden_el2_vectors[] = {
 
 #endif
 
+#ifdef CONFIG_ARM64_ERRATUM_858921
+
+static const struct midr_range arm64_workaround_858921_cpus[] = {
+	/* Cortex-A73 all versions */
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+	/* KRYO2XX Gold all versions */
+	MIDR_ALL_VERSIONS(MIDR_KRYO2XX_GOLD),
+	{},
+};
+
+#endif
+
 #ifdef CONFIG_ARM64_ERRATUM_1188873
 
 static const struct midr_range arm64_workaround_1188873_cpus[] = {
@@ -561,6 +677,18 @@ static const struct midr_range arm64_workaround_1188873_cpus[] = {
 
 #endif
 
+#ifdef CONFIG_ARM64_ERRATUM_845719
+
+static const struct midr_range arm64_workaround_845719_cpus[] = {
+	/* Cortex-A53 r0p[01234] */
+	MIDR_RANGE(MIDR_CORTEX_A53, 0, 0, 0, 4),
+	/* Kryo2xx Silver rAp4 */
+	MIDR_RANGE(MIDR_KRYO2XX_SILVER, 0xA, 0x4, 0xA, 0x4),
+	{},
+};
+
+#endif
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #if	defined(CONFIG_ARM64_ERRATUM_826319) || \
 	defined(CONFIG_ARM64_ERRATUM_827319) || \
@@ -613,10 +741,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_845719
 	{
-	/* Cortex-A53 r0p[01234] */
 		.desc = "ARM erratum 845719",
 		.capability = ARM64_WORKAROUND_845719,
-		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+		ERRATA_MIDR_RANGE_LIST(arm64_workaround_845719_cpus),
 	},
 #endif
 #ifdef CONFIG_CAVIUM_ERRATUM_23154
@@ -702,19 +829,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_858921
 	{
-	/* Cortex-A73 all versions */
 		.desc = "ARM erratum 858921",
 		.capability = ARM64_WORKAROUND_858921,
-		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+		ERRATA_MIDR_RANGE_LIST(arm64_workaround_858921_cpus),
 	},
 #endif
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
-		.cpu_enable = enable_smccc_arch_workaround_1,
-		ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
+		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+		.matches = check_branch_predictor,
 	},
-#endif
 #ifdef CONFIG_HARDEN_EL2_VECTORS
 	{
 		.desc = "EL2 vector hardening",
@@ -722,14 +846,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
 	},
 #endif
-#ifdef CONFIG_ARM64_SSBD
 	{
 		.desc = "Speculative Store Bypass Disable",
 		.capability = ARM64_SSBD,
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 		.matches = has_ssbd_mitigation,
+		.midr_range_list = arm64_ssb_cpus,
 	},
-#endif
 #ifdef CONFIG_ARM64_ERRATUM_1188873
 	{
 		.desc = "ARM erratum 1188873",
@@ -745,6 +868,49 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.matches = has_cortex_a76_erratum_1463225,
 	},
 #endif
+#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
+	{
+		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
+		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
+		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+		.matches = needs_tx2_tvm_workaround,
+	},
+#endif
 	{
 	}
 };
+
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	if (__spectrev2_safe)
+		return sprintf(buf, "Not affected\n");
+
+	if (__hardenbp_enab)
+		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
+
+	return sprintf(buf, "Vulnerable\n");
+}
+
+ssize_t cpu_show_spec_store_bypass(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	if (__ssb_safe)
+		return sprintf(buf, "Not affected\n");
+
+	switch (ssbd_state) {
+	case ARM64_SSBD_KERNEL:
+	case ARM64_SSBD_FORCE_ENABLE:
+		if (IS_ENABLED(CONFIG_ARM64_SSBD))
+			return sprintf(buf,
+			    "Mitigation: Speculative Store Bypass disabled via prctl\n");
+	}
+
+	return sprintf(buf, "Vulnerable\n");
+}
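
These handlers back the standard vulnerabilities directory, so arm64 now reports
its status the same way other architectures do. A small userspace sketch; the
strings printed depend on the running system:

#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("spectre_v2: %s", buf); /* e.g. "Mitigation: Branch predictor hardening" */
		fclose(f);
	}
	return 0;
}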
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index e1fcfca..d6fb0be 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -24,6 +24,7 @@
 #include <linux/stop_machine.h>
 #include <linux/types.h>
 #include <linux/mm.h>
+#include <linux/cpu.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
@@ -170,9 +171,17 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+	/*
+	 * We already refuse to boot CPUs that don't support our configured
+	 * page size, so we can only detect mismatches for a page size other
+	 * than the one we're currently using. Unfortunately, SoCs like this
+	 * exist in the wild so, even though we don't like it, we'll have to go
+	 * along with it and treat them as non-strict.
+	 */
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
 	/* Linux shouldn't care about secure memory */
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
@@ -842,7 +851,7 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _
 	u32 midr = read_cpuid_id();
 
 	/* Cavium ThunderX pass 1.x and 2.x */
-	return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
+	return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
 		MIDR_CPU_VAR_REV(0, 0),
 		MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
 }
@@ -881,7 +890,7 @@ static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
 	return ctr & BIT(CTR_DIC_SHIFT);
 }
 
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static bool __meltdown_safe = true;
 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
 
 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
@@ -891,9 +900,25 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 	static const struct midr_range kpti_safe_list[] = {
 		MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
 		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
 		{ /* sentinel */ }
 	};
-	char const *str = "command line option";
+	char const *str = "kpti command line option";
+	bool meltdown_safe;
+
+	meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
+
+	/* Defer to CPU feature registers */
+	if (has_cpuid_feature(entry, scope))
+		meltdown_safe = true;
+
+	if (!meltdown_safe)
+		__meltdown_safe = false;
 
 	/*
 	 * For reasons that aren't entirely clear, enabling KPTI on Cavium
@@ -905,6 +930,24 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 		__kpti_forced = -1;
 	}
 
+	/* Useful for KASLR robustness */
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
+		if (!__kpti_forced) {
+			str = "KASLR";
+			__kpti_forced = 1;
+		}
+	}
+
+	if (cpu_mitigations_off() && !__kpti_forced) {
+		str = "mitigations=off";
+		__kpti_forced = -1;
+	}
+
+	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
+		pr_info_once("kernel page table isolation disabled by kernel configuration\n");
+		return false;
+	}
+
 	/* Forced? */
 	if (__kpti_forced) {
 		pr_info_once("kernel page table isolation forced %s by %s\n",
@@ -912,18 +955,10 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 		return __kpti_forced > 0;
 	}
 
-	/* Useful for KASLR robustness */
-	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
-		return true;
-
-	/* Don't force KPTI for CPUs that are not vulnerable */
-	if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
-		return false;
-
-	/* Defer to CPU feature registers */
-	return !has_cpuid_feature(entry, scope);
+	return !meltdown_safe;
 }
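
A condensed restatement of the precedence the rework above establishes, as an
illustrative sketch; it is simplified, and the real function also handles the
Cavium erratum and checks kaslr_offset():

static bool kpti_needed_sketch(bool safe_list, bool cpuid_says_safe,
			       int forced, bool kaslr_on, bool mitigations_off)
{
	bool meltdown_safe = safe_list || cpuid_says_safe;

	if (kaslr_on && !forced)
		forced = 1;		/* KASLR robustness wants KPTI on */
	if (mitigations_off && !forced)
		forced = -1;		/* mitigations=off, unless already forced */
	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
		return false;		/* compiled out entirely */
	if (forced)
		return forced > 0;	/* kpti=, KASLR, or mitigations= decided */
	return !meltdown_safe;		/* default: enable only when vulnerable */
}

The point of the reordering is that meltdown_safe is now computed
unconditionally, so cpu_show_meltdown() can report accurately even when KPTI
itself is configured out.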
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 static void __nocfi
 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 {
@@ -948,6 +983,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 
 	return;
 }
+#else
+static void
+kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+{
+}
+#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 static int __init parse_kpti(char *str)
 {
@@ -961,7 +1002,6 @@ static int __init parse_kpti(char *str)
 	return 0;
 }
 early_param("kpti", parse_kpti);
-#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 #ifdef CONFIG_ARM64_HW_AFDBM
 static inline void __cpu_enable_hw_dbm(void)
@@ -1183,7 +1223,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.field_pos = ID_AA64PFR0_EL0_SHIFT,
 		.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
 	},
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 	{
 		.desc = "Kernel page table isolation (KPTI)",
 		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
@@ -1199,7 +1238,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = unmap_kernel_at_el0,
 		.cpu_enable = kpti_install_ng_mappings,
 	},
-#endif
 	{
 		/* FP/SIMD is not implemented */
 		.capability = ARM64_HAS_NO_FPSIMD,
@@ -1840,3 +1878,15 @@ void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
 	/* Firmware may have left a deferred SError in this register. */
 	write_sysreg_s(0, SYS_DISR_EL1);
 }
+
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	if (__meltdown_safe)
+		return sprintf(buf, "Not affected\n");
+
+	if (arm64_kernel_unmapped_at_el0())
+		return sprintf(buf, "Mitigation: PTI\n");
+
+	return sprintf(buf, "Vulnerable\n");
+}
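
With the restructuring above, unmap_kernel_at_el0() resolves KPTI in a
fixed order: an explicit kpti= option wins, KASLR then forces KPTI on,
mitigations=off forces it off, and only the Meltdown safe-list/CPUID
result decides the remaining cases. A minimal standalone sketch of that
precedence (the helper names are stand-ins, not kernel APIs):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-ins for the kernel's inputs: the kpti= option,
	 * kaslr_offset(), cpu_mitigations_off() and the MIDR/CPUID
	 * Meltdown checks. */
	static int  kpti_cmdline;	/* 0: unset, >0: on, <0: off */
	static bool kaslr_active;
	static bool mitigations_off;
	static bool meltdown_safe;

	static bool want_kpti(void)
	{
		int forced = kpti_cmdline;	/* explicit option wins */

		if (!forced && kaslr_active)
			forced = 1;		/* KASLR benefits from KPTI */
		if (!forced && mitigations_off)
			forced = -1;		/* global opt-out */
		if (forced)
			return forced > 0;

		return !meltdown_safe;		/* defer to the CPU */
	}

	int main(void)
	{
		kaslr_active = true;
		printf("kpti: %s\n", want_kpti() ? "on" : "off");
		return 0;
	}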
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 57e9622..7eff8af 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
 	if (offset < -SZ_128M || offset >= SZ_128M) {
 #ifdef CONFIG_ARM64_MODULE_PLTS
-		struct plt_entry trampoline;
+		struct plt_entry trampoline, *dst;
 		struct module *mod;
 
 		/*
@@ -104,24 +104,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 		 * is added in the future, but for now, the pr_err() below
 		 * deals with a theoretical issue only.
 		 */
+		dst = mod->arch.ftrace_trampoline;
 		trampoline = get_plt_entry(addr);
-		if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-				       &trampoline)) {
-			if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-					       &(struct plt_entry){})) {
+		if (!plt_entries_equal(dst, &trampoline)) {
+			if (!plt_entries_equal(dst, &(struct plt_entry){})) {
 				pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
 				return -EINVAL;
 			}
 
 			/* point the trampoline to our ftrace entry point */
 			module_disable_ro(mod);
-			*mod->arch.ftrace_trampoline = trampoline;
+			*dst = trampoline;
 			module_enable_ro(mod, true);
 
-			/* update trampoline before patching in the branch */
-			smp_wmb();
+			/*
+			 * Ensure updated trampoline is visible to instruction
+			 * fetch before we patch in the branch.
+			 */
+			__flush_icache_range((unsigned long)&dst[0],
+					     (unsigned long)&dst[1]);
 		}
-		addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
+		addr = (unsigned long)dst;
 #else /* CONFIG_ARM64_MODULE_PLTS */
 		return -EINVAL;
 #endif /* CONFIG_ARM64_MODULE_PLTS */
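
The barrier change above is the functional fix: smp_wmb() orders the
data-side stores but does not make the new trampoline visible to
instruction fetch, so the code now cleans and invalidates the caches
over the PLT entry before the branch is patched in. User-space JITs
meet the same requirement with a compiler builtin; the sketch below is
a generic illustration of that pattern, not the kernel's code path:

	#include <stddef.h>
	#include <string.h>

	/* Copy freshly generated code into 'buf' and make it visible to
	 * instruction fetch before anything branches to it. */
	static void publish_code(void *buf, const void *src, size_t len)
	{
		memcpy(buf, src, len);
		__builtin___clear_cache((char *)buf, (char *)buf + len);
	}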
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index c13674f..a031f04 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -364,22 +364,27 @@ void arch_release_task_struct(struct task_struct *tsk)
 	fpsimd_release_task(tsk);
 }
 
-/*
- * src and dst may temporarily have aliased sve_state after task_struct
- * is copied.  We cannot fix this properly here, because src may have
- * live SVE state and dst's thread_info may not exist yet, so tweaking
- * either src's or dst's TIF_SVE is not safe.
- *
- * The unaliasing is done in copy_thread() instead.  This works because
- * dst is not schedulable or traceable until both of these functions
- * have been called.
- */
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
 	if (current->mm)
 		fpsimd_preserve_current_state();
 	*dst = *src;
 
+	/* We rely on the above assignment to initialize dst's thread_flags: */
+	BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));
+
+	/*
+	 * Detach src's sve_state (if any) from dst so that it does not
+	 * get erroneously used or freed prematurely.  dst's sve_state
+	 * will be allocated on demand later on if dst uses SVE.
+	 * For consistency, also clear TIF_SVE here: this could be done
+	 * later in copy_process(), but to avoid tripping up future
+	 * maintainers it is best not to leave TIF_SVE and sve_state in
+	 * an inconsistent state, even temporarily.
+	 */
+	dst->thread.sve_state = NULL;
+	clear_tsk_thread_flag(dst, TIF_SVE);
+
 	return 0;
 }
 
@@ -393,13 +398,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
 
 	/*
-	 * Unalias p->thread.sve_state (if any) from the parent task
-	 * and disable discard SVE state for p:
-	 */
-	clear_tsk_thread_flag(p, TIF_SVE);
-	p->thread.sve_state = NULL;
-
-	/*
 	 * In case p was allocated the same task_struct pointer as some
 	 * other recently-exited task, make sure p is disassociated from
 	 * any cpu that may have run that now-exited task recently.
@@ -439,7 +437,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 			childregs->pstate |= PSR_UAO_BIT;
 
 		if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-			childregs->pstate |= PSR_SSBS_BIT;
+			set_ssbs_bit(childregs);
 
 		p->thread.cpu_context.x19 = stack_start;
 		p->thread.cpu_context.x20 = stk_sz;
@@ -480,18 +478,30 @@ void uao_thread_switch(struct task_struct *next)
 	}
 }
 
+/*
+ * Force SSBS state on context-switch, since it may be lost after migrating
+ * from a CPU which treats the bit as RES0 in a heterogeneous system.
+ */
 static void ssbs_thread_switch(struct task_struct *next)
 {
-	if (likely(!(next->flags & PF_KTHREAD)) &&
-	    arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE &&
-	    !test_tsk_thread_flag(next, TIF_SSBD)) {
-		struct pt_regs *regs = task_pt_regs(next);
+	struct pt_regs *regs = task_pt_regs(next);
 
-		if (compat_user_mode(regs))
-			set_compat_ssbs_bit(regs);
-		else if (user_mode(regs))
-			set_ssbs_bit(regs);
-	}
+	/*
+	 * Nothing to do for kernel threads, but 'regs' may be junk
+	 * (e.g. the idle task), so check the flags and bail early.
+	 */
+	if (unlikely(next->flags & PF_KTHREAD))
+		return;
+
+	/* If the mitigation is enabled, then we leave SSBS clear. */
+	if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
+	    test_tsk_thread_flag(next, TIF_SSBD))
+		return;
+
+	if (compat_user_mode(regs))
+		set_compat_ssbs_bit(regs);
+	else if (user_mode(regs))
+		set_ssbs_bit(regs);
 }
 
 /*
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6219486..0211c3c 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1666,19 +1666,20 @@ void syscall_trace_exit(struct pt_regs *regs)
 }
 
 /*
- * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a
- * We also take into account DIT (bit 24), which is not yet documented, and
- * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be
- * allocated an EL0 meaning in future.
+ * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
+ * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
+ * not described in ARM DDI 0487D.a.
+ * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
+ * be allocated an EL0 meaning in future.
  * Userspace cannot use these until they have an architectural meaning.
  * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
  * We also reserve IL for the kernel; SS is handled dynamically.
  */
 #define SPSR_EL1_AARCH64_RES0_BITS \
-	(GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
-	 GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5))
+	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
+	 GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
 #define SPSR_EL1_AARCH32_RES0_BITS \
-	(GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20))
+	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
 
 static int valid_compat_regs(struct user_pt_regs *regs)
 {
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 933adbc..0311fe5 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -11,6 +11,7 @@
 
 #include <linux/export.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
@@ -32,6 +33,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
 		return 0;
 	}
 }
+NOKPROBE_SYMBOL(save_return_addr);
 
 void *return_address(unsigned int level)
 {
@@ -55,3 +57,4 @@ void *return_address(unsigned int level)
 		return NULL;
 }
 EXPORT_SYMBOL_GPL(return_address);
+NOKPROBE_SYMBOL(return_address);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 4989f7e..bb482ec 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -18,6 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
@@ -85,6 +86,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 
 	return 0;
 }
+NOKPROBE_SYMBOL(unwind_frame);
 
 void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
 		     int (*fn)(struct stackframe *, void *), void *data)
@@ -99,6 +101,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
 			break;
 	}
 }
+NOKPROBE_SYMBOL(walk_stackframe);
 
 #ifdef CONFIG_STACKTRACE
 struct stack_trace_data {
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index cc48eb6..4996e75 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -359,17 +359,28 @@ void remove_cpu_topology(unsigned int cpu)
 }
 
 #ifdef CONFIG_ACPI
+static bool __init acpi_cpu_is_threaded(int cpu)
+{
+	int is_threaded = acpi_pptt_cpu_is_thread(cpu);
+
+	/*
+	 * If the PPTT doesn't have thread information, assume a homogeneous
+	 * machine and return the current CPU's thread state.
+	 */
+	if (is_threaded < 0)
+		is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
+
+	return !!is_threaded;
+}
+
 /*
  * Propagate the topology information of the processor_topology_node tree to the
  * cpu_topology array.
  */
 static int __init parse_acpi_topology(void)
 {
-	bool is_threaded;
 	int cpu, topology_id;
 
-	is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
-
 	for_each_possible_cpu(cpu) {
 		int i, cache_id;
 
@@ -377,7 +388,7 @@ static int __init parse_acpi_topology(void)
 		if (topology_id < 0)
 			return topology_id;
 
-		if (is_threaded) {
+		if (acpi_cpu_is_threaded(cpu)) {
 			cpu_topology[cpu].thread_id = topology_id;
 			topology_id = find_acpi_cpu_topology(cpu, 1);
 			cpu_topology[cpu].core_id   = topology_id;
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 74e469f..4dae476 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -68,6 +68,10 @@
 #define TRAMP_TEXT
 #endif
 
+#define RTIC_BSS					\
+	. = ALIGN(PAGE_SIZE);				\
+	KEEP(*(.bss.rtic));				\
+	. = ALIGN(PAGE_SIZE);
 /*
  * The size of the PE/COFF section that covers the kernel image, which
  * runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -239,6 +243,10 @@
 	STABS_DEBUG
 
 	HEAD_SYMBOLS
+
+	.bss : {			/* bss segment		*/
+	RTIC_BSS
+	}
 }
 
 /*
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 963d669..7414b76 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -293,3 +293,14 @@ void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.sysregs_loaded_on_cpu = false;
 }
+
+void __hyp_text __kvm_enable_ssbs(void)
+{
+	u64 tmp;
+
+	asm volatile(
+	"mrs	%0, sctlr_el2\n"
+	"orr	%0, %0, %1\n"
+	"msr	sctlr_el2, %0"
+	: "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
+}
diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c
index 7a5173e..4c2e96e 100644
--- a/arch/arm64/kvm/regmap.c
+++ b/arch/arm64/kvm/regmap.c
@@ -189,13 +189,18 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
 	switch (spsr_idx) {
 	case KVM_SPSR_SVC:
 		write_sysreg_el1(v, spsr);
+		break;
 	case KVM_SPSR_ABT:
 		write_sysreg(v, spsr_abt);
+		break;
 	case KVM_SPSR_UND:
 		write_sysreg(v, spsr_und);
+		break;
 	case KVM_SPSR_IRQ:
 		write_sysreg(v, spsr_irq);
+		break;
 	case KVM_SPSR_FIQ:
 		write_sysreg(v, spsr_fiq);
+		break;
 	}
 }
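
The added break statements fix a textbook switch fallthrough: before
them, writing KVM_SPSR_SVC also clobbered spsr_abt, spsr_und, spsr_irq
and spsr_fiq in sequence. A minimal reproduction of the failure mode:

	#include <stdio.h>

	static void buggy_write(int idx, int *svc, int *abt)
	{
		switch (idx) {
		case 0:
			*svc = 1;	/* missing break: falls through */
		case 1:
			*abt = 1;
		}
	}

	int main(void)
	{
		int svc = 0, abt = 0;

		buggy_write(0, &svc, &abt);
		printf("svc=%d abt=%d\n", svc, abt);	/* abt=1 too */
		return 0;
	}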
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index d112af7..6da2bbd 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -626,7 +626,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	 */
 	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
 	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
-	__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+	__vcpu_sys_reg(vcpu, r->reg) = val;
 }
 
 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
@@ -968,13 +968,13 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
 	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
-	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
+	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
 	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
-	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
+	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
 	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
-	  trap_wvr, reset_wvr, n, 0,  get_wvr, set_wvr },		\
+	  trap_wvr, reset_wvr, 0, 0,  get_wvr, set_wvr },		\
 	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
-	  trap_wcr, reset_wcr, n, 0,  get_wcr, set_wcr }
+	  trap_wcr, reset_wcr, 0, 0,  get_wcr, set_wcr }
 
 /* Macro to expand the PMEVCNTRn_EL0 register */
 #define PMU_PMEVCNTR_EL0(n)						\
@@ -1359,7 +1359,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 
 	{ SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
 
-	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
+	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
 	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
 	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
 	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
@@ -2072,13 +2072,19 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
 }
 
 static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
-			      const struct sys_reg_desc *table, size_t num)
+				const struct sys_reg_desc *table, size_t num,
+				unsigned long *bmap)
 {
 	unsigned long i;
 
 	for (i = 0; i < num; i++)
-		if (table[i].reset)
+		if (table[i].reset) {
+			int reg = table[i].reg;
+
 			table[i].reset(vcpu, &table[i]);
+			if (reg > 0 && reg < NR_SYS_REGS)
+				set_bit(reg, bmap);
+		}
 }
 
 /**
@@ -2576,18 +2582,16 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
 {
 	size_t num;
 	const struct sys_reg_desc *table;
-
-	/* Catch someone adding a register without putting in reset entry. */
-	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
+	DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };
 
 	/* Generic chip reset first (so target could override). */
-	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
+	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);
 
 	table = get_target_table(vcpu->arch.target, true, &num);
-	reset_sys_reg_descs(vcpu, table, num);
+	reset_sys_reg_descs(vcpu, table, num, bmap);
 
 	for (num = 1; num < NR_SYS_REGS; num++) {
-		if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
+		if (WARN(!test_bit(num, bmap),
 			 "Didn't reset __vcpu_sys_reg(%zi)\n", num))
 			break;
 	}
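
The rework above replaces poisoning every register with 0x42 bytes and
checking for the poison afterwards with positive tracking: each reset
callback records its register in a bitmap, and any register left unset
is reported. A toy model of the same pattern:

	#include <stdio.h>

	#define NR_REGS 8UL

	int main(void)
	{
		unsigned long bmap = 0;	/* one bit per register */
		int reset_regs[] = { 1, 2, 4 };
		unsigned long i;

		for (i = 0; i < sizeof(reset_regs) / sizeof(reset_regs[0]); i++)
			bmap |= 1UL << reset_regs[i];

		for (i = 1; i < NR_REGS; i++)
			if (!(bmap & (1UL << i)))
				printf("reg %lu was never reset\n", i);
		return 0;
	}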
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 842c8a5..157f2ca 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -65,7 +65,11 @@ unsigned long arch_mmap_rnd(void)
 static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
 {
 	unsigned long gap = rlim_stack->rlim_cur;
-	unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;
+	unsigned long pad = stack_guard_gap;
+
+	/* Account for stack randomization if necessary */
+	if (current->flags & PF_RANDOMIZE)
+		pad += (STACK_RND_MASK << PAGE_SHIFT);
 
 	/* Values close to RLIM_INFINITY can overflow. */
 	if (gap + pad > gap)
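
The gap + pad > gap check above is the usual unsigned overflow guard:
with rlim_cur near RLIM_INFINITY the addition would wrap, so the pad is
applied only when the sum is still larger than the original gap. A
standalone illustration of the wrap case:

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long gap = ULONG_MAX - 100;	/* ~RLIM_INFINITY */
		unsigned long pad = 4096;

		if (gap + pad > gap)	/* false here: the sum wraps */
			gap += pad;

		printf("gap unchanged: %lu\n", gap);
		return 0;
	}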
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 76fd72f..13a2dd4 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -340,6 +340,15 @@
 	msr	sctlr_el1, x18
 	isb
 
+	/*
+	 * Invalidate the local I-cache so that any instructions fetched
+	 * speculatively from the PoC are discarded, since they may have
+	 * been dynamically patched at the PoU.
+	 */
+	ic	iallu
+	dsb	nsh
+	isb
+
 	/* Set the flag to zero to indicate that we're all done */
 	str	wzr, [flag_ptr]
 	ret
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 7f0258e..dd6b600 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -949,3 +949,25 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 					   tmp : orig_prog);
 	return prog;
 }
+
+#ifdef CONFIG_CFI_CLANG
+bool arch_bpf_jit_check_func(const struct bpf_prog *prog)
+{
+	const uintptr_t func = (const uintptr_t)prog->bpf_func;
+
+	/*
+	 * bpf_func must be correctly aligned and within the correct region.
+	 * module_alloc places JIT code in the module region, unless
+	 * ARM64_MODULE_PLTS is enabled, in which case we might end up using
+	 * the vmalloc region too.
+	 */
+	if (unlikely(!IS_ALIGNED(func, sizeof(u32))))
+		return false;
+
+	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
+			is_vmalloc_addr(prog->bpf_func))
+		return true;
+
+	return (func >= MODULES_VADDR && func < MODULES_END);
+}
+#endif
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 326448f..1a42ba8 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -914,10 +914,14 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo
 void
 module_arch_cleanup (struct module *mod)
 {
-	if (mod->arch.init_unw_table)
+	if (mod->arch.init_unw_table) {
 		unw_remove_unwind_table(mod->arch.init_unw_table);
-	if (mod->arch.core_unw_table)
+		mod->arch.init_unw_table = NULL;
+	}
+	if (mod->arch.core_unw_table) {
 		unw_remove_unwind_table(mod->arch.core_unw_table);
+		mod->arch.core_unw_table = NULL;
+	}
 }
 
 void *dereference_module_function_descriptor(struct module *mod, void *ptr)
diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
index 9000b24..407a617 100644
--- a/arch/m68k/include/asm/atarihw.h
+++ b/arch/m68k/include/asm/atarihw.h
@@ -22,7 +22,6 @@
 
 #include <linux/types.h>
 #include <asm/bootinfo-atari.h>
-#include <asm/raw_io.h>
 #include <asm/kmap.h>
 
 extern u_long atari_mch_cookie;
@@ -126,14 +125,6 @@ extern struct atari_hw_present atari_hw_present;
  */
 
 
-#define atari_readb   raw_inb
-#define atari_writeb  raw_outb
-
-#define atari_inb_p   raw_inb
-#define atari_outb_p  raw_outb
-
-
-
 #include <linux/mm.h>
 #include <asm/cacheflush.h>
 
diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
index 782b78f..e056fea 100644
--- a/arch/m68k/include/asm/io_mm.h
+++ b/arch/m68k/include/asm/io_mm.h
@@ -29,7 +29,11 @@
 #include <asm-generic/iomap.h>
 
 #ifdef CONFIG_ATARI
-#include <asm/atarihw.h>
+#define atari_readb   raw_inb
+#define atari_writeb  raw_outb
+
+#define atari_inb_p   raw_inb
+#define atari_outb_p  raw_outb
 #endif
 
 
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index 08cee11..e441517 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -4,6 +4,7 @@
 
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 
 #include <asm/bootinfo-mac.h>
 
diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi
index 2bae201..1c7bf11 100644
--- a/arch/mips/boot/dts/qca/ar9331.dtsi
+++ b/arch/mips/boot/dts/qca/ar9331.dtsi
@@ -99,7 +99,7 @@
 
 			miscintc: interrupt-controller@18060010 {
 				compatible = "qca,ar7240-misc-intc";
-				reg = <0x18060010 0x4>;
+				reg = <0x18060010 0x8>;
 
 				interrupt-parent = <&cpuintc>;
 				interrupts = <6>;
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index c3d0d0a..6895430 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -623,7 +623,6 @@
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
 CONFIG_USB_ADUTUX=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYPRESS_CY7C63=m
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 5f71aa5..1a3e1fe 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -335,7 +335,6 @@
 CONFIG_USB_SERIAL_CYBERJACK=m
 CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OMNINET=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 0edba3e..4e2ee74 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -387,6 +387,22 @@
 #define cpu_has_dsp3		__ase(MIPS_ASE_DSP3)
 #endif
 
+#ifndef cpu_has_loongson_mmi
+#define cpu_has_loongson_mmi		__ase(MIPS_ASE_LOONGSON_MMI)
+#endif
+
+#ifndef cpu_has_loongson_cam
+#define cpu_has_loongson_cam		__ase(MIPS_ASE_LOONGSON_CAM)
+#endif
+
+#ifndef cpu_has_loongson_ext
+#define cpu_has_loongson_ext		__ase(MIPS_ASE_LOONGSON_EXT)
+#endif
+
+#ifndef cpu_has_loongson_ext2
+#define cpu_has_loongson_ext2		__ase(MIPS_ASE_LOONGSON_EXT2)
+#endif
+
 #ifndef cpu_has_mipsmt
 #define cpu_has_mipsmt		__isa_lt_and_ase(6, MIPS_ASE_MIPSMT)
 #endif
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index dacbdb8..2b4b14a 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -436,5 +436,9 @@ enum cpu_type_enum {
 #define MIPS_ASE_MSA		0x00000100 /* MIPS SIMD Architecture */
 #define MIPS_ASE_DSP3		0x00000200 /* Signal Processing ASE Rev 3*/
 #define MIPS_ASE_MIPS16E2	0x00000400 /* MIPS16e2 */
+#define MIPS_ASE_LOONGSON_MMI	0x00000800 /* Loongson MultiMedia extensions Instructions */
+#define MIPS_ASE_LOONGSON_CAM	0x00001000 /* Loongson CAM */
+#define MIPS_ASE_LOONGSON_EXT	0x00002000 /* Loongson EXTensions */
+#define MIPS_ASE_LOONGSON_EXT2	0x00004000 /* Loongson EXTensions R2 */
 
 #endif /* _ASM_CPU_H */
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 01df9ad..1bb9448 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -688,6 +688,9 @@
 #define MIPS_CONF7_IAR		(_ULCAST_(1) << 10)
 #define MIPS_CONF7_AR		(_ULCAST_(1) << 16)
 
+/* Ingenic Config7 bits */
+#define MIPS_CONF7_BTB_LOOP_EN	(_ULCAST_(1) << 4)
+
 /* Config7 Bits specific to MIPS Technologies. */
 
 /* Performance counters implemented Per TC */
@@ -2774,6 +2777,7 @@ __BUILD_SET_C0(status)
 __BUILD_SET_C0(cause)
 __BUILD_SET_C0(config)
 __BUILD_SET_C0(config5)
+__BUILD_SET_C0(config7)
 __BUILD_SET_C0(intcontrol)
 __BUILD_SET_C0(intctl)
 __BUILD_SET_C0(srsmap)
diff --git a/arch/mips/include/uapi/asm/hwcap.h b/arch/mips/include/uapi/asm/hwcap.h
index a2aba4b..1ade1da 100644
--- a/arch/mips/include/uapi/asm/hwcap.h
+++ b/arch/mips/include/uapi/asm/hwcap.h
@@ -6,5 +6,16 @@
 #define HWCAP_MIPS_R6		(1 << 0)
 #define HWCAP_MIPS_MSA		(1 << 1)
 #define HWCAP_MIPS_CRC32	(1 << 2)
+#define HWCAP_MIPS_MIPS16	(1 << 3)
+#define HWCAP_MIPS_MDMX		(1 << 4)
+#define HWCAP_MIPS_MIPS3D	(1 << 5)
+#define HWCAP_MIPS_SMARTMIPS	(1 << 6)
+#define HWCAP_MIPS_DSP		(1 << 7)
+#define HWCAP_MIPS_DSP2		(1 << 8)
+#define HWCAP_MIPS_DSP3		(1 << 9)
+#define HWCAP_MIPS_MIPS16E2	(1 << 10)
+#define HWCAP_LOONGSON_MMI	(1 << 11)
+#define HWCAP_LOONGSON_EXT	(1 << 12)
+#define HWCAP_LOONGSON_EXT2	(1 << 13)
 
 #endif /* _UAPI_ASM_HWCAP_H */
diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
index 97d5239..428ef21 100644
--- a/arch/mips/kernel/cacheinfo.c
+++ b/arch/mips/kernel/cacheinfo.c
@@ -80,6 +80,8 @@ static int __populate_cache_leaves(unsigned int cpu)
 	if (c->tcache.waysize)
 		populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
 
+	this_cpu_ci->cpu_map_populated = true;
+
 	return 0;
 }
 
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index d535fc7..581defb 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1489,6 +1489,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 			__cpu_name[cpu] = "ICT Loongson-3";
 			set_elf_platform(cpu, "loongson3a");
 			set_isa(c, MIPS_CPU_ISA_M64R1);
+			c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+				MIPS_ASE_LOONGSON_EXT);
 			break;
 		case PRID_REV_LOONGSON3B_R1:
 		case PRID_REV_LOONGSON3B_R2:
@@ -1496,6 +1498,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 			__cpu_name[cpu] = "ICT Loongson-3";
 			set_elf_platform(cpu, "loongson3b");
 			set_isa(c, MIPS_CPU_ISA_M64R1);
+			c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+				MIPS_ASE_LOONGSON_EXT);
 			break;
 		}
 
@@ -1861,6 +1865,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
 		decode_configs(c);
 		c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
 		c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+		c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+			MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
 		break;
 	default:
 		panic("Unknown Loongson Processor ID!");
@@ -1879,6 +1885,13 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
 		c->cputype = CPU_JZRISC;
 		c->writecombine = _CACHE_UNCACHED_ACCELERATED;
 		__cpu_name[cpu] = "Ingenic JZRISC";
+		/*
+		 * The XBurst core by default attempts to avoid branch target
+		 * buffer lookups by detecting & special-casing loops. This
+		 * feature causes BogoMIPS and lpj to be calculated incorrectly.
+		 * Set cp0 config7 bit 4 to disable this feature.
+		 */
+		set_c0_config7(MIPS_CONF7_BTB_LOOP_EN);
 		break;
 	default:
 		panic("Unknown Ingenic Processor ID!");
@@ -2092,6 +2105,39 @@ void cpu_probe(void)
 		elf_hwcap |= HWCAP_MIPS_MSA;
 	}
 
+	if (cpu_has_mips16)
+		elf_hwcap |= HWCAP_MIPS_MIPS16;
+
+	if (cpu_has_mdmx)
+		elf_hwcap |= HWCAP_MIPS_MDMX;
+
+	if (cpu_has_mips3d)
+		elf_hwcap |= HWCAP_MIPS_MIPS3D;
+
+	if (cpu_has_smartmips)
+		elf_hwcap |= HWCAP_MIPS_SMARTMIPS;
+
+	if (cpu_has_dsp)
+		elf_hwcap |= HWCAP_MIPS_DSP;
+
+	if (cpu_has_dsp2)
+		elf_hwcap |= HWCAP_MIPS_DSP2;
+
+	if (cpu_has_dsp3)
+		elf_hwcap |= HWCAP_MIPS_DSP3;
+
+	if (cpu_has_mips16e2)
+		elf_hwcap |= HWCAP_MIPS_MIPS16E2;
+
+	if (cpu_has_loongson_mmi)
+		elf_hwcap |= HWCAP_LOONGSON_MMI;
+
+	if (cpu_has_loongson_ext)
+		elf_hwcap |= HWCAP_LOONGSON_EXT;
+
+	if (cpu_has_loongson_ext2)
+		elf_hwcap |= HWCAP_LOONGSON_EXT2;
+
 	if (cpu_has_vz)
 		cpu_probe_vz(c);
 
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index 5f209f1..df7ddd2 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -32,7 +32,8 @@ void __init setup_pit_timer(void)
 
 static int __init init_pit_clocksource(void)
 {
-	if (num_possible_cpus() > 1) /* PIT does not scale! */
+	if (num_possible_cpus() > 1 || /* PIT does not scale! */
+	    !clockevent_state_periodic(&i8253_clockevent))
 		return 0;
 
 	return clocksource_i8253_init();
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index b2de408..f8d3671 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -124,6 +124,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	if (cpu_has_eva)	seq_printf(m, "%s", " eva");
 	if (cpu_has_htw)	seq_printf(m, "%s", " htw");
 	if (cpu_has_xpa)	seq_printf(m, "%s", " xpa");
+	if (cpu_has_loongson_mmi)	seq_printf(m, "%s", " loongson-mmi");
+	if (cpu_has_loongson_cam)	seq_printf(m, "%s", " loongson-cam");
+	if (cpu_has_loongson_ext)	seq_printf(m, "%s", " loongson-ext");
+	if (cpu_has_loongson_ext2)	seq_printf(m, "%s", " loongson-ext2");
 	seq_printf(m, "\n");
 
 	if (cpu_has_mmips) {
diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform
index 0fce460..12abf14 100644
--- a/arch/mips/loongson64/Platform
+++ b/arch/mips/loongson64/Platform
@@ -43,6 +43,10 @@
       $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
 endif
 
+# Some -march= flags enable MMI instructions, and GCC complains about that
+# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
+cflags-y += $(call cc-option,-mno-loongson-mmi)
+
 #
 # Loongson Machines' Support
 #
diff --git a/arch/mips/loongson64/common/serial.c b/arch/mips/loongson64/common/serial.c
index ffefc1c..98c3a7f 100644
--- a/arch/mips/loongson64/common/serial.c
+++ b/arch/mips/loongson64/common/serial.c
@@ -110,7 +110,7 @@ static int __init serial_init(void)
 }
 module_init(serial_init);
 
-static void __init serial_exit(void)
+static void __exit serial_exit(void)
 {
 	platform_device_unregister(&uart8250_device);
 }
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 1b705fb..233033f 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -21,8 +21,9 @@ unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
 EXPORT_SYMBOL(shm_align_mask);
 
 /* gap between mmap and stack */
-#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((TASK_SIZE)/6*5)
+#define MIN_GAP		(128*1024*1024UL)
+#define MAX_GAP		((TASK_SIZE)/6*5)
+#define STACK_RND_MASK	(0x7ff >> (PAGE_SHIFT - 12))
 
 static int mmap_is_legacy(struct rlimit *rlim_stack)
 {
@@ -38,6 +39,15 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
 static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
 {
 	unsigned long gap = rlim_stack->rlim_cur;
+	unsigned long pad = stack_guard_gap;
+
+	/* Account for stack randomization if necessary */
+	if (current->flags & PF_RANDOMIZE)
+		pad += (STACK_RND_MASK << PAGE_SHIFT);
+
+	/* Values close to RLIM_INFINITY can overflow. */
+	if (gap + pad > gap)
+		gap += pad;
 
 	if (gap < MIN_GAP)
 		gap = MIN_GAP;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 8c4fda5..3944c49 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -630,7 +630,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 		return;
 	}
 
-	if (cpu_has_rixi && _PAGE_NO_EXEC) {
+	if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
 		if (fill_includes_sw_bits) {
 			UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
 		} else {
@@ -654,6 +654,13 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
 				   int restore_scratch)
 {
 	if (restore_scratch) {
+		/*
+		 * Ensure the MFC0 below observes the value written to the
+		 * KScratch register by the prior MTC0.
+		 */
+		if (scratch_reg >= 0)
+			uasm_i_ehb(p);
+
 		/* Reset default page size */
 		if (PM_DEFAULT_MASK >> 16) {
 			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
@@ -668,12 +675,10 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
 			uasm_i_mtc0(p, 0, C0_PAGEMASK);
 			uasm_il_b(p, r, lid);
 		}
-		if (scratch_reg >= 0) {
-			uasm_i_ehb(p);
+		if (scratch_reg >= 0)
 			UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-		} else {
+		else
 			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
-		}
 	} else {
 		/* Reset default page size */
 		if (PM_DEFAULT_MASK >> 16) {
@@ -922,6 +927,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	}
 	if (mode != not_refill && check_for_high_segbits) {
 		uasm_l_large_segbits_fault(l, *p);
+
+		if (mode == refill_scratch && scratch_reg >= 0)
+			uasm_i_ehb(p);
+
 		/*
 		 * We get here if we are an xsseg address, or if we are
 		 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
@@ -938,12 +947,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 		uasm_i_jr(p, ptr);
 
 		if (mode == refill_scratch) {
-			if (scratch_reg >= 0) {
-				uasm_i_ehb(p);
+			if (scratch_reg >= 0)
 				UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-			} else {
+			else
 				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
-			}
 		} else {
 			uasm_i_nop(p);
 		}
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 6f10312..c99fa1c 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -9,6 +9,7 @@
 	$(filter -mmicromips,$(KBUILD_CFLAGS)) \
 	$(filter -march=%,$(KBUILD_CFLAGS)) \
 	$(filter -m%-float,$(KBUILD_CFLAGS)) \
+	$(filter -mno-loongson-%,$(KBUILD_CFLAGS)) \
 	-D__VDSO__
 
 ifeq ($(cc-name),clang)
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
index 92a9b5f..f29f682 100644
--- a/arch/parisc/mm/ioremap.c
+++ b/arch/parisc/mm/ioremap.c
@@ -3,7 +3,7 @@
  * arch/parisc/mm/ioremap.c
  *
  * (C) Copyright 1995 1996 Linus Torvalds
- * (C) Copyright 2001-2006 Helge Deller <deller@gmx.de>
+ * (C) Copyright 2001-2019 Helge Deller <deller@gmx.de>
  * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
  */
 
@@ -84,7 +84,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	addr = (void __iomem *) area->addr;
 	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
 			       phys_addr, pgprot)) {
-		vfree(addr);
+		vunmap(addr);
 		return NULL;
 	}
 
@@ -92,9 +92,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 }
 EXPORT_SYMBOL(__ioremap);
 
-void iounmap(const volatile void __iomem *addr)
+void iounmap(const volatile void __iomem *io_addr)
 {
-	if (addr > high_memory)
-		return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
+	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
+
+	if (is_vmalloc_addr((void *)addr))
+		vunmap((void *)addr);
 }
 EXPORT_SYMBOL(iounmap);
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 29f49a3..6a6804c 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -212,7 +212,7 @@ static inline void cpu_feature_keys_init(void) { }
 #define CPU_FTR_POWER9_DD2_1		LONG_ASM_CONST(0x0000080000000000)
 #define CPU_FTR_P9_TM_HV_ASSIST		LONG_ASM_CONST(0x0000100000000000)
 #define CPU_FTR_P9_TM_XER_SO_BUG	LONG_ASM_CONST(0x0000200000000000)
-#define CPU_FTR_P9_TLBIE_BUG		LONG_ASM_CONST(0x0000400000000000)
+#define CPU_FTR_P9_TLBIE_STQ_BUG	LONG_ASM_CONST(0x0000400000000000)
 #define CPU_FTR_P9_TIDR			LONG_ASM_CONST(0x0000800000000000)
 
 #ifndef __ASSEMBLY__
@@ -460,7 +460,7 @@ static inline void cpu_feature_keys_init(void) { }
 	    CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
 	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
 	    CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \
-	    CPU_FTR_P9_TLBIE_BUG | CPU_FTR_P9_TIDR)
+	    CPU_FTR_P9_TLBIE_STQ_BUG | CPU_FTR_P9_TIDR)
 #define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
 #define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1)
 #define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 9454277..2a7b01f 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -59,8 +59,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
 
 	pagefault_enable();
 
-	if (!ret)
-		*oval = oldval;
+	*oval = oldval;
 
 	return ret;
 }
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 83a9aa3..dd18d81 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -301,12 +301,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 
 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
 {
-	vcpu->arch.cr = val;
+	vcpu->arch.regs.ccr = val;
 }
 
 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.cr;
+	return vcpu->arch.regs.ccr;
 }
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index dc435a5..14fa07c 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -482,7 +482,7 @@ static inline u64 sanitize_msr(u64 msr)
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.cr  = vcpu->arch.cr_tm;
+	vcpu->arch.regs.ccr  = vcpu->arch.cr_tm;
 	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
 	vcpu->arch.regs.link  = vcpu->arch.lr_tm;
 	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
@@ -499,7 +499,7 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
 
 static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.cr_tm  = vcpu->arch.cr;
+	vcpu->arch.cr_tm  = vcpu->arch.regs.ccr;
 	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
 	vcpu->arch.lr_tm  = vcpu->arch.regs.link;
 	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index d513e3e..f0cef62 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -46,12 +46,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 
 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
 {
-	vcpu->arch.cr = val;
+	vcpu->arch.regs.ccr = val;
 }
 
 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.cr;
+	return vcpu->arch.regs.ccr;
 }
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 2b6049e..2f95e38 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -538,8 +538,6 @@ struct kvm_vcpu_arch {
 	ulong tar;
 #endif
 
-	u32 cr;
-
 #ifdef CONFIG_PPC_BOOK3S
 	ulong hflags;
 	ulong guest_owned_ext;
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index b694d6a..ae95395 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -217,12 +217,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
 #endif
 }
 
-static inline int arch_dup_mmap(struct mm_struct *oldmm,
-				struct mm_struct *mm)
-{
-	return 0;
-}
-
 #ifndef CONFIG_PPC_BOOK3S_64
 static inline void arch_exit_mmap(struct mm_struct *mm)
 {
@@ -247,6 +241,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 #ifdef CONFIG_PPC_MEM_KEYS
 bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
 			       bool execute, bool foreign);
+void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm);
 #else /* CONFIG_PPC_MEM_KEYS */
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 		bool write, bool execute, bool foreign)
@@ -259,6 +254,7 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 #define thread_pkey_regs_save(thread)
 #define thread_pkey_regs_restore(new_thread, old_thread)
 #define thread_pkey_regs_init(thread)
+#define arch_dup_pkeys(oldmm, mm)
 
 static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
 {
@@ -267,5 +263,12 @@ static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
 
 #endif /* CONFIG_PPC_MEM_KEYS */
 
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+				struct mm_struct *mm)
+{
+	arch_dup_pkeys(oldmm, mm);
+	return 0;
+}
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_MMU_CONTEXT_H */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index ff38664..d8d886d 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -275,7 +275,7 @@ int64_t opal_xive_get_vp_info(uint64_t vp,
 int64_t opal_xive_set_vp_info(uint64_t vp,
 			      uint64_t flags,
 			      uint64_t report_cl_pair);
-int64_t opal_xive_allocate_irq(uint32_t chip_id);
+int64_t opal_xive_allocate_irq_raw(uint32_t chip_id);
 int64_t opal_xive_free_irq(uint32_t girq);
 int64_t opal_xive_sync(uint32_t type, uint32_t id);
 int64_t opal_xive_dump(uint32_t type, uint32_t id);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index e5b314e..640a4d8 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -118,11 +118,16 @@
 #define MSR_TS_S	__MASK(MSR_TS_S_LG)	/*  Transaction Suspended */
 #define MSR_TS_T	__MASK(MSR_TS_T_LG)	/*  Transaction Transactional */
 #define MSR_TS_MASK	(MSR_TS_T | MSR_TS_S)   /* Transaction State bits */
-#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
 #define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
 #define MSR_TM_TRANSACTIONAL(x)	(((x) & MSR_TS_MASK) == MSR_TS_T)
 #define MSR_TM_SUSPENDED(x)	(((x) & MSR_TS_MASK) == MSR_TS_S)
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
+#else
+#define MSR_TM_ACTIVE(x) 0
+#endif
+
 #if defined(CONFIG_PPC_BOOK3S_64)
 #define MSR_64BIT	MSR_SF
 
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 23bea99..1ca9e37 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -306,6 +306,7 @@ extern unsigned long __copy_tofrom_user(void __user *to,
 static inline unsigned long
 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
+	barrier_nospec();
 	return __copy_tofrom_user(to, from, n);
 }
 #endif /* __powerpc64__ */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 89cf155..7c3738d 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -438,7 +438,7 @@ int main(void)
 #ifdef CONFIG_PPC_BOOK3S
 	OFFSET(VCPU_TAR, kvm_vcpu, arch.tar);
 #endif
-	OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
+	OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
 	OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr);
@@ -695,7 +695,7 @@ int main(void)
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 #else /* CONFIG_PPC_BOOK3S */
-	OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
+	OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
 	OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
 	OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
 	OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index f432054..f3b8e04 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -694,9 +694,35 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
 	return true;
 }
 
+/*
+ * Handle the POWER9 broadcast tlbie invalidation issue by setting
+ * a CPU feature flag.
+ */
+static __init void update_tlbie_feature_flag(unsigned long pvr)
+{
+	if (PVR_VER(pvr) == PVR_POWER9) {
+		/*
+		 * Set the tlbie feature flag for anything below
+		 * Nimbus DD 2.3 and Cumulus DD 1.3
+		 */
+		if ((pvr & 0xe000) == 0) {
+			/* Nimbus */
+			if ((pvr & 0xfff) < 0x203)
+				cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
+		} else if ((pvr & 0xc000) == 0) {
+			/* Cumulus */
+			if ((pvr & 0xfff) < 0x103)
+				cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
+		} else {
+			WARN_ONCE(1, "Unknown PVR");
+			cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
+		}
+	}
+}
+
 static __init void cpufeatures_cpu_quirks(void)
 {
-	int version = mfspr(SPRN_PVR);
+	unsigned long version = mfspr(SPRN_PVR);
 
 	/*
 	 * Not all quirks can be derived from the cpufeatures device tree.
@@ -715,10 +741,10 @@ static __init void cpufeatures_cpu_quirks(void)
 
 	if ((version & 0xffff0000) == 0x004e0000) {
 		cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
-		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
 		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR;
 	}
 
+	update_tlbie_feature_flag(version);
 	/*
 	 * PKEY was not in the initial base or feature node
 	 * specification, but it should become optional in the next
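
The masks in update_tlbie_feature_flag() decode the POWER9 PVR: the low
12 bits carry the DD level (0x203 == DD2.3), while the 0xe000/0xc000
bits distinguish the Nimbus and Cumulus variants. A standalone sketch
of the same decode (the helper name is illustrative, not a kernel
symbol, and assumes PVR_VER(pvr) already matched POWER9):

	#include <stdbool.h>
	#include <stdio.h>

	static bool needs_tlbie_stq_workaround(unsigned long pvr)
	{
		unsigned int dd = pvr & 0xfff;	/* 0x203 == DD2.3 */

		if ((pvr & 0xe000) == 0)	/* Nimbus */
			return dd < 0x203;
		if ((pvr & 0xc000) == 0)	/* Cumulus */
			return dd < 0x103;
		return true;			/* unknown: assume affected */
	}

	int main(void)
	{
		/* Nimbus DD2.1 is below DD2.3, so it needs the fixup. */
		printf("%d\n", needs_tlbie_stq_workaround(0x0201));
		return 0;
	}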
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 67619b4..110eba4 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -811,6 +811,10 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
 	pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
 		pe->freeze_count, eeh_max_freezes);
 
+	eeh_for_each_pe(pe, tmp_pe)
+		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
+			edev->mode &= ~EEH_DEV_NO_HANDLER;
+
 	/* Walk the various device drivers attached to this slot through
 	 * a reset sequence, giving each an opportunity to do what it needs
 	 * to accomplish the reset.  Each child gets a report of the
@@ -1004,7 +1008,8 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
  */
 void eeh_handle_special_event(void)
 {
-	struct eeh_pe *pe, *phb_pe;
+	struct eeh_pe *pe, *phb_pe, *tmp_pe;
+	struct eeh_dev *edev, *tmp_edev;
 	struct pci_bus *bus;
 	struct pci_controller *hose;
 	unsigned long flags;
@@ -1075,6 +1080,10 @@ void eeh_handle_special_event(void)
 				    (phb_pe->state & EEH_PE_RECOVERING))
 					continue;
 
+				eeh_for_each_pe(pe, tmp_pe)
+					eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
+						edev->mode &= ~EEH_DEV_NO_HANDLER;
+
 				/* Notify all devices to be down */
 				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
 				eeh_set_channel_state(pe, pci_channel_io_perm_failure);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 06cc778..90af86f 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -520,6 +520,10 @@
 	RFI_TO_USER_OR_KERNEL
 9:
 	/* Deliver the machine check to host kernel in V mode. */
+BEGIN_FTR_SECTION
+	ld	r10,ORIG_GPR3(r1)
+	mtspr	SPRN_CFAR,r10
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 	MACHINE_CHECK_HANDLER_WINDUP
 	b	machine_check_pSeries
 
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 9168a24..3fb564f 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -906,6 +906,7 @@
 /*
  * This is where the main kernel code starts.
  */
+__REF
 start_here_multiplatform:
 	/* set up the TOC */
 	bl      relative_toc
@@ -981,6 +982,7 @@
 	RFI
 	b	.	/* prevent speculative execution */
 
+	.previous
 	/* This is where all platforms converge execution */
 
 start_here_common:
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index efdd16a..93e0677 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -45,6 +45,7 @@ static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
 					mce_ue_event_queue);
 
 static void machine_check_process_queued_event(struct irq_work *work);
+static void machine_check_ue_irq_work(struct irq_work *work);
 void machine_check_ue_event(struct machine_check_event *evt);
 static void machine_process_ue_event(struct work_struct *work);
 
@@ -52,6 +53,10 @@ static struct irq_work mce_event_process_work = {
         .func = machine_check_process_queued_event,
 };
 
+static struct irq_work mce_ue_event_irq_work = {
+	.func = machine_check_ue_irq_work,
+};
+
 DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
 
 static void mce_set_error_info(struct machine_check_event *mce,
@@ -208,6 +213,10 @@ void release_mce_event(void)
 	get_mce_event(NULL, true);
 }
 
+static void machine_check_ue_irq_work(struct irq_work *work)
+{
+	schedule_work(&mce_ue_event_work);
+}
 
 /*
  * Queue up the MCE event which then can be handled later.
@@ -225,7 +234,7 @@ void machine_check_ue_event(struct machine_check_event *evt)
 	memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));
 
 	/* Queue work to process this event later. */
-	schedule_work(&mce_ue_event_work);
+	irq_work_queue(&mce_ue_event_irq_work);
 }
 
 /*
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 3022d67..37a110b 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -39,6 +39,7 @@
 static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
 {
 	pte_t *ptep;
+	unsigned int shift;
 	unsigned long flags;
 	struct mm_struct *mm;
 
@@ -48,13 +49,18 @@ static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
 		mm = &init_mm;
 
 	local_irq_save(flags);
-	if (mm == current->mm)
-		ptep = find_current_mm_pte(mm->pgd, addr, NULL, NULL);
-	else
-		ptep = find_init_mm_pte(addr, NULL);
+	ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift);
 	local_irq_restore(flags);
+
 	if (!ptep || pte_special(*ptep))
 		return ULONG_MAX;
+
+	if (shift > PAGE_SHIFT) {
+		unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
+
+		return pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask)));
+	}
+
 	return pte_pfn(*ptep);
 }
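
For a hugepage the PTE's PFN names the first small page of the large
mapping, so the new code folds the faulting address's offset back in.
With shift = 24 (a 16MB page) and 4KB base pages, rpnmask keeps address
bits 12-23, exactly the subpage index the base PFN lacks. A quick
standalone check of that arithmetic:

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	int main(void)
	{
		unsigned int shift = 24;	/* 16MB hugepage */
		unsigned long rpnmask = (1UL << shift) - PAGE_SIZE;
		unsigned long addr = 0x12345678;

		/* Prints rpnmask=0xfff000, offset=0x345000. */
		printf("rpnmask=%#lx offset=%#lx\n",
		       rpnmask, addr & rpnmask);
		return 0;
	}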
 
@@ -339,7 +345,7 @@ static const struct mce_derror_table mce_p9_derror_table[] = {
   MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
 { 0, false, 0, 0, 0, 0 } };
 
-static int mce_find_instr_ea_and_pfn(struct pt_regs *regs, uint64_t *addr,
+static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
 					uint64_t *phys_addr)
 {
 	/*
@@ -530,7 +536,8 @@ static int mce_handle_derror(struct pt_regs *regs,
 			 * kernel/exception-64s.h
 			 */
 			if (get_paca()->in_mce < MAX_MCE_DEPTH)
-				mce_find_instr_ea_and_pfn(regs, addr, phys_addr);
+				mce_find_instr_ea_and_phys(regs, addr,
+							   phys_addr);
 		}
 		found = 1;
 	}
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 262ba94..1bf6aae 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -135,7 +135,7 @@
 	subf	r8,r6,r4		/* compute length */
 	add	r8,r8,r5		/* ensure we get enough */
 	lwz	r9,DCACHEL1LOGBLOCKSIZE(r10)	/* Get log-2 of dcache block size */
-	srw.	r8,r8,r9		/* compute line count */
+	srd.	r8,r8,r9		/* compute line count */
 	beqlr				/* nothing to do? */
 	mtctr	r8
 0:	dcbst	0,r6
@@ -153,7 +153,7 @@
 	subf	r8,r6,r4		/* compute length */
 	add	r8,r8,r5		/* ensure we get enough */
 	lwz	r9,DCACHEL1LOGBLOCKSIZE(r10)/* Get log-2 of dcache block size */
-	srw.	r8,r8,r9		/* compute line count */
+	srd.	r8,r8,r9		/* compute line count */
 	beqlr				/* nothing to do? */
 	sync
 	isync
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index d29f2dc..909c940 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -102,27 +102,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
 	}
 }
 
-static inline bool msr_tm_active(unsigned long msr)
-{
-	return MSR_TM_ACTIVE(msr);
-}
-
-static bool tm_active_with_fp(struct task_struct *tsk)
-{
-	return msr_tm_active(tsk->thread.regs->msr) &&
-		(tsk->thread.ckpt_regs.msr & MSR_FP);
-}
-
-static bool tm_active_with_altivec(struct task_struct *tsk)
-{
-	return msr_tm_active(tsk->thread.regs->msr) &&
-		(tsk->thread.ckpt_regs.msr & MSR_VEC);
-}
 #else
-static inline bool msr_tm_active(unsigned long msr) { return false; }
 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
-static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
-static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
 bool strict_msr_control;
@@ -247,7 +228,8 @@ void enable_kernel_fp(void)
 		 * giveup as this would save  to the 'live' structure not the
 		 * checkpointed structure.
 		 */
-		if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
+		if (!MSR_TM_ACTIVE(cpumsr) &&
+		     MSR_TM_ACTIVE(current->thread.regs->msr))
 			return;
 		__giveup_fpu(current);
 	}
@@ -256,7 +238,7 @@ EXPORT_SYMBOL(enable_kernel_fp);
 
 static int restore_fp(struct task_struct *tsk)
 {
-	if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
+	if (tsk->thread.load_fp) {
 		load_fp_state(&current->thread.fp_state);
 		current->thread.load_fp++;
 		return 1;
@@ -311,7 +293,8 @@ void enable_kernel_altivec(void)
 		 * giveup as this would save  to the 'live' structure not the
 		 * checkpointed structure.
 		 */
-		if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
+		if (!MSR_TM_ACTIVE(cpumsr) &&
+		     MSR_TM_ACTIVE(current->thread.regs->msr))
 			return;
 		__giveup_altivec(current);
 	}
@@ -337,8 +320,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 
 static int restore_altivec(struct task_struct *tsk)
 {
-	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
-		(tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
+	if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
 		load_vr_state(&tsk->thread.vr_state);
 		tsk->thread.used_vr = 1;
 		tsk->thread.load_vec++;
@@ -397,7 +379,8 @@ void enable_kernel_vsx(void)
 		 * giveup as this would save  to the 'live' structure not the
 		 * checkpointed structure.
 		 */
-		if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
+		if (!MSR_TM_ACTIVE(cpumsr) &&
+		     MSR_TM_ACTIVE(current->thread.regs->msr))
 			return;
 		__giveup_vsx(current);
 	}
@@ -499,13 +482,14 @@ void giveup_all(struct task_struct *tsk)
 	if (!tsk->thread.regs)
 		return;
 
+	check_if_tm_restore_required(tsk);
+
 	usermsr = tsk->thread.regs->msr;
 
 	if ((usermsr & msr_all_available) == 0)
 		return;
 
 	msr_check_and_set(msr_all_available);
-	check_if_tm_restore_required(tsk);
 
 	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
 
@@ -530,7 +514,7 @@ void restore_math(struct pt_regs *regs)
 {
 	unsigned long msr;
 
-	if (!msr_tm_active(regs->msr) &&
+	if (!MSR_TM_ACTIVE(regs->msr) &&
 		!current->thread.load_fp && !loadvec(current->thread))
 		return;
 
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 8afd146..9e41a9d 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -875,15 +875,17 @@ static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
 		return 0;
 
 	for_each_cpu(cpu, cpus) {
+		struct device *dev = get_cpu_device(cpu);
+
 		switch (state) {
 		case DOWN:
-			cpuret = cpu_down(cpu);
+			cpuret = device_offline(dev);
 			break;
 		case UP:
-			cpuret = cpu_up(cpu);
+			cpuret = device_online(dev);
 			break;
 		}
-		if (cpuret) {
+		if (cpuret < 0) {
 			pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
 					__func__,
 					((state == UP) ? "up" : "down"),
@@ -972,6 +974,8 @@ int rtas_ibm_suspend_me(u64 handle)
 	data.token = rtas_token("ibm,suspend-me");
 	data.complete = &done;
 
+	lock_device_hotplug();
+
 	/* All present CPUs must be online */
 	cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
 	cpuret = rtas_online_cpus_mask(offline_mask);
@@ -1003,6 +1007,7 @@ int rtas_ibm_suspend_me(u64 handle)
 				__func__);
 
 out:
+	unlock_device_hotplug();
 	free_cpumask_var(offline_mask);
 	return atomic_read(&data.error);
 }
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 02fe6d0..d5f351f 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -399,6 +399,7 @@ void system_reset_exception(struct pt_regs *regs)
 	if (debugger(regs))
 		goto out;
 
+	kmsg_dump(KMSG_DUMP_OOPS);
 	/*
 	 * A system reset is a request to dump, so we always send
 	 * it through the crashdump code (if fadump or kdump are
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 68e14af..a488c10 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -744,12 +744,15 @@ void kvmppc_rmap_reset(struct kvm *kvm)
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	slots = kvm_memslots(kvm);
 	kvm_for_each_memslot(memslot, slots) {
+		/* Mutual exclusion with kvm_unmap_hva_range etc. */
+		spin_lock(&kvm->mmu_lock);
 		/*
 		 * This assumes it is acceptable to lose reference and
 		 * change bits across a reset.
 		 */
 		memset(memslot->arch.rmap, 0,
 		       memslot->npages * sizeof(*memslot->arch.rmap));
+		spin_unlock(&kvm->mmu_lock);
 	}
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
 }
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 9a3f264..07a8004 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -602,8 +602,10 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 
 		if (kvmppc_gpa_to_ua(vcpu->kvm,
 				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
-				&ua, NULL))
-			return H_PARAMETER;
+				&ua, NULL)) {
+			ret = H_PARAMETER;
+			goto unlock_exit;
+		}
 
 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
 			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 6821ead..eb8b115 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -528,8 +528,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		ua = 0;
 		if (kvmppc_gpa_to_ua(vcpu->kvm,
 				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
-				&ua, NULL))
-			return H_PARAMETER;
+				&ua, NULL)) {
+			ret = H_PARAMETER;
+			goto unlock_exit;
+		}
 
 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
 			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
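
Both put_tce_indirect hunks fix the same slip: an early return from inside the
loop skipped the cleanup done at unlock_exit. A tiny user-space model of the
single-exit idiom (invented names; a pthread mutex stands in for the kernel
synchronization released at the label):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for kvmppc_gpa_to_ua(); invented for the example. */
static int translate(unsigned long gpa, unsigned long *ua)
{
	if (gpa & 0xfff)
		return -1;		/* reject unaligned addresses */
	*ua = gpa;
	return 0;
}

static long put_entries(const unsigned long *gpas, int n)
{
	unsigned long ua;
	long ret = 0;
	int i;

	pthread_mutex_lock(&lock);
	for (i = 0; i < n; i++) {
		if (translate(gpas[i], &ua)) {
			ret = -1;	/* was "return ..." - skipped the unlock */
			goto unlock_exit;
		}
		printf("entry %d -> %#lx\n", i, ua);
	}
unlock_exit:
	pthread_mutex_unlock(&lock);
	return ret;
}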
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 36b11c5..2654df2 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -110,7 +110,7 @@ static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
 	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
 	vcpu->arch.tar_tm = vcpu->arch.tar;
 	vcpu->arch.lr_tm = vcpu->arch.regs.link;
-	vcpu->arch.cr_tm = vcpu->arch.cr;
+	vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
 	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
 	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
 }
@@ -129,7 +129,7 @@ static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
 	vcpu->arch.tar = vcpu->arch.tar_tm;
 	vcpu->arch.regs.link = vcpu->arch.lr_tm;
-	vcpu->arch.cr = vcpu->arch.cr_tm;
+	vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
 	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
 	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
 }
@@ -141,7 +141,7 @@ static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
 	uint64_t texasr;
 
 	/* CR0 = 0 | MSR[TS] | 0 */
-	vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
+	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
 		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
 		 << CR0_SHIFT);
 
@@ -220,7 +220,7 @@ void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
 	tm_abort(ra_val);
 
 	/* CR0 = 0 | MSR[TS] | 0 */
-	vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
+	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
 		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
 		 << CR0_SHIFT);
 
@@ -494,8 +494,8 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 			if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
 				preempt_disable();
-				vcpu->arch.cr = (CR0_TBEGIN_FAILURE |
-				  (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)));
+				vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
+				  (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));
 
 				vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
 					(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 083dced..3ae3e8d 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -410,8 +410,8 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
 	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
 	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
-	pr_err("cr = %.8x  xer = %.16lx  dsisr = %.8x\n",
-	       vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
+	pr_err("cr = %.8lx  xer = %.16lx  dsisr = %.8x\n",
+	       vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
 	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
 	pr_err("fault dar = %.16lx dsisr = %.8x\n",
 	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
@@ -1407,7 +1407,14 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 		*val = get_reg_val(id, vcpu->arch.pspb);
 		break;
 	case KVM_REG_PPC_DPDES:
-		*val = get_reg_val(id, vcpu->arch.vcore->dpdes);
+		/*
+		 * On POWER9, where we are emulating msgsndp etc.,
+		 * we return 1 bit for each vcpu, which can come from
+		 * either vcore->dpdes or doorbell_request.
+		 * On POWER8, doorbell_request is 0.
+		 */
+		*val = get_reg_val(id, vcpu->arch.vcore->dpdes |
+				   vcpu->arch.doorbell_request);
 		break;
 	case KVM_REG_PPC_VTB:
 		*val = get_reg_val(id, vcpu->arch.vcore->vtb);
@@ -2550,7 +2557,7 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
 		if (!spin_trylock(&pvc->lock))
 			continue;
 		prepare_threads(pvc);
-		if (!pvc->n_runnable) {
+		if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
 			list_del_init(&pvc->preempt_list);
 			if (pvc->runner == NULL) {
 				pvc->vcore_state = VCORE_INACTIVE;
@@ -2571,15 +2578,20 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
 	spin_unlock(&lp->lock);
 }
 
-static bool recheck_signals(struct core_info *cip)
+static bool recheck_signals_and_mmu(struct core_info *cip)
 {
 	int sub, i;
 	struct kvm_vcpu *vcpu;
+	struct kvmppc_vcore *vc;
 
-	for (sub = 0; sub < cip->n_subcores; ++sub)
-		for_each_runnable_thread(i, vcpu, cip->vc[sub])
+	for (sub = 0; sub < cip->n_subcores; ++sub) {
+		vc = cip->vc[sub];
+		if (!vc->kvm->arch.mmu_ready)
+			return true;
+		for_each_runnable_thread(i, vcpu, vc)
 			if (signal_pending(vcpu->arch.run_task))
 				return true;
+	}
 	return false;
 }
 
@@ -2800,7 +2812,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	local_irq_disable();
 	hard_irq_disable();
 	if (lazy_irq_pending() || need_resched() ||
-	    recheck_signals(&core_info) || !vc->kvm->arch.mmu_ready) {
+	    recheck_signals_and_mmu(&core_info)) {
 		local_irq_enable();
 		vc->vcore_state = VCORE_INACTIVE;
 		/* Unlock all except the primary vcore */
@@ -3813,12 +3825,15 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 /* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
 int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
 {
+	kvmppc_rmap_reset(kvm);
+	kvm->arch.process_table = 0;
+	/* Mutual exclusion with kvm_unmap_hva_range etc. */
+	spin_lock(&kvm->mmu_lock);
+	kvm->arch.radix = 0;
+	spin_unlock(&kvm->mmu_lock);
 	kvmppc_free_radix(kvm);
 	kvmppc_update_lpcr(kvm, LPCR_VPM1,
 			   LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
-	kvmppc_rmap_reset(kvm);
-	kvm->arch.radix = 0;
-	kvm->arch.process_table = 0;
 	return 0;
 }
 
@@ -3831,10 +3846,14 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
 	if (err)
 		return err;
 
+	kvmppc_rmap_reset(kvm);
+	/* Mutual exclusion with kvm_unmap_hva_range etc. */
+	spin_lock(&kvm->mmu_lock);
+	kvm->arch.radix = 1;
+	spin_unlock(&kvm->mmu_lock);
 	kvmppc_free_hpt(&kvm->arch.hpt);
 	kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
 			   LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
-	kvm->arch.radix = 1;
 	return 0;
 }
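
Both switch_mmu hunks move the kvm->arch.radix flip under kvm->mmu_lock and only
free the old mode's tables afterwards. The point of the narrow critical section
(sketch below, assuming kernel context) is that kvm_unmap_hva_range() and the
other MMU-notifier callbacks test the flag under the same lock, so they observe
either the old MMU with its tables still allocated or the new one, never a freed
table:

#include <linux/kvm_host.h>

static void flip_mmu_mode(struct kvm *kvm, int radix)
{
	spin_lock(&kvm->mmu_lock);
	kvm->arch.radix = radix;	/* flag tested by the notifiers */
	spin_unlock(&kvm->mmu_lock);
	/* only now is it safe to free the other mode's tables */
}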
 
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index a67cf1c..7c68d83 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -452,7 +452,7 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
 				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
 		}
 
-		if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+		if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
 			/*
 			 * Need the extra ptesync to make sure we don't
 			 * re-order the tlbie
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 1d14046..f1878e1 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -56,6 +56,8 @@
 #define STACK_SLOT_DAWR		(SFS-56)
 #define STACK_SLOT_DAWRX	(SFS-64)
 #define STACK_SLOT_HFSCR	(SFS-72)
+#define STACK_SLOT_AMR		(SFS-80)
+#define STACK_SLOT_UAMOR	(SFS-88)
 
 /*
  * Call kvmppc_hv_entry in real mode.
@@ -760,11 +762,9 @@
 	mfspr	r5, SPRN_TIDR
 	mfspr	r6, SPRN_PSSCR
 	mfspr	r7, SPRN_PID
-	mfspr	r8, SPRN_IAMR
 	std	r5, STACK_SLOT_TID(r1)
 	std	r6, STACK_SLOT_PSSCR(r1)
 	std	r7, STACK_SLOT_PID(r1)
-	std	r8, STACK_SLOT_IAMR(r1)
 	mfspr	r5, SPRN_HFSCR
 	std	r5, STACK_SLOT_HFSCR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
@@ -772,11 +772,18 @@
 	mfspr	r5, SPRN_CIABR
 	mfspr	r6, SPRN_DAWR
 	mfspr	r7, SPRN_DAWRX
+	mfspr	r8, SPRN_IAMR
 	std	r5, STACK_SLOT_CIABR(r1)
 	std	r6, STACK_SLOT_DAWR(r1)
 	std	r7, STACK_SLOT_DAWRX(r1)
+	std	r8, STACK_SLOT_IAMR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
+	mfspr	r5, SPRN_AMR
+	std	r5, STACK_SLOT_AMR(r1)
+	mfspr	r6, SPRN_UAMOR
+	std	r6, STACK_SLOT_UAMOR(r1)
+
 BEGIN_FTR_SECTION
 	/* Set partition DABR */
 	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
@@ -1202,7 +1209,7 @@
 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 
 	ld	r5, VCPU_LR(r4)
-	lwz	r6, VCPU_CR(r4)
+	ld	r6, VCPU_CR(r4)
 	mtlr	r5
 	mtcr	r6
 
@@ -1313,7 +1320,7 @@
 	std	r3, VCPU_GPR(R12)(r9)
 	/* CR is in the high half of r12 */
 	srdi	r4, r12, 32
-	stw	r4, VCPU_CR(r9)
+	std	r4, VCPU_CR(r9)
 BEGIN_FTR_SECTION
 	ld	r3, HSTATE_CFAR(r13)
 	std	r3, VCPU_CFAR(r9)
@@ -1713,22 +1720,25 @@
 	mtspr	SPRN_PSPB, r0
 	mtspr	SPRN_WORT, r0
 BEGIN_FTR_SECTION
-	mtspr	SPRN_IAMR, r0
 	mtspr	SPRN_TCSCR, r0
 	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
 	li	r0, 1
 	sldi	r0, r0, 31
 	mtspr	SPRN_MMCRS, r0
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
-8:
 
-	/* Save and reset AMR and UAMOR before turning on the MMU */
+	/* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
+	ld	r8, STACK_SLOT_IAMR(r1)
+	mtspr	SPRN_IAMR, r8
+
+8:	/* Power7 jumps back in here */
 	mfspr	r5,SPRN_AMR
 	mfspr	r6,SPRN_UAMOR
 	std	r5,VCPU_AMR(r9)
 	std	r6,VCPU_UAMOR(r9)
-	li	r6,0
-	mtspr	SPRN_AMR,r6
+	ld	r5,STACK_SLOT_AMR(r1)
+	ld	r6,STACK_SLOT_UAMOR(r1)
+	mtspr	SPRN_AMR, r5
 	mtspr	SPRN_UAMOR, r6
 
 	/* Switch DSCR back to host value */
@@ -1897,11 +1907,9 @@
 	ld	r5, STACK_SLOT_TID(r1)
 	ld	r6, STACK_SLOT_PSSCR(r1)
 	ld	r7, STACK_SLOT_PID(r1)
-	ld	r8, STACK_SLOT_IAMR(r1)
 	mtspr	SPRN_TIDR, r5
 	mtspr	SPRN_PSSCR, r6
 	mtspr	SPRN_PID, r7
-	mtspr	SPRN_IAMR, r8
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 
 #ifdef CONFIG_PPC_RADIX_MMU
@@ -2895,29 +2903,39 @@
 kvm_cede_exit:
 	ld	r9, HSTATE_KVM_VCPU(r13)
 #ifdef CONFIG_KVM_XICS
-	/* Abort if we still have a pending escalation */
-	lbz	r5, VCPU_XIVE_ESC_ON(r9)
-	cmpwi	r5, 0
-	beq	1f
-	li	r0, 0
-	stb	r0, VCPU_CEDED(r9)
-1:	/* Enable XIVE escalation */
-	li	r5, XIVE_ESB_SET_PQ_00
-	mfmsr	r0
-	andi.	r0, r0, MSR_DR		/* in real mode? */
-	beq	1f
+	/* are we using XIVE with single escalation? */
 	ld	r10, VCPU_XIVE_ESC_VADDR(r9)
 	cmpdi	r10, 0
 	beq	3f
-	ldx	r0, r10, r5
+	li	r6, XIVE_ESB_SET_PQ_00
+	/*
+	 * If we still have a pending escalation, abort the cede,
+	 * and we must set PQ to 10 rather than 00 so that we don't
+	 * potentially end up with two entries for the escalation
+	 * interrupt in the XIVE interrupt queue.  In that case
+	 * we also don't want to set xive_esc_on to 1 here in
+	 * case we race with xive_esc_irq().
+	 */
+	lbz	r5, VCPU_XIVE_ESC_ON(r9)
+	cmpwi	r5, 0
+	beq	4f
+	li	r0, 0
+	stb	r0, VCPU_CEDED(r9)
+	li	r6, XIVE_ESB_SET_PQ_10
+	b	5f
+4:	li	r0, 1
+	stb	r0, VCPU_XIVE_ESC_ON(r9)
+	/* make sure store to xive_esc_on is seen before xive_esc_irq runs */
+	sync
+5:	/* Enable XIVE escalation */
+	mfmsr	r0
+	andi.	r0, r0, MSR_DR		/* in real mode? */
+	beq	1f
+	ldx	r0, r10, r6
 	b	2f
 1:	ld	r10, VCPU_XIVE_ESC_RADDR(r9)
-	cmpdi	r10, 0
-	beq	3f
-	ldcix	r0, r10, r5
+	ldcix	r0, r10, r6
 2:	sync
-	li	r0, 1
-	stb	r0, VCPU_XIVE_ESC_ON(r9)
 #endif /* CONFIG_KVM_XICS */
 3:	b	guest_exit_cont
 
diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c
index 0082850..31cd0f32 100644
--- a/arch/powerpc/kvm/book3s_hv_tm.c
+++ b/arch/powerpc/kvm/book3s_hv_tm.c
@@ -130,8 +130,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
 			return RESUME_GUEST;
 		}
 		/* Set CR0 to indicate previous transactional state */
-		vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
-			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
+		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
+			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
 		/* L=1 => tresume, L=0 => tsuspend */
 		if (instr & (1 << 21)) {
 			if (MSR_TM_SUSPENDED(msr))
@@ -174,8 +174,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
 		copy_from_checkpoint(vcpu);
 
 		/* Set CR0 to indicate previous transactional state */
-		vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
-			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
+		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
+			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
 		vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
 		return RESUME_GUEST;
 
@@ -204,8 +204,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
 		copy_to_checkpoint(vcpu);
 
 		/* Set CR0 to indicate previous transactional state */
-		vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
-			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
+		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
+			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
 		vcpu->arch.shregs.msr = msr | MSR_TS_S;
 		return RESUME_GUEST;
 	}
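
The shift change from 28 to 29 matches the "CR0 = 0 | MSR[TS] | 0" comment: CR0
occupies bits 31:28 of the 32-bit CR image, so the two-bit TS value (0b01
suspended, 0b10 transactional) belongs in bits 30:29, not 29:28. A quick
user-space check of the arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t ts = 2;	/* MSR[TS] = 0b10: transactional */

	/* fixed shift: TS lands in bits 30:29, CR0 nibble = 0b0100 */
	assert(((ts << 29) >> 28) == 0x4);
	/* old shift: TS landed in bits 29:28, CR0 nibble = 0b0010 */
	assert(((ts << 28) >> 28) == 0x2);
	return 0;
}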
diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
index b2c7c6f..3cf5863 100644
--- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
@@ -89,7 +89,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
 		if (instr & (1 << 21))
 			vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
 		/* Set CR0 to 0b0010 */
-		vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0x20000000;
+		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
+			0x20000000;
 		return 1;
 	}
 
@@ -105,5 +106,5 @@ void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu)
 	vcpu->arch.shregs.msr &= ~MSR_TS_MASK;	/* go to N state */
 	vcpu->arch.regs.nip = vcpu->arch.tfhar;
 	copy_from_checkpoint(vcpu);
-	vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000;
+	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 0xa0000000;
 }
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 614ebb4..de97022 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -167,7 +167,7 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
 	svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
 	svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
 	svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
-	svcpu->cr  = vcpu->arch.cr;
+	svcpu->cr  = vcpu->arch.regs.ccr;
 	svcpu->xer = vcpu->arch.regs.xer;
 	svcpu->ctr = vcpu->arch.regs.ctr;
 	svcpu->lr  = vcpu->arch.regs.link;
@@ -249,7 +249,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
 	vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
 	vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
-	vcpu->arch.cr  = svcpu->cr;
+	vcpu->arch.regs.ccr  = svcpu->cr;
 	vcpu->arch.regs.xer = svcpu->xer;
 	vcpu->arch.regs.ctr = svcpu->ctr;
 	vcpu->arch.regs.link  = svcpu->lr;
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index aae34f2..031f07f0 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -1037,20 +1037,22 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
 	/* Mask the VP IPI */
 	xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
 
-	/* Disable the VP */
-	xive_native_disable_vp(xc->vp_id);
-
-	/* Free the queues & associated interrupts */
+	/* Free escalations */
 	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
-		struct xive_q *q = &xc->queues[i];
-
-		/* Free the escalation irq */
 		if (xc->esc_virq[i]) {
 			free_irq(xc->esc_virq[i], vcpu);
 			irq_dispose_mapping(xc->esc_virq[i]);
 			kfree(xc->esc_virq_names[i]);
 		}
-		/* Free the queue */
+	}
+
+	/* Disable the VP */
+	xive_native_disable_vp(xc->vp_id);
+
+	/* Free the queues */
+	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
+		struct xive_q *q = &xc->queues[i];
+
 		xive_native_disable_queue(xc->vp_id, q, i);
 		if (q->qpage) {
 			free_pages((unsigned long)q->qpage,
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 612b7f6..4e5081e 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -186,7 +186,7 @@
 	 */
 	PPC_LL	r4, PACACURRENT(r13)
 	PPC_LL	r4, (THREAD + THREAD_KVM_VCPU)(r4)
-	stw	r10, VCPU_CR(r4)
+	PPC_STL	r10, VCPU_CR(r4)
 	PPC_STL r11, VCPU_GPR(R4)(r4)
 	PPC_STL	r5, VCPU_GPR(R5)(r4)
 	PPC_STL	r6, VCPU_GPR(R6)(r4)
@@ -296,7 +296,7 @@
 	PPC_STL	r4, VCPU_GPR(R4)(r11)
 	PPC_LL	r4, THREAD_NORMSAVE(0)(r10)
 	PPC_STL	r5, VCPU_GPR(R5)(r11)
-	stw	r13, VCPU_CR(r11)
+	PPC_STL	r13, VCPU_CR(r11)
 	mfspr	r5, \srr0
 	PPC_STL	r3, VCPU_GPR(R10)(r11)
 	PPC_LL	r3, THREAD_NORMSAVE(2)(r10)
@@ -323,7 +323,7 @@
 	PPC_STL	r4, VCPU_GPR(R4)(r11)
 	PPC_LL	r4, GPR9(r8)
 	PPC_STL	r5, VCPU_GPR(R5)(r11)
-	stw	r9, VCPU_CR(r11)
+	PPC_STL	r9, VCPU_CR(r11)
 	mfspr	r5, \srr0
 	PPC_STL	r3, VCPU_GPR(R8)(r11)
 	PPC_LL	r3, GPR10(r8)
@@ -647,7 +647,7 @@
 	PPC_LL	r3, VCPU_LR(r4)
 	PPC_LL	r5, VCPU_XER(r4)
 	PPC_LL	r6, VCPU_CTR(r4)
-	lwz	r7, VCPU_CR(r4)
+	PPC_LL	r7, VCPU_CR(r4)
 	PPC_LL	r8, VCPU_PC(r4)
 	PPC_LD(r9, VCPU_SHARED_MSR, r11)
 	PPC_LL	r0, VCPU_GPR(R0)(r4)
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index 75dce1e..f91b130 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -117,7 +117,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 
 	emulated = EMULATE_FAIL;
 	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
-	vcpu->arch.regs.ccr = vcpu->arch.cr;
 	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
 		int type = op.type & INSTR_TYPE_MASK;
 		int size = GETSIZE(op.type);
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index aaa28fd..0c13561 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -203,7 +203,7 @@ static inline unsigned long  ___tlbie(unsigned long vpn, int psize,
 
 static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize)
 {
-	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
 		/* Need the extra ptesync to ensure we don't reorder tlbie */
 		asm volatile("ptesync": : :"memory");
 		___tlbie(vpn, psize, apsize, ssize);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index f23a89d..b1007e9 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -37,6 +37,7 @@
 #include <linux/context_tracking.h>
 #include <linux/libfdt.h>
 #include <linux/pkeys.h>
+#include <linux/cpu.h>
 
 #include <asm/debugfs.h>
 #include <asm/processor.h>
@@ -1859,11 +1860,20 @@ void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 	 *
 	 * For guests on platforms before POWER9, we clamp the limit to 1G
 	 * to avoid some funky things such as RTAS bugs etc...
+	 *
+	 * On POWER9 we limit to 1TB in case the host erroneously told us that
+	 * the RMA was >1TB. Effective address bits 0:23 are treated as zero
+	 * (meaning the access is aliased to zero i.e. addr = addr % 1TB)
+	 * for virtual real mode addressing and so it doesn't make sense to
+	 * have an area larger than 1TB as it can't be addressed.
 	 */
 	if (!early_cpu_has_feature(CPU_FTR_HVMODE)) {
 		ppc64_rma_size = first_memblock_size;
 		if (!early_cpu_has_feature(CPU_FTR_ARCH_300))
 			ppc64_rma_size = min_t(u64, ppc64_rma_size, 0x40000000);
+		else
+			ppc64_rma_size = min_t(u64, ppc64_rma_size,
+					       1UL << SID_SHIFT_1T);
 
 		/* Finally limit subsequent allocations */
 		memblock_set_current_limit(ppc64_rma_size);
@@ -1882,10 +1892,16 @@ static int hpt_order_get(void *data, u64 *val)
 
 static int hpt_order_set(void *data, u64 val)
 {
+	int ret;
+
 	if (!mmu_hash_ops.resize_hpt)
 		return -ENODEV;
 
-	return mmu_hash_ops.resize_hpt(val);
+	cpus_read_lock();
+	ret = mmu_hash_ops.resize_hpt(val);
+	cpus_read_unlock();
+
+	return ret;
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(fops_hpt_order, hpt_order_get, hpt_order_set, "%llu\n");
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index c879979..3ea4c1f 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -521,14 +521,6 @@ void __init radix__early_init_devtree(void)
 	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
 	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
 found:
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
-		/*
-		 * map vmemmap using 2M if available
-		 */
-		mmu_vmemmap_psize = MMU_PAGE_2M;
-	}
-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
 	return;
 }
 
@@ -567,7 +559,13 @@ void __init radix__early_init_mmu(void)
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 	/* vmemmap mapping */
-	mmu_vmemmap_psize = mmu_virtual_psize;
+	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
+		/*
+		 * map vmemmap using 2M if available
+		 */
+		mmu_vmemmap_psize = MMU_PAGE_2M;
+	} else
+		mmu_vmemmap_psize = mmu_virtual_psize;
 #endif
 	/*
 	 * initialize page table size
diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c
index b271b28..25a8dd9 100644
--- a/arch/powerpc/mm/pkeys.c
+++ b/arch/powerpc/mm/pkeys.c
@@ -414,3 +414,13 @@ bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
 
 	return pkey_access_permitted(vma_pkey(vma), write, execute);
 }
+
+void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm)
+{
+	if (static_branch_likely(&pkey_disabled))
+		return;
+
+	/* Duplicate the oldmm pkey state in mm: */
+	mm_pkey_allocation_map(mm) = mm_pkey_allocation_map(oldmm);
+	mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
+}
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index fef3e1e..0cddae4 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -220,7 +220,7 @@ static inline void fixup_tlbie(void)
 	unsigned long pid = 0;
 	unsigned long va = ((1UL << 52) - 1);
 
-	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
 		asm volatile("ptesync": : :"memory");
 		__tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
 	}
@@ -230,7 +230,7 @@ static inline void fixup_tlbie_lpid(unsigned long lpid)
 {
 	unsigned long va = ((1UL << 52) - 1);
 
-	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
 		asm volatile("ptesync": : :"memory");
 		__tlbie_lpid_va(va, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
 	}
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index 828f665..649fb26 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -57,9 +57,9 @@ static void export_imc_mode_and_cmd(struct device_node *node,
 				    struct imc_pmu *pmu_ptr)
 {
 	static u64 loc, *imc_mode_addr, *imc_cmd_addr;
-	int chip = 0, nid;
 	char mode[16], cmd[16];
 	u32 cb_offset;
+	struct imc_mem_info *ptr = pmu_ptr->mem_info;
 
 	imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root);
 
@@ -73,20 +73,20 @@ static void export_imc_mode_and_cmd(struct device_node *node,
 	if (of_property_read_u32(node, "cb_offset", &cb_offset))
 		cb_offset = IMC_CNTL_BLK_OFFSET;
 
-	for_each_node(nid) {
-		loc = (u64)(pmu_ptr->mem_info[chip].vbase) + cb_offset;
+	while (ptr->vbase != NULL) {
+		loc = (u64)(ptr->vbase) + cb_offset;
 		imc_mode_addr = (u64 *)(loc + IMC_CNTL_BLK_MODE_OFFSET);
-		sprintf(mode, "imc_mode_%d", nid);
+		sprintf(mode, "imc_mode_%d", (u32)(ptr->id));
 		if (!imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent,
 					    imc_mode_addr))
 			goto err;
 
 		imc_cmd_addr = (u64 *)(loc + IMC_CNTL_BLK_CMD_OFFSET);
-		sprintf(cmd, "imc_cmd_%d", nid);
+		sprintf(cmd, "imc_cmd_%d", (u32)(ptr->id));
 		if (!imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent,
 					    imc_cmd_addr))
 			goto err;
-		chip++;
+		ptr++;
 	}
 	return;
 
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index f4875fe..74215eb 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -303,7 +303,7 @@
 OPAL_CALL(opal_xive_donate_page,		OPAL_XIVE_DONATE_PAGE);
 OPAL_CALL(opal_xive_alloc_vp_block,		OPAL_XIVE_ALLOCATE_VP_BLOCK);
 OPAL_CALL(opal_xive_free_vp_block,		OPAL_XIVE_FREE_VP_BLOCK);
-OPAL_CALL(opal_xive_allocate_irq,		OPAL_XIVE_ALLOCATE_IRQ);
+OPAL_CALL(opal_xive_allocate_irq_raw,		OPAL_XIVE_ALLOCATE_IRQ);
 OPAL_CALL(opal_xive_free_irq,			OPAL_XIVE_FREE_IRQ);
 OPAL_CALL(opal_xive_get_vp_info,		OPAL_XIVE_GET_VP_INFO);
 OPAL_CALL(opal_xive_set_vp_info,		OPAL_XIVE_SET_VP_INFO);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 38fe408..edf9032 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -680,7 +680,10 @@ static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj,
 				       bin_attr->size);
 }
 
-static BIN_ATTR_RO(symbol_map, 0);
+static struct bin_attribute symbol_map_attr = {
+	.attr = {.name = "symbol_map", .mode = 0400},
+	.read = symbol_map_read
+};
 
 static void opal_export_symmap(void)
 {
@@ -697,10 +700,10 @@ static void opal_export_symmap(void)
 		return;
 
 	/* Setup attributes */
-	bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0]));
-	bin_attr_symbol_map.size = be64_to_cpu(syms[1]);
+	symbol_map_attr.private = __va(be64_to_cpu(syms[0]));
+	symbol_map_attr.size = be64_to_cpu(syms[1]);
 
-	rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map);
+	rc = sysfs_create_bin_file(opal_kobj, &symbol_map_attr);
 	if (rc)
 		pr_warn("Error %d creating OPAL symbols file\n", rc);
 }
diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
index f5adb6b..15a5671 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
@@ -36,7 +36,8 @@ static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift)
 	struct page *tce_mem = NULL;
 	__be64 *addr;
 
-	tce_mem = alloc_pages_node(nid, GFP_KERNEL, shift - PAGE_SHIFT);
+	tce_mem = alloc_pages_node(nid, GFP_ATOMIC | __GFP_NOWARN,
+			shift - PAGE_SHIFT);
 	if (!tce_mem) {
 		pr_err("Failed to allocate a TCE memory, level shift=%d\n",
 				shift);
@@ -48,6 +49,9 @@ static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift)
 	return addr;
 }
 
+static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
+		unsigned long size, unsigned int levels);
+
 static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
 {
 	__be64 *tmp = user ? tbl->it_userspace : (__be64 *) tbl->it_base;
@@ -57,9 +61,9 @@ static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
 
 	while (level) {
 		int n = (idx & mask) >> (level * shift);
-		unsigned long tce;
+		unsigned long oldtce, tce = be64_to_cpu(READ_ONCE(tmp[n]));
 
-		if (tmp[n] == 0) {
+		if (!tce) {
 			__be64 *tmp2;
 
 			if (!alloc)
@@ -70,10 +74,15 @@ static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
 			if (!tmp2)
 				return NULL;
 
-			tmp[n] = cpu_to_be64(__pa(tmp2) |
-					TCE_PCI_READ | TCE_PCI_WRITE);
+			tce = __pa(tmp2) | TCE_PCI_READ | TCE_PCI_WRITE;
+			oldtce = be64_to_cpu(cmpxchg(&tmp[n], 0,
+					cpu_to_be64(tce)));
+			if (oldtce) {
+				pnv_pci_ioda2_table_do_free_pages(tmp2,
+					ilog2(tbl->it_level_size) + 3, 1);
+				tce = oldtce;
+			}
 		}
-		tce = be64_to_cpu(tmp[n]);
 
 		tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
 		idx &= ~mask;
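
The pnv_tce() hunk above replaces a plain store with a cmpxchg so that two CPUs
racing to populate the same intermediate TCE level end up sharing one page: the
loser frees its candidate and adopts the winner's (the switch to GFP_ATOMIC
suits the same on-demand path, which can now run in contexts that cannot
sleep). A user-space model of that publish-or-adopt step, using C11 atomics in
place of the kernel cmpxchg:

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

static uint64_t *install_level(_Atomic(uintptr_t) *slot)
{
	uintptr_t cur = atomic_load(slot);

	if (!cur) {
		uint64_t *page = calloc(512, sizeof(*page));
		uintptr_t expected = 0;

		if (!page)
			return NULL;
		/* publish: exactly one racer wins the 0 -> page transition */
		if (atomic_compare_exchange_strong(slot, &expected,
						   (uintptr_t)page)) {
			cur = (uintptr_t)page;
		} else {
			free(page);	/* lost the race: adopt the winner's page */
			cur = expected;
		}
	}
	return (uint64_t *)cur;
}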
@@ -161,6 +170,9 @@ void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
 
 		if (ptce)
 			*ptce = cpu_to_be64(0);
+		else
+			/* Skip the rest of the level */
+			i |= tbl->it_level_size - 1;
 	}
 }
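
The pnv_tce_free() hunk handles a hole in the table: when an intermediate level
was never allocated, OR-ing the index with it_level_size - 1 jumps straight to
that level's last entry, so the loop's i++ moves on to the next level instead of
probing every missing slot. The trick relies on it_level_size being a power of
two:

#include <assert.h>

int main(void)
{
	const long level_size = 512;	/* entries per level; power of two */
	long i = 1024 + 7;		/* index inside an unallocated level */

	i |= level_size - 1;		/* jump to the level's last index */
	assert(i == 1024 + 511);
	return 0;
}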
 
@@ -260,7 +272,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 	unsigned int table_shift = max_t(unsigned int, entries_shift + 3,
 			PAGE_SHIFT);
 	const unsigned long tce_table_size = 1UL << table_shift;
-	unsigned int tmplevels = levels;
 
 	if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
 		return -EINVAL;
@@ -268,9 +279,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 	if (!is_power_of_2(window_size))
 		return -EINVAL;
 
-	if (alloc_userspace_copy && (window_size > (1ULL << 32)))
-		tmplevels = 1;
-
 	/* Adjust direct table size from window_size and levels */
 	entries_shift = (entries_shift + levels - 1) / levels;
 	level_shift = entries_shift + 3;
@@ -281,7 +289,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 
 	/* Allocate TCE table */
 	addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
-			tmplevels, tce_table_size, &offset, &total_allocated);
+			1, tce_table_size, &offset, &total_allocated);
 
 	/* addr==NULL means that the first level allocation failed */
 	if (!addr)
@@ -292,18 +300,18 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 	 * we did not allocate as much as we wanted,
 	 * release partially allocated table.
 	 */
-	if (tmplevels == levels && offset < tce_table_size)
+	if (levels == 1 && offset < tce_table_size)
 		goto free_tces_exit;
 
 	/* Allocate userspace view of the TCE table */
 	if (alloc_userspace_copy) {
 		offset = 0;
 		uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
-				tmplevels, tce_table_size, &offset,
+				1, tce_table_size, &offset,
 				&total_allocated_uas);
 		if (!uas)
 			goto free_tces_exit;
-		if (tmplevels == levels && (offset < tce_table_size ||
+		if (levels == 1 && (offset < tce_table_size ||
 				total_allocated_uas != total_allocated))
 			goto free_uas_exit;
 	}
@@ -318,7 +326,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 
 	pr_debug("Created TCE table: ws=%08llx ts=%lx @%08llx base=%lx uas=%p levels=%d/%d\n",
 			window_size, tce_table_size, bus_offset, tbl->it_base,
-			tbl->it_userspace, tmplevels, levels);
+			tbl->it_userspace, 1, levels);
 
 	return 0;
 
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 8b37b28..e302aa0 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -243,7 +243,7 @@ extern void pnv_npu_release_ownership(struct pnv_ioda_pe *npe);
 extern int pnv_npu2_init(struct pnv_phb *phb);
 
 /* pci-ioda-tce.c */
-#define POWERNV_IOMMU_DEFAULT_LEVELS	1
+#define POWERNV_IOMMU_DEFAULT_LEVELS	2
 #define POWERNV_IOMMU_MAX_LEVELS	5
 
 extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 9e52b68..ea602f7 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -647,7 +647,10 @@ static int pseries_lpar_resize_hpt_commit(void *data)
 	return 0;
 }
 
-/* Must be called in user context */
+/*
+ * Must be called in process context. The caller must hold the
+ * cpus_lock.
+ */
 static int pseries_lpar_resize_hpt(unsigned long shift)
 {
 	struct hpt_resize_state state = {
@@ -699,7 +702,8 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
 
 	t1 = ktime_get();
 
-	rc = stop_machine(pseries_lpar_resize_hpt_commit, &state, NULL);
+	rc = stop_machine_cpuslocked(pseries_lpar_resize_hpt_commit,
+				     &state, NULL);
 
 	t2 = ktime_get();
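
hpt_order_set() (earlier in this series) now takes cpus_read_lock() before
calling the resize op, so the commit step here must use
stop_machine_cpuslocked(): plain stop_machine() re-acquires the hotplug lock
itself and would self-deadlock. A minimal sketch of the calling convention,
assuming kernel context:

#include <linux/cpu.h>
#include <linux/stop_machine.h>

static int resize_commit(void *arg)
{
	return 0;	/* stand-in for pseries_lpar_resize_hpt_commit() */
}

static int resize_with_hotplug_lock(void *arg)
{
	int ret;

	cpus_read_lock();
	/* _cpuslocked variant: caller already holds the hotplug lock */
	ret = stop_machine_cpuslocked(resize_commit, arg, NULL);
	cpus_read_unlock();
	return ret;
}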
 
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 7b60fcf..e4ea713 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -12,6 +12,7 @@
 #include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/kobject.h>
+#include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/stat.h>
 #include <linux/completion.h>
@@ -209,7 +210,11 @@ static int update_dt_node(__be32 phandle, s32 scope)
 
 				prop_data += vd;
 			}
+
+			cond_resched();
 		}
+
+		cond_resched();
 	} while (rtas_rc == 1);
 
 	of_node_put(dn);
@@ -318,8 +323,12 @@ int pseries_devicetree_update(s32 scope)
 					add_dt_node(phandle, drc_index);
 					break;
 				}
+
+				cond_resched();
 			}
 		}
+
+		cond_resched();
 	} while (rc == 1);
 
 	kfree(rtas_buf);
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index ba1791f..67f4915 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -325,6 +325,9 @@ static void pseries_lpar_idle(void)
 	 * low power mode by ceding processor to hypervisor
 	 */
 
+	if (!prep_irq_for_idle())
+		return;
+
 	/* Indicate to hypervisor that we are idle. */
 	get_lppaca()->idle = 1;
 
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 5b20a67..6d5b2802 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -235,6 +235,17 @@ static bool xive_native_match(struct device_node *node)
 	return of_device_is_compatible(node, "ibm,opal-xive-vc");
 }
 
+static s64 opal_xive_allocate_irq(u32 chip_id)
+{
+	s64 irq = opal_xive_allocate_irq_raw(chip_id);
+
+	/*
+	 * Old versions of skiboot can incorrectly return 0xffffffff to
+	 * indicate no space, fix it up here.
+	 */
+	return irq == 0xffffffff ? OPAL_RESOURCE : irq;
+}
+
 #ifdef CONFIG_SMP
 static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
 {
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 74cfc1b..bb5db7b 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2497,13 +2497,16 @@ static void dump_pacas(void)
 static void dump_one_xive(int cpu)
 {
 	unsigned int hwid = get_hard_smp_processor_id(cpu);
+	bool hv = cpu_has_feature(CPU_FTR_HVMODE);
 
-	opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
-	opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
-	opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
-	opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
-	opal_xive_dump(XIVE_DUMP_VP, hwid);
-	opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
+	if (hv) {
+		opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
+		opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
+		opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
+		opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
+		opal_xive_dump(XIVE_DUMP_VP, hwid);
+		opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
+	}
 
 	if (setjmp(bus_error_jmp) != 0) {
 		catch_memory_errors = 0;
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index dd6b05b..d911a8c 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -23,7 +23,7 @@ extern void __fstate_restore(struct task_struct *restore_from);
 
 static inline void __fstate_clean(struct pt_regs *regs)
 {
-	regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
+	regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
 }
 
 static inline void fstate_save(struct task_struct *task,
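
The one-character fix above matters: with |=, the old SR_FS bits (e.g. DIRTY,
0b11) were OR-ed back on top of the CLEAN value, so the field never actually
changed. A user-space check using the riscv encodings (assumed here: FS is bits
14:13, CLEAN = 0b10, DIRTY = 0b11):

#include <assert.h>
#include <stdint.h>

#define SR_FS		0x6000UL
#define SR_FS_CLEAN	0x4000UL
#define SR_FS_DIRTY	0x6000UL

int main(void)
{
	uint64_t sstatus = SR_FS_DIRTY;
	uint64_t buggy = sstatus | ((sstatus & ~SR_FS) | SR_FS_CLEAN);
	uint64_t fixed = (sstatus & ~SR_FS) | SR_FS_CLEAN;

	assert((buggy & SR_FS) == SR_FS_DIRTY);	/* CLEAN never stuck */
	assert((fixed & SR_FS) == SR_FS_CLEAN);
	return 0;
}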
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index fa2c08e..a03821b 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -171,9 +171,13 @@
 	move a1, s4 /* scause */
 	tail do_IRQ
 1:
-	/* Exceptions run with interrupts enabled */
+	/* Exceptions run with interrupts enabled or disabled
+	   depending on the state of sstatus.SR_SPIE */
+	andi t0, s1, SR_SPIE
+	beqz t0, 1f
 	csrs sstatus, SR_SIE
 
+1:
 	/* Handle syscalls */
 	li t0, EXC_SYSCALL
 	beq s4, t0, handle_syscall
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index c433f6d..a840b7d 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -132,7 +132,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 {
 	unsigned long return_hooker = (unsigned long)&return_to_handler;
 	unsigned long old;
-	int err;
 
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 8ff7cb3..2bc1891 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -585,6 +585,9 @@ static int xts_aes_encrypt(struct blkcipher_desc *desc,
 	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
+	if (!nbytes)
+		return -EINVAL;
+
 	if (unlikely(!xts_ctx->fc))
 		return xts_fallback_encrypt(desc, dst, src, nbytes);
 
@@ -599,6 +602,9 @@ static int xts_aes_decrypt(struct blkcipher_desc *desc,
 	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
+	if (!nbytes)
+		return -EINVAL;
+
 	if (unlikely(!xts_ctx->fc))
 		return xts_fallback_decrypt(desc, dst, src, nbytes);
 
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index c681329..e4d17d9 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -269,7 +269,7 @@ static int hypfs_show_options(struct seq_file *s, struct dentry *root)
 static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct inode *root_inode;
-	struct dentry *root_dentry;
+	struct dentry *root_dentry, *update_file;
 	int rc = 0;
 	struct hypfs_sb_info *sbi;
 
@@ -300,9 +300,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
 		rc = hypfs_diag_create_files(root_dentry);
 	if (rc)
 		return rc;
-	sbi->update_file = hypfs_create_update_file(root_dentry);
-	if (IS_ERR(sbi->update_file))
-		return PTR_ERR(sbi->update_file);
+	update_file = hypfs_create_update_file(root_dentry);
+	if (IS_ERR(update_file))
+		return PTR_ERR(update_file);
+	sbi->update_file = update_file;
 	hypfs_update_update(sb);
 	pr_info("Hypervisor filesystem mounted\n");
 	return 0;
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 6e758bb..99ef537 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -183,20 +183,30 @@ unsigned long get_wchan(struct task_struct *p)
 
 	if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
 		return 0;
+
+	if (!try_get_task_stack(p))
+		return 0;
+
 	low = task_stack_page(p);
 	high = (struct stack_frame *) task_pt_regs(p);
 	sf = (struct stack_frame *) p->thread.ksp;
-	if (sf <= low || sf > high)
-		return 0;
+	if (sf <= low || sf > high) {
+		return_address = 0;
+		goto out;
+	}
 	for (count = 0; count < 16; count++) {
 		sf = (struct stack_frame *) sf->back_chain;
-		if (sf <= low || sf > high)
-			return 0;
+		if (sf <= low || sf > high) {
+			return_address = 0;
+			goto out;
+		}
 		return_address = sf->gprs[8];
 		if (!in_sched_functions(return_address))
-			return return_address;
+			goto out;
 	}
-	return 0;
+out:
+	put_task_stack(p);
+	return return_address;
 }
 
 unsigned long arch_align_stack(unsigned long sp)
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index e8184a1..7b96888 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -311,7 +311,8 @@ int arch_update_cpu_topology(void)
 	on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
 	for_each_online_cpu(cpu) {
 		dev = get_cpu_device(cpu);
-		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+		if (dev)
+			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
 	}
 	return rc;
 }
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index b43f8d3..18ede6e 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -31,10 +31,9 @@
 SECTIONS
 {
 	. = 0x100000;
-	_stext = .;		/* Start of text section */
 	.text : {
-		/* Text and read-only data */
-		_text = .;
+		_stext = .;		/* Start of text section */
+		_text = .;		/* Text and read-only data */
 		HEAD_TEXT
 		TEXT_TEXT
 		SCHED_TEXT
@@ -46,11 +45,10 @@
 		*(.text.*_indirect_*)
 		*(.fixup)
 		*(.gnu.warning)
+		. = ALIGN(PAGE_SIZE);
+		_etext = .;		/* End of text section */
 	} :text = 0x0700
 
-	. = ALIGN(PAGE_SIZE);
-	_etext = .;		/* End of text section */
-
 	NOTES :text :note
 
 	.dummy : { *(.dummy) } :data
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index fcb55b0..05ea466 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1879,6 +1879,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
 	case KVM_S390_MCHK:
 		irq->u.mchk.mcic = s390int->parm64;
 		break;
+	case KVM_S390_INT_PFAULT_INIT:
+		irq->u.ext.ext_params = s390int->parm;
+		irq->u.ext.ext_params2 = s390int->parm64;
+		break;
+	case KVM_S390_RESTART:
+	case KVM_S390_INT_CLOCK_COMP:
+	case KVM_S390_INT_CPU_TIMER:
+		break;
+	default:
+		return -EINVAL;
 	}
 	return 0;
 }
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index fc7de27..fac1d4e 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -928,6 +928,8 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
 	/* mark all the pages in active slots as dirty */
 	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
 		ms = slots->memslots + slotnr;
+		if (!ms->dirty_bitmap)
+			return -EINVAL;
 		/*
 		 * The second half of the bitmap is only used on x86,
 		 * and would be wasted otherwise, so we put it to good
@@ -3888,7 +3890,7 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
 	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
 				    | KVM_S390_MEMOP_F_CHECK_ONLY;
 
-	if (mop->flags & ~supported_flags)
+	if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
 		return -EINVAL;
 
 	if (mop->size > MEM_OP_MAX_SIZE)
@@ -3956,7 +3958,7 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
 	}
 	case KVM_S390_INTERRUPT: {
 		struct kvm_s390_interrupt s390int;
-		struct kvm_s390_irq s390irq;
+		struct kvm_s390_irq s390irq = {};
 
 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
 			return -EFAULT;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index d7052cb..2617e42 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -841,7 +841,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 		break;
 	case BPF_ALU64 | BPF_NEG: /* dst = -dst */
 		/* lcgr %dst,%dst */
-		EMIT4(0xb9130000, dst_reg, dst_reg);
+		EMIT4(0xb9030000, dst_reg, dst_reg);
 		break;
 	/*
 	 * BPF_FROM_BE/LE
@@ -1015,8 +1015,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 		/* llgf %w1,map.max_entries(%b2) */
 		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
 			      offsetof(struct bpf_array, map.max_entries));
-		/* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
-		EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
+		/* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
+		EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
 				  REG_W1, 0, 0xa);
 
 		/*
@@ -1042,8 +1042,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 		 *         goto out;
 		 */
 
-		/* sllg %r1,%b3,3: %r1 = index * 8 */
-		EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
+		/* llgfr %r1,%b3: %r1 = (u32) index */
+		EMIT4(0xb9160000, REG_1, BPF_REG_3);
+		/* sllg %r1,%r1,3: %r1 *= 8 */
+		EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
 		/* lg %r1,prog(%b2,%r1) */
 		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
 			      REG_1, offsetof(struct bpf_array, ptrs));
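
The tail-call fix follows BPF semantics, where the index is a u32: the old
clgrj/sllg pair compared and scaled all 64 bits of the register, so an index
with stray high bits (e.g. 2^32 + 3) was rejected even though the interpreter
and other JITs truncate it to 3 and take the call. A user-space model of the
corrected bounds check:

#include <assert.h>
#include <stdint.h>

static long tail_call_slot(uint64_t idx, uint32_t max_entries)
{
	uint32_t i = (uint32_t)idx;	/* llgfr: zero-extend the low 32 bits */

	if (i >= max_entries)		/* clrj: 32-bit unsigned compare */
		return -1;
	return (long)i * 8;		/* sllg: scale by pointer size */
}

int main(void)
{
	assert(tail_call_slot((1ULL << 32) | 3, 16) == 24);
	return 0;
}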
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
index d9ff3b4..2569ffc 100644
--- a/arch/sh/kernel/hw_breakpoint.c
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -160,6 +160,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
 	switch (sh_type) {
 	case SH_BREAKPOINT_READ:
 		*gen_type = HW_BREAKPOINT_R;
+		break;
 	case SH_BREAKPOINT_WRITE:
 		*gen_type = HW_BREAKPOINT_W;
 		break;
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 45bb65d..5e400c7 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -38,6 +38,7 @@
 
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
+REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
 export REALMODE_CFLAGS
 
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
index f8debf7..76e1edf 100644
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -73,6 +73,8 @@ static unsigned long find_trampoline_placement(void)
 
 	/* Find the first usable memory region under bios_start. */
 	for (i = boot_params->e820_entries - 1; i >= 0; i--) {
+		unsigned long new = bios_start;
+
 		entry = &boot_params->e820_table[i];
 
 		/* Skip all entries above bios_start. */
@@ -85,15 +87,20 @@ static unsigned long find_trampoline_placement(void)
 
 		/* Adjust bios_start to the end of the entry if needed. */
 		if (bios_start > entry->addr + entry->size)
-			bios_start = entry->addr + entry->size;
+			new = entry->addr + entry->size;
 
 		/* Keep bios_start page-aligned. */
-		bios_start = round_down(bios_start, PAGE_SIZE);
+		new = round_down(new, PAGE_SIZE);
 
 		/* Skip the entry if it's too small. */
-		if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr)
+		if (new - TRAMPOLINE_32BIT_SIZE < entry->addr)
 			continue;
 
+		/* Protect against underflow. */
+		if (new - TRAMPOLINE_32BIT_SIZE > bios_start)
+			break;
+
+		bios_start = new;
 		break;
 	}
 
diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig
index c3ab808..e2b132a 100644
--- a/arch/x86/configs/x86_64_cuttlefish_defconfig
+++ b/arch/x86/configs/x86_64_cuttlefish_defconfig
@@ -30,6 +30,7 @@
 # CONFIG_PCSPKR_PLATFORM is not set
 CONFIG_KALLSYMS_ALL=y
 CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_EMBEDDED=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
@@ -203,6 +204,7 @@
 CONFIG_NET_CLS_ACT=y
 CONFIG_VSOCKETS=y
 CONFIG_VIRTIO_VSOCKETS=y
+CONFIG_BPF_JIT=y
 CONFIG_CFG80211=y
 CONFIG_MAC80211=y
 CONFIG_RFKILL=y
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index d50bb4d..80c6d84 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -672,10 +672,17 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
 
 	throttle = perf_event_overflow(event, &data, &regs);
 out:
-	if (throttle)
+	if (throttle) {
 		perf_ibs_stop(event, 0);
-	else
-		perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
+	} else {
+		period >>= 4;
+
+		if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
+		    (*config & IBS_OP_CNT_CTL))
+			period |= *config & IBS_OP_CUR_CNT_RAND;
+
+		perf_ibs_enable_event(perf_ibs, hwc, period);
+	}
 
 	perf_event_update_userpage(event);
 
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index db5a2ba..2dd8b0d 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3319,6 +3319,11 @@ static u64 bdw_limit_period(struct perf_event *event, u64 left)
 	return left;
 }
 
+static u64 nhm_limit_period(struct perf_event *event, u64 left)
+{
+	return max(left, 32ULL);
+}
+
 PMU_FORMAT_ATTR(event,	"config:0-7"	);
 PMU_FORMAT_ATTR(umask,	"config:8-15"	);
 PMU_FORMAT_ATTR(edge,	"config:18"	);
@@ -4115,6 +4120,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
 		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
 		x86_pmu.extra_regs = intel_nehalem_extra_regs;
+		x86_pmu.limit_period = nhm_limit_period;
 
 		x86_pmu.cpu_events = nhm_events_attrs;
 
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index ef5f29f..2f34d52 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -37,12 +37,14 @@ static inline int fill_gva_list(u64 gva_list[], int offset,
 		 * Lower 12 bits encode the number of additional
 		 * pages to flush (in addition to the 'cur' page).
 		 */
-		if (diff >= HV_TLB_FLUSH_UNIT)
+		if (diff >= HV_TLB_FLUSH_UNIT) {
 			gva_list[gva_n] |= ~PAGE_MASK;
-		else if (diff)
+			cur += HV_TLB_FLUSH_UNIT;
+		} else if (diff) {
 			gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
+			cur = end;
+		}
 
-		cur += HV_TLB_FLUSH_UNIT;
 		gva_n++;
 
 	} while (cur < end);
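
Before this fix, fill_gva_list() advanced cur by a full HV_TLB_FLUSH_UNIT even
when it had only encoded a partial tail, so cur could run past end and the
wrapping 64-bit subtraction produced a bogus extra entry. A user-space model of
the corrected loop (FLUSH_UNIT is an assumed stand-in value):

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE	4096UL
#define FLUSH_UNIT	(4096UL * PAGE_SIZE)	/* stand-in for HV_TLB_FLUSH_UNIT */

static int count_entries(uint64_t start, uint64_t end)
{
	uint64_t cur = start;
	int n = 0;

	do {
		uint64_t diff = end - cur;

		if (diff >= FLUSH_UNIT)
			cur += FLUSH_UNIT;	/* full unit consumed */
		else if (diff)
			cur = end;		/* partial tail: stop exactly at end */
		n++;
	} while (cur < end);
	return n;
}

int main(void)
{
	/* one full unit plus one page -> exactly two list entries */
	assert(count_entries(0, FLUSH_UNIT + PAGE_SIZE) == 2);
	return 0;
}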
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index a07ffd2..8fa49cf 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -18,6 +18,20 @@
  * Note: efi_info is commonly left uninitialized, but that field has a
  * private magic, so it is better to leave it unchanged.
  */
+
+#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
+
+#define BOOT_PARAM_PRESERVE(struct_member)				\
+	{								\
+		.start = offsetof(struct boot_params, struct_member),	\
+		.len   = sizeof_mbr(struct boot_params, struct_member),	\
+	}
+
+struct boot_params_to_save {
+	unsigned int start;
+	unsigned int len;
+};
+
 static void sanitize_boot_params(struct boot_params *boot_params)
 {
 	/* 
@@ -36,19 +50,41 @@ static void sanitize_boot_params(struct boot_params *boot_params)
 	 */
 	if (boot_params->sentinel) {
 		/* fields in boot_params are left uninitialized, clear them */
-		memset(&boot_params->ext_ramdisk_image, 0,
-		       (char *)&boot_params->efi_info -
-			(char *)&boot_params->ext_ramdisk_image);
-		memset(&boot_params->kbd_status, 0,
-		       (char *)&boot_params->hdr -
-		       (char *)&boot_params->kbd_status);
-		memset(&boot_params->_pad7[0], 0,
-		       (char *)&boot_params->edd_mbr_sig_buffer[0] -
-			(char *)&boot_params->_pad7[0]);
-		memset(&boot_params->_pad8[0], 0,
-		       (char *)&boot_params->eddbuf[0] -
-			(char *)&boot_params->_pad8[0]);
-		memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
+		static struct boot_params scratch;
+		char *bp_base = (char *)boot_params;
+		char *save_base = (char *)&scratch;
+		int i;
+
+		const struct boot_params_to_save to_save[] = {
+			BOOT_PARAM_PRESERVE(screen_info),
+			BOOT_PARAM_PRESERVE(apm_bios_info),
+			BOOT_PARAM_PRESERVE(tboot_addr),
+			BOOT_PARAM_PRESERVE(ist_info),
+			BOOT_PARAM_PRESERVE(hd0_info),
+			BOOT_PARAM_PRESERVE(hd1_info),
+			BOOT_PARAM_PRESERVE(sys_desc_table),
+			BOOT_PARAM_PRESERVE(olpc_ofw_header),
+			BOOT_PARAM_PRESERVE(efi_info),
+			BOOT_PARAM_PRESERVE(alt_mem_k),
+			BOOT_PARAM_PRESERVE(scratch),
+			BOOT_PARAM_PRESERVE(e820_entries),
+			BOOT_PARAM_PRESERVE(eddbuf_entries),
+			BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
+			BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
+			BOOT_PARAM_PRESERVE(secure_boot),
+			BOOT_PARAM_PRESERVE(hdr),
+			BOOT_PARAM_PRESERVE(e820_table),
+			BOOT_PARAM_PRESERVE(eddbuf),
+		};
+
+		memset(&scratch, 0, sizeof(scratch));
+
+		for (i = 0; i < ARRAY_SIZE(to_save); i++) {
+			memcpy(save_base + to_save[i].start,
+			       bp_base + to_save[i].start, to_save[i].len);
+		}
+
+		memcpy(boot_params, save_base, sizeof(*boot_params));
 	}
 }
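
The rewrite above replaces hand-maintained memset() ranges, which silently broke
whenever struct boot_params gained a field, with an explicit table of regions to
keep: copy the listed fields into a zeroed scratch struct, then copy the scratch
back wholesale. A self-contained model of the idiom on a toy struct:

#include <assert.h>
#include <stddef.h>
#include <string.h>

struct params { int keep_a; int junk; int keep_b; };	/* toy stand-in */

#define PRESERVE(m)	{ offsetof(struct params, m), \
			  sizeof(((struct params *)0)->m) }

static const struct { size_t start, len; } to_save[] = {
	PRESERVE(keep_a),
	PRESERVE(keep_b),
};

static void sanitize(struct params *p)
{
	static struct params scratch;
	size_t i;

	memset(&scratch, 0, sizeof(scratch));
	for (i = 0; i < sizeof(to_save) / sizeof(to_save[0]); i++)
		memcpy((char *)&scratch + to_save[i].start,
		       (char *)p + to_save[i].start, to_save[i].len);
	memcpy(p, &scratch, sizeof(*p));	/* everything unlisted is now 0 */
}

int main(void)
{
	struct params p = { 1, 2, 3 };

	sanitize(&p);
	assert(p.keep_a == 1 && p.junk == 0 && p.keep_b == 3);
	return 0;
}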
 
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index aebedba..5d0b72f 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -58,6 +58,9 @@
 #define INTEL_FAM6_ICELAKE_MOBILE	0x7E
 #define INTEL_FAM6_ICELAKE_NNPI		0x9D
 
+#define INTEL_FAM6_TIGERLAKE_L		0x8C
+#define INTEL_FAM6_TIGERLAKE		0x8D
+
 /* "Small Core" Processors (Atom) */
 
 #define INTEL_FAM6_ATOM_BONNELL		0x1C /* Diamondville, Pineview */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3245b95..0d3f5cf 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -117,7 +117,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 }
 
 #define KVM_PERMILLE_MMU_PAGES 20
-#define KVM_MIN_ALLOC_MMU_PAGES 64
+#define KVM_MIN_ALLOC_MMU_PAGES 64UL
 #define KVM_MMU_HASH_SHIFT 12
 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
 #define KVM_MIN_FREE_MMU_PAGES 5
@@ -784,6 +784,9 @@ struct kvm_hv {
 	u64 hv_reenlightenment_control;
 	u64 hv_tsc_emulation_control;
 	u64 hv_tsc_emulation_status;
+
+	/* How many vCPUs have VP index != vCPU index */
+	atomic_t num_mismatched_vp_indexes;
 };
 
 enum kvm_irqchip_mode {
@@ -793,9 +796,9 @@ enum kvm_irqchip_mode {
 };
 
 struct kvm_arch {
-	unsigned int n_used_mmu_pages;
-	unsigned int n_requested_mmu_pages;
-	unsigned int n_max_mmu_pages;
+	unsigned long n_used_mmu_pages;
+	unsigned long n_requested_mmu_pages;
+	unsigned long n_max_mmu_pages;
 	unsigned int indirect_shadow_pages;
 	unsigned long mmu_valid_gen;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
@@ -1198,8 +1201,8 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 				   gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
+unsigned long kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
 
 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
 bool pdptrs_changed(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index f85f43d..a1d22e4 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -334,6 +334,7 @@
 #define MSR_AMD64_PATCH_LEVEL		0x0000008b
 #define MSR_AMD64_TSC_RATIO		0xc0000104
 #define MSR_AMD64_NB_CFG		0xc001001f
+#define MSR_AMD64_CPUID_FN_1		0xc0011004
 #define MSR_AMD64_PATCH_LOADER		0xc0010020
 #define MSR_AMD64_OSVW_ID_LENGTH	0xc0010140
 #define MSR_AMD64_OSVW_STATUS		0xc0010141
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index eb0f80c..3aa82de 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -21,7 +21,7 @@
 #define MWAIT_ECX_INTERRUPT_BREAK	0x1
 #define MWAITX_ECX_TIMER_ENABLE		BIT(1)
 #define MWAITX_MAX_LOOPS		((u32)-1)
-#define MWAITX_DISABLE_CSTATES		0xf
+#define MWAITX_DISABLE_CSTATES		0xf0
 
 static inline void __monitor(const void *eax, unsigned long ecx,
 			     unsigned long edx)
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 599c273..28cb2b3 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -202,7 +202,7 @@
 	"    	lfence;\n"					\
 	"       jmp    902b;\n"					\
 	"       .align 16\n"					\
-	"903:	addl   $4, %%esp;\n"				\
+	"903:	lea    4(%%esp), %%esp;\n"			\
 	"       pushl  %[thunk_target];\n"			\
 	"       ret;\n"						\
 	"       .align 16\n"					\
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 78241b7..f6c4915 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -209,16 +209,20 @@ struct x86_pmu_capability {
 #define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
 #define IBSCTL_LVT_OFFSET_MASK		0x0F
 
-/* ibs fetch bits/masks */
+/* IBS fetch bits/masks */
 #define IBS_FETCH_RAND_EN	(1ULL<<57)
 #define IBS_FETCH_VAL		(1ULL<<49)
 #define IBS_FETCH_ENABLE	(1ULL<<48)
 #define IBS_FETCH_CNT		0xFFFF0000ULL
 #define IBS_FETCH_MAX_CNT	0x0000FFFFULL
 
-/* ibs op bits/masks */
-/* lower 4 bits of the current count are ignored: */
-#define IBS_OP_CUR_CNT		(0xFFFF0ULL<<32)
+/*
+ * IBS op bits/masks
+ * The lower 7 bits of the current count are random bits
+ * preloaded by hardware and ignored in software
+ */
+#define IBS_OP_CUR_CNT		(0xFFF80ULL<<32)
+#define IBS_OP_CUR_CNT_RAND	(0x0007FULL<<32)
 #define IBS_OP_CNT_CTL		(1ULL<<19)
 #define IBS_OP_VAL		(1ULL<<18)
 #define IBS_OP_ENABLE		(1ULL<<17)
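
The mask split pairs with the ibs.c hunk earlier in this series: hardware
preloads the low 7 bits of IbsOpCurCnt with random bits, so re-arming the
counter must keep those bits rather than zero them, or short sampling periods
get biased. A quick check that the two new masks tile the old 20-bit field
exactly:

#include <assert.h>
#include <stdint.h>

#define IBS_OP_CUR_CNT		(0xFFF80ULL << 32)
#define IBS_OP_CUR_CNT_RAND	(0x0007FULL << 32)

int main(void)
{
	assert((IBS_OP_CUR_CNT & IBS_OP_CUR_CNT_RAND) == 0);
	assert((IBS_OP_CUR_CNT | IBS_OP_CUR_CNT_RAND) == (0xFFFFFULL << 32));
	return 0;
}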
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 4111edb..9718303 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -451,8 +451,10 @@ do {									\
 ({									\
 	int __gu_err;							\
 	__inttype(*(ptr)) __gu_val;					\
+	__typeof__(ptr) __gu_ptr = (ptr);				\
+	__typeof__(size) __gu_size = (size);				\
 	__uaccess_begin_nospec();					\
-	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
+	__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT);	\
 	__uaccess_end();						\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
 	__builtin_expect(__gu_err, 0);					\
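
The two new locals make __get_user evaluate its ptr and size arguments exactly
once, the standard cure for macro arguments with side effects. A user-space
demonstration of the idiom (GNU statement expressions, as the kernel itself
uses; names invented):

#include <assert.h>

static int calls;

static int *fetch(int *p)
{
	calls++;
	return p;
}

#define READ_TWICE(p)	(*(p) + *(p))	/* expands its argument twice */
#define READ_ONCE_ARG(p) \
	({ __typeof__(p) _p = (p); *_p + *_p; })

int main(void)
{
	int v = 21;

	calls = 0;
	(void)READ_TWICE(fetch(&v));
	assert(calls == 2);		/* side effect ran twice */

	calls = 0;
	assert(READ_ONCE_ARG(fetch(&v)) == 42);
	assert(calls == 1);		/* evaluated exactly once */
	return 0;
}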
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 272a128..dfdd1ca 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -715,7 +715,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
 static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
 
 /*
- * Temporary interrupt handler.
+ * Temporary interrupt handler and polled calibration function.
  */
 static void __init lapic_cal_handler(struct clock_event_device *dev)
 {
@@ -799,7 +799,8 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
 static int __init calibrate_APIC_clock(void)
 {
 	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
-	void (*real_handler)(struct clock_event_device *dev);
+	u64 tsc_perj = 0, tsc_start = 0;
+	unsigned long jif_start;
 	unsigned long deltaj;
 	long delta, deltatsc;
 	int pm_referenced = 0;
@@ -830,29 +831,65 @@ static int __init calibrate_APIC_clock(void)
 	apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
 		    "calibrating APIC timer ...\n");
 
+	/*
+	 * There are platforms w/o global clockevent devices. Instead of
+	 * making the calibration conditional on that, use a polling based
+	 * approach everywhere.
+	 */
 	local_irq_disable();
 
-	/* Replace the global interrupt handler */
-	real_handler = global_clock_event->event_handler;
-	global_clock_event->event_handler = lapic_cal_handler;
-
 	/*
 	 * Setup the APIC counter to maximum. There is no way the lapic
 	 * can underflow in the 100ms detection time frame
 	 */
 	__setup_APIC_LVTT(0xffffffff, 0, 0);
 
-	/* Let the interrupts run */
+	/*
+	 * Methods to terminate the calibration loop:
+	 *  1) Global clockevent if available (jiffies)
+	 *  2) TSC if available and frequency is known
+	 */
+	jif_start = READ_ONCE(jiffies);
+
+	if (tsc_khz) {
+		tsc_start = rdtsc();
+		tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
+	}
+
+	/*
+	 * Enable interrupts so the tick can fire, if a global
+	 * clockevent device is available
+	 */
 	local_irq_enable();
 
-	while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
-		cpu_relax();
+	while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
+		/* Wait for a tick to elapse */
+		while (1) {
+			if (tsc_khz) {
+				u64 tsc_now = rdtsc();
+				if ((tsc_now - tsc_start) >= tsc_perj) {
+					tsc_start += tsc_perj;
+					break;
+				}
+			} else {
+				unsigned long jif_now = READ_ONCE(jiffies);
+
+				if (time_after(jif_now, jif_start)) {
+					jif_start = jif_now;
+					break;
+				}
+			}
+			cpu_relax();
+		}
+
+		/* Invoke the calibration routine */
+		local_irq_disable();
+		lapic_cal_handler(NULL);
+		local_irq_enable();
+	}
 
 	local_irq_disable();
 
-	/* Restore the real event handler */
-	global_clock_event->event_handler = real_handler;
-
 	/* Build delta t1-t2 as apic timer counts down */
 	delta = lapic_cal_t1 - lapic_cal_t2;
 	apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
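
The rewritten loop drops the handler-swapping trick and instead polls for tick boundaries itself, advancing the TSC deadline by a fixed per-jiffy increment (tsc_start += tsc_perj) so rounding in the wait never accumulates. A rough userspace analogue of that pattern, with clock_gettime() standing in for rdtsc() and made-up numbers:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	const uint64_t period = 10 * 1000 * 1000;	/* 10 ms per "tick" */
	uint64_t deadline = now_ns() + period;
	int tick;

	for (tick = 0; tick < 5; tick++) {
		while (now_ns() < deadline)
			;				/* cpu_relax() in the kernel */
		deadline += period;			/* fixed increment, no drift */
		printf("tick %d at %llu ns\n", tick,
		       (unsigned long long)now_ns());
	}
	return 0;
}
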
@@ -904,10 +941,11 @@ static int __init calibrate_APIC_clock(void)
 	levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
 
 	/*
-	 * PM timer calibration failed or not turned on
-	 * so lets try APIC timer based calibration
+	 * PM timer calibration failed or not turned on, so let's try APIC
+	 * timer based calibration, if a global clockevent device is
+	 * available.
 	 */
-	if (!pm_referenced) {
+	if (!pm_referenced && global_clock_event) {
 		apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
 
 		/*
@@ -1412,54 +1450,72 @@ static void lapic_setup_esr(void)
 			oldvalue, value);
 }
 
+#define APIC_IR_REGS		APIC_ISR_NR
+#define APIC_IR_BITS		(APIC_IR_REGS * 32)
+#define APIC_IR_MAPSIZE		(APIC_IR_BITS / BITS_PER_LONG)
+
+union apic_ir {
+	unsigned long	map[APIC_IR_MAPSIZE];
+	u32		regs[APIC_IR_REGS];
+};
+
+static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr)
+{
+	int i, bit;
+
+	/* Read the IRRs */
+	for (i = 0; i < APIC_IR_REGS; i++)
+		irr->regs[i] = apic_read(APIC_IRR + i * 0x10);
+
+	/* Read the ISRs */
+	for (i = 0; i < APIC_IR_REGS; i++)
+		isr->regs[i] = apic_read(APIC_ISR + i * 0x10);
+
+	/*
+	 * If the ISR map is not empty, ACK the APIC and run another round
+	 * to verify whether a pending IRR has been unblocked and turned
+	 * into an ISR bit.
+	 */
+	if (!bitmap_empty(isr->map, APIC_IR_BITS)) {
+		/*
+		 * There can be multiple ISR bits set when a high priority
+		 * interrupt preempted a lower priority one. Issue an ACK
+		 * per set bit.
+		 */
+		for_each_set_bit(bit, isr->map, APIC_IR_BITS)
+			ack_APIC_irq();
+		return true;
+	}
+
+	return !bitmap_empty(irr->map, APIC_IR_BITS);
+}
+
+/*
+ * After a crash, we no longer service the interrupts and a pending
+ * interrupt from previous kernel might still have ISR bit set.
+ *
+ * Most probably by now the CPU has serviced that pending interrupt and
+ * it might not have done the ack_APIC_irq() because it thought the
+ * interrupt came from i8259 as ExtInt. The LAPIC did not get an EOI, so
+ * it does not clear the ISR bit and the CPU thinks it has already
+ * serviced the interrupt. Hence a vector might get locked. It was
+ * noticed for the timer irq (vector 0x31). Issue an extra EOI to clear ISR.
+ *
+ * If there are pending IRR bits they turn into ISR bits after a higher
+ * priority ISR bit has been acked.
+ */
 static void apic_pending_intr_clear(void)
 {
-	long long max_loops = cpu_khz ? cpu_khz : 1000000;
-	unsigned long long tsc = 0, ntsc;
-	unsigned int queued;
-	unsigned long value;
-	int i, j, acked = 0;
+	union apic_ir irr, isr;
+	unsigned int i;
 
-	if (boot_cpu_has(X86_FEATURE_TSC))
-		tsc = rdtsc();
-	/*
-	 * After a crash, we no longer service the interrupts and a pending
-	 * interrupt from previous kernel might still have ISR bit set.
-	 *
-	 * Most probably by now CPU has serviced that pending interrupt and
-	 * it might not have done the ack_APIC_irq() because it thought,
-	 * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
-	 * does not clear the ISR bit and cpu thinks it has already serivced
-	 * the interrupt. Hence a vector might get locked. It was noticed
-	 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
-	 */
-	do {
-		queued = 0;
-		for (i = APIC_ISR_NR - 1; i >= 0; i--)
-			queued |= apic_read(APIC_IRR + i*0x10);
-
-		for (i = APIC_ISR_NR - 1; i >= 0; i--) {
-			value = apic_read(APIC_ISR + i*0x10);
-			for_each_set_bit(j, &value, 32) {
-				ack_APIC_irq();
-				acked++;
-			}
-		}
-		if (acked > 256) {
-			pr_err("LAPIC pending interrupts after %d EOI\n", acked);
-			break;
-		}
-		if (queued) {
-			if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) {
-				ntsc = rdtsc();
-				max_loops = (long long)cpu_khz << 10;
-				max_loops -= ntsc - tsc;
-			} else {
-				max_loops--;
-			}
-		}
-	} while (queued && max_loops > 0);
-	WARN_ON(max_loops <= 0);
+	/* 512 loops are way oversized and give the APIC a chance to obey. */
+	for (i = 0; i < 512; i++) {
+		if (!apic_check_and_ack(&irr, &isr))
+			return;
+	}
+	/* Dump the IRR/ISR content if that failed */
+	pr_warn("APIC: Stale IRR: %256pb ISR: %256pb\n", irr.map, isr.map);
 }
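
union apic_ir above lets the 32-bit IRR/ISR register reads be scanned with the generic bitmap helpers. A userspace sketch of the same aliasing trick (little-endian assumed, as on x86; plain loops stand in for bitmap_empty()/for_each_set_bit()):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define IR_REGS		8
#define IR_BITS		(IR_REGS * 32)
#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))

union ir {
	unsigned long	map[IR_BITS / (8 * sizeof(unsigned long))];
	uint32_t	regs[IR_REGS];
};

int main(void)
{
	union ir isr;
	int bit;

	memset(&isr, 0, sizeof(isr));
	isr.regs[1] = 1u << 5;		/* pretend vector 37 is in service */
	isr.regs[6] = 1u << 0;		/* and vector 192                  */

	for (bit = 0; bit < IR_BITS; bit++)	/* for_each_set_bit() stand-in */
		if (isr.map[bit / BITS_PER_LONG] &
		    (1ul << (bit % BITS_PER_LONG)))
			printf("ISR bit %d set -> would ack_APIC_irq()\n", bit);
	return 0;
}
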
 
 /**
@@ -1482,6 +1538,14 @@ static void setup_local_APIC(void)
 		return;
 	}
 
+	/*
+	 * If this comes from kexec/kcrash the APIC might be enabled in
+	 * SPIV. Soft disable it before doing further initialization.
+	 */
+	value = apic_read(APIC_SPIV);
+	value &= ~APIC_SPIV_APIC_ENABLED;
+	apic_write(APIC_SPIV, value);
+
 #ifdef CONFIG_X86_32
 	/* Pound the ESR really hard over the head with a big hammer - mbligh */
 	if (lapic_is_integrated() && apic->disable_esr) {
@@ -1527,6 +1591,7 @@ static void setup_local_APIC(void)
 	value &= ~APIC_TPRI_MASK;
 	apic_write(APIC_TASKPRI, value);
 
+	/* Clear eventually stale ISR/IRR bits */
 	apic_pending_intr_clear();
 
 	/*
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index afee386..caedd8d 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -38,32 +38,12 @@ static int bigsmp_early_logical_apicid(int cpu)
 	return early_per_cpu(x86_cpu_to_apicid, cpu);
 }
 
-static inline unsigned long calculate_ldr(int cpu)
-{
-	unsigned long val, id;
-
-	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-	id = per_cpu(x86_bios_cpu_apicid, cpu);
-	val |= SET_APIC_LOGICAL_ID(id);
-
-	return val;
-}
-
 /*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116).  So here it goes...
+ * bigsmp enables physical destination mode
+ * and doesn't use LDR and DFR
  */
 static void bigsmp_init_apic_ldr(void)
 {
-	unsigned long val;
-	int cpu = smp_processor_id();
-
-	apic_write(APIC_DFR, APIC_DFR_FLAT);
-	val = calculate_ldr(cpu);
-	apic_write(APIC_LDR, val);
 }
 
 static void bigsmp_setup_apic_routing(void)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 4077e30..ab22ede 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2432,7 +2432,13 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
 	 * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
 	 * gsi_top if ioapic_dynirq_base hasn't been initialized yet.
 	 */
-	return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
+	if (!ioapic_initialized)
+		return gsi_top;
+	/*
+	 * For DT enabled machines ioapic_dynirq_base is irrelevant and not
+	 * updated. So simply return @from if ioapic_dynirq_base == 0.
+	 */
+	return ioapic_dynirq_base ? : from;
 }
 
 #ifdef CONFIG_X86_32
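
The "ioapic_dynirq_base ? : from" form above is the GNU C conditional with an omitted middle operand: it yields the first operand when it is nonzero, evaluating it only once. A tiny demo (names are ours):

#include <stdio.h>

static int probes;
static int probe(void) { probes++; return 42; }

int main(void)
{
	int v = probe() ?: -1;		/* probe() evaluated exactly once */

	printf("v=%d, probe ran %d time(s)\n", v, probes);	/* 42, 1 */
	printf("fallback: %d\n", 0 ?: -1);			/* -1 */
	return 0;
}
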
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 10e1d17..c352ca2 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -400,6 +400,17 @@ static int activate_reserved(struct irq_data *irqd)
 		if (!irqd_can_reserve(irqd))
 			apicd->can_reserve = false;
 	}
+
+	/*
+	 * Check to ensure that the effective affinity mask is a subset of
+	 * the user-supplied affinity mask, and warn the user if it is not.
+	 */
+	if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
+			    irq_data_get_affinity_mask(irqd))) {
+		pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
+			irqd->irq);
+	}
+
 	return ret;
 }
 
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 7685444..1455179 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -158,7 +158,8 @@ static int x2apic_dead_cpu(unsigned int dead_cpu)
 {
 	struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);
 
-	cpumask_clear_cpu(dead_cpu, &cmsk->mask);
+	if (cmsk)
+		cpumask_clear_cpu(dead_cpu, &cmsk->mask);
 	free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
 	return 0;
 }
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index da1f5e7..f86f912 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -799,6 +799,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
 	msr_set_bit(MSR_AMD64_DE_CFG, 31);
 }
 
+static bool rdrand_force;
+
+static int __init rdrand_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "force"))
+		rdrand_force = true;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+early_param("rdrand", rdrand_cmdline);
+
+static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
+{
+	/*
+	 * Saving of the MSR used to hide the RDRAND support during
+	 * suspend/resume is done by arch/x86/power/cpu.c, which is
+	 * dependent on CONFIG_PM_SLEEP.
+	 */
+	if (!IS_ENABLED(CONFIG_PM_SLEEP))
+		return;
+
+	/*
+	 * The nordrand option can clear X86_FEATURE_RDRAND, so check for
+	 * RDRAND support using the CPUID function directly.
+	 */
+	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
+		return;
+
+	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
+
+	/*
+	 * Verify that the CPUID change has occurred in case the kernel is
+	 * running virtualized and the hypervisor doesn't support the MSR.
+	 */
+	if (cpuid_ecx(1) & BIT(30)) {
+		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
+		return;
+	}
+
+	clear_cpu_cap(c, X86_FEATURE_RDRAND);
+	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
+}
+
+static void init_amd_jg(struct cpuinfo_x86 *c)
+{
+	/*
+	 * Some BIOS implementations do not restore proper RDRAND support
+	 * across suspend and resume. Check on whether to hide the RDRAND
+	 * instruction support via CPUID.
+	 */
+	clear_rdrand_cpuid_bit(c);
+}
+
 static void init_amd_bd(struct cpuinfo_x86 *c)
 {
 	u64 value;
@@ -813,6 +871,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
 			wrmsrl_safe(MSR_F15H_IC_CFG, value);
 		}
 	}
+
+	/*
+	 * Some BIOS implementations do not restore proper RDRAND support
+	 * across suspend and resume. Check on whether to hide the RDRAND
+	 * instruction support via CPUID.
+	 */
+	clear_rdrand_cpuid_bit(c);
 }
 
 static void init_amd_zn(struct cpuinfo_x86 *c)
@@ -855,6 +920,7 @@ static void init_amd(struct cpuinfo_x86 *c)
 	case 0x10: init_amd_gh(c); break;
 	case 0x12: init_amd_ln(c); break;
 	case 0x15: init_amd_bd(c); break;
+	case 0x16: init_amd_jg(c); break;
 	case 0x17: init_amd_zn(c); break;
 	}
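
clear_rdrand_cpuid_bit() deliberately probes CPUID leaf 1 directly, because the cached X86_FEATURE_RDRAND flag may already have been cleared by "nordrand". RDRAND is advertised in CPUID.01H:ECX[30]; a userspace probe of the same bit (assumes GCC/Clang cpuid.h):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("RDRAND %s advertised (ECX=%#x)\n",
	       (ecx & (1u << 30)) ? "is" : "is NOT", ecx);
	return 0;
}
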
 
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 50d30966..5790671 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -53,7 +53,7 @@ int ftrace_arch_code_modify_post_process(void)
 union ftrace_code_union {
 	char code[MCOUNT_INSN_SIZE];
 	struct {
-		unsigned char e8;
+		unsigned char op;
 		int offset;
 	} __attribute__((packed));
 };
@@ -63,20 +63,23 @@ static int ftrace_calc_offset(long ip, long addr)
 	return (int)(addr - ip);
 }
 
-static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static unsigned char *
+ftrace_text_replace(unsigned char op, unsigned long ip, unsigned long addr)
 {
 	static union ftrace_code_union calc;
 
-	calc.e8		= 0xe8;
+	calc.op		= op;
 	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
 
-	/*
-	 * No locking needed, this must be called via kstop_machine
-	 * which in essence is like running on a uniprocessor machine.
-	 */
 	return calc.code;
 }
 
+static unsigned char *
+ftrace_call_replace(unsigned long ip, unsigned long addr)
+{
+	return ftrace_text_replace(0xe8, ip, addr);
+}
+
 static inline int
 within(unsigned long addr, unsigned long start, unsigned long end)
 {
@@ -686,22 +689,6 @@ int __init ftrace_dyn_arch_init(void)
 	return 0;
 }
 
-#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
-static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
-{
-	static union ftrace_code_union calc;
-
-	/* Jmp not a call (ignore the .e8) */
-	calc.e8		= 0xe9;
-	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
-
-	/*
-	 * ftrace external locks synchronize the access to the static variable.
-	 */
-	return calc.code;
-}
-#endif
-
 /* Currently only x86_64 supports dynamic trampolines */
 #ifdef CONFIG_X86_64
 
@@ -923,8 +910,8 @@ static void *addr_from_call(void *ptr)
 		return NULL;
 
 	/* Make sure this is a call */
-	if (WARN_ON_ONCE(calc.e8 != 0xe8)) {
-		pr_warn("Expected e8, got %x\n", calc.e8);
+	if (WARN_ON_ONCE(calc.op != 0xe8)) {
+		pr_warn("Expected e8, got %x\n", calc.op);
 		return NULL;
 	}
 
@@ -995,6 +982,11 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern void ftrace_graph_call(void);
 
+static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
+{
+	return ftrace_text_replace(0xe9, ip, addr);
+}
+
 static int ftrace_mod_jmp(unsigned long ip, void *func)
 {
 	unsigned char *new;
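
ftrace_text_replace() builds a 5-byte near CALL (0xe8) or JMP (0xe9): one opcode byte plus a 32-bit displacement relative to the next instruction, i.e. addr - (ip + 5). A standalone sketch with made-up addresses:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MCOUNT_INSN_SIZE 5

static void text_replace(unsigned char op, unsigned long ip,
			 unsigned long addr, unsigned char out[5])
{
	int32_t offset = (int32_t)(addr - (ip + MCOUNT_INSN_SIZE));

	out[0] = op;				/* 0xe8 = call, 0xe9 = jmp */
	memcpy(&out[1], &offset, sizeof(offset));	/* little-endian rel32 */
}

int main(void)
{
	unsigned char insn[5];
	int i;

	text_replace(0xe8, 0x401000, 0x400500, insn);
	for (i = 0; i < 5; i++)
		printf("%02x ", insn[i]);	/* e8 fb f4 ff ff */
	printf("\n");
	return 0;
}
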
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 250cfa8..88dc38b 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -222,13 +222,31 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	 * we might write invalid pmds, when the kernel is relocated
 	 * cleanup_highmap() fixes this up along with the mappings
 	 * beyond _end.
+	 *
+	 * Only the region occupied by the kernel image has so far
+	 * been checked against the table of usable memory regions
+	 * provided by the firmware, so invalidate pages outside that
+	 * region. A page table entry that maps to a reserved area of
+	 * memory would allow processor speculation into that area,
+	 * and on some hardware (particularly the UV platform) even
+	 * speculative access to some reserved areas is caught as an
+	 * error, causing the BIOS to halt the system.
 	 */
 
 	pmd = fixup_pointer(level2_kernel_pgt, physaddr);
-	for (i = 0; i < PTRS_PER_PMD; i++) {
+
+	/* invalidate pages before the kernel image */
+	for (i = 0; i < pmd_index((unsigned long)_text); i++)
+		pmd[i] &= ~_PAGE_PRESENT;
+
+	/* fixup pages that are part of the kernel image */
+	for (; i <= pmd_index((unsigned long)_end); i++)
 		if (pmd[i] & _PAGE_PRESENT)
 			pmd[i] += load_delta;
-	}
+
+	/* invalidate pages after the kernel image */
+	for (; i < PTRS_PER_PMD; i++)
+		pmd[i] &= ~_PAGE_PRESENT;
 
 	/*
 	 * Fixup phys_base - remove the memory encryption mask to obtain
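
With 2 MiB PMD mappings, pmd_index() is just bits 29..21 of the address, so the three loops above split the 512 entries into before-image/image/after-image ranges. A small arithmetic sketch (the _text/_end addresses below are invented):

#include <stdio.h>

#define PMD_SHIFT	21
#define PTRS_PER_PMD	512

static unsigned long long pmd_index(unsigned long long addr)
{
	return (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

int main(void)
{
	unsigned long long text = 0xffffffff81000000ull;	/* pretend _text */
	unsigned long long end  = 0xffffffff82345678ull;	/* pretend _end  */

	printf("invalidate pmd[0..%llu]\n", pmd_index(text) - 1);
	printf("fixup      pmd[%llu..%llu] (+ load_delta)\n",
	       pmd_index(text), pmd_index(end));
	printf("invalidate pmd[%llu..%d]\n", pmd_index(end) + 1,
	       PTRS_PER_PMD - 1);
	return 0;
}
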
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 013fe3d..2ec202c 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -117,12 +117,8 @@ static u64 kvm_sched_clock_read(void)
 
 static inline void kvm_sched_clock_init(bool stable)
 {
-	if (!stable) {
-		pv_time_ops.sched_clock = kvm_clock_read;
+	if (!stable)
 		clear_sched_clock_stable();
-		return;
-	}
-
 	kvm_sched_clock_offset = kvm_clock_read();
 	pv_time_ops.sched_clock = kvm_sched_clock_read;
 
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index aeba778..516ec75 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -652,11 +652,10 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
 {
 	struct thread_struct *thread = &tsk->thread;
 	unsigned long val = 0;
-	int index = n;
 
 	if (n < HBP_NUM) {
+		int index = array_index_nospec(n, HBP_NUM);
 		struct perf_event *bp = thread->ptrace_bps[index];
-		index = array_index_nospec(index, HBP_NUM);
 
 		if (bp)
 			val = bp->hw.info.address;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b4866ba..90ecc10 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1251,7 +1251,7 @@ void __init setup_arch(char **cmdline_p)
 	x86_init.hyper.guest_late_init();
 
 	e820__reserve_resources();
-	e820__register_nosave_regions(max_low_pfn);
+	e820__register_nosave_regions(max_pfn);
 
 	x86_init.resources.reserve_resources();
 
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 04adc8d6..b2b87b9 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -181,6 +181,12 @@ asmlinkage __visible void smp_reboot_interrupt(void)
 	irq_exit();
 }
 
+static int register_stop_handler(void)
+{
+	return register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
+				    NMI_FLAG_FIRST, "smp_stop");
+}
+
 static void native_stop_other_cpus(int wait)
 {
 	unsigned long flags;
@@ -214,39 +220,41 @@ static void native_stop_other_cpus(int wait)
 		apic->send_IPI_allbutself(REBOOT_VECTOR);
 
 		/*
-		 * Don't wait longer than a second if the caller
-		 * didn't ask us to wait.
+		 * Don't wait longer than a second for IPI completion. The
+		 * wait request is not checked here because that would
+		 * prevent an NMI shutdown attempt in case not all
+		 * CPUs reach shutdown state.
 		 */
 		timeout = USEC_PER_SEC;
-		while (num_online_cpus() > 1 && (wait || timeout--))
+		while (num_online_cpus() > 1 && timeout--)
 			udelay(1);
 	}
-	
+
 	/* if the REBOOT_VECTOR didn't work, try with the NMI */
-	if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi))  {
-		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
-					 NMI_FLAG_FIRST, "smp_stop"))
-			/* Note: we ignore failures here */
-			/* Hope the REBOOT_IRQ is good enough */
-			goto finish;
-
-		/* sync above data before sending IRQ */
-		wmb();
-
-		pr_emerg("Shutting down cpus with NMI\n");
-
-		apic->send_IPI_allbutself(NMI_VECTOR);
-
+	if (num_online_cpus() > 1) {
 		/*
-		 * Don't wait longer than a 10 ms if the caller
-		 * didn't ask us to wait.
+		 * If NMI IPI is enabled, try to register the stop handler
+		 * and send the IPI. In any case try to wait for the other
+		 * CPUs to stop.
+		 */
+		if (!smp_no_nmi_ipi && !register_stop_handler()) {
+			/* Sync above data before sending IRQ */
+			wmb();
+
+			pr_emerg("Shutting down cpus with NMI\n");
+
+			apic->send_IPI_allbutself(NMI_VECTOR);
+		}
+		/*
+		 * Don't wait longer than 10 ms if the caller didn't
+		 * request it. If wait is true, the machine hangs here if
+		 * one or more CPUs do not reach shutdown state.
 		 */
 		timeout = USEC_PER_MSEC * 10;
 		while (num_online_cpus() > 1 && (wait || timeout--))
 			udelay(1);
 	}
 
-finish:
 	local_irq_save(flags);
 	disable_local_APIC();
 	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
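
The restructured shutdown ignores "wait" during the first (REBOOT vector) timeout precisely so the NMI fallback still gets a chance; only the second wait may block indefinitely. A toy single-threaded model of that control flow (all numbers invented; a counter stands in for the other CPUs going offline):

#include <stdio.h>
#include <stdbool.h>

static int online = 4;			/* this CPU + 3 others (pretend)  */
static long ipi_latency = 1500000;	/* others stop after this many us */

static void udelay_1(void)		/* stand-in for udelay(1) */
{
	if (--ipi_latency == 0)
		online = 1;		/* the other CPUs finally stop */
}

int main(void)
{
	bool wait = true;
	long timeout;

	/*
	 * Phase 1: REBOOT vector. Deliberately ignore "wait" here, so a
	 * CPU that never answers cannot keep us from trying the NMI.
	 */
	timeout = 1000000;		/* ~1 s */
	while (online > 1 && timeout--)
		udelay_1();

	if (online > 1) {
		printf("REBOOT vector timed out, falling back to NMI\n");
		/* Phase 2: NMI sent; only this wait may honor "wait". */
		timeout = 10 * 1000;	/* ~10 ms */
		while (online > 1 && (wait || timeout--))
			udelay_1();
	}
	printf("online CPUs: %d\n", online);
	return 0;
}
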
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index deb576b..9119859 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -521,9 +521,12 @@ struct uprobe_xol_ops {
 	void	(*abort)(struct arch_uprobe *, struct pt_regs *);
 };
 
-static inline int sizeof_long(void)
+static inline int sizeof_long(struct pt_regs *regs)
 {
-	return in_ia32_syscall() ? 4 : 8;
+	/*
+	 * Check registers for mode as in_xxx_syscall() does not apply here.
+	 */
+	return user_64bit_mode(regs) ? 8 : 4;
 }
 
 static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
@@ -534,9 +537,9 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
 
 static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
 {
-	unsigned long new_sp = regs->sp - sizeof_long();
+	unsigned long new_sp = regs->sp - sizeof_long(regs);
 
-	if (copy_to_user((void __user *)new_sp, &val, sizeof_long()))
+	if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs)))
 		return -EFAULT;
 
 	regs->sp = new_sp;
@@ -569,7 +572,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
 		long correction = utask->vaddr - utask->xol_vaddr;
 		regs->ip += correction;
 	} else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
-		regs->sp += sizeof_long(); /* Pop incorrect return address */
+		regs->sp += sizeof_long(regs); /* Pop incorrect return address */
 		if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
 			return -ERESTART;
 	}
@@ -688,7 +691,7 @@ static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
 	 * "call" insn was executed out-of-line. Just restore ->sp and restart.
 	 * We could also restore ->ip and try to call branch_emulate_op() again.
 	 */
-	regs->sp += sizeof_long();
+	regs->sp += sizeof_long(regs);
 	return -ERESTART;
 }
 
@@ -1068,7 +1071,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
 unsigned long
 arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
 {
-	int rasize = sizeof_long(), nleft;
+	int rasize = sizeof_long(regs), nleft;
 	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
 
 	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 4a688ef..e699f4d 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2331,12 +2331,16 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt)
 
 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
 {
+#ifdef CONFIG_X86_64
 	u32 eax, ebx, ecx, edx;
 
 	eax = 0x80000001;
 	ecx = 0;
 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
 	return edx & bit(X86_FEATURE_LM);
+#else
+	return false;
+#endif
 }
 
 #define GET_SMSTATE(type, smbase, offset)				  \
@@ -2381,6 +2385,7 @@ static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 	return X86EMUL_CONTINUE;
 }
 
+#ifdef CONFIG_X86_64
 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 {
 	struct desc_struct desc;
@@ -2399,6 +2404,7 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
 	return X86EMUL_CONTINUE;
 }
+#endif
 
 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
 				    u64 cr0, u64 cr3, u64 cr4)
@@ -2499,6 +2505,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 }
 
+#ifdef CONFIG_X86_64
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 {
 	struct desc_struct desc;
@@ -2560,6 +2567,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 
 	return X86EMUL_CONTINUE;
 }
+#endif
 
 static int em_rsm(struct x86_emulate_ctxt *ctxt)
 {
@@ -2616,9 +2624,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	if (ctxt->ops->pre_leave_smm(ctxt, smbase))
 		return X86EMUL_UNHANDLEABLE;
 
+#ifdef CONFIG_X86_64
 	if (emulator_has_longmode(ctxt))
 		ret = rsm_load_state_64(ctxt, smbase + 0x8000);
 	else
+#endif
 		ret = rsm_load_state_32(ctxt, smbase + 0x8000);
 
 	if (ret != X86EMUL_CONTINUE) {
@@ -5358,6 +5368,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 					ctxt->memopp->addr.mem.ea + ctxt->_eip);
 
 done:
+	if (rc == X86EMUL_PROPAGATE_FAULT)
+		ctxt->have_exception = true;
 	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
 }
 
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 229d996..5842c5f 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -132,8 +132,10 @@ static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
 	struct kvm_vcpu *vcpu = NULL;
 	int i;
 
-	if (vpidx < KVM_MAX_VCPUS)
-		vcpu = kvm_get_vcpu(kvm, vpidx);
+	if (vpidx >= KVM_MAX_VCPUS)
+		return NULL;
+
+	vcpu = kvm_get_vcpu(kvm, vpidx);
 	if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
 		return vcpu;
 	kvm_for_each_vcpu(i, vcpu, kvm)
@@ -689,6 +691,24 @@ void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
 		stimer_cleanup(&hv_vcpu->stimer[i]);
 }
 
+bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
+{
+	if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
+		return false;
+	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
+}
+EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
+
+bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
+			    struct hv_vp_assist_page *assist_page)
+{
+	if (!kvm_hv_assist_page_enabled(vcpu))
+		return false;
+	return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
+				      assist_page, sizeof(*assist_page));
+}
+EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
+
 static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
 {
 	struct hv_message *msg = &stimer->msg;
@@ -1040,21 +1060,41 @@ static u64 current_task_runtime_100ns(void)
 
 static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 {
-	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
+	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
 
 	switch (msr) {
-	case HV_X64_MSR_VP_INDEX:
-		if (!host)
+	case HV_X64_MSR_VP_INDEX: {
+		struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+		int vcpu_idx = kvm_vcpu_get_idx(vcpu);
+		u32 new_vp_index = (u32)data;
+
+		if (!host || new_vp_index >= KVM_MAX_VCPUS)
 			return 1;
-		hv->vp_index = (u32)data;
+
+		if (new_vp_index == hv_vcpu->vp_index)
+			return 0;
+
+		/*
+		 * The VP index is initialized to vcpu_index by
+		 * kvm_hv_vcpu_postcreate so they initially match.  Now the
+		 * VP index is changing, adjust num_mismatched_vp_indexes if
+		 * it now matches or no longer matches vcpu_idx.
+		 */
+		if (hv_vcpu->vp_index == vcpu_idx)
+			atomic_inc(&hv->num_mismatched_vp_indexes);
+		else if (new_vp_index == vcpu_idx)
+			atomic_dec(&hv->num_mismatched_vp_indexes);
+
+		hv_vcpu->vp_index = new_vp_index;
 		break;
+	}
 	case HV_X64_MSR_VP_ASSIST_PAGE: {
 		u64 gfn;
 		unsigned long addr;
 
 		if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
-			hv->hv_vapic = data;
-			if (kvm_lapic_enable_pv_eoi(vcpu, 0))
+			hv_vcpu->hv_vapic = data;
+			if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0))
 				return 1;
 			break;
 		}
@@ -1064,10 +1104,11 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 			return 1;
 		if (__clear_user((void __user *)addr, PAGE_SIZE))
 			return 1;
-		hv->hv_vapic = data;
+		hv_vcpu->hv_vapic = data;
 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
 		if (kvm_lapic_enable_pv_eoi(vcpu,
-					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
+					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
+					    sizeof(struct hv_vp_assist_page)))
 			return 1;
 		break;
 	}
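
num_mismatched_vp_indexes counts vCPUs whose vp_index differs from their vcpu index, and the hunk above adjusts it only on the matching<->mismatching transitions. A tiny model that exercises the invariant (names are ours):

#include <stdio.h>

#define NVCPU 4

static int vp_index[NVCPU];
static int num_mismatched;

static void set_vp_index(int vcpu_idx, int new_vp)
{
	if (new_vp == vp_index[vcpu_idx])
		return;
	if (vp_index[vcpu_idx] == vcpu_idx)
		num_mismatched++;	/* was matching, now isn't        */
	else if (new_vp == vcpu_idx)
		num_mismatched--;	/* was mismatching, matches again */
	vp_index[vcpu_idx] = new_vp;
}

int main(void)
{
	int i;

	for (i = 0; i < NVCPU; i++)
		vp_index[i] = i;	/* postcreate default: all match */

	set_vp_index(1, 3);		/* -> 1 mismatch */
	set_vp_index(2, 2);		/* no-op         */
	set_vp_index(1, 1);		/* back -> 0     */
	printf("num_mismatched=%d\n", num_mismatched);
	return 0;
}
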
@@ -1080,7 +1121,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 	case HV_X64_MSR_VP_RUNTIME:
 		if (!host)
 			return 1;
-		hv->runtime_offset = data - current_task_runtime_100ns();
+		hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
 		break;
 	case HV_X64_MSR_SCONTROL:
 	case HV_X64_MSR_SVERSION:
@@ -1172,11 +1213,11 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
 			  bool host)
 {
 	u64 data = 0;
-	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
+	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
 
 	switch (msr) {
 	case HV_X64_MSR_VP_INDEX:
-		data = hv->vp_index;
+		data = hv_vcpu->vp_index;
 		break;
 	case HV_X64_MSR_EOI:
 		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
@@ -1185,10 +1226,10 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
 	case HV_X64_MSR_TPR:
 		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
 	case HV_X64_MSR_VP_ASSIST_PAGE:
-		data = hv->hv_vapic;
+		data = hv_vcpu->hv_vapic;
 		break;
 	case HV_X64_MSR_VP_RUNTIME:
-		data = current_task_runtime_100ns() + hv->runtime_offset;
+		data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
 		break;
 	case HV_X64_MSR_SCONTROL:
 	case HV_X64_MSR_SVERSION:
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index d6aa969..0e66c12 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -62,6 +62,10 @@ void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu);
 void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);
 
+bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
+bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
+			    struct hv_vp_assist_page *assist_page);
+
 static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu,
 							int timer_index)
 {
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index faa2648..007bc65 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -172,3 +172,10 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
 	__kvm_migrate_apic_timer(vcpu);
 	__kvm_migrate_pit_timer(vcpu);
 }
+
+bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
+{
+	bool resample = args->flags & KVM_IRQFD_FLAG_RESAMPLE;
+
+	return resample ? irqchip_kernel(kvm) : irqchip_in_kernel(kvm);
+}
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index d5005cc..fd210cd 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -114,6 +114,7 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
 	return mode != KVM_IRQCHIP_NONE;
 }
 
+bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
 void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 031bd7f..262e493 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -209,6 +209,9 @@ static void recalculate_apic_map(struct kvm *kvm)
 		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
 			new->phys_map[xapic_id] = apic;
 
+		if (!kvm_apic_sw_enabled(apic))
+			continue;
+
 		ldr = kvm_lapic_get_reg(apic, APIC_LDR);
 
 		if (apic_x2apic_mode(apic)) {
@@ -252,6 +255,8 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
 			recalculate_apic_map(apic->vcpu->kvm);
 		} else
 			static_key_slow_inc(&apic_sw_disabled.key);
+
+		recalculate_apic_map(apic->vcpu->kvm);
 	}
 }
 
@@ -2628,17 +2633,25 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
 	return 0;
 }
 
-int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
+int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
 {
 	u64 addr = data & ~KVM_MSR_ENABLED;
+	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
+	unsigned long new_len;
+
 	if (!IS_ALIGNED(addr, 4))
 		return 1;
 
 	vcpu->arch.pv_eoi.msr_val = data;
 	if (!pv_eoi_enabled(vcpu))
 		return 0;
-	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
-					 addr, sizeof(u8));
+
+	if (addr == ghc->gpa && len <= ghc->len)
+		new_len = ghc->len;
+	else
+		new_len = len;
+
+	return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
 }
 
 void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index ed0ed39..ff6ef9c 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -120,7 +120,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
 	return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
 }
 
-int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
+int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len);
 void kvm_lapic_init(void);
 void kvm_lapic_exit(void);
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index cdc0c46..8894026 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1954,7 +1954,7 @@ static int is_empty_shadow_page(u64 *spt)
  * aggregate version in order to make the slab shrinker
  * faster
  */
-static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
+static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
 {
 	kvm->arch.n_used_mmu_pages += nr;
 	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
@@ -2704,7 +2704,7 @@ static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
  * Changing the number of mmu pages allocated to the vm
  * Note: if goal_nr_mmu_pages is too small, you will get dead lock
  */
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
 {
 	LIST_HEAD(invalid_list);
 
@@ -5926,10 +5926,10 @@ int kvm_mmu_module_init(void)
 /*
  * Caculate mmu pages needed for kvm.
  */
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
+unsigned long kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
 {
-	unsigned int nr_mmu_pages;
-	unsigned int  nr_pages = 0;
+	unsigned long nr_mmu_pages;
+	unsigned long nr_pages = 0;
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	int i;
@@ -5942,8 +5942,7 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
 	}
 
 	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
-	nr_mmu_pages = max(nr_mmu_pages,
-			   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+	nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
 
 	return nr_mmu_pages;
 }
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 1fab69c..6589228 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -69,7 +69,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 				u64 fault_address, char *insn, int insn_len);
 
-static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
+static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
 {
 	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
 		return kvm->arch.n_max_mmu_pages -
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index e9ea2d4..9f72cc42 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -48,11 +48,6 @@ static bool msr_mtrr_valid(unsigned msr)
 	return false;
 }
 
-static bool valid_pat_type(unsigned t)
-{
-	return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
-}
-
 static bool valid_mtrr_type(unsigned t)
 {
 	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
@@ -67,10 +62,7 @@ bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		return false;
 
 	if (msr == MSR_IA32_CR_PAT) {
-		for (i = 0; i < 8; i++)
-			if (!valid_pat_type((data >> (i * 8)) & 0xff))
-				return false;
-		return true;
+		return kvm_pat_valid(data);
 	} else if (msr == MSR_MTRRdefType) {
 		if (data & ~0xcff)
 			return false;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0f33f00..ac2cc2e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5622,6 +5622,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
 	clgi();
+	kvm_load_guest_xcr0(vcpu);
 
 	/*
 	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
@@ -5769,6 +5770,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
 		kvm_before_interrupt(&svm->vcpu);
 
+	kvm_put_guest_xcr0(vcpu);
 	stgi();
 
 	/* Any pending NMI will happen here */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2e310ea..6f7b3ac 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4135,7 +4135,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
 				       &msr_info->data);
 	case MSR_IA32_XSS:
-		if (!vmx_xsaves_supported())
+		if (!vmx_xsaves_supported() ||
+		    (!msr_info->host_initiated &&
+		     !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
+		       guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
 			return 1;
 		msr_info->data = vcpu->arch.ia32_xss;
 		break;
@@ -4265,9 +4268,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 					      MSR_TYPE_W);
 		break;
 	case MSR_IA32_CR_PAT:
+		if (!kvm_pat_valid(data))
+			return 1;
+
 		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
-			if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
-				return 1;
 			vmcs_write64(GUEST_IA32_PAT, data);
 			vcpu->arch.pat = data;
 			break;
@@ -4301,7 +4305,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 		return vmx_set_vmx_msr(vcpu, msr_index, data);
 	case MSR_IA32_XSS:
-		if (!vmx_xsaves_supported())
+		if (!vmx_xsaves_supported() ||
+		    (!msr_info->host_initiated &&
+		     !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
+		       guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
 			return 1;
 		/*
 		 * The only supported bit as of Skylake is bit 8, but
@@ -8750,6 +8757,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
 	gva_t gva = 0;
 	struct vmcs12 *vmcs12;
+	struct x86_exception e;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
@@ -8791,8 +8799,10 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 				vmx_instruction_info, true, &gva))
 			return 1;
 		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
-		kvm_write_guest_virt_system(vcpu, gva, &field_value,
-					    (is_long_mode(vcpu) ? 8 : 4), NULL);
+		if (kvm_write_guest_virt_system(vcpu, gva, &field_value,
+						(is_long_mode(vcpu) ? 8 : 4),
+						&e))
+			kvm_inject_page_fault(vcpu, &e);
 	}
 
 	nested_vmx_succeed(vcpu);
@@ -10437,28 +10447,21 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
 
 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
 {
-	u32 exit_intr_info = 0;
-	u16 basic_exit_reason = (u16)vmx->exit_reason;
-
-	if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
-	      || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI))
+	if (vmx->exit_reason != EXIT_REASON_EXCEPTION_NMI)
 		return;
 
-	if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
-		exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-	vmx->exit_intr_info = exit_intr_info;
+	vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 
 	/* if exit due to PF check for async PF */
-	if (is_page_fault(exit_intr_info))
+	if (is_page_fault(vmx->exit_intr_info))
 		vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
 
 	/* Handle machine checks before interrupts are enabled */
-	if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY ||
-	    is_machine_check(exit_intr_info))
+	if (is_machine_check(vmx->exit_intr_info))
 		kvm_machine_check();
 
 	/* We need to handle NMIs before interrupts are enabled */
-	if (is_nmi(exit_intr_info)) {
+	if (is_nmi(vmx->exit_intr_info)) {
 		kvm_before_interrupt(&vmx->vcpu);
 		asm("int $2");
 		kvm_after_interrupt(&vmx->vcpu);
@@ -10756,6 +10759,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
 		vmx_set_interrupt_shadow(vcpu, 0);
 
+	kvm_load_guest_xcr0(vcpu);
+
 	if (static_cpu_has(X86_FEATURE_PKU) &&
 	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
 	    vcpu->arch.pkru != vmx->host_pkru)
@@ -10808,7 +10813,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		"mov %%" _ASM_AX", %%cr2 \n\t"
 		"3: \n\t"
 		/* Check if vmlaunch of vmresume is needed */
-		"cmpl $0, %c[launched](%0) \n\t"
+		"cmpb $0, %c[launched](%0) \n\t"
 		/* Load guest registers.  Don't clobber flags. */
 		"mov %c[rax](%0), %%" _ASM_AX " \n\t"
 		"mov %c[rbx](%0), %%" _ASM_BX " \n\t"
@@ -10971,10 +10976,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 			__write_pkru(vmx->host_pkru);
 	}
 
+	kvm_put_guest_xcr0(vcpu);
+
 	vmx->nested.nested_run_pending = 0;
 	vmx->idt_vectoring_info = 0;
 
 	vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON);
+	if ((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
+		kvm_machine_check();
+
 	if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
 		return;
 
@@ -12564,7 +12574,7 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
 		/* VM-entry exception error code */
 		if (has_error_code &&
-		    vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
+		    vmcs12->vm_entry_exception_error_code & GENMASK(31, 16))
 			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
 
 		/* VM-entry interruption-info field: reserved bits */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e10a7a4..6ae8a01 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -581,8 +581,14 @@ static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
 				       data, offset, len, access);
 }
 
+static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
+{
+	return rsvd_bits(cpuid_maxphyaddr(vcpu), 63) | rsvd_bits(5, 8) |
+	       rsvd_bits(1, 2);
+}
+
 /*
- * Load the pae pdptrs.  Return true is they are all valid.
+ * Load the pae pdptrs.  Return 1 if they are all valid, 0 otherwise.
  */
 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
 {
@@ -601,8 +607,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
 	}
 	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
 		if ((pdpte[i] & PT_PRESENT_MASK) &&
-		    (pdpte[i] &
-		     vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
+		    (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
 			ret = 0;
 			goto out;
 		}
@@ -713,7 +718,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
-static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
 {
 	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
 			!vcpu->guest_xcr0_loaded) {
@@ -723,8 +728,9 @@ static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
 		vcpu->guest_xcr0_loaded = 1;
 	}
 }
+EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0);
 
-static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->guest_xcr0_loaded) {
 		if (vcpu->arch.xcr0 != host_xcr0)
@@ -732,6 +738,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
 		vcpu->guest_xcr0_loaded = 0;
 	}
 }
+EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0);
 
 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
@@ -784,34 +791,42 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 }
 EXPORT_SYMBOL_GPL(kvm_set_xcr);
 
+static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+	if (cr4 & CR4_RESERVED_BITS)
+		return -EINVAL;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
+		return -EINVAL;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
+		return -EINVAL;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
+		return -EINVAL;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
+		return -EINVAL;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
+		return -EINVAL;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
+		return -EINVAL;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
+		return -EINVAL;
+
+	return 0;
+}
+
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
 				   X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
 
-	if (cr4 & CR4_RESERVED_BITS)
-		return 1;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
-		return 1;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
-		return 1;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
-		return 1;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
-		return 1;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
-		return 1;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
-		return 1;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
+	if (kvm_valid_cr4(vcpu, cr4))
 		return 1;
 
 	if (is_long_mode(vcpu)) {
@@ -2494,7 +2509,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 		break;
 	case MSR_KVM_PV_EOI_EN:
-		if (kvm_lapic_enable_pv_eoi(vcpu, data))
+		if (kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8)))
 			return 1;
 		break;
 
@@ -4116,7 +4131,7 @@ static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
 }
 
 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
-					  u32 kvm_nr_mmu_pages)
+					 unsigned long kvm_nr_mmu_pages)
 {
 	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
 		return -EINVAL;
@@ -4130,7 +4145,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 	return 0;
 }
 
-static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
+static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
 {
 	return kvm->arch.n_max_mmu_pages;
 }
@@ -5014,6 +5029,13 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
 	/* kvm_write_guest_virt_system can pull in tons of pages. */
 	vcpu->arch.l1tf_flush_l1d = true;
 
+	/*
+	 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
+	 * is returned, but our callers are not ready for that and they blindly
+	 * call kvm_inject_page_fault.  Ensure that they at least do not leak
+	 * uninitialized kernel stack memory into cr2 and error code.
+	 */
+	memset(exception, 0, sizeof(*exception));
 	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
 					   PFERR_WRITE_MASK, exception);
 }
@@ -6235,8 +6257,16 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 			if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
 						emulation_type))
 				return EMULATE_DONE;
-			if (ctxt->have_exception && inject_emulated_exception(vcpu))
+			if (ctxt->have_exception) {
+				/*
+				 * #UD should result in just EMULATION_FAILED, and a trap-like
+				 * exception should not be encountered during decode.
+				 */
+				WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR ||
+					     exception_type(ctxt->exception.vector) == EXCPT_TRAP);
+				inject_emulated_exception(vcpu);
 				return EMULATE_DONE;
+			}
 			if (emulation_type & EMULTYPE_SKIP)
 				return EMULATE_FAIL;
 			return handle_emulation_failure(vcpu, emulation_type);
@@ -6308,12 +6338,13 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 		unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
-		kvm_rip_write(vcpu, ctxt->eip);
-		if (r == EMULATE_DONE && ctxt->tf)
-			kvm_vcpu_do_singlestep(vcpu, &r);
 		if (!ctxt->have_exception ||
-		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
+		    exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
+			kvm_rip_write(vcpu, ctxt->eip);
+			if (r == EMULATE_DONE && ctxt->tf)
+				kvm_vcpu_do_singlestep(vcpu, &r);
 			__kvm_set_rflags(vcpu, ctxt->eflags);
+		}
 
 		/*
 		 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
@@ -7224,9 +7255,9 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
 	put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
 }
 
+#ifdef CONFIG_X86_64
 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 {
-#ifdef CONFIG_X86_64
 	struct desc_ptr dt;
 	struct kvm_segment seg;
 	unsigned long val;
@@ -7276,10 +7307,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 
 	for (i = 0; i < 6; i++)
 		enter_smm_save_seg_64(vcpu, buf, i);
-#else
-	WARN_ON_ONCE(1);
-#endif
 }
+#endif
 
 static void enter_smm(struct kvm_vcpu *vcpu)
 {
@@ -7290,9 +7319,11 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 
 	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
 	memset(buf, 0, 512);
+#ifdef CONFIG_X86_64
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		enter_smm_save_state_64(vcpu, buf);
 	else
+#endif
 		enter_smm_save_state_32(vcpu, buf);
 
 	/*
@@ -7350,8 +7381,10 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
 	kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
 
+#ifdef CONFIG_X86_64
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		kvm_x86_ops->set_efer(vcpu, 0);
+#endif
 
 	kvm_update_cpuid(vcpu);
 	kvm_mmu_reset_context(vcpu);
@@ -7648,8 +7681,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		goto cancel_injection;
 	}
 
-	kvm_load_guest_xcr0(vcpu);
-
 	if (req_immediate_exit) {
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
 		kvm_x86_ops->request_immediate_exit(vcpu);
@@ -7702,8 +7733,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
 
-	kvm_put_guest_xcr0(vcpu);
-
 	kvm_before_interrupt(vcpu);
 	kvm_x86_ops->handle_external_intr(vcpu);
 	kvm_after_interrupt(vcpu);
@@ -8216,10 +8245,6 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
 
 static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
-			(sregs->cr4 & X86_CR4_OSXSAVE))
-		return  -EINVAL;
-
 	if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
 		/*
 		 * When EFER.LME and CR0.PG are set, the processor is in
@@ -8238,7 +8263,7 @@ static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 			return -EINVAL;
 	}
 
-	return 0;
+	return kvm_valid_cr4(vcpu, sregs->cr4);
 }
 
 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 1826ed9..3a91ea7 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -345,4 +345,16 @@ static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
 	__this_cpu_write(current_vcpu, NULL);
 }
 
+
+static inline bool kvm_pat_valid(u64 data)
+{
+	if (data & 0xF8F8F8F8F8F8F8F8ull)
+		return false;
+	/* 0, 1, 4, 5, 6, 7 are valid values.  */
+	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
+}
+
+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
+
 #endif
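
kvm_pat_valid() encodes "each PAT byte must be 0, 1, 4, 5, 6 or 7" in two bit tests: no byte may have a bit above 2 set (the 0xF8 per-byte mask), and a byte with bit 1 set must also have bit 2 set, which rules out 2 and 3; OR-ing bit1<<1 back in must therefore not change the value. A worked userspace check of the same trick:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static bool pat_valid(uint64_t data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)	/* any byte >= 8 */
		return false;
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}

int main(void)
{
	unsigned int t;

	for (t = 0; t < 8; t++)
		printf("type %u: %s\n", t, pat_valid(t) ? "valid" : "invalid");
	/* prints valid for 0,1,4,5,6,7 and invalid for 2,3 */

	printf("power-on PAT %s\n",
	       pat_valid(0x0007040600070406ull) ? "valid" : "invalid");
	return 0;
}
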
diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
index 2dd1fe13..19f7079 100644
--- a/arch/x86/lib/cpu.c
+++ b/arch/x86/lib/cpu.c
@@ -1,5 +1,6 @@
 #include <linux/types.h>
 #include <linux/export.h>
+#include <asm/cpu.h>
 
 unsigned int x86_family(unsigned int sig)
 {
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index f5b7f1b..614c2c6b 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -113,8 +113,8 @@ static void delay_mwaitx(unsigned long __loops)
 		__monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
 
 		/*
-		 * AMD, like Intel, supports the EAX hint and EAX=0xf
-		 * means, do not enter any deep C-state and we use it
+		 * AMD, like Intel with MWAIT, supports the EAX hint, and for
+		 * MWAITX EAX=0xf0 means do not enter any deep C-state. We use it
 		 * here in delay() to minimize wakeup latency.
 		 */
 		__mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE);
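
If we read the 0xf -> 0xf0 change correctly, the MWAITX C-state hint sits in EAX[7:4] (with hint 0xf meaning "no deep C-state"), so the old value 0xf landed in the reserved low nibble. A one-line encoding sketch (the macro name is ours):

#include <stdio.h>

#define MWAITX_HINT(h)	(((h) & 0xf) << 4)	/* hint lives in EAX[7:4] */

int main(void)
{
	printf("disable C-states: eax=%#x\n", MWAITX_HINT(0xf));	/* 0xf0 */
	return 0;
}
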
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 4df3e5c..622d596 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -338,13 +338,15 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
 
 		pud = pud_offset(p4d, addr);
 		if (pud_none(*pud)) {
-			addr += PUD_SIZE;
+			WARN_ON_ONCE(addr & ~PUD_MASK);
+			addr = round_up(addr + 1, PUD_SIZE);
 			continue;
 		}
 
 		pmd = pmd_offset(pud, addr);
 		if (pmd_none(*pmd)) {
-			addr += PMD_SIZE;
+			WARN_ON_ONCE(addr & ~PMD_MASK);
+			addr = round_up(addr + 1, PMD_SIZE);
 			continue;
 		}
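
The round_up() fix above matters when addr is not already PUD/PMD-aligned: "addr += PUD_SIZE" then lands mid-way into the next region instead of at its start, so entries can be skipped. The arithmetic, in a standalone sketch (round_up() copied in the kernel's power-of-two form):

#include <stdio.h>

#define PUD_SIZE	(1ull << 30)
#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)	/* y: power of two */

int main(void)
{
	unsigned long long addr = (5ull << 30) + 0x1234;	/* mid-PUD */

	printf("addr            = %#llx\n", addr);
	printf("addr + PUD_SIZE = %#llx (still misaligned)\n",
	       addr + PUD_SIZE);
	printf("round_up        = %#llx (next boundary)\n",
	       round_up(addr + 1, PUD_SIZE));
	return 0;
}
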
 
@@ -643,6 +645,8 @@ void __init pti_init(void)
  */
 void pti_finalize(void)
 {
+	if (!boot_cpu_has(X86_FEATURE_PTI))
+		return;
 	/*
 	 * We need to clone everything (again) that maps parts of the
 	 * kernel image.
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 513ce09..3aa3149 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -13,6 +13,7 @@
 #include <linux/smp.h>
 #include <linux/perf_event.h>
 #include <linux/tboot.h>
+#include <linux/dmi.h>
 
 #include <asm/pgtable.h>
 #include <asm/proto.h>
@@ -24,7 +25,7 @@
 #include <asm/debugreg.h>
 #include <asm/cpu.h>
 #include <asm/mmu_context.h>
-#include <linux/dmi.h>
+#include <asm/cpu_device_id.h>
 
 #ifdef CONFIG_X86_32
 __visible unsigned long saved_context_ebx;
@@ -398,15 +399,14 @@ static int __init bsp_pm_check_init(void)
 
 core_initcall(bsp_pm_check_init);
 
-static int msr_init_context(const u32 *msr_id, const int total_num)
+static int msr_build_context(const u32 *msr_id, const int num)
 {
-	int i = 0;
+	struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
 	struct saved_msr *msr_array;
+	int total_num;
+	int i, j;
 
-	if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
-		pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
-		return -EINVAL;
-	}
+	total_num = saved_msrs->num + num;
 
 	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
 	if (!msr_array) {
@@ -414,19 +414,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num)
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < total_num; i++) {
-		msr_array[i].info.msr_no	= msr_id[i];
+	if (saved_msrs->array) {
+		/*
+		 * Multiple callbacks can invoke this function, so copy any
+		 * MSR save requests from previous invocations.
+		 */
+		memcpy(msr_array, saved_msrs->array,
+		       sizeof(struct saved_msr) * saved_msrs->num);
+
+		kfree(saved_msrs->array);
+	}
+
+	for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
+		msr_array[i].info.msr_no	= msr_id[j];
 		msr_array[i].valid		= false;
 		msr_array[i].info.reg.q		= 0;
 	}
-	saved_context.saved_msrs.num	= total_num;
-	saved_context.saved_msrs.array	= msr_array;
+	saved_msrs->num   = total_num;
+	saved_msrs->array = msr_array;
 
 	return 0;
 }
 
 /*
- * The following section is a quirk framework for problematic BIOSen:
+ * The following sections are a quirk framework for problematic BIOSen:
  * Sometimes MSRs are modified by the BIOSen after suspended to
  * RAM, this might cause unexpected behavior after wakeup.
  * Thus we save/restore these specified MSRs across suspend/resume
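
msr_build_context() now grows the saved-MSR array instead of refusing a second caller: allocate old+new, copy the earlier requests, free the old array, append the new ids. A userspace sketch of the same grow-and-append pattern (the MSR numbers shown are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct saved { uint32_t *ids; int num; };

static int add_ids(struct saved *s, const uint32_t *ids, int num)
{
	int total = s->num + num;
	uint32_t *arr = malloc(total * sizeof(*arr));

	if (!arr)
		return -1;
	if (s->ids) {			/* keep earlier callbacks' requests */
		memcpy(arr, s->ids, s->num * sizeof(*arr));
		free(s->ids);
	}
	memcpy(arr + s->num, ids, num * sizeof(*arr));
	s->ids = arr;
	s->num = total;
	return 0;
}

int main(void)
{
	struct saved s = { 0 };
	uint32_t dmi_quirk[] = { 0x19a };	/* MSR_IA32_THERM_CONTROL-ish */
	uint32_t cpu_quirk[] = { 0xc0011004 };	/* MSR_AMD64_CPUID_FN_1-ish   */
	int i;

	add_ids(&s, dmi_quirk, 1);
	add_ids(&s, cpu_quirk, 1);
	for (i = 0; i < s.num; i++)
		printf("save/restore MSR %#x\n", s.ids[i]);
	free(s.ids);
	return 0;
}
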
@@ -441,7 +452,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
 	u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
 
 	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
-	return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
+	return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
 }
 
 static const struct dmi_system_id msr_save_dmi_table[] = {
@@ -456,9 +467,58 @@ static const struct dmi_system_id msr_save_dmi_table[] = {
 	{}
 };
 
+static int msr_save_cpuid_features(const struct x86_cpu_id *c)
+{
+	u32 cpuid_msr_id[] = {
+		MSR_AMD64_CPUID_FN_1,
+	};
+
+	pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
+		c->family);
+
+	return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
+}
+
+static const struct x86_cpu_id msr_save_cpu_table[] = {
+	{
+		.vendor = X86_VENDOR_AMD,
+		.family = 0x15,
+		.model = X86_MODEL_ANY,
+		.feature = X86_FEATURE_ANY,
+		.driver_data = (kernel_ulong_t)msr_save_cpuid_features,
+	},
+	{
+		.vendor = X86_VENDOR_AMD,
+		.family = 0x16,
+		.model = X86_MODEL_ANY,
+		.feature = X86_FEATURE_ANY,
+		.driver_data = (kernel_ulong_t)msr_save_cpuid_features,
+	},
+	{}
+};
+
+typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
+static int pm_cpu_check(const struct x86_cpu_id *c)
+{
+	const struct x86_cpu_id *m;
+	int ret = 0;
+
+	m = x86_match_cpu(msr_save_cpu_table);
+	if (m) {
+		pm_cpu_match_t fn;
+
+		fn = (pm_cpu_match_t)m->driver_data;
+		ret = fn(m);
+	}
+
+	return ret;
+}
+
 static int pm_check_save_msr(void)
 {
 	dmi_check_system(msr_save_dmi_table);
+	pm_cpu_check(msr_save_cpu_table);
+
 	return 0;
 }
 
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 8901a1f..b81b517 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -18,37 +18,41 @@
 KASAN_SANITIZE	:= n
 KCOV_INSTRUMENT := n
 
+# These are adjustments to the compiler flags used for objects that
+# make up the standalone purgatory.ro
+
+PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
+PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
+PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN)
+
 # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
 # in turn leaves some undefined symbols like __fentry__ in purgatory and not
 # sure how to relocate those.
 ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_sha256.o		+= $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_purgatory.o	+= $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_string.o		+= $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_kexec-purgatory.o	+= $(CC_FLAGS_FTRACE)
+PURGATORY_CFLAGS_REMOVE		+= $(CC_FLAGS_FTRACE)
 endif
 
 ifdef CONFIG_STACKPROTECTOR
-CFLAGS_REMOVE_sha256.o		+= -fstack-protector
-CFLAGS_REMOVE_purgatory.o	+= -fstack-protector
-CFLAGS_REMOVE_string.o		+= -fstack-protector
-CFLAGS_REMOVE_kexec-purgatory.o	+= -fstack-protector
+PURGATORY_CFLAGS_REMOVE		+= -fstack-protector
 endif
 
 ifdef CONFIG_STACKPROTECTOR_STRONG
-CFLAGS_REMOVE_sha256.o		+= -fstack-protector-strong
-CFLAGS_REMOVE_purgatory.o	+= -fstack-protector-strong
-CFLAGS_REMOVE_string.o		+= -fstack-protector-strong
-CFLAGS_REMOVE_kexec-purgatory.o	+= -fstack-protector-strong
+PURGATORY_CFLAGS_REMOVE		+= -fstack-protector-strong
 endif
 
 ifdef CONFIG_RETPOLINE
-CFLAGS_REMOVE_sha256.o		+= $(RETPOLINE_CFLAGS)
-CFLAGS_REMOVE_purgatory.o	+= $(RETPOLINE_CFLAGS)
-CFLAGS_REMOVE_string.o		+= $(RETPOLINE_CFLAGS)
-CFLAGS_REMOVE_kexec-purgatory.o	+= $(RETPOLINE_CFLAGS)
+PURGATORY_CFLAGS_REMOVE		+= $(RETPOLINE_CFLAGS)
 endif
 
+CFLAGS_REMOVE_purgatory.o	+= $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_purgatory.o		+= $(PURGATORY_CFLAGS)
+
+CFLAGS_REMOVE_sha256.o		+= $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_sha256.o			+= $(PURGATORY_CFLAGS)
+
+CFLAGS_REMOVE_string.o		+= $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_string.o			+= $(PURGATORY_CFLAGS)
+
 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
 		$(call if_changed,ld)
 
diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c
index 1804b27..66bcdee 100644
--- a/arch/x86/xen/efi.c
+++ b/arch/x86/xen/efi.c
@@ -77,7 +77,9 @@ static efi_system_table_t __init *xen_efi_probe(void)
 	efi.get_variable             = xen_efi_get_variable;
 	efi.get_next_variable        = xen_efi_get_next_variable;
 	efi.set_variable             = xen_efi_set_variable;
+	efi.set_variable_nonblocking = xen_efi_set_variable;
 	efi.query_variable_info      = xen_efi_query_variable_info;
+	efi.query_variable_info_nonblocking = xen_efi_query_variable_info;
 	efi.update_capsule           = xen_efi_update_capsule;
 	efi.query_capsule_caps       = xen_efi_query_capsule_caps;
 	efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index a285fbd..15580e4 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -515,6 +515,7 @@ void cpu_reset(void)
 				      "add	%2, %2, %7\n\t"
 				      "addi	%0, %0, -1\n\t"
 				      "bnez	%0, 1b\n\t"
+				      "isync\n\t"
 				      /* Jump to identity mapping */
 				      "jx	%3\n"
 				      "2:\n\t"
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 04f19de..4092555 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -119,13 +119,6 @@ EXPORT_SYMBOL(__invalidate_icache_range);
 // FIXME EXPORT_SYMBOL(screen_info);
 #endif
 
-EXPORT_SYMBOL(outsb);
-EXPORT_SYMBOL(outsw);
-EXPORT_SYMBOL(outsl);
-EXPORT_SYMBOL(insb);
-EXPORT_SYMBOL(insw);
-EXPORT_SYMBOL(insl);
-
 extern long common_exception_return;
 EXPORT_SYMBOL(common_exception_return);
 
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index becd793..64f4dcf 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -1886,9 +1886,14 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
 	    blk_rq_pos(container_of(rb_prev(&req->rb_node),
 				    struct request, rb_node))) {
 		struct bfq_queue *bfqq = bfq_init_rq(req);
-		struct bfq_data *bfqd = bfqq->bfqd;
+		struct bfq_data *bfqd;
 		struct request *prev, *next_rq;
 
+		if (!bfqq)
+			return;
+
+		bfqd = bfqq->bfqd;
+
 		/* Reposition request in its sort_list */
 		elv_rb_del(&bfqq->sort_list, req);
 		elv_rb_add(&bfqq->sort_list, req);
@@ -1930,6 +1935,9 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
 	struct bfq_queue *bfqq = bfq_init_rq(rq),
 		*next_bfqq = bfq_init_rq(next);
 
+	if (!bfqq)
+		return;
+
 	/*
 	 * If next and rq belong to the same bfq_queue and next is older
 	 * than rq, then reposition rq in the fifo (by substituting next
@@ -3938,7 +3946,6 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
 #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
 static void bfq_update_dispatch_stats(struct request_queue *q,
 				      struct request *rq,
-				      struct bfq_queue *in_serv_queue,
 				      bool idle_timer_disabled)
 {
 	struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
@@ -3960,17 +3967,15 @@ static void bfq_update_dispatch_stats(struct request_queue *q,
 	 * bfqq_group(bfqq) exists as well.
 	 */
 	spin_lock_irq(q->queue_lock);
-	if (idle_timer_disabled)
+	if (bfqq && idle_timer_disabled)
 		/*
-		 * Since the idle timer has been disabled,
-		 * in_serv_queue contained some request when
-		 * __bfq_dispatch_request was invoked above, which
-		 * implies that rq was picked exactly from
-		 * in_serv_queue. Thus in_serv_queue == bfqq, and is
-		 * therefore guaranteed to exist because of the above
-		 * arguments.
+		 * The current active queue and group might have
+		 * been updated along with the request via
+		 * __bfq_dispatch_request(). So always use the
+		 * current request to derive its associated bfq
+		 * queue and group.
 		 */
-		bfqg_stats_update_idle_time(bfqq_group(in_serv_queue));
+		bfqg_stats_update_idle_time(bfqq_group(bfqq));
 	if (bfqq) {
 		struct bfq_group *bfqg = bfqq_group(bfqq);
 
@@ -3983,7 +3988,6 @@ static void bfq_update_dispatch_stats(struct request_queue *q,
 #else
 static inline void bfq_update_dispatch_stats(struct request_queue *q,
 					     struct request *rq,
-					     struct bfq_queue *in_serv_queue,
 					     bool idle_timer_disabled) {}
 #endif
 
@@ -4006,7 +4010,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
 
 	spin_unlock_irq(&bfqd->lock);
 
-	bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue,
+	bfq_update_dispatch_stats(hctx->queue, rq,
 				  idle_timer_disabled);
 
 	return rq;
@@ -4590,12 +4594,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 
 	spin_lock_irq(&bfqd->lock);
 	bfqq = bfq_init_rq(rq);
-	if (at_head || blk_rq_is_passthrough(rq)) {
+	if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
 		if (at_head)
 			list_add(&rq->queuelist, &bfqd->dispatch);
 		else
 			list_add_tail(&rq->queuelist, &bfqd->dispatch);
-	} else { /* bfqq is assumed to be non null here */
+	} else {
 		idle_timer_disabled = __bfq_insert_request(bfqd, rq);
 		/*
 		 * Update bfqq, because, if a queue merge has occurred
@@ -5421,6 +5425,18 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
 	return -ENOMEM;
 }
 
+static void bfq_registered_queue(struct request_queue *q)
+{
+	struct elevator_queue *e = q->elevator;
+	struct bfq_data *bfqd = e->elevator_data;
+
+	/*
+	 * Default to IOPS mode with no idling for SSDs
+	 */
+	if (blk_queue_nonrot(q))
+		bfqd->bfq_slice_idle = 0;
+}
+
 static void bfq_slab_kill(void)
 {
 	kmem_cache_destroy(bfq_pool);
@@ -5668,6 +5684,7 @@ static struct elevator_type iosched_bfq_mq = {
 		.init_hctx		= bfq_init_hctx,
 		.init_sched		= bfq_init_queue,
 		.exit_sched		= bfq_exit_queue,
+		.elevator_registered_fn = bfq_registered_queue,
 	},
 
 	.uses_mq =		true,
diff --git a/block/blk-core.c b/block/blk-core.c
index 365d17b..8f877ee 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -816,7 +816,8 @@ void blk_cleanup_queue(struct request_queue *q)
 	blk_exit_queue(q);
 
 	if (q->mq_ops)
-		blk_mq_free_queue(q);
+		blk_mq_exit_queue(q);
+
 	percpu_ref_exit(&q->q_usage_counter);
 
 	spin_lock_irq(lock);
@@ -1164,7 +1165,7 @@ int blk_init_allocated_queue(struct request_queue *q)
 {
 	WARN_ON_ONCE(q->mq_ops);
 
-	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
+	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size, GFP_KERNEL);
 	if (!q->fq)
 		return -ENOMEM;
 
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 7648794..256fa1c 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -232,6 +232,16 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 
 		/* release the tag's ownership to the req cloned from */
 		spin_lock_irqsave(&fq->mq_flush_lock, flags);
+
+		if (!refcount_dec_and_test(&flush_rq->ref)) {
+			fq->rq_status = error;
+			spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
+			return;
+		}
+
+		if (fq->rq_status != BLK_STS_OK)
+			error = fq->rq_status;
+
 		hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
 		if (!q->elevator) {
 			blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
@@ -566,12 +576,12 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 EXPORT_SYMBOL(blkdev_issue_flush);
 
 struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
-		int node, int cmd_size)
+		int node, int cmd_size, gfp_t flags)
 {
 	struct blk_flush_queue *fq;
 	int rq_sz = sizeof(struct request);
 
-	fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
+	fq = kzalloc_node(sizeof(*fq), flags, node);
 	if (!fq)
 		goto fail;
 
@@ -579,7 +589,7 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 		spin_lock_init(&fq->mq_flush_lock);
 
 	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
-	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
+	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
 	if (!fq->flush_rq)
 		goto fail_rq;
 
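The flush_end_io() hunk above makes completion a last-reference operation: an
early caller records the error in fq->rq_status and bails out, and whoever
drops the final reference finishes the request with the first recorded error.
A rough userspace sketch of that handoff, names invented and the kernel's
mq_flush_lock reduced to a comment:

#include <stdatomic.h>

struct flush_ctx {
	atomic_int ref;		/* normal completion path + timeout path */
	int rq_status;		/* error stashed for the last dropper */
};

/* Each path calls this; only the final reference holder completes.
 * In the kernel both the store and the load of rq_status happen under
 * fq->mq_flush_lock, which this sketch omits. */
static int put_and_maybe_complete(struct flush_ctx *fq, int error)
{
	if (atomic_fetch_sub(&fq->ref, 1) != 1) {
		fq->rq_status = error;	/* record, let the other side finish */
		return 0;
	}
	if (fq->rq_status != 0)
		error = fq->rq_status;
	/* ...actual completion work would run here... */
	return error;
}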
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 9b6a150..c6529f2 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -562,15 +562,12 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
 	u64 now = ktime_to_ns(ktime_get());
 	bool issue_as_root = bio_issue_as_root_blkg(bio);
 	bool enabled = false;
+	int inflight = 0;
 
 	blkg = bio->bi_blkg;
 	if (!blkg)
 		return;
 
-	/* We didn't actually submit this bio, don't account it. */
-	if (bio->bi_status == BLK_STS_AGAIN)
-		return;
-
 	iolat = blkg_to_lat(bio->bi_blkg);
 	if (!iolat)
 		return;
@@ -587,45 +584,28 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
 		}
 		rqw = &iolat->rq_wait;
 
-		atomic_dec(&rqw->inflight);
-		if (iolat->min_lat_nsec == 0)
-			goto next;
-		iolatency_record_time(iolat, &bio->bi_issue, now,
-				      issue_as_root);
-		window_start = atomic64_read(&iolat->window_start);
-		if (now > window_start &&
-		    (now - window_start) >= iolat->cur_win_nsec) {
-			if (atomic64_cmpxchg(&iolat->window_start,
-					window_start, now) == window_start)
-				iolatency_check_latencies(iolat, now);
+		inflight = atomic_dec_return(&rqw->inflight);
+		WARN_ON_ONCE(inflight < 0);
+		/*
+		 * If bi_status is BLK_STS_AGAIN, the bio wasn't actually
+		 * submitted, so do not account for it.
+		 */
+		if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
+			iolatency_record_time(iolat, &bio->bi_issue, now,
+					      issue_as_root);
+			window_start = atomic64_read(&iolat->window_start);
+			if (now > window_start &&
+			    (now - window_start) >= iolat->cur_win_nsec) {
+				if (atomic64_cmpxchg(&iolat->window_start,
+					     window_start, now) == window_start)
+					iolatency_check_latencies(iolat, now);
+			}
 		}
-next:
 		wake_up(&rqw->wait);
 		blkg = blkg->parent;
 	}
 }
 
-static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
-{
-	struct blkcg_gq *blkg;
-
-	blkg = bio->bi_blkg;
-	while (blkg && blkg->parent) {
-		struct rq_wait *rqw;
-		struct iolatency_grp *iolat;
-
-		iolat = blkg_to_lat(blkg);
-		if (!iolat)
-			goto next;
-
-		rqw = &iolat->rq_wait;
-		atomic_dec(&rqw->inflight);
-		wake_up(&rqw->wait);
-next:
-		blkg = blkg->parent;
-	}
-}
-
 static void blkcg_iolatency_exit(struct rq_qos *rqos)
 {
 	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
@@ -637,7 +617,6 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos)
 
 static struct rq_qos_ops blkcg_iolatency_ops = {
 	.throttle = blkcg_iolatency_throttle,
-	.cleanup = blkcg_iolatency_cleanup,
 	.done_bio = blkcg_iolatency_done_bio,
 	.exit = blkcg_iolatency_exit,
 };
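
With the separate ->cleanup hook gone, blkcg_iolatency_done_bio() above now
releases the inflight slot unconditionally and only skips the latency
accounting for bios that were never really submitted (BLK_STS_AGAIN). The
control flow, reduced to a standalone sketch with invented names:

#include <stdatomic.h>
#include <stdbool.h>

/* Always give back the inflight slot; account latency only for bios that
 * were actually submitted, as the folded done_bio path above does. */
static void done_bio(atomic_int *inflight, bool submitted, long min_lat_nsec,
		     void (*record_time)(void))
{
	int left = atomic_fetch_sub(inflight, 1) - 1;

	(void)left;	/* the kernel WARN_ON_ONCE()s if this went negative */
	if (min_lat_nsec && submitted)
		record_time();
	/* the wake_up(&rqw->wait) equivalent would follow here */
}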
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index aafb442..0b7297a 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -10,6 +10,7 @@
 #include <linux/smp.h>
 
 #include <linux/blk-mq.h>
+#include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
 
@@ -21,6 +22,11 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
 {
 	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
 						  kobj);
+
+	if (hctx->flags & BLK_MQ_F_BLOCKING)
+		cleanup_srcu_struct(hctx->srcu);
+	blk_free_flush_queue(hctx->fq);
+	sbitmap_free(&hctx->ctx_map);
 	free_cpumask_var(hctx->cpumask);
 	kfree(hctx->ctxs);
 	kfree(hctx);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 70d839b..684acaa 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -844,7 +844,10 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 	 */
 	if (blk_mq_req_expired(rq, next))
 		blk_mq_rq_timed_out(rq, reserved);
-	if (refcount_dec_and_test(&rq->ref))
+
+	if (is_flush_rq(rq, hctx))
+		rq->end_io(rq, 0);
+	else if (refcount_dec_and_test(&rq->ref))
 		__blk_mq_free_request(rq);
 }
 
@@ -2157,12 +2160,7 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
 
-	if (hctx->flags & BLK_MQ_F_BLOCKING)
-		cleanup_srcu_struct(hctx->srcu);
-
 	blk_mq_remove_cpuhp(hctx);
-	blk_free_flush_queue(hctx->fq);
-	sbitmap_free(&hctx->ctx_map);
 }
 
 static void blk_mq_exit_hw_queues(struct request_queue *q,
@@ -2203,12 +2201,12 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	 * runtime
 	 */
 	hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
-					GFP_KERNEL, node);
+			GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
 	if (!hctx->ctxs)
 		goto unregister_cpu_notifier;
 
-	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
-			      node))
+	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
+				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node))
 		goto free_ctxs;
 
 	hctx->nr_ctx = 0;
@@ -2221,7 +2219,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
 		goto free_bitmap;
 
-	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
+	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
+			GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
 	if (!hctx->fq)
 		goto exit_hctx;
 
@@ -2465,8 +2464,6 @@ void blk_mq_release(struct request_queue *q)
 	struct blk_mq_hw_ctx *hctx;
 	unsigned int i;
 
-	cancel_delayed_work_sync(&q->requeue_work);
-
 	/* hctx kobj stays in hctx */
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (!hctx)
@@ -2535,12 +2532,14 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 
 		node = blk_mq_hw_queue_to_node(q->mq_map, i);
 		hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
-					GFP_KERNEL, node);
+				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+				node);
 		if (!hctxs[i])
 			break;
 
-		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
-						node)) {
+		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask,
+					GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+					node)) {
 			kfree(hctxs[i]);
 			hctxs[i] = NULL;
 			break;
@@ -2662,7 +2661,8 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 }
 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
 
-void blk_mq_free_queue(struct request_queue *q)
+/* tags can _not_ be used after returning from blk_mq_exit_queue */
+void blk_mq_exit_queue(struct request_queue *q)
 {
 	struct blk_mq_tag_set	*set = q->tag_set;
 
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 9497b47..5ad9251 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -31,7 +31,7 @@ struct blk_mq_ctx {
 } ____cacheline_aligned_in_smp;
 
 void blk_mq_freeze_queue(struct request_queue *q);
-void blk_mq_free_queue(struct request_queue *q);
+void blk_mq_exit_queue(struct request_queue *q);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
 bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index 0005dfd..43bcd4e 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -148,24 +148,27 @@ bool rq_depth_calc_max_depth(struct rq_depth *rqd)
 	return ret;
 }
 
-void rq_depth_scale_up(struct rq_depth *rqd)
+/* Returns true on success and false if scaling up wasn't possible */
+bool rq_depth_scale_up(struct rq_depth *rqd)
 {
 	/*
 	 * Hit max in previous round, stop here
 	 */
 	if (rqd->scaled_max)
-		return;
+		return false;
 
 	rqd->scale_step--;
 
 	rqd->scaled_max = rq_depth_calc_max_depth(rqd);
+	return true;
 }
 
 /*
  * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
- * had a latency violation.
+ * had a latency violation. Returns true on success and false if
+ * scaling down wasn't possible.
  */
-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
+bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
 {
 	/*
 	 * Stop scaling down when we've hit the limit. This also prevents
@@ -173,7 +176,7 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
 	 * keep up.
 	 */
 	if (rqd->max_depth == 1)
-		return;
+		return false;
 
 	if (rqd->scale_step < 0 && hard_throttle)
 		rqd->scale_step = 0;
@@ -182,6 +185,7 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
 
 	rqd->scaled_max = false;
 	rq_depth_calc_max_depth(rqd);
+	return true;
 }
 
 void rq_qos_exit(struct request_queue *q)
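
The rewritten rq_qos_del() above swaps prev-pointer bookkeeping for the
indirect-pointer idiom: walk a pointer-to-pointer and unlink by assigning
through it. This also fixes the head-removal case, which the old code got
wrong ('q->rq_qos = cur' left the node linked). The idiom in isolation,
with invented types:

struct node { int v; struct node *next; };

/* Remove 'victim' from a singly linked list without tracking 'prev':
 * 'cur' always names the pointer that references the current node, so
 * unlinking is the same assignment whether victim is head or interior. */
static void unlink_node(struct node **head, struct node *victim)
{
	struct node **cur;

	for (cur = head; *cur; cur = &(*cur)->next) {
		if (*cur == victim) {
			*cur = victim->next;
			break;
		}
	}
}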
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index 32b02ef..98caba3 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -80,22 +80,19 @@ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
 
 static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
 {
-	struct rq_qos *cur, *prev = NULL;
-	for (cur = q->rq_qos; cur; cur = cur->next) {
-		if (cur == rqos) {
-			if (prev)
-				prev->next = rqos->next;
-			else
-				q->rq_qos = cur;
+	struct rq_qos **cur;
+
+	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
+		if (*cur == rqos) {
+			*cur = rqos->next;
 			break;
 		}
-		prev = cur;
 	}
 }
 
 bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
-void rq_depth_scale_up(struct rq_depth *rqd);
-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
+bool rq_depth_scale_up(struct rq_depth *rqd);
+bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
 bool rq_depth_calc_max_depth(struct rq_depth *rqd);
 
 void rq_qos_cleanup(struct request_queue *, struct bio *);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 3772671..bab47a1 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -836,6 +836,9 @@ static void __blk_release_queue(struct work_struct *work)
 
 	blk_free_queue_stats(q->stats);
 
+	if (q->mq_ops)
+		cancel_delayed_work_sync(&q->requeue_work);
+
 	blk_exit_rl(q, &q->root_rl);
 
 	if (q->queue_tags)
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 0c62bf4..f1de8ba 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -307,7 +307,8 @@ static void calc_wb_limits(struct rq_wb *rwb)
 
 static void scale_up(struct rq_wb *rwb)
 {
-	rq_depth_scale_up(&rwb->rq_depth);
+	if (!rq_depth_scale_up(&rwb->rq_depth))
+		return;
 	calc_wb_limits(rwb);
 	rwb->unknown_cnt = 0;
 	rwb_wake_all(rwb);
@@ -316,7 +317,8 @@ static void scale_up(struct rq_wb *rwb)
 
 static void scale_down(struct rq_wb *rwb, bool hard_throttle)
 {
-	rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
+	if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
+		return;
 	calc_wb_limits(rwb);
 	rwb->unknown_cnt = 0;
 	rwb_trace_step(rwb, "scale down");
diff --git a/block/blk.h b/block/blk.h
index 5e5729d..34fcead 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -23,6 +23,7 @@ struct blk_flush_queue {
 	unsigned int		flush_queue_delayed:1;
 	unsigned int		flush_pending_idx:1;
 	unsigned int		flush_running_idx:1;
+	blk_status_t		rq_status;
 	unsigned long		flush_pending_since;
 	struct list_head	flush_queue[2];
 	struct list_head	flush_data_in_flight;
@@ -105,8 +106,14 @@ static inline void __blk_get_queue(struct request_queue *q)
 	kobject_get(&q->kobj);
 }
 
+static inline bool
+is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
+{
+	return hctx->fq->flush_rq == req;
+}
+
 struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
-		int node, int cmd_size);
+		int node, int cmd_size, gfp_t flags);
 void blk_free_flush_queue(struct blk_flush_queue *q);
 
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
diff --git a/block/elevator.c b/block/elevator.c
index 3d3316f..3d88ab3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -856,6 +856,8 @@ int elv_register_queue(struct request_queue *q)
 		e->registered = 1;
 		if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
 			e->type->ops.sq.elevator_registered_fn(q);
+		else if (e->uses_mq && e->type->ops.mq.elevator_registered_fn)
+			e->type->ops.mq.elevator_registered_fn(q);
 	}
 	return error;
 }
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index d5e21ce..69094d6 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -376,13 +376,6 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd)
  * hardware queue, but we may return a request that is for a
  * different hardware queue. This is because mq-deadline has shared
  * state for all hardware queues, in terms of sorting, FIFOs, etc.
- *
- * For a zoned block device, __dd_dispatch_request() may return NULL
- * if all the queued write requests are directed at zones that are already
- * locked due to on-going write requests. In this case, make sure to mark
- * the queue as needing a restart to ensure that the queue is run again
- * and the pending writes dispatched once the target zones for the ongoing
- * write requests are unlocked in dd_finish_request().
  */
 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 {
@@ -391,9 +384,6 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 
 	spin_lock(&dd->lock);
 	rq = __dd_dispatch_request(dd);
-	if (!rq && blk_queue_is_zoned(hctx->queue) &&
-	    !list_empty(&dd->fifo_list[WRITE]))
-		blk_mq_sched_mark_restart_hctx(hctx);
 	spin_unlock(&dd->lock);
 
 	return rq;
@@ -559,6 +549,13 @@ static void dd_prepare_request(struct request *rq, struct bio *bio)
  * spinlock so that the zone is never unlocked while deadline_fifo_request()
  * or deadline_next_request() are executing. This function is called for
  * all requests, whether or not these requests complete successfully.
+ *
+ * For a zoned block device, __dd_dispatch_request() may have stopped
+ * dispatching requests if all the queued requests are write requests directed
+ * at zones that are already locked due to on-going write requests. To ensure
+ * write request dispatch progress in this case, mark the queue as needing a
+ * restart to ensure that the queue is run again after completion of the
+ * request and zones being unlocked.
  */
 static void dd_finish_request(struct request *rq)
 {
@@ -570,6 +567,12 @@ static void dd_finish_request(struct request *rq)
 
 		spin_lock_irqsave(&dd->zone_lock, flags);
 		blk_req_zone_write_unlock(rq);
+		if (!list_empty(&dd->fifo_list[WRITE])) {
+			struct blk_mq_hw_ctx *hctx;
+
+			hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
+			blk_mq_sched_mark_restart_hctx(hctx);
+		}
 		spin_unlock_irqrestore(&dd->zone_lock, flags);
 	}
 }
diff --git a/build.config.aarch64 b/build.config.aarch64
new file mode 100644
index 0000000..523bbc0
--- /dev/null
+++ b/build.config.aarch64
@@ -0,0 +1,11 @@
+ARCH=arm64
+
+CLANG_TRIPLE=aarch64-linux-gnu-
+CROSS_COMPILE=aarch64-linux-androidkernel-
+LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin
+
+FILES="
+arch/arm64/boot/Image.gz
+vmlinux
+System.map
+"
diff --git a/build.config.common b/build.config.common
new file mode 100644
index 0000000..707de4f
--- /dev/null
+++ b/build.config.common
@@ -0,0 +1,9 @@
+BRANCH=android-4.19-q
+KERNEL_DIR=common
+
+CC=clang
+CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r365631c/bin
+
+EXTRA_CMDS=''
+STOP_SHIP_TRACEPRINTK=1
+LD=ld.lld
diff --git a/build.config.cuttlefish.aarch64 b/build.config.cuttlefish.aarch64
index da780202..0cb6019 100644
--- a/build.config.cuttlefish.aarch64
+++ b/build.config.cuttlefish.aarch64
@@ -1,18 +1,5 @@
-ARCH=arm64
-BRANCH=android-4.19
-CC=clang
-CLANG_TRIPLE=aarch64-linux-gnu-
-CROSS_COMPILE=aarch64-linux-androidkernel-
+. ${ROOT_DIR}/common/build.config.common
+. ${ROOT_DIR}/common/build.config.aarch64
+
 DEFCONFIG=cuttlefish_defconfig
-EXTRA_CMDS=''
-KERNEL_DIR=common
 POST_DEFCONFIG_CMDS="check_defconfig"
-CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r353983c/bin
-LD=ld.lld
-LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin
-FILES="
-arch/arm64/boot/Image.gz
-vmlinux
-System.map
-"
-STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.cuttlefish.x86_64 b/build.config.cuttlefish.x86_64
index da47330..fed773c 100644
--- a/build.config.cuttlefish.x86_64
+++ b/build.config.cuttlefish.x86_64
@@ -1,18 +1,5 @@
-ARCH=x86_64
-BRANCH=android-4.19
-CC=clang
-CLANG_TRIPLE=x86_64-linux-gnu-
-CROSS_COMPILE=x86_64-linux-androidkernel-
+. ${ROOT_DIR}/common/build.config.common
+. ${ROOT_DIR}/common/build.config.x86_64
+
 DEFCONFIG=x86_64_cuttlefish_defconfig
-EXTRA_CMDS=''
-KERNEL_DIR=common
 POST_DEFCONFIG_CMDS="check_defconfig"
-CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r353983c/bin
-LD=ld.lld
-LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin
-FILES="
-arch/x86/boot/bzImage
-vmlinux
-System.map
-"
-STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.x86_64 b/build.config.x86_64
new file mode 100644
index 0000000..df73a47
--- /dev/null
+++ b/build.config.x86_64
@@ -0,0 +1,11 @@
+ARCH=x86_64
+
+CLANG_TRIPLE=x86_64-linux-gnu-
+CROSS_COMPILE=x86_64-linux-androidkernel-
+LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin
+
+FILES="
+arch/x86/boot/bzImage
+vmlinux
+System.map
+"
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index fff74f1..61e562f 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
 	return max(start, end_page);
 }
 
-static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
 {
 	u8 *addr;
 
@@ -103,19 +103,21 @@ static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
 	addr = skcipher_get_spot(addr, bsize);
 	scatterwalk_copychunks(addr, &walk->out, bsize,
 			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
+	return 0;
 }
 
 int skcipher_walk_done(struct skcipher_walk *walk, int err)
 {
-	unsigned int n; /* bytes processed */
-	bool more;
+	unsigned int n = walk->nbytes;
+	unsigned int nbytes = 0;
 
-	if (unlikely(err < 0))
+	if (!n)
 		goto finish;
 
-	n = walk->nbytes - err;
-	walk->total -= n;
-	more = (walk->total != 0);
+	if (likely(err >= 0)) {
+		n -= err;
+		nbytes = walk->total - n;
+	}
 
 	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
 				    SKCIPHER_WALK_SLOW |
@@ -131,7 +133,7 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err)
 		memcpy(walk->dst.virt.addr, walk->page, n);
 		skcipher_unmap_dst(walk);
 	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
-		if (err) {
+		if (err > 0) {
 			/*
 			 * Didn't process all bytes.  Either the algorithm is
 			 * broken, or this was the last step and it turned out
@@ -139,27 +141,29 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err)
 			 * the algorithm requires it.
 			 */
 			err = -EINVAL;
-			goto finish;
-		}
-		skcipher_done_slow(walk, n);
-		goto already_advanced;
+			nbytes = 0;
+		} else
+			n = skcipher_done_slow(walk, n);
 	}
 
+	if (err > 0)
+		err = 0;
+
+	walk->total = nbytes;
+	walk->nbytes = 0;
+
 	scatterwalk_advance(&walk->in, n);
 	scatterwalk_advance(&walk->out, n);
-already_advanced:
-	scatterwalk_done(&walk->in, 0, more);
-	scatterwalk_done(&walk->out, 1, more);
+	scatterwalk_done(&walk->in, 0, nbytes);
+	scatterwalk_done(&walk->out, 1, nbytes);
 
-	if (more) {
+	if (nbytes) {
 		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
 			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
 		return skcipher_walk_next(walk);
 	}
-	err = 0;
-finish:
-	walk->nbytes = 0;
 
+finish:
 	/* Short-circuit for the common/fast path. */
 	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
 		goto out;
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index fc44741..a448cdf 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -282,9 +282,13 @@ static int acpi_processor_get_info(struct acpi_device *device)
 	}
 
 	if (acpi_duplicate_processor_id(pr->acpi_id)) {
-		dev_err(&device->dev,
-			"Failed to get unique processor _UID (0x%x)\n",
-			pr->acpi_id);
+		if (pr->acpi_id == 0xff)
+			dev_info_once(&device->dev,
+				"Entry not well-defined, consider updating BIOS\n");
+		else
+			dev_err(&device->dev,
+				"Failed to get unique processor _UID (0x%x)\n",
+				pr->acpi_id);
 		return -ENODEV;
 	}
 
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index d73afb56..1a23e7a 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -73,6 +73,12 @@ module_param(report_key_events, int, 0644);
 MODULE_PARM_DESC(report_key_events,
 	"0: none, 1: output changes, 2: brightness changes, 3: all");
 
+static int hw_changes_brightness = -1;
+module_param(hw_changes_brightness, int, 0644);
+MODULE_PARM_DESC(hw_changes_brightness,
+	"Set this to 1 on buggy hw which changes the brightness itself when "
+	"a hotkey is pressed: -1: auto, 0: normal 1: hw-changes-brightness");
+
 /*
  * Whether the struct acpi_video_device_attrib::device_id_scheme bit should be
  * assumed even if not actually set.
@@ -418,6 +424,14 @@ static int video_set_report_key_events(const struct dmi_system_id *id)
 	return 0;
 }
 
+static int video_hw_changes_brightness(
+	const struct dmi_system_id *d)
+{
+	if (hw_changes_brightness == -1)
+		hw_changes_brightness = 1;
+	return 0;
+}
+
 static const struct dmi_system_id video_dmi_table[] = {
 	/*
 	 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
@@ -542,6 +556,21 @@ static const struct dmi_system_id video_dmi_table[] = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
 		},
 	},
+	/*
+	 * Some machines change the brightness themselves when a brightness
+	 * hotkey gets pressed, despite us telling them not to. In this case
+	 * acpi_video_device_notify() should only call backlight_force_update(
+	 * BACKLIGHT_UPDATE_HOTKEY) and not do anything else.
+	 */
+	{
+	 /* https://bugzilla.kernel.org/show_bug.cgi?id=204077 */
+	 .callback = video_hw_changes_brightness,
+	 .ident = "Packard Bell EasyNote MZ35",
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Packard Bell"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "EasyNote MZ35"),
+		},
+	},
 	{}
 };
 
@@ -1625,6 +1654,14 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
 	bus = video_device->video;
 	input = bus->input;
 
+	if (hw_changes_brightness > 0) {
+		if (video_device->backlight)
+			backlight_force_update(video_device->backlight,
+					       BACKLIGHT_UPDATE_HOTKEY);
+		acpi_notifier_call_chain(device, event, 0);
+		return;
+	}
+
 	switch (event) {
 	case ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS:	/* Cycle brightness */
 		brightness_switch_event(video_device, event);
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index d9ce4b1..41228e5 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -369,8 +369,10 @@ static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
 	union acpi_object  *psd = NULL;
 	struct acpi_psd_package *pdomain;
 
-	status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
-			ACPI_TYPE_PACKAGE);
+	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
+					    &buffer, ACPI_TYPE_PACKAGE);
+	if (status == AE_NOT_FOUND)	/* _PSD is optional */
+		return 0;
 	if (ACPI_FAILURE(status))
 		return -ENODEV;
 
@@ -907,8 +909,8 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
 			pcc_data[pcc_ss_id]->refcount--;
 			if (!pcc_data[pcc_ss_id]->refcount) {
 				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
-				pcc_data[pcc_ss_id]->pcc_channel_acquired = 0;
 				kfree(pcc_data[pcc_ss_id]);
+				pcc_data[pcc_ss_id] = NULL;
 			}
 		}
 	}
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
index e967c11..222ea3f 100644
--- a/drivers/acpi/custom_method.c
+++ b/drivers/acpi/custom_method.c
@@ -48,8 +48,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
 	if ((*ppos > max_size) ||
 	    (*ppos + count > max_size) ||
 	    (*ppos + count < count) ||
-	    (count > uncopied_bytes))
+	    (count > uncopied_bytes)) {
+		kfree(buf);
 		return -EINVAL;
+	}
 
 	if (copy_from_user(buf + (*ppos), user_buf, count)) {
 		kfree(buf);
@@ -69,6 +71,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
 		add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
 	}
 
+	kfree(buf);
 	return count;
 }
 
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index c576a6f..94ded95 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -462,8 +462,10 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
 		 * No IRQ known to the ACPI subsystem - maybe the BIOS /
 		 * driver reported one, then use it. Exit in any case.
 		 */
-		if (!acpi_pci_irq_valid(dev, pin))
+		if (!acpi_pci_irq_valid(dev, pin)) {
+			kfree(entry);
 			return 0;
+		}
 
 		if (acpi_isa_register_gsi(dev))
 			dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index da031b1..9dbf86a0 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -510,6 +510,44 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
 }
 
 /**
+ * check_acpi_cpu_flag() - Determine if CPU node has a flag set
+ * @cpu: Kernel logical CPU number
+ * @rev: The minimum PPTT revision defining the flag
+ * @flag: The flag itself
+ *
+ * Check the node representing a CPU for a given flag.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, the CPU cannot be found or
+ *	   the table revision isn't new enough.
+ *	   1, if any passed flag is set
+ *	   0, if the flag is unset
+ */
+static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag)
+{
+	struct acpi_table_header *table;
+	acpi_status status;
+	u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+	struct acpi_pptt_processor *cpu_node = NULL;
+	int ret = -ENOENT;
+
+	status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
+	if (ACPI_FAILURE(status)) {
+		pr_warn_once("No PPTT table found, cpu topology may be inaccurate\n");
+		return ret;
+	}
+
+	if (table->revision >= rev)
+		cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+
+	if (cpu_node)
+		ret = (cpu_node->flags & flag) != 0;
+
+	acpi_put_table(table);
+
+	return ret;
+}
+
+/**
  * acpi_find_last_cache_level() - Determines the number of cache levels for a PE
  * @cpu: Kernel logical cpu number
  *
@@ -574,6 +612,20 @@ int cache_setup_acpi(unsigned int cpu)
 }
 
 /**
+ * acpi_pptt_cpu_is_thread() - Determine if CPU is a thread
+ * @cpu: Kernel logical CPU number
+ *
+ * Return: 1, a thread
+ *         0, not a thread
+ *         -ENOENT, if the PPTT doesn't exist, the CPU cannot be found or
+ *         the table revision isn't new enough.
+ */
+int acpi_pptt_cpu_is_thread(unsigned int cpu)
+{
+	return check_acpi_cpu_flag(cpu, 2, ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD);
+}
+
+/**
  * find_acpi_cpu_topology() - Determine a unique topology value for a given cpu
  * @cpu: Kernel logical cpu number
  * @level: The topological level for which we would like a unique ID
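 *
check_acpi_cpu_flag() above gates the node lookup on the PPTT revision before
testing the flag bit, so an old table reports -ENOENT rather than a bogus 0.
The precedence, reduced to a standalone sketch with hypothetical types:

struct pptt_node { unsigned int flags; };

/* Returns 1 if the flag is set, 0 if clear, and -1 when the node is
 * absent or the table predates the flag (mirrors check_acpi_cpu_flag). */
static int check_flag(const struct pptt_node *node, int table_rev,
		      int min_rev, unsigned int flag)
{
	if (table_rev < min_rev || !node)
		return -1;
	return (node->flags & flag) != 0;
}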
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 021ce46..fa1c5a4 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -81,6 +81,12 @@ enum board_ids {
 	board_ahci_sb700,	/* for SB700 and SB800 */
 	board_ahci_vt8251,
 
+	/*
+	 * board IDs for Intel chipsets that support more than 6 ports
+	 * *and* end up needing the PCS quirk.
+	 */
+	board_ahci_pcs7,
+
 	/* aliases */
 	board_ahci_mcp_linux	= board_ahci_mcp65,
 	board_ahci_mcp67	= board_ahci_mcp65,
@@ -236,6 +242,12 @@ static const struct ata_port_info ahci_port_info[] = {
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_vt8251_ops,
 	},
+	[board_ahci_pcs7] = {
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
 };
 
 static const struct pci_device_id ahci_pci_tbl[] = {
@@ -280,26 +292,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
 	{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_mobile }, /* PCH M RAID */
 	{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
-	{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci_pcs7 }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci_pcs7 }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b2), board_ahci_pcs7 }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b3), board_ahci_pcs7 }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b4), board_ahci_pcs7 }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b5), board_ahci_pcs7 }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b6), board_ahci_pcs7 }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b7), board_ahci_pcs7 }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19bE), board_ahci_pcs7 }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19bF), board_ahci_pcs7 }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c0), board_ahci_pcs7 }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c1), board_ahci_pcs7 }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c2), board_ahci_pcs7 }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c3), board_ahci_pcs7 }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c4), board_ahci_pcs7 }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c5), board_ahci_pcs7 }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c6), board_ahci_pcs7 }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c7), board_ahci_pcs7 }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci_pcs7 }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci_pcs7 }, /* DNV AHCI */
 	{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
 	{ PCI_VDEVICE(INTEL, 0x1c03), board_ahci_mobile }, /* CPT M AHCI */
 	{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
@@ -639,30 +651,6 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
 	ahci_save_initial_config(&pdev->dev, hpriv);
 }
 
-static int ahci_pci_reset_controller(struct ata_host *host)
-{
-	struct pci_dev *pdev = to_pci_dev(host->dev);
-	int rc;
-
-	rc = ahci_reset_controller(host);
-	if (rc)
-		return rc;
-
-	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
-		struct ahci_host_priv *hpriv = host->private_data;
-		u16 tmp16;
-
-		/* configure PCS */
-		pci_read_config_word(pdev, 0x92, &tmp16);
-		if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
-			tmp16 |= hpriv->port_map;
-			pci_write_config_word(pdev, 0x92, tmp16);
-		}
-	}
-
-	return 0;
-}
-
 static void ahci_pci_init_controller(struct ata_host *host)
 {
 	struct ahci_host_priv *hpriv = host->private_data;
@@ -865,7 +853,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev)
 	struct ata_host *host = pci_get_drvdata(pdev);
 	int rc;
 
-	rc = ahci_pci_reset_controller(host);
+	rc = ahci_reset_controller(host);
 	if (rc)
 		return rc;
 	ahci_pci_init_controller(host);
@@ -900,7 +888,7 @@ static int ahci_pci_device_resume(struct device *dev)
 		ahci_mcp89_apple_enable(pdev);
 
 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
-		rc = ahci_pci_reset_controller(host);
+		rc = ahci_reset_controller(host);
 		if (rc)
 			return rc;
 
@@ -1635,6 +1623,36 @@ static void ahci_update_initial_lpm_policy(struct ata_port *ap,
 		ap->target_lpm_policy = policy;
 }
 
+static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
+{
+	const struct pci_device_id *id = pci_match_id(ahci_pci_tbl, pdev);
+	u16 tmp16;
+
+	/*
+	 * Only apply the 6-port PCS quirk for known legacy platforms.
+	 */
+	if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
+		return;
+
+	/* Skip applying the quirk on Denverton and beyond */
+	if (((enum board_ids) id->driver_data) >= board_ahci_pcs7)
+		return;
+
+	/*
+	 * port_map is determined from PORTS_IMPL PCI register which is
+	 * implemented as write or write-once register.  If the register
+	 * isn't programmed, ahci automatically generates it from number
+	 * of ports, which is good enough for PCS programming. It is
+	 * otherwise expected that platform firmware enables the ports
+	 * before the OS boots.
+	 */
+	pci_read_config_word(pdev, PCS_6, &tmp16);
+	if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
+		tmp16 |= hpriv->port_map;
+		pci_write_config_word(pdev, PCS_6, tmp16);
+	}
+}
+
 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	unsigned int board_id = ent->driver_data;
@@ -1747,6 +1765,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* save initial config */
 	ahci_pci_save_initial_config(pdev, hpriv);
 
+	/*
+	 * If platform firmware failed to enable ports, try to enable
+	 * them here.
+	 */
+	ahci_intel_pcs_quirk(pdev, hpriv);
+
 	/* prepare host */
 	if (hpriv->cap & HOST_CAP_NCQ) {
 		pi.flags |= ATA_FLAG_NCQ;
@@ -1856,7 +1880,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		return rc;
 
-	rc = ahci_pci_reset_controller(host);
+	rc = ahci_reset_controller(host);
 	if (rc)
 		return rc;
 
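ahci_intel_pcs_quirk() above is a guarded read-modify-write of the PCS_6
port-enable bits: write back only if some implemented port is still disabled.
The RMW in isolation, with a fake register standing in for the PCI config
word:

static unsigned short fake_pcs;	/* stand-in for the PCS_6 config word */

static unsigned short pcs_read(void) { return fake_pcs; }
static void pcs_write(unsigned short v) { fake_pcs = v; }

static void enable_ports(unsigned short port_map)
{
	unsigned short v = pcs_read();

	if ((v & port_map) != port_map) {	/* some port still disabled */
		v |= port_map;
		pcs_write(v);
	}
}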
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 6a1515f..9290e78 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -261,6 +261,8 @@ enum {
 					  ATA_FLAG_ACPI_SATA | ATA_FLAG_AN,
 
 	ICH_MAP				= 0x90, /* ICH MAP register */
+	PCS_6				= 0x92, /* 6 port PCS */
+	PCS_7				= 0x94, /* 7+ port PCS (Denverton) */
 
 	/* em constants */
 	EM_MAX_SLOTS			= 8,
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index c92c10d..5bece97 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -313,6 +313,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
 		hpriv->phys[port] = NULL;
 		rc = 0;
 		break;
+	case -EPROBE_DEFER:
+		/* Do not complain yet */
+		break;
 
 	default:
 		dev_err(dev,
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1984fc7..3a64fa4 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1803,6 +1803,21 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
 	return 1;
 }
 
+static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
+{
+	struct request *rq = scmd->request;
+	u32 req_blocks;
+
+	if (!blk_rq_is_passthrough(rq))
+		return true;
+
+	req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
+	if (n_blocks > req_blocks)
+		return false;
+
+	return true;
+}
+
 /**
  *	ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
  *	@qc: Storage for translated ATA taskfile
@@ -1847,6 +1862,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
 		scsi_10_lba_len(cdb, &block, &n_block);
 		if (cdb[1] & (1 << 3))
 			tf_flags |= ATA_TFLAG_FUA;
+		if (!ata_check_nblocks(scmd, n_block))
+			goto invalid_fld;
 		break;
 	case READ_6:
 	case WRITE_6:
@@ -1861,6 +1878,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
 		 */
 		if (!n_block)
 			n_block = 256;
+		if (!ata_check_nblocks(scmd, n_block))
+			goto invalid_fld;
 		break;
 	case READ_16:
 	case WRITE_16:
@@ -1871,6 +1890,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
 		scsi_16_lba_len(cdb, &block, &n_block);
 		if (cdb[1] & (1 << 3))
 			tf_flags |= ATA_TFLAG_FUA;
+		if (!ata_check_nblocks(scmd, n_block))
+			goto invalid_fld;
 		break;
 	default:
 		DPRINTK("no-byte command\n");
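
ata_check_nblocks() above rejects passthrough commands whose CDB block count
exceeds what the block-layer request actually provisioned, closing a buffer
overrun. The core comparison as a standalone predicate:

#include <stdbool.h>
#include <stdint.h>

/* True if the claimed transfer fits the buffer actually allocated for
 * the request (mirrors ata_check_nblocks for passthrough commands). */
static bool nblocks_ok(uint64_t req_bytes, uint32_t sector_size,
		       uint32_t n_blocks)
{
	return n_blocks <= req_bytes / sector_size;
}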
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index c5ea0fc..873cc09 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -674,6 +674,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	unsigned int offset;
 	unsigned char *buf;
 
+	if (!qc->cursg) {
+		qc->curbytes = qc->nbytes;
+		return;
+	}
 	if (qc->curbytes == qc->nbytes - qc->sect_size)
 		ap->hsm_task_state = HSM_ST_LAST;
 
@@ -699,6 +703,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 
 	if (qc->cursg_ofs == qc->cursg->length) {
 		qc->cursg = sg_next(qc->cursg);
+		if (!qc->cursg)
+			ap->hsm_task_state = HSM_ST_LAST;
 		qc->cursg_ofs = 0;
 	}
 }
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 173e6f2..eefda51 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
 	unsigned int ret;
 	struct rm_feature_desc *desc;
 	struct ata_taskfile tf;
-	static const char cdb[] = {  GPCMD_GET_CONFIGURATION,
+	static const char cdb[ATAPI_CDB_LEN] = {  GPCMD_GET_CONFIGURATION,
 			2,      /* only 1 feature descriptor requested */
 			0, 3,   /* 3, removable medium feature */
 			0, 0, 0,/* reserved */
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 2e2efa5..8c37294 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -200,7 +200,7 @@
 	  make the card work).
 
 config ATM_NICSTAR_USE_IDT77105
-	bool "Use IDT77015 PHY driver (25Mbps)"
+	bool "Use IDT77105 PHY driver (25Mbps)"
 	depends on ATM_NICSTAR
 	help
 	  Support for the PHYsical layer chip in ForeRunner LE25 cards. In
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index 3b25a64..0b8e2a7d 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1618,6 +1618,8 @@ static void panel_attach(struct parport *port)
 	return;
 
 err_lcd_unreg:
+	if (scan_timer.function)
+		del_timer_sync(&scan_timer);
 	if (lcd.enabled)
 		charlcd_unregister(lcd.charlcd);
 err_unreg_device:
diff --git a/drivers/base/core.c b/drivers/base/core.c
index cd13be9..9dfab55 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -8,6 +8,7 @@
  * Copyright (c) 2006 Novell, Inc.
  */
 
+#include <linux/cpufreq.h>
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/fwnode.h>
@@ -1653,12 +1654,63 @@ static inline struct kobject *get_glue_dir(struct device *dev)
  */
 static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
 {
+	unsigned int ref;
+
 	/* see if we live in a "glue" directory */
 	if (!live_in_glue_dir(glue_dir, dev))
 		return;
 
 	mutex_lock(&gdp_mutex);
-	if (!kobject_has_children(glue_dir) && kref_read(&glue_dir->kref) == 1)
+	/*
+	 * There is a race condition between removing glue directory
+	 * and adding a new device under the glue directory.
+	 *
+	 * CPU1:                                         CPU2:
+	 *
+	 * device_add()
+	 *   get_device_parent()
+	 *     class_dir_create_and_add()
+	 *       kobject_add_internal()
+	 *         create_dir()    // create glue_dir
+	 *
+	 *                                               device_add()
+	 *                                                 get_device_parent()
+	 *                                                   kobject_get() // get glue_dir
+	 *
+	 * device_del()
+	 *   cleanup_glue_dir()
+	 *     kobject_del(glue_dir)
+	 *
+	 *                                               kobject_add()
+	 *                                                 kobject_add_internal()
+	 *                                                   create_dir() // in glue_dir
+	 *                                                     sysfs_create_dir_ns()
+	 *                                                       kernfs_create_dir_ns(sd)
+	 *
+	 *       sysfs_remove_dir() // glue_dir->sd=NULL
+	 *       sysfs_put()        // free glue_dir->sd
+	 *
+	 *                                                         // sd is freed
+	 *                                                         kernfs_new_node(sd)
+	 *                                                           kernfs_get(glue_dir)
+	 *                                                           kernfs_add_one()
+	 *                                                           kernfs_put()
+	 *
+	 * Before CPU1 removes the last child device under the glue dir,
+	 * if CPU2 adds a new device under the glue dir, the glue_dir
+	 * kobject reference count will be increased to 2 in kobject_get().
+	 * And CPU2 will have called kernfs_create_dir_ns(). Meanwhile,
+	 * CPU1 calls sysfs_remove_dir() and sysfs_put(), so glue_dir->sd
+	 * is freed.
+	 *
+	 * CPU2 will then see a stale "empty" but still potentially used
+	 * glue dir around in kernfs_new_node().
+	 *
+	 * To avoid this, we should also make sure that the kernfs_node
+	 * for glue_dir is released by CPU1 only when the refcount of the
+	 * glue_dir kobject is 1.
+	 */
+	ref = kref_read(&glue_dir->kref);
+	if (!kobject_has_children(glue_dir) && !--ref)
 		kobject_del(glue_dir);
 	kobject_put(glue_dir);
 	mutex_unlock(&gdp_mutex);
@@ -2897,6 +2949,8 @@ void device_shutdown(void)
 	wait_for_device_probe();
 	device_block_probing();
 
+	cpufreq_suspend();
+
 	spin_lock(&devices_kset->list_lock);
 	/*
 	 * Walk the devices list backward, shutting down each in turn.
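The cleanup_glue_dir() change above keys deletion on the refcount observed
under gdp_mutex: with no children and a refcount of exactly one (ours), no
concurrent device_add() can hold the directory. A simplified sketch of the
guard, with the locking and types invented for illustration:

#include <pthread.h>

struct dir {
	int refs;	/* protected by 'lock' in this sketch */
	int children;
	int deleted;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Delete only while holding the sole reference; a concurrent adder that
 * already took a reference (refs >= 2) keeps the dir alive. */
static void cleanup_dir(struct dir *d)
{
	pthread_mutex_lock(&lock);
	if (d->children == 0 && d->refs == 1)
		d->deleted = 1;
	d->refs--;	/* drop our reference */
	pthread_mutex_unlock(&lock);
}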
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index b4a1e88..3fa026c 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -635,6 +635,9 @@ store_soft_offline_page(struct device *dev,
 	pfn >>= PAGE_SHIFT;
 	if (!pfn_valid(pfn))
 		return -ENXIO;
+	/* Only online pages can be soft-offlined (in particular, not ZONE_DEVICE). */
+	if (!pfn_to_online_page(pfn))
+		return -EIO;
 	ret = soft_offline_page(pfn_to_page(pfn), 0);
 	return ret == 0 ? count : ret;
 }
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 0cb6c14..05d5986 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -44,7 +44,7 @@
 
 config REGMAP_SOUNDWIRE
 	tristate
-	depends on SOUNDWIRE_BUS
+	depends on SOUNDWIRE
 
 config REGMAP_SCCB
 	tristate
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 5856f45..d0e05c01 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -340,6 +340,9 @@ static ssize_t regmap_data_read_file(struct file *file, char __user *user_buf,
 	else if (*ppos >= map->dump_address * map->debugfs_tot_len
 			+ map->dump_count * map->debugfs_tot_len)
 		return 0;
+	else if (*ppos < map->dump_address * map->debugfs_tot_len)
+		return 0;
+
 	return regmap_read_debugfs(map, 0, map->max_register, user_buf,
 			new_count, ppos);
 }
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 982c7ac..6896e48 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -154,6 +154,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
 				ret = regmap_write(map, reg, ~d->mask_buf[i]);
 			else
 				ret = regmap_write(map, reg, d->mask_buf[i]);
+			/* some chips needs to clear ack reg after ack */
+			if (d->chip->clear_ack)
+				ret = regmap_write(map, reg, 0x0);
 			if (ret != 0)
 				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
 					reg, ret);
@@ -364,6 +367,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
 			reg = chip->ack_base +
 				(i * map->reg_stride * data->irq_reg_stride);
 			ret = regmap_write(map, reg, data->status_buf[i]);
+			/* some chips needs to clear ack reg after ack */
+			if (chip->clear_ack)
+				ret = regmap_write(map, reg, 0x0);
 			if (ret != 0)
 				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
 					reg, ret);
@@ -575,6 +581,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 			else
 				ret = regmap_write(map, reg,
 					d->status_buf[i] & d->mask_buf[i]);
+			/* some chips needs to clear ack reg after ack */
+			if (chip->clear_ack)
+				ret = regmap_write(map, reg, 0x0);
 			if (ret != 0) {
 				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
 					reg, ret);
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 10b280f..7e91894 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -157,6 +157,7 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
 out1:
 	return ERR_PTR(ret);
 }
+EXPORT_SYMBOL_GPL(soc_device_register);
 
 /* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
 void soc_device_unregister(struct soc_device *soc_dev)
@@ -166,6 +167,7 @@ void soc_device_unregister(struct soc_device *soc_dev)
 	device_unregister(&soc_dev->dev);
 	early_soc_dev_attr = NULL;
 }
+EXPORT_SYMBOL_GPL(soc_device_unregister);
 
 static int __init soc_bus_register(void)
 {
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 4a9a4d1..e71589e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3791,7 +3791,7 @@ static int compat_getdrvprm(int drive,
 	v.native_format = UDP->native_format;
 	mutex_unlock(&floppy_mutex);
 
-	if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
+	if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
 		return -EFAULT;
 	return 0;
 }
@@ -3827,7 +3827,7 @@ static int compat_getdrvstat(int drive, bool poll,
 	v.bufblocks = UDRS->bufblocks;
 	mutex_unlock(&floppy_mutex);
 
-	if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
+	if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
 		return -EFAULT;
 	return 0;
 Eintr:
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index cef8e00..126c2c5 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1719,6 +1719,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
 	case LOOP_SET_FD:
 	case LOOP_CHANGE_FD:
 	case LOOP_SET_BLOCK_SIZE:
+	case LOOP_SET_DIRECT_IO:
 		err = lo_ioctl(bdev, mode, cmd, arg);
 		break;
 	default:
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index fa60f26..bc2fa4e 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -106,6 +106,7 @@ struct nbd_device {
 	struct nbd_config *config;
 	struct mutex config_lock;
 	struct gendisk *disk;
+	struct workqueue_struct *recv_workq;
 
 	struct list_head list;
 	struct task_struct *task_recv;
@@ -132,9 +133,10 @@ static struct dentry *nbd_dbg_dir;
 
 #define NBD_MAGIC 0x68797548
 
+#define NBD_DEF_BLKSIZE 1024
+
 static unsigned int nbds_max = 16;
 static int max_part = 16;
-static struct workqueue_struct *recv_workqueue;
 static int part_shift;
 
 static int nbd_dev_dbg_init(struct nbd_device *nbd);
@@ -353,8 +355,10 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
 	}
 	config = nbd->config;
 
-	if (!mutex_trylock(&cmd->lock))
+	if (!mutex_trylock(&cmd->lock)) {
+		nbd_config_put(nbd);
 		return BLK_EH_RESET_TIMER;
+	}
 
 	if (config->num_connections > 1) {
 		dev_err_ratelimited(nbd_to_dev(nbd),
@@ -1023,7 +1027,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
 		/* We take the tx_mutex in an error path in the recv_work, so we
 		 * need to queue_work outside of the tx_mutex.
 		 */
-		queue_work(recv_workqueue, &args->work);
+		queue_work(nbd->recv_workq, &args->work);
 
 		atomic_inc(&config->live_connections);
 		wake_up(&config->conn_wait);
@@ -1124,6 +1128,10 @@ static void nbd_config_put(struct nbd_device *nbd)
 		kfree(nbd->config);
 		nbd->config = NULL;
 
+		if (nbd->recv_workq)
+			destroy_workqueue(nbd->recv_workq);
+		nbd->recv_workq = NULL;
+
 		nbd->tag_set.timeout = 0;
 		nbd->disk->queue->limits.discard_granularity = 0;
 		nbd->disk->queue->limits.discard_alignment = 0;
@@ -1152,6 +1160,14 @@ static int nbd_start_device(struct nbd_device *nbd)
 		return -EINVAL;
 	}
 
+	nbd->recv_workq = alloc_workqueue("knbd%d-recv",
+					  WQ_MEM_RECLAIM | WQ_HIGHPRI |
+					  WQ_UNBOUND, 0, nbd->index);
+	if (!nbd->recv_workq) {
+		dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
+		return -ENOMEM;
+	}
+
 	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
 	nbd->task_recv = current;
 
@@ -1182,7 +1198,7 @@ static int nbd_start_device(struct nbd_device *nbd)
 		INIT_WORK(&args->work, recv_work);
 		args->nbd = nbd;
 		args->index = i;
-		queue_work(recv_workqueue, &args->work);
+		queue_work(nbd->recv_workq, &args->work);
 	}
 	nbd_size_update(nbd);
 	return error;
@@ -1202,8 +1218,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
 	mutex_unlock(&nbd->config_lock);
 	ret = wait_event_interruptible(config->recv_wq,
 					 atomic_read(&config->recv_threads) == 0);
-	if (ret)
+	if (ret) {
 		sock_shutdown(nbd);
+		flush_workqueue(nbd->recv_workq);
+	}
 	mutex_lock(&nbd->config_lock);
 	nbd_bdev_reset(bdev);
 	/* user requested, ignore socket errors */
@@ -1225,6 +1243,14 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
 		nbd_config_put(nbd);
 }
 
+static bool nbd_is_valid_blksize(unsigned long blksize)
+{
+	if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
+	    blksize > PAGE_SIZE)
+		return false;
+	return true;
+}
+
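For a quick sanity check of the rule this helper enforces, a userspace mirror (assumes 4 KiB pages; the local is_power_of_2 stands in for the kernel's version in <linux/log2.h>):

#include <assert.h>
#include <stdbool.h>

#define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */

static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static bool nbd_is_valid_blksize(unsigned long blksize)
{
	return is_power_of_2(blksize) &&
	       blksize >= 512 && blksize <= PAGE_SIZE;
}

int main(void)
{
	assert(nbd_is_valid_blksize(512));
	assert(nbd_is_valid_blksize(4096));
	assert(!nbd_is_valid_blksize(768));	/* not a power of two */
	assert(!nbd_is_valid_blksize(8192));	/* larger than a page */
	return 0;
}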
 /* Must be called with config_lock held */
 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		       unsigned int cmd, unsigned long arg)
@@ -1240,8 +1266,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 	case NBD_SET_SOCK:
 		return nbd_add_socket(nbd, arg, false);
 	case NBD_SET_BLKSIZE:
-		if (!arg || !is_power_of_2(arg) || arg < 512 ||
-		    arg > PAGE_SIZE)
+		if (!arg)
+			arg = NBD_DEF_BLKSIZE;
+		if (!nbd_is_valid_blksize(arg))
 			return -EINVAL;
 		nbd_size_set(nbd, arg,
 			     div_s64(config->bytesize, arg));
@@ -1321,7 +1348,7 @@ static struct nbd_config *nbd_alloc_config(void)
 	atomic_set(&config->recv_threads, 0);
 	init_waitqueue_head(&config->recv_wq);
 	init_waitqueue_head(&config->conn_wait);
-	config->blksize = 1024;
+	config->blksize = NBD_DEF_BLKSIZE;
 	atomic_set(&config->live_connections, 0);
 	try_module_get(THIS_MODULE);
 	return config;
@@ -1757,6 +1784,12 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
 	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
 		u64 bsize =
 			nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
+		if (!bsize)
+			bsize = NBD_DEF_BLKSIZE;
+		if (!nbd_is_valid_blksize(bsize)) {
+			ret = -EINVAL;
+			goto out;
+		}
 		nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize));
 	}
 	if (info->attrs[NBD_ATTR_TIMEOUT]) {
@@ -1833,6 +1866,12 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
 	nbd_disconnect(nbd);
 	nbd_clear_sock(nbd);
 	mutex_unlock(&nbd->config_lock);
+	/*
+	 * Make sure recv thread has finished, so it does not drop the last
+	 * config ref and try to destroy the workqueue from inside the work
+	 * queue.
+	 */
+	flush_workqueue(nbd->recv_workq);
 	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
 			       &nbd->config->runtime_flags))
 		nbd_config_put(nbd);
@@ -2213,20 +2252,12 @@ static int __init nbd_init(void)
 
 	if (nbds_max > 1UL << (MINORBITS - part_shift))
 		return -EINVAL;
-	recv_workqueue = alloc_workqueue("knbd-recv",
-					 WQ_MEM_RECLAIM | WQ_HIGHPRI |
-					 WQ_UNBOUND, 0);
-	if (!recv_workqueue)
-		return -ENOMEM;
 
-	if (register_blkdev(NBD_MAJOR, "nbd")) {
-		destroy_workqueue(recv_workqueue);
+	if (register_blkdev(NBD_MAJOR, "nbd"))
 		return -EIO;
-	}
 
 	if (genl_register_family(&nbd_genl_family)) {
 		unregister_blkdev(NBD_MAJOR, "nbd");
-		destroy_workqueue(recv_workqueue);
 		return -EINVAL;
 	}
 	nbd_dbg_init();
@@ -2268,7 +2299,6 @@ static void __exit nbd_cleanup(void)
 
 	idr_destroy(&nbd_index_idr);
 	genl_unregister_family(&nbd_genl_family);
-	destroy_workqueue(recv_workqueue);
 	unregister_blkdev(NBD_MAJOR, "nbd");
 }
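Taken together, the nbd changes replace one module-global receive workqueue with a per-device queue that is created when the device starts and flushed and destroyed with its configuration, so a disconnect can wait for in-flight receive work without touching other devices. A condensed sketch of that lifecycle under hypothetical names (struct my_dev, my_dev_start/my_dev_stop):

struct my_dev {
	int index;
	struct work_struct work;
	struct workqueue_struct *recv_workq;
};

static int my_dev_start(struct my_dev *d)
{
	d->recv_workq = alloc_workqueue("my%d-recv",
					WQ_MEM_RECLAIM | WQ_UNBOUND,
					0, d->index);
	if (!d->recv_workq)
		return -ENOMEM;

	queue_work(d->recv_workq, &d->work);
	return 0;
}

static void my_dev_stop(struct my_dev *d)
{
	/* wait for in-flight work before tearing the queue down */
	flush_workqueue(d->recv_workq);
	destroy_workqueue(d->recv_workq);
	d->recv_workq = NULL;
}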
 
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 6f1d25c..0bc344d 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2596,7 +2596,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
 	if (ret)
 		return ret;
 	if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
-		WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
 		blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
 		return -EINVAL;
 	}
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index a4bc74e..55869b3 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -974,6 +974,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
 	}
 	blkif->nr_ring_pages = nr_grefs;
 
+	err = -ENOMEM;
 	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
 		req = kzalloc(sizeof(*req), GFP_KERNEL);
 		if (!req)
@@ -996,7 +997,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
 	err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
 	if (err) {
 		xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
-		return err;
+		goto fail;
 	}
 
 	return 0;
@@ -1016,8 +1017,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
 		}
 		kfree(req);
 	}
-	return -ENOMEM;
-
+	return err;
 }
 
 static int connect_ring(struct backend_info *be)
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
index 01eb49f..692f8ec 100644
--- a/drivers/bluetooth/bluetooth-power.c
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -80,8 +80,6 @@ static int bt_vreg_enable(struct bt_power_vreg_data *vreg)
 {
 	int rc = 0;
 
-	BT_PWR_DBG("vreg_en for : %s", vreg->name);
-
 	if (!vreg->is_enabled) {
 		if (vreg->set_voltage_sup) {
 			rc = regulator_set_voltage(vreg->reg,
@@ -112,6 +110,8 @@ static int bt_vreg_enable(struct bt_power_vreg_data *vreg)
 		}
 		vreg->is_enabled = true;
 	}
+
+	BT_PWR_ERR("vreg_en successful for: %s", vreg->name);
 out:
 	return rc;
 }
@@ -123,8 +123,6 @@ static int bt_vreg_unvote(struct bt_power_vreg_data *vreg)
 	if (!vreg)
 		return rc;
 
-	BT_PWR_DBG("vreg_unvote for : %s", vreg->name);
-
 	if (vreg->is_enabled) {
 		if (vreg->set_voltage_sup) {
 			/* Set the min voltage to 0 */
@@ -141,9 +139,12 @@ static int bt_vreg_unvote(struct bt_power_vreg_data *vreg)
 			if (rc < 0) {
 				BT_PWR_ERR("vreg_set_mode(%s) failed rc=%d\n",
 						vreg->name, rc);
+				goto out;
 			}
 		}
 	}
+
+	BT_PWR_ERR("vreg_unvote successful for: %s", vreg->name);
 out:
 	return rc;
 }
@@ -155,8 +156,6 @@ static int bt_vreg_disable(struct bt_power_vreg_data *vreg)
 	if (!vreg)
 		return rc;
 
-	BT_PWR_DBG("vreg_disable for : %s", vreg->name);
-
 	if (vreg->is_enabled) {
 		rc = regulator_disable(vreg->reg);
 		if (rc < 0) {
@@ -181,9 +180,12 @@ static int bt_vreg_disable(struct bt_power_vreg_data *vreg)
 			if (rc < 0) {
 				BT_PWR_ERR("vreg_set_mode(%s) failed rc=%d\n",
 						vreg->name, rc);
+				goto out;
 			}
 		}
 	}
+
+	BT_PWR_ERR("vreg_disable successful for: %s", vreg->name);
 out:
 	return rc;
 }
@@ -251,8 +253,6 @@ static int bt_configure_gpios(int on)
 	int rc = 0;
 	int bt_reset_gpio = bt_power_pdata->bt_gpio_sys_rst;
 
-	BT_PWR_DBG("bt_gpio= %d on: %d", bt_reset_gpio, on);
-
 	if (on) {
 		rc = gpio_request(bt_reset_gpio, "bt_sys_rst_n");
 		if (rc) {
@@ -277,6 +277,8 @@ static int bt_configure_gpios(int on)
 		gpio_set_value(bt_reset_gpio, 0);
 		msleep(100);
 	}
+
+	BT_PWR_ERR("bt_gpio=%d on:%d successful", bt_reset_gpio, on);
 	return rc;
 }
 
@@ -836,7 +838,7 @@ static long bt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 			if (!ret)
 				pwr_state = pwr_cntrl;
 		} else {
-			BT_PWR_ERR("BT chip state is already :%d no change d\n"
+			BT_PWR_ERR("BT chip state is already %d, no change done\n"
 				, pwr_state);
 			ret = 0;
 		}
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index ec9e03a..9e70f7c 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -363,6 +363,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
 		return err;
 	}
 
+	/* Give the controller some time to get ready to receive the NVM */
+	msleep(10);
+
 	/* Download NVM configuration */
 	config.type = TLV_TYPE_NVM;
 	if (soc_type == QCA_WCN3990)
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 1342f8e..8d1cd24 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -639,6 +639,26 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
 }
 EXPORT_SYMBOL_GPL(btrtl_setup_realtek);
 
+int btrtl_shutdown_realtek(struct hci_dev *hdev)
+{
+	struct sk_buff *skb;
+	int ret;
+
+	/* According to the vendor driver, BT must be reset on close to avoid
+	 * firmware crash.
+	 */
+	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		ret = PTR_ERR(skb);
+		bt_dev_err(hdev, "HCI reset during shutdown failed");
+		return ret;
+	}
+	kfree_skb(skb);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(btrtl_shutdown_realtek);
+
 static unsigned int btrtl_convert_baudrate(u32 device_baudrate)
 {
 	switch (device_baudrate) {
diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h
index f5e36f3..852f27d 100644
--- a/drivers/bluetooth/btrtl.h
+++ b/drivers/bluetooth/btrtl.h
@@ -65,6 +65,7 @@ void btrtl_free(struct btrtl_device_info *btrtl_dev);
 int btrtl_download_firmware(struct hci_dev *hdev,
 			    struct btrtl_device_info *btrtl_dev);
 int btrtl_setup_realtek(struct hci_dev *hdev);
+int btrtl_shutdown_realtek(struct hci_dev *hdev);
 int btrtl_get_uart_settings(struct hci_dev *hdev,
 			    struct btrtl_device_info *btrtl_dev,
 			    unsigned int *controller_baudrate,
@@ -93,6 +94,11 @@ static inline int btrtl_setup_realtek(struct hci_dev *hdev)
 	return -EOPNOTSUPP;
 }
 
+static inline int btrtl_shutdown_realtek(struct hci_dev *hdev)
+{
+	return -EOPNOTSUPP;
+}
+
 static inline int btrtl_get_uart_settings(struct hci_dev *hdev,
 					  struct btrtl_device_info *btrtl_dev,
 					  unsigned int *controller_baudrate,
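The header keeps callers buildable when Realtek support is configured out by pairing each exported declaration with a static inline stub that returns -EOPNOTSUPP. The general shape, sketched for a hypothetical CONFIG_FOO:

#ifdef CONFIG_FOO
int foo_shutdown(struct hci_dev *hdev);
#else
static inline int foo_shutdown(struct hci_dev *hdev)
{
	return -EOPNOTSUPP;	/* feature compiled out */
}
#endif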
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 75cf605..08936bf 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -391,6 +391,9 @@ static const struct usb_device_id blacklist_table[] = {
 	{ USB_DEVICE(0x13d3, 0x3526), .driver_info = BTUSB_REALTEK },
 	{ USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
 
+	/* Additional Realtek 8822CE Bluetooth devices */
+	{ USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK },
+
 	/* Silicon Wave based devices */
 	{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
 
@@ -1139,10 +1142,6 @@ static int btusb_open(struct hci_dev *hdev)
 	}
 
 	data->intf->needs_remote_wakeup = 1;
-	/* device specific wakeup source enabled and required for USB
-	 * remote wakeup while host is suspended
-	 */
-	device_wakeup_enable(&data->udev->dev);
 
 	if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
 		goto done;
@@ -1206,7 +1205,6 @@ static int btusb_close(struct hci_dev *hdev)
 		goto failed;
 
 	data->intf->needs_remote_wakeup = 0;
-	device_wakeup_disable(&data->udev->dev);
 	usb_autopm_put_interface(data->intf);
 
 failed:
@@ -3133,6 +3131,7 @@ static int btusb_probe(struct usb_interface *intf,
 #ifdef CONFIG_BT_HCIBTUSB_RTL
 	if (id->driver_info & BTUSB_REALTEK) {
 		hdev->setup = btrtl_setup_realtek;
+		hdev->shutdown = btrtl_shutdown_realtek;
 
 		/* Realtek devices lose their updated firmware over suspend,
 		 * but the USB hub doesn't notice any status change.
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index d5f8545..e31c02d 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -456,6 +456,17 @@ struct hisi_lpc_acpi_cell {
 	size_t pdata_size;
 };
 
+static void hisi_lpc_acpi_remove(struct device *hostdev)
+{
+	struct acpi_device *adev = ACPI_COMPANION(hostdev);
+	struct acpi_device *child;
+
+	device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev);
+
+	list_for_each_entry(child, &adev->children, node)
+		acpi_device_clear_enumerated(child);
+}
+
 /*
  * hisi_lpc_acpi_probe - probe children for ACPI FW
  * @hostdev: LPC host device pointer
@@ -556,8 +567,7 @@ static int hisi_lpc_acpi_probe(struct device *hostdev)
 	return 0;
 
 fail:
-	device_for_each_child(hostdev, NULL,
-			      hisi_lpc_acpi_remove_subdev);
+	hisi_lpc_acpi_remove(hostdev);
 	return ret;
 }
 
@@ -570,6 +580,10 @@ static int hisi_lpc_acpi_probe(struct device *dev)
 {
 	return -ENODEV;
 }
+
+static void hisi_lpc_acpi_remove(struct device *hostdev)
+{
+}
 #endif // CONFIG_ACPI
 
 /*
@@ -607,24 +621,27 @@ static int hisi_lpc_probe(struct platform_device *pdev)
 	range->fwnode = dev->fwnode;
 	range->flags = LOGIC_PIO_INDIRECT;
 	range->size = PIO_INDIRECT_SIZE;
+	range->hostdata = lpcdev;
+	range->ops = &hisi_lpc_ops;
+	lpcdev->io_host = range;
 
 	ret = logic_pio_register_range(range);
 	if (ret) {
 		dev_err(dev, "register IO range failed (%d)!\n", ret);
 		return ret;
 	}
-	lpcdev->io_host = range;
 
 	/* register the LPC host PIO resources */
 	if (acpi_device)
 		ret = hisi_lpc_acpi_probe(dev);
 	else
 		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
-	if (ret)
+	if (ret) {
+		logic_pio_unregister_range(range);
 		return ret;
+	}
 
-	lpcdev->io_host->hostdata = lpcdev;
-	lpcdev->io_host->ops = &hisi_lpc_ops;
+	dev_set_drvdata(dev, lpcdev);
 
 	io_end = lpcdev->io_host->io_start + lpcdev->io_host->size;
 	dev_info(dev, "registered range [%pa - %pa]\n",
@@ -633,6 +650,23 @@ static int hisi_lpc_probe(struct platform_device *pdev)
 	return ret;
 }
 
+static int hisi_lpc_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct acpi_device *acpi_device = ACPI_COMPANION(dev);
+	struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev);
+	struct logic_pio_hwaddr *range = lpcdev->io_host;
+
+	if (acpi_device)
+		hisi_lpc_acpi_remove(dev);
+	else
+		of_platform_depopulate(dev);
+
+	logic_pio_unregister_range(range);
+
+	return 0;
+}
+
 static const struct of_device_id hisi_lpc_of_match[] = {
 	{ .compatible = "hisilicon,hip06-lpc", },
 	{ .compatible = "hisilicon,hip07-lpc", },
@@ -646,5 +680,6 @@ static struct platform_driver hisi_lpc_driver = {
 		.acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match),
 	},
 	.probe = hisi_lpc_probe,
+	.remove = hisi_lpc_remove,
 };
 builtin_platform_driver(hisi_lpc_driver);
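A note on the reordering above: probe now fills in range->hostdata, range->ops and lpcdev->io_host before calling logic_pio_register_range(), so the range is never published half-initialized; it also unwinds with logic_pio_unregister_range() when child enumeration fails, and the new remove callback tears things down in the reverse order of probe (children first, PIO range last).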
diff --git a/drivers/bus/mhi/controllers/mhi_arch_qcom.c b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
index a19b806..42df20f 100644
--- a/drivers/bus/mhi/controllers/mhi_arch_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
@@ -190,6 +190,7 @@ static int mhi_arch_esoc_ops_power_on(void *priv, unsigned int flags)
 		return ret;
 	}
 
+	mhi_dev->mdm_state = (flags & ESOC_HOOK_MDM_CRASH);
 	return mhi_pci_probe(pci_dev, NULL);
 }
 
@@ -312,7 +313,8 @@ static void mhi_boot_monitor(void *data, async_cookie_t cookie)
 
 	/* wait for device to enter boot stage */
 	wait_event_timeout(mhi_cntrl->state_event, mhi_cntrl->ee == MHI_EE_AMSS
-			   || mhi_cntrl->ee == MHI_EE_DISABLE_TRANSITION,
+			   || mhi_cntrl->ee == MHI_EE_DISABLE_TRANSITION
+			   || mhi_cntrl->power_down,
 			   timeout);
 
 	ipc_log_string(arch_info->boot_ipc_log, HLOG "Device current ee = %s\n",
@@ -334,8 +336,9 @@ int mhi_arch_power_up(struct mhi_controller *mhi_cntrl)
 	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
 	struct arch_info *arch_info = mhi_dev->arch_info;
 
-	/* start a boot monitor */
-	arch_info->cookie = async_schedule(mhi_boot_monitor, mhi_cntrl);
+	/* start a boot monitor if not in crashed state */
+	if (!mhi_dev->mdm_state)
+		arch_info->cookie = async_schedule(mhi_boot_monitor, mhi_cntrl);
 
 	return 0;
 }
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.c b/drivers/bus/mhi/controllers/mhi_qcom.c
index 67e12b5..9bb7717 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_qcom.c
@@ -243,7 +243,7 @@ static int mhi_runtime_suspend(struct device *dev)
 	mutex_unlock(&mhi_cntrl->pm_mutex);
 	MHI_LOG("Exited with ret:%d\n", ret);
 
-	return ret;
+	return (ret < 0) ? -EBUSY : 0;
 }
 
 static int mhi_runtime_idle(struct device *dev)
@@ -302,7 +302,7 @@ static int mhi_runtime_resume(struct device *dev)
 	mutex_unlock(&mhi_cntrl->pm_mutex);
 	MHI_LOG("Exited with :%d\n", ret);
 
-	return ret;
+	return (ret < 0) ? -EBUSY : 0;
 }
 
 static int mhi_system_resume(struct device *dev)
@@ -533,6 +533,19 @@ static int mhi_lpm_enable(struct mhi_controller *mhi_cntrl, void *priv)
 	return ret;
 }
 
+void mhi_qcom_store_hwinfo(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	int i;
+
+	mhi_dev->serial_num = readl_relaxed(mhi_cntrl->bhi +
+			MHI_BHI_SERIAL_NUM_OFFS);
+
+	for (i = 0; i < ARRAY_SIZE(mhi_dev->oem_pk_hash); i++)
+		mhi_dev->oem_pk_hash[i] = readl_relaxed(mhi_cntrl->bhi +
+			MHI_BHI_OEMPKHASH(i));
+}
+
 static int mhi_qcom_power_up(struct mhi_controller *mhi_cntrl)
 {
 	enum mhi_dev_state dev_state = mhi_get_mhi_state(mhi_cntrl);
@@ -559,8 +572,9 @@ static int mhi_qcom_power_up(struct mhi_controller *mhi_cntrl)
 			return -EIO;
 	}
 
-	/* when coming out of SSR, initial ee state is not valid */
+	/* when coming out of SSR, initial states are not valid */
 	mhi_cntrl->ee = 0;
+	mhi_cntrl->power_down = false;
 
 	ret = mhi_arch_power_up(mhi_cntrl);
 	if (ret)
@@ -568,6 +582,10 @@ static int mhi_qcom_power_up(struct mhi_controller *mhi_cntrl)
 
 	ret = mhi_async_power_up(mhi_cntrl);
 
+	/* update modem hardware info: serial number and OEM PK hash */
+	if (!ret)
+		mhi_qcom_store_hwinfo(mhi_cntrl);
+
 	/* power up create the dentry */
 	if (mhi_cntrl->dentry) {
 		debugfs_create_file("m0", 0444, mhi_cntrl->dentry, mhi_cntrl,
@@ -677,9 +695,45 @@ static ssize_t power_up_store(struct device *dev,
 }
 static DEVICE_ATTR_WO(power_up);
 
+static ssize_t serial_info_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct mhi_device *mhi_device = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_device->mhi_cntrl;
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	int n;
+
+	n = scnprintf(buf, PAGE_SIZE, "Serial Number:%u\n",
+		      mhi_dev->serial_num);
+
+	return n;
+}
+static DEVICE_ATTR_RO(serial_info);
+
+static ssize_t oempkhash_info_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct mhi_device *mhi_device = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_device->mhi_cntrl;
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	int i, n = 0;
+
+	for (i = 0; i < ARRAY_SIZE(mhi_dev->oem_pk_hash); i++)
+		n += scnprintf(buf + n, PAGE_SIZE - n, "OEMPKHASH[%d]:%u\n",
+		      i, mhi_dev->oem_pk_hash[i]);
+
+	return n;
+}
+static DEVICE_ATTR_RO(oempkhash_info);
+
 static struct attribute *mhi_qcom_attrs[] = {
 	&dev_attr_timeout_ms.attr,
 	&dev_attr_power_up.attr,
+	&dev_attr_serial_info.attr,
+	&dev_attr_oempkhash_info.attr,
 	NULL
 };
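A design note on oempkhash_info_show() above: scnprintf() returns the number of bytes actually written (clamped to the buffer), so the buf + n / PAGE_SIZE - n accumulation cannot run past PAGE_SIZE even across all sixteen hash segments; plain snprintf() would return the would-be length and could push n out of bounds.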
 
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.h b/drivers/bus/mhi/controllers/mhi_qcom.h
index 6fbbac9..3604863 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.h
+++ b/drivers/bus/mhi/controllers/mhi_qcom.h
@@ -14,6 +14,10 @@
 #define MHI_PCIE_VENDOR_ID (0x17cb)
 #define MHI_PCIE_DEBUG_ID (0xffff)
 
+#define MHI_BHI_SERIAL_NUM_OFFS (0x40)
+#define MHI_BHI_OEMPKHASH(n) (0x64 + (0x4 * (n)))
+#define MHI_BHI_OEMPKHASH_SEG (16)
+
 /* runtime suspend timer */
 #define MHI_RPM_SUSPEND_TMR_MS (250)
 #define MHI_PCI_BAR_NUM (0)
@@ -44,10 +48,15 @@ struct mhi_dev {
 	int resn;
 	void *arch_info;
 	bool powered_on;
+	bool mdm_state;
 	dma_addr_t iova_start;
 	dma_addr_t iova_stop;
 	enum mhi_suspend_mode suspend_mode;
 
+	/* hardware info */
+	u32 serial_num;
+	u32 oem_pk_hash[MHI_BHI_OEMPKHASH_SEG];
+
 	unsigned int lpm_disable_depth;
 	/* lock to toggle low power modes */
 	spinlock_t lpm_lock;
diff --git a/drivers/bus/mhi/core/mhi_boot.c b/drivers/bus/mhi/core/mhi_boot.c
index d840768..0840148 100644
--- a/drivers/bus/mhi/core/mhi_boot.c
+++ b/drivers/bus/mhi/core/mhi_boot.c
@@ -17,6 +17,125 @@
 #include <linux/mhi.h>
 #include "mhi_internal.h"
 
+static void mhi_process_sfr(struct mhi_controller *mhi_cntrl,
+	struct file_info *info)
+{
+	struct mhi_buf *mhi_buf = mhi_cntrl->rddm_image->mhi_buf;
+	u8 *sfr_buf, *file_offset = info->file_offset;
+	u32 file_size = info->file_size;
+	u32 rem_seg_len = info->rem_seg_len;
+	u32 seg_idx = info->seg_idx;
+
+	sfr_buf = kzalloc(file_size + 1, GFP_KERNEL);
+	if (!sfr_buf)
+		return;
+
+	while (file_size) {
+		/* file offset starting from seg base */
+		if (!rem_seg_len) {
+			file_offset = mhi_buf[seg_idx].buf;
+			if (file_size > mhi_buf[seg_idx].len)
+				rem_seg_len = mhi_buf[seg_idx].len;
+			else
+				rem_seg_len = file_size;
+		}
+
+		if (file_size <= rem_seg_len) {
+			memcpy(sfr_buf, file_offset, file_size);
+			break;
+		}
+
+		memcpy(sfr_buf, file_offset, rem_seg_len);
+		sfr_buf += rem_seg_len;
+		file_size -= rem_seg_len;
+		rem_seg_len = 0;
+		seg_idx++;
+		if (seg_idx == mhi_cntrl->rddm_image->entries) {
+			MHI_ERR("invalid size for SFR file\n");
+			goto err;
+		}
+	}
+	sfr_buf[info->file_size] = '\0';
+
+	/* force the SFR string into the kernel log */
+	MHI_ERR("%s\n", sfr_buf);
+err:
+	kfree(sfr_buf);
+}
+
+static int mhi_find_next_file_offset(struct mhi_controller *mhi_cntrl,
+	struct file_info *info, struct rddm_table_info *table_info)
+{
+	struct mhi_buf *mhi_buf = mhi_cntrl->rddm_image->mhi_buf;
+
+	if (info->rem_seg_len >= table_info->size) {
+		info->file_offset += table_info->size;
+		info->rem_seg_len -= table_info->size;
+		return 0;
+	}
+
+	info->file_size = table_info->size - info->rem_seg_len;
+	info->rem_seg_len = 0;
+	/* iterate over segments until eof is reached */
+	while (info->file_size) {
+		info->seg_idx++;
+		if (info->seg_idx == mhi_cntrl->rddm_image->entries) {
+			MHI_ERR("invalid size for file %s\n",
+					table_info->file_name);
+			return -EINVAL;
+		}
+		if (info->file_size > mhi_buf[info->seg_idx].len) {
+			info->file_size -= mhi_buf[info->seg_idx].len;
+		} else {
+			info->file_offset = mhi_buf[info->seg_idx].buf +
+				info->file_size;
+			info->rem_seg_len = mhi_buf[info->seg_idx].len -
+				info->file_size;
+			info->file_size = 0;
+		}
+	}
+
+	return 0;
+}
+
+void mhi_dump_sfr(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_buf *mhi_buf = mhi_cntrl->rddm_image->mhi_buf;
+	struct rddm_header *rddm_header =
+		(struct rddm_header *)mhi_buf->buf;
+	struct rddm_table_info *table_info;
+	struct file_info info = {0};
+	u32 table_size, n;
+
+	if (rddm_header->header_size > sizeof(*rddm_header) ||
+			rddm_header->header_size < 8) {
+		MHI_ERR("invalid reported header size %u\n",
+				rddm_header->header_size);
+		return;
+	}
+
+	table_size = (rddm_header->header_size - 8) / sizeof(*table_info);
+	if (!table_size) {
+		MHI_ERR("invalid rddm table size %u\n", table_size);
+		return;
+	}
+
+	info.file_offset = (u8 *)rddm_header + rddm_header->header_size;
+	info.rem_seg_len = mhi_buf[0].len - rddm_header->header_size;
+	for (n = 0; n < table_size; n++) {
+		table_info = &rddm_header->table_info[n];
+
+		if (!strcmp(table_info->file_name, "Q6-SFR.bin")) {
+			info.file_size = table_info->size;
+			mhi_process_sfr(mhi_cntrl, &info);
+			return;
+		}
+
+		if (mhi_find_next_file_offset(mhi_cntrl, &info, table_info))
+			return;
+	}
+}
+EXPORT_SYMBOL(mhi_dump_sfr);
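mhi_process_sfr() above reassembles one logically contiguous file out of an array of variable-length RDDM segments. A self-contained userspace mirror of that walk (struct seg and copy_across_segs are hypothetical; error handling reduced to one return code):

#include <stddef.h>
#include <string.h>

struct seg {
	const unsigned char *buf;
	size_t len;
};

/* Copy `size` bytes starting at offset `off` inside segment `seg`,
 * continuing at the base of each following segment as needed.
 */
static int copy_across_segs(const struct seg *segs, size_t nsegs,
			    size_t seg, size_t off,
			    unsigned char *dst, size_t size)
{
	while (size) {
		size_t avail, n;

		if (seg >= nsegs)
			return -1;	/* file larger than the segments */

		avail = segs[seg].len - off;
		n = size < avail ? size : avail;
		memcpy(dst, segs[seg].buf + off, n);
		dst += n;
		size -= n;
		seg++;
		off = 0;	/* next copy starts at the segment base */
	}
	return 0;
}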
 
 /* setup rddm vector table for rddm transfer and program rxvec */
 void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
@@ -306,7 +425,7 @@ void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
 	struct mhi_buf *mhi_buf = image_info->mhi_buf;
 
 	for (i = 0; i < image_info->entries; i++, mhi_buf++)
-		mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
+		mhi_free_contig_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
 				  mhi_buf->dma_addr);
 
 	kfree(image_info->mhi_buf);
@@ -347,7 +466,7 @@ int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
 			vec_size = sizeof(struct bhi_vec_entry) * i;
 
 		mhi_buf->len = vec_size;
-		mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size,
+		mhi_buf->buf = mhi_alloc_contig_coherent(mhi_cntrl, vec_size,
 					&mhi_buf->dma_addr, GFP_KERNEL);
 		if (!mhi_buf->buf)
 			goto error_alloc_segment;
@@ -366,7 +485,7 @@ int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
 
 error_alloc_segment:
 	for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
-		mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
+		mhi_free_contig_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
 				  mhi_buf->dma_addr);
 
 error_alloc_mhi_buf:
diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c
index 6f88d91..7f68495 100644
--- a/drivers/bus/mhi/core/mhi_init.c
+++ b/drivers/bus/mhi/core/mhi_init.c
@@ -14,6 +14,14 @@
 #include <linux/mhi.h>
 #include "mhi_internal.h"
 
+const char * const mhi_log_level_str[MHI_MSG_LVL_MAX] = {
+	[MHI_MSG_LVL_VERBOSE] = "Verbose",
+	[MHI_MSG_LVL_INFO] = "Info",
+	[MHI_MSG_LVL_ERROR] = "Error",
+	[MHI_MSG_LVL_CRITICAL] = "Critical",
+	[MHI_MSG_LVL_MASK_ALL] = "Mask all",
+};
+
 const char * const mhi_ee_str[MHI_EE_MAX] = {
 	[MHI_EE_PBL] = "PBL",
 	[MHI_EE_SBL] = "SBL",
@@ -31,6 +39,7 @@ const char * const mhi_state_tran_str[MHI_ST_TRANSITION_MAX] = {
 	[MHI_ST_TRANSITION_READY] = "READY",
 	[MHI_ST_TRANSITION_SBL] = "SBL",
 	[MHI_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
+	[MHI_ST_TRANSITION_DISABLE] = "DISABLE",
 };
 
 const char * const mhi_state_str[MHI_STATE_MAX] = {
@@ -54,10 +63,12 @@ static const char * const mhi_pm_state_str[] = {
 	[MHI_PM_BIT_M3] = "M3",
 	[MHI_PM_BIT_M3_EXIT] = "M3->M0",
 	[MHI_PM_BIT_FW_DL_ERR] = "FW DL Error",
+	[MHI_PM_BIT_DEVICE_ERR_DETECT] = "Device Error Detect",
 	[MHI_PM_BIT_SYS_ERR_DETECT] = "SYS_ERR Detect",
 	[MHI_PM_BIT_SYS_ERR_PROCESS] = "SYS_ERR Process",
 	[MHI_PM_BIT_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
 	[MHI_PM_BIT_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
+	[MHI_PM_BIT_SHUTDOWN_NO_ACCESS] = "SHUTDOWN No Access",
 };
 
 struct mhi_bus mhi_bus;
@@ -72,6 +83,38 @@ const char *to_mhi_pm_state_str(enum MHI_PM_STATE state)
 	return mhi_pm_state_str[index];
 }
 
+static ssize_t log_level_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			TO_MHI_LOG_LEVEL_STR(mhi_cntrl->log_lvl));
+}
+
+static ssize_t log_level_store(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf,
+			       size_t count)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	u32 log_level;
+
+	if (kstrtou32(buf, 0, &log_level) < 0)
+		return -EINVAL;
+
+	mhi_cntrl->log_lvl = log_level;
+
+	MHI_LOG("IPC log level changed to: %s\n",
+		TO_MHI_LOG_LEVEL_STR(log_level));
+
+	return count;
+}
+static DEVICE_ATTR_RW(log_level);
+
 static ssize_t bus_vote_show(struct device *dev,
 			     struct device_attribute *attr,
 			     char *buf)
@@ -130,27 +173,28 @@ static ssize_t device_vote_store(struct device *dev,
 }
 static DEVICE_ATTR_RW(device_vote);
 
-static struct attribute *mhi_vote_attrs[] = {
+static struct attribute *mhi_sysfs_attrs[] = {
+	&dev_attr_log_level.attr,
 	&dev_attr_bus_vote.attr,
 	&dev_attr_device_vote.attr,
 	NULL,
 };
 
-static const struct attribute_group mhi_vote_group = {
-	.attrs = mhi_vote_attrs,
+static const struct attribute_group mhi_sysfs_group = {
+	.attrs = mhi_sysfs_attrs,
 };
 
-int mhi_create_vote_sysfs(struct mhi_controller *mhi_cntrl)
+int mhi_create_sysfs(struct mhi_controller *mhi_cntrl)
 {
 	return sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj,
-				  &mhi_vote_group);
+				  &mhi_sysfs_group);
 }
 
-void mhi_destroy_vote_sysfs(struct mhi_controller *mhi_cntrl)
+void mhi_destroy_sysfs(struct mhi_controller *mhi_cntrl)
 {
 	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
 
-	sysfs_remove_group(&mhi_dev->dev.kobj, &mhi_vote_group);
+	sysfs_remove_group(&mhi_dev->dev.kobj, &mhi_sysfs_group);
 
 	/* relinquish any pending votes for device */
 	while (atomic_read(&mhi_dev->dev_vote))
@@ -1256,7 +1300,7 @@ static int of_parse_dt(struct mhi_controller *mhi_cntrl,
 	return 0;
 
 error_ev_cfg:
-	kfree(mhi_cntrl->mhi_chan);
+	vfree(mhi_cntrl->mhi_chan);
 
 	return ret;
 }
@@ -1298,7 +1342,6 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
 	spin_lock_init(&mhi_cntrl->wlock);
 	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
 	INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
-	INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
 	INIT_WORK(&mhi_cntrl->low_priority_worker, mhi_low_priority_worker);
 	init_waitqueue_head(&mhi_cntrl->state_event);
 
@@ -1330,6 +1373,9 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
 		mutex_init(&mhi_chan->mutex);
 		init_completion(&mhi_chan->completion);
 		rwlock_init(&mhi_chan->lock);
+
+		mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+		mhi_chan->bei = !!(mhi_event->intmod);
 	}
 
 	if (mhi_cntrl->bounce_buf) {
@@ -1399,7 +1445,7 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
 	kfree(mhi_cntrl->mhi_cmd);
 
 error_alloc_cmd:
-	kfree(mhi_cntrl->mhi_chan);
+	vfree(mhi_cntrl->mhi_chan);
 	kfree(mhi_cntrl->mhi_event);
 
 	return ret;
@@ -1412,7 +1458,7 @@ void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl)
 
 	kfree(mhi_cntrl->mhi_cmd);
 	kfree(mhi_cntrl->mhi_event);
-	kfree(mhi_cntrl->mhi_chan);
+	vfree(mhi_cntrl->mhi_chan);
 	kfree(mhi_cntrl->mhi_tsync);
 
 	device_del(&mhi_dev->dev);
@@ -1571,7 +1617,6 @@ static int mhi_driver_probe(struct device *dev)
 	struct mhi_event *mhi_event;
 	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
 	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
-	bool auto_start = false;
 	int ret;
 
 	/* bring device out of lpm */
@@ -1590,7 +1635,11 @@ static int mhi_driver_probe(struct device *dev)
 
 		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
 		mhi_dev->status_cb = mhi_drv->status_cb;
-		auto_start = ul_chan->auto_start;
+		if (ul_chan->auto_start) {
+			ret = mhi_prepare_channel(mhi_cntrl, ul_chan);
+			if (ret)
+				goto exit_probe;
+		}
 	}
 
 	if (dl_chan) {
@@ -1614,15 +1663,22 @@ static int mhi_driver_probe(struct device *dev)
 
 		/* ul & dl uses same status cb */
 		mhi_dev->status_cb = mhi_drv->status_cb;
-		auto_start = (auto_start || dl_chan->auto_start);
 	}
 
 	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
+	if (ret)
+		goto exit_probe;
 
-	if (!ret && auto_start)
-		mhi_prepare_for_transfer(mhi_dev);
+	if (dl_chan && dl_chan->auto_start)
+		mhi_prepare_channel(mhi_cntrl, dl_chan);
+
+	mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);
+
+	return ret;
 
 exit_probe:
+	mhi_unprepare_from_transfer(mhi_dev);
+
 	mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);
 
 	return ret;
diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h
index 84a9626..e4e80f9 100644
--- a/drivers/bus/mhi/core/mhi_internal.h
+++ b/drivers/bus/mhi/core/mhi_internal.h
@@ -429,6 +429,7 @@ enum MHI_ST_TRANSITION {
 	MHI_ST_TRANSITION_READY,
 	MHI_ST_TRANSITION_SBL,
 	MHI_ST_TRANSITION_MISSION_MODE,
+	MHI_ST_TRANSITION_DISABLE,
 	MHI_ST_TRANSITION_MAX,
 };
 
@@ -441,6 +442,11 @@ extern const char * const mhi_state_str[MHI_STATE_MAX];
 				  !mhi_state_str[state]) ? \
 				"INVALID_STATE" : mhi_state_str[state])
 
+extern const char * const mhi_log_level_str[MHI_MSG_LVL_MAX];
+#define TO_MHI_LOG_LEVEL_STR(level) ((level >= MHI_MSG_LVL_MAX || \
+				  !mhi_log_level_str[level]) ? \
+				"Mask all" : mhi_log_level_str[level])
+
 enum {
 	MHI_PM_BIT_DISABLE,
 	MHI_PM_BIT_POR,
@@ -450,10 +456,12 @@ enum {
 	MHI_PM_BIT_M3,
 	MHI_PM_BIT_M3_EXIT,
 	MHI_PM_BIT_FW_DL_ERR,
+	MHI_PM_BIT_DEVICE_ERR_DETECT,
 	MHI_PM_BIT_SYS_ERR_DETECT,
 	MHI_PM_BIT_SYS_ERR_PROCESS,
 	MHI_PM_BIT_SHUTDOWN_PROCESS,
 	MHI_PM_BIT_LD_ERR_FATAL_DETECT,
+	MHI_PM_BIT_SHUTDOWN_NO_ACCESS,
 	MHI_PM_BIT_MAX
 };
 
@@ -468,19 +476,23 @@ enum MHI_PM_STATE {
 	MHI_PM_M3_EXIT = BIT(MHI_PM_BIT_M3_EXIT),
 	/* firmware download failure state */
 	MHI_PM_FW_DL_ERR = BIT(MHI_PM_BIT_FW_DL_ERR),
+	/* error or shutdown detected or processing state */
+	MHI_PM_DEVICE_ERR_DETECT = BIT(MHI_PM_BIT_DEVICE_ERR_DETECT),
 	MHI_PM_SYS_ERR_DETECT = BIT(MHI_PM_BIT_SYS_ERR_DETECT),
 	MHI_PM_SYS_ERR_PROCESS = BIT(MHI_PM_BIT_SYS_ERR_PROCESS),
 	MHI_PM_SHUTDOWN_PROCESS = BIT(MHI_PM_BIT_SHUTDOWN_PROCESS),
 	/* link not accessible */
 	MHI_PM_LD_ERR_FATAL_DETECT = BIT(MHI_PM_BIT_LD_ERR_FATAL_DETECT),
+	MHI_PM_SHUTDOWN_NO_ACCESS = BIT(MHI_PM_BIT_SHUTDOWN_NO_ACCESS),
 };
 
 #define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
 		MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
-		MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
-		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
+		MHI_PM_DEVICE_ERR_DETECT | MHI_PM_SYS_ERR_DETECT | \
+		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS | \
+		MHI_PM_FW_DL_ERR)))
 #define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
-#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
+#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state >= MHI_PM_LD_ERR_FATAL_DETECT)
 #define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \
 					mhi_cntrl->db_access)
 #define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
@@ -575,6 +587,7 @@ struct mhi_pm_transitions {
 struct state_transition {
 	struct list_head node;
 	enum MHI_ST_TRANSITION state;
+	enum MHI_PM_STATE pm_state;
 };
 
 struct mhi_ctxt {
@@ -652,7 +665,6 @@ struct mhi_chan {
 	struct mhi_ring buf_ring;
 	struct mhi_ring tre_ring;
 	u32 er_index;
-	u32 intmod;
 	enum mhi_ch_type type;
 	enum dma_data_direction dir;
 	struct db_cfg db_cfg;
@@ -660,6 +672,7 @@ struct mhi_chan {
 	enum MHI_XFER_TYPE xfer_type;
 	enum MHI_CH_STATE ch_state;
 	enum MHI_EV_CCS ccs;
+	bool bei; /* block event interrupt: true if event ring intmod > 0 */
 	bool lpm_notify;
 	bool configured;
 	bool offload_ch;
@@ -733,7 +746,7 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
 			       enum MHI_ST_TRANSITION state);
 void mhi_pm_st_worker(struct work_struct *work);
 void mhi_fw_load_worker(struct work_struct *work);
-void mhi_pm_sys_err_worker(struct work_struct *work);
+void mhi_process_sys_err(struct mhi_controller *mhi_cntrl);
 void mhi_low_priority_worker(struct work_struct *work);
 int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
 void mhi_ctrl_ev_task(unsigned long data);
@@ -757,7 +770,7 @@ static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl)
 {
 	mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
 	mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
-	pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
+	pm_wakeup_hard_event(&mhi_cntrl->mhi_dev->dev);
 }
 
 /* queue transfer buffer */
@@ -801,8 +814,8 @@ void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr);
 int mhi_init_timesync(struct mhi_controller *mhi_cntrl);
 int mhi_create_timesync_sysfs(struct mhi_controller *mhi_cntrl);
 void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl);
-int mhi_create_vote_sysfs(struct mhi_controller *mhi_cntrl);
-void mhi_destroy_vote_sysfs(struct mhi_controller *mhi_cntrl);
+int mhi_create_sysfs(struct mhi_controller *mhi_cntrl);
+void mhi_destroy_sysfs(struct mhi_controller *mhi_cntrl);
 int mhi_early_notify_device(struct device *dev, void *data);
 
 /* timesync log support */
@@ -836,6 +849,30 @@ static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl,
 	atomic_sub(size, &mhi_cntrl->alloc_size);
 	dma_free_coherent(mhi_cntrl->dev, size, vaddr, dma_handle);
 }
+
+static inline void *mhi_alloc_contig_coherent(
+					struct mhi_controller *mhi_cntrl,
+					size_t size, dma_addr_t *dma_handle,
+					gfp_t gfp)
+{
+	void *buf = dma_alloc_attrs(mhi_cntrl->dev, size, dma_handle, gfp,
+					DMA_ATTR_FORCE_CONTIGUOUS);
+
+	if (buf)
+		atomic_add(size, &mhi_cntrl->alloc_size);
+
+	return buf;
+}
+static inline void mhi_free_contig_coherent(
+					struct mhi_controller *mhi_cntrl,
+					size_t size, void *vaddr,
+					dma_addr_t dma_handle)
+{
+	atomic_sub(size, &mhi_cntrl->alloc_size);
+	dma_free_attrs(mhi_cntrl->dev, size, vaddr, dma_handle,
+					DMA_ATTR_FORCE_CONTIGUOUS);
+}
+
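dma_alloc_attrs() with DMA_ATTR_FORCE_CONTIGUOUS requests physically contiguous backing even when an IOMMU could otherwise satisfy the mapping with scattered pages, which is what the BHIe vector-table switch above relies on; the free side must pass the same attrs. A minimal pairing sketch (the fw-table names are hypothetical):

static void *alloc_fw_table(struct device *dev, size_t size,
			    dma_addr_t *dma)
{
	return dma_alloc_attrs(dev, size, dma, GFP_KERNEL,
			       DMA_ATTR_FORCE_CONTIGUOUS);
}

static void free_fw_table(struct device *dev, size_t size,
			  void *vaddr, dma_addr_t dma)
{
	/* attrs must match the allocation */
	dma_free_attrs(dev, size, vaddr, dma, DMA_ATTR_FORCE_CONTIGUOUS);
}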
 struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl);
 static inline void mhi_dealloc_device(struct mhi_controller *mhi_cntrl,
 				      struct mhi_device *mhi_dev)
@@ -871,6 +908,8 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
 int mhi_dtr_init(void);
 void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
 		      struct image_info *img_info);
+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+			struct mhi_chan *mhi_chan);
 
 /* isr handlers */
 irqreturn_t mhi_msi_handlr(int irq_number, void *dev);
diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c
index 8e1e2fd..11a6516 100644
--- a/drivers/bus/mhi/core/mhi_main.c
+++ b/drivers/bus/mhi/core/mhi_main.c
@@ -396,7 +396,7 @@ int mhi_queue_skb(struct mhi_device *mhi_dev,
 
 	mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
 	mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
-	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
+	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(mhi_chan->bei, 1, 0, 0);
 
 	MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan,
 		 (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr,
@@ -479,7 +479,7 @@ int mhi_queue_dma(struct mhi_device *mhi_dev,
 	} else {
 		mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
 		mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
-		mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
+		mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(mhi_chan->bei, 1, 0, 0);
 	}
 
 	MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan,
@@ -514,7 +514,7 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl,
 	struct mhi_ring *buf_ring, *tre_ring;
 	struct mhi_tre *mhi_tre;
 	struct mhi_buf_info *buf_info;
-	int eot, eob, chain, bei;
+	int eot, eob, chain;
 	int ret;
 
 	buf_ring = &mhi_chan->buf_ring;
@@ -534,12 +534,11 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl,
 	eob = !!(flags & MHI_EOB);
 	eot = !!(flags & MHI_EOT);
 	chain = !!(flags & MHI_CHAIN);
-	bei = !!(mhi_chan->intmod);
 
 	mhi_tre = tre_ring->wp;
 	mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
 	mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len);
-	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
+	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(mhi_chan->bei, eot, eob, chain);
 
 	MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan,
 		 (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr,
@@ -635,8 +634,16 @@ int mhi_destroy_device(struct device *dev, void *data)
 
 int mhi_early_notify_device(struct device *dev, void *data)
 {
-	struct mhi_device *mhi_dev = to_mhi_device(dev);
-	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_device *mhi_dev;
+	struct mhi_controller *mhi_cntrl;
+	struct mhi_chan *mhi_chan;
+	int dir;
+
+	if (dev->bus != &mhi_bus_type)
+		return 0;
+
+	mhi_dev = to_mhi_device(dev);
+	mhi_cntrl = mhi_dev->mhi_cntrl;
 
 	/* skip early notification */
 	if (!mhi_dev->early_notif)
@@ -646,6 +653,20 @@ int mhi_early_notify_device(struct device *dev, void *data)
 
 	mhi_notify(mhi_dev, MHI_CB_FATAL_ERROR);
 
+	/* send completions to any critical channels waiting on them */
+	for (dir = 0; dir < 2; dir++) {
+		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+		if (!mhi_chan)
+			continue;
+
+		/* wake all threads waiting for completion */
+		write_lock_irq(&mhi_chan->lock);
+		mhi_chan->ccs = MHI_EV_CC_INVALID;
+		complete_all(&mhi_chan->completion);
+		write_unlock_irq(&mhi_chan->lock);
+	}
+
 	return 0;
 }
 
@@ -1173,8 +1194,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 							MHI_PM_SYS_ERR_DETECT);
 				write_unlock_irq(&mhi_cntrl->pm_lock);
 				if (new_state == MHI_PM_SYS_ERR_DETECT)
-					schedule_work(
-						&mhi_cntrl->syserr_worker);
+					mhi_process_sys_err(mhi_cntrl);
 				break;
 			}
 			default:
@@ -1386,29 +1406,25 @@ int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
 	struct mhi_link_info link_info, *cur_info = &mhi_cntrl->mhi_link_info;
 	int result, ret = 0;
 
-	mutex_lock(&mhi_cntrl->pm_mutex);
-
 	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
 		MHI_LOG("No EV access, PM_STATE:%s\n",
 			to_mhi_pm_state_str(mhi_cntrl->pm_state));
 		ret = -EIO;
-		goto exit_bw_process;
+		goto exit_no_lock;
 	}
 
-	/*
-	 * BW change is not process during suspend since we're suspending link,
-	 * host will process it during resume
-	 */
-	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
-		ret = -EACCES;
-		goto exit_bw_process;
-	}
+	ret = __mhi_device_get_sync(mhi_cntrl);
+	if (ret)
+		goto exit_no_lock;
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
 
 	spin_lock_bh(&mhi_event->lock);
 	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
 
 	if (ev_ring->rp == dev_rp) {
 		spin_unlock_bh(&mhi_event->lock);
+		MHI_VERB("no pending event found\n");
 		goto exit_bw_process;
 	}
 
@@ -1453,13 +1469,16 @@ int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
 		mhi_write_reg(mhi_cntrl, mhi_cntrl->bw_scale_db, 0,
 			      MHI_BW_SCALE_RESULT(result,
 						  link_info.sequence_num));
+
+	mhi_cntrl->wake_put(mhi_cntrl, false);
 	read_unlock_bh(&mhi_cntrl->pm_lock);
 
 exit_bw_process:
-	MHI_VERB("exit er_index:%u\n", mhi_event->er_index);
-
 	mutex_unlock(&mhi_cntrl->pm_mutex);
 
+exit_no_lock:
+	MHI_VERB("exit er_index:%u\n", mhi_event->er_index);
+
 	return ret;
 }
 
@@ -1518,7 +1537,7 @@ void mhi_ctrl_ev_task(unsigned long data)
 		}
 		write_unlock_irq(&mhi_cntrl->pm_lock);
 		if (pm_state == MHI_PM_SYS_ERR_DETECT)
-			schedule_work(&mhi_cntrl->syserr_worker);
+			mhi_process_sys_err(mhi_cntrl);
 	}
 }
 
@@ -1569,7 +1588,8 @@ irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev)
 		state = mhi_get_mhi_state(mhi_cntrl);
 		ee = mhi_cntrl->ee;
 		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
-		MHI_LOG("device ee:%s dev_state:%s\n",
+		MHI_LOG("local ee:%s device ee:%s dev_state:%s\n",
+			TO_MHI_EXEC_STR(ee),
 			TO_MHI_EXEC_STR(mhi_cntrl->ee),
 			TO_MHI_STATE_STR(state));
 	}
@@ -1582,11 +1602,14 @@ irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev)
 	write_unlock_irq(&mhi_cntrl->pm_lock);
 
 	/* if device in rddm don't bother processing sys error */
-	if (mhi_cntrl->ee == MHI_EE_RDDM) {
+	if (mhi_cntrl->ee == MHI_EE_RDDM && ee != MHI_EE_DISABLE_TRANSITION) {
 		if (mhi_cntrl->ee != ee) {
 			mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
 					     MHI_CB_EE_RDDM);
 			wake_up_all(&mhi_cntrl->state_event);
+
+			/* notify critical clients with early notifications */
+			mhi_control_error(mhi_cntrl);
 		}
 		goto exit_intvec;
 	}
@@ -1599,7 +1622,7 @@ irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev)
 			mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
 					     MHI_CB_FATAL_ERROR);
 		else
-			schedule_work(&mhi_cntrl->syserr_worker);
+			mhi_process_sys_err(mhi_cntrl);
 	}
 
 exit_intvec:
@@ -1618,7 +1641,8 @@ irqreturn_t mhi_intvec_handlr(int irq_number, void *dev)
 	wake_up_all(&mhi_cntrl->state_event);
 	MHI_VERB("Exit\n");
 
-	schedule_work(&mhi_cntrl->low_priority_worker);
+	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee))
+		schedule_work(&mhi_cntrl->low_priority_worker);
 
 	return IRQ_WAKE_THREAD;
 }
@@ -1688,8 +1712,8 @@ int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
 	return 0;
 }
 
-static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
-				 struct mhi_chan *mhi_chan)
+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+			struct mhi_chan *mhi_chan)
 {
 	int ret = 0;
 
@@ -1730,15 +1754,16 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
 		goto error_pm_state;
 	}
 
+	atomic_inc(&mhi_cntrl->pending_pkts);
 	mhi_cntrl->wake_toggle(mhi_cntrl);
+	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+		mhi_trigger_resume(mhi_cntrl);
 	read_unlock_bh(&mhi_cntrl->pm_lock);
-	mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
-	mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
 
 	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
 	if (ret) {
 		MHI_ERR("Failed to send start chan cmd\n");
-		goto error_pm_state;
+		goto error_dec_pendpkt;
 	}
 
 	ret = wait_for_completion_timeout(&mhi_chan->completion,
@@ -1747,9 +1772,11 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
 		MHI_ERR("Failed to receive cmd completion for chan:%d\n",
 			mhi_chan->chan);
 		ret = -EIO;
-		goto error_pm_state;
+		goto error_dec_pendpkt;
 	}
 
+	atomic_dec(&mhi_cntrl->pending_pkts);
+
 	write_lock_irq(&mhi_chan->lock);
 	mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
 	write_unlock_irq(&mhi_chan->lock);
@@ -1795,6 +1822,8 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
 
 	return 0;
 
+error_dec_pendpkt:
+	atomic_dec(&mhi_cntrl->pending_pkts);
 error_pm_state:
 	if (!mhi_chan->offload_ch)
 		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
@@ -1953,15 +1982,16 @@ static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
 		goto error_invalid_state;
 	}
 
+	atomic_inc(&mhi_cntrl->pending_pkts);
 	mhi_cntrl->wake_toggle(mhi_cntrl);
+	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+		mhi_trigger_resume(mhi_cntrl);
 	read_unlock_bh(&mhi_cntrl->pm_lock);
 
-	mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
-	mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
 	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
 	if (ret) {
 		MHI_ERR("Failed to send reset chan cmd\n");
-		goto error_invalid_state;
+		goto error_dec_pendpkt;
 	}
 
 	/* even if it fails we will still reset */
@@ -1970,6 +2000,8 @@ static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
 	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
 		MHI_ERR("Failed to receive cmd completion, still resetting\n");
 
+error_dec_pendpkt:
+	atomic_dec(&mhi_cntrl->pending_pkts);
 error_invalid_state:
 	if (!mhi_chan->offload_ch) {
 		mhi_reset_chan(mhi_cntrl, mhi_chan);
@@ -1984,7 +2016,8 @@ int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d)
 	struct mhi_controller *mhi_cntrl = m->private;
 
 	seq_printf(m,
-		   "pm_state:%s dev_state:%s EE:%s M0:%u M2:%u M3:%u M3_Fast:%u wake:%d dev_wake:%u alloc_size:%u pending_pkts:%u\n",
+		   "[%llu ns]: pm_state:%s dev_state:%s EE:%s M0:%u M2:%u M3:%u M3_Fast:%u wake:%d dev_wake:%u alloc_size:%u pending_pkts:%u\n",
+		   sched_clock(),
 		   to_mhi_pm_state_str(mhi_cntrl->pm_state),
 		   TO_MHI_STATE_STR(mhi_cntrl->dev_state),
 		   TO_MHI_EXEC_STR(mhi_cntrl->ee),
@@ -2004,6 +2037,8 @@ int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d)
 
 	int i;
 
+	seq_printf(m, "[%llu ns]:\n", sched_clock());
+
 	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
 	mhi_event = mhi_cntrl->mhi_event;
 	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
@@ -2035,6 +2070,8 @@ int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d)
 	struct mhi_chan_ctxt *chan_ctxt;
 	int i;
 
+	seq_printf(m, "[%llu ns]:\n", sched_clock());
+
 	mhi_chan = mhi_cntrl->mhi_chan;
 	chan_ctxt = mhi_cntrl->mhi_ctxt->chan_ctxt;
 	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
@@ -2076,7 +2113,7 @@ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
 		if (!mhi_chan)
 			continue;
 
-		ret = __mhi_prepare_channel(mhi_cntrl, mhi_chan);
+		ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
 		if (ret) {
 			MHI_ERR("Error moving chan %s,%d to START state\n",
 				mhi_chan->name, mhi_chan->chan);
@@ -2144,15 +2181,16 @@ static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
 		goto error_chan_state;
 	}
 
+	atomic_inc(&mhi_cntrl->pending_pkts);
 	mhi_cntrl->wake_toggle(mhi_cntrl);
+	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+		mhi_trigger_resume(mhi_cntrl);
 	read_unlock_bh(&mhi_cntrl->pm_lock);
-	mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
-	mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
 
 	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
 	if (ret) {
 		MHI_ERR("Failed to send start chan cmd\n");
-		goto error_chan_state;
+		goto error_dec_pendpkt;
 	}
 
 	ret = wait_for_completion_timeout(&mhi_chan->completion,
@@ -2161,7 +2199,7 @@ static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
 		MHI_ERR("Failed to receive cmd completion for chan:%d\n",
 			mhi_chan->chan);
 		ret = -EIO;
-		goto error_chan_state;
+		goto error_dec_pendpkt;
 	}
 
 	ret = 0;
@@ -2169,6 +2207,8 @@ static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
 	MHI_VERB("chan:%d successfully transition to state:%s\n",
 		 mhi_chan->chan, cmd == MHI_CMD_START_CHAN ? "START" : "STOP");
 
+error_dec_pendpkt:
+	atomic_dec(&mhi_cntrl->pending_pkts);
 error_chan_state:
 	mutex_unlock(&mhi_chan->mutex);
 
diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c
index 6018b1a..638174f 100644
--- a/drivers/bus/mhi/core/mhi_pm.c
+++ b/drivers/bus/mhi/core/mhi_pm.c
@@ -33,10 +33,13 @@
  *     M0 <--> M0
  *     M0 -> FW_DL_ERR
  *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
- * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
- * L2: SHUTDOWN_PROCESS -> DISABLE
+ * L1: DEVICE_ERR_DETECT -> SYS_ERR_DETECT
+ *     SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
+ * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
+ *     SHUTDOWN_PROCESS -> DISABLE
  * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
- *     LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
+ *     LD_ERR_FATAL_DETECT -> SHUTDOWN_NO_ACCESS
+ *     SHUTDOWN_NO_ACCESS -> DISABLE
  */
 static struct mhi_pm_transitions const mhi_state_transitions[] = {
 	/* L0 States */
@@ -47,50 +50,62 @@ static struct mhi_pm_transitions const mhi_state_transitions[] = {
 	{
 		MHI_PM_POR,
 		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
-		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+		MHI_PM_DEVICE_ERR_DETECT | MHI_PM_SYS_ERR_DETECT |
+		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT |
+		MHI_PM_FW_DL_ERR | MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_M0,
 		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
-		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+		MHI_PM_DEVICE_ERR_DETECT | MHI_PM_SYS_ERR_DETECT |
+		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT |
+		MHI_PM_FW_DL_ERR | MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_M2,
-		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_M0 | MHI_PM_DEVICE_ERR_DETECT | MHI_PM_SYS_ERR_DETECT |
+		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT |
+		MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_M3_ENTER,
-		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_M3 | MHI_PM_DEVICE_ERR_DETECT | MHI_PM_SYS_ERR_DETECT |
+		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT |
+		MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_M3,
-		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
-		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_M3_EXIT | MHI_PM_DEVICE_ERR_DETECT |
+		MHI_PM_SYS_ERR_DETECT | MHI_PM_LD_ERR_FATAL_DETECT |
+		MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_M3_EXIT,
-		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_M0 | MHI_PM_DEVICE_ERR_DETECT | MHI_PM_SYS_ERR_DETECT |
+		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT |
+		MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_FW_DL_ERR,
-		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
-		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_FW_DL_ERR | MHI_PM_DEVICE_ERR_DETECT |
+		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	/* L1 States */
 	{
+		MHI_PM_DEVICE_ERR_DETECT,
+		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
+	},
+	{
 		MHI_PM_SYS_ERR_DETECT,
 		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_SYS_ERR_PROCESS,
 		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	/* L2 States */
 	{
@@ -100,7 +115,11 @@ static struct mhi_pm_transitions const mhi_state_transitions[] = {
 	/* L3 States */
 	{
 		MHI_PM_LD_ERR_FATAL_DETECT,
-		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
+	},
+	{
+		MHI_PM_SHUTDOWN_NO_ACCESS,
+		MHI_PM_DISABLE
 	},
 };
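The table encodes every legal move as a bitmask per source state, so validating a requested transition is one lookup plus one AND, which is essentially what mhi_tryset_pm_state() does under the pm_lock. A standalone sketch of the check (pm_transition_valid and the two-field entry are hypothetical stand-ins):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct pm_transition {
	uint32_t from_state;	/* single state bit */
	uint32_t to_states;	/* OR of reachable state bits */
};

static bool pm_transition_valid(const struct pm_transition *tbl,
				size_t n, uint32_t cur, uint32_t next)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].from_state == cur)
			return (tbl[i].to_states & next) != 0;

	return false;	/* unknown source state: reject */
}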
 
@@ -136,9 +155,6 @@ enum MHI_PM_STATE __must_check mhi_tryset_pm_state(
 	MHI_VERB("Transition to pm state from:%s to:%s\n",
 		 to_mhi_pm_state_str(cur_state), to_mhi_pm_state_str(state));
 
-	if (MHI_REG_ACCESS_VALID(cur_state) || MHI_REG_ACCESS_VALID(state))
-		mhi_timesync_log(mhi_cntrl);
-
 	mhi_cntrl->pm_state = state;
 	return mhi_cntrl->pm_state;
 }
@@ -183,10 +199,12 @@ void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
 		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
 	} else {
 		/* if resources requested already, then increment and exit */
-		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
-			return;
-
 		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0))) {
+			spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+			return;
+		}
+
 		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
 		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
 		    !mhi_cntrl->wake_set) {
@@ -202,18 +220,24 @@ void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, bool override)
 {
 	unsigned long flags;
 
-	MHI_ASSERT(atomic_read(&mhi_cntrl->dev_wake) == 0, "dev_wake == 0");
+	MHI_ASSERT((mhi_is_active(mhi_cntrl->mhi_dev) &&
+		   atomic_read(&mhi_cntrl->dev_wake) == 0), "dev_wake == 0");
 
 	/* resources not dropping to 0, decrement and exit */
-	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
-		return;
-
 	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1))) {
+		if (!override)
+			mhi_cntrl->ignore_override = true;
+		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+		return;
+	}
+
 	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
-	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
-	    mhi_cntrl->wake_set) {
+	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && (!override ||
+	    mhi_cntrl->ignore_override) && mhi_cntrl->wake_set) {
 		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
 		mhi_cntrl->wake_set = false;
+		mhi_cntrl->ignore_override = false;
 	}
 	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
 }
@@ -352,14 +376,17 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
 	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
 		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
 
-		write_lock_irq(&mhi_chan->lock);
-		if (mhi_chan->db_cfg.reset_req)
+		if (mhi_chan->db_cfg.reset_req) {
+			write_lock_irq(&mhi_chan->lock);
 			mhi_chan->db_cfg.db_mode = true;
+			write_unlock_irq(&mhi_chan->lock);
+		}
 
+		read_lock_irq(&mhi_chan->lock);
 		/* only ring DB if ring is not empty */
 		if (tre_ring->base && tre_ring->wp  != tre_ring->rp)
 			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		write_unlock_irq(&mhi_chan->lock);
+		read_unlock_irq(&mhi_chan->lock);
 	}
 
 	mhi_cntrl->wake_put(mhi_cntrl, false);
@@ -432,22 +459,24 @@ int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
 static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
 {
 	int i, ret;
+	enum mhi_ee ee = 0;
 	struct mhi_event *mhi_event;
 
 	MHI_LOG("Processing Mission Mode Transition\n");
 
 	write_lock_irq(&mhi_cntrl->pm_lock);
 	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
-		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+		ee = mhi_get_exec_env(mhi_cntrl);
 	write_unlock_irq(&mhi_cntrl->pm_lock);
 
-	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
+	if (!MHI_IN_MISSION_MODE(ee))
 		return -EIO;
 
-	wake_up_all(&mhi_cntrl->state_event);
-
 	mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
 			     MHI_CB_EE_MISSION_MODE);
+	mhi_cntrl->ee = ee;
+
+	wake_up_all(&mhi_cntrl->state_event);
 
 	/* force MHI to be in M0 state before continuing */
 	ret = __mhi_device_get_sync(mhi_cntrl);
@@ -486,13 +515,16 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
 	/* setup support for time sync */
 	mhi_init_timesync(mhi_cntrl);
 
+	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+		mhi_timesync_log(mhi_cntrl);
+
 	MHI_LOG("Adding new devices\n");
 
 	/* add supported devices */
 	mhi_create_devices(mhi_cntrl);
 
 	/* setup sysfs nodes for userspace votes */
-	mhi_create_vote_sysfs(mhi_cntrl);
+	mhi_create_sysfs(mhi_cntrl);
 
 	read_lock_bh(&mhi_cntrl->pm_lock);
 
@@ -522,20 +554,9 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 		to_mhi_pm_state_str(transition_state));
 
 	/* We must notify the MHI control driver so it can clean up first */
-	if (transition_state == MHI_PM_SYS_ERR_PROCESS) {
-		/*
-		 * if controller support rddm, we do not process
-		 * sys error state, instead we will jump directly
-		 * to rddm state
-		 */
-		if (mhi_cntrl->rddm_image) {
-			MHI_LOG(
-				"Controller Support RDDM, skipping SYS_ERR_PROCESS\n");
-			return;
-		}
+	if (transition_state == MHI_PM_SYS_ERR_PROCESS)
 		mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
 				     MHI_CB_SYS_ERROR);
-	}
 
 	mutex_lock(&mhi_cntrl->pm_mutex);
 	write_lock_irq(&mhi_cntrl->pm_lock);
@@ -545,6 +566,8 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 		mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
 		mhi_cntrl->dev_state = MHI_STATE_RESET;
 	}
+	/* notify controller of power down regardless of state transitions */
+	mhi_cntrl->power_down = true;
 	write_unlock_irq(&mhi_cntrl->pm_lock);
 
 	/* wake up any threads waiting for state transitions */
@@ -602,11 +625,10 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 	MHI_LOG("Finish resetting channels\n");
 
 	/* remove support for userspace votes */
-	mhi_destroy_vote_sysfs(mhi_cntrl);
+	mhi_destroy_sysfs(mhi_cntrl);
 
 	MHI_LOG("Waiting for all pending threads to complete\n");
 	wake_up_all(&mhi_cntrl->state_event);
-	flush_work(&mhi_cntrl->st_worker);
 	flush_work(&mhi_cntrl->fw_worker);
 	flush_work(&mhi_cntrl->low_priority_worker);
 
@@ -696,7 +718,28 @@ int mhi_debugfs_trigger_reset(void *data, u64 val)
 	write_unlock_irq(&mhi_cntrl->pm_lock);
 
 	if (cur_state == MHI_PM_SYS_ERR_DETECT)
-		schedule_work(&mhi_cntrl->syserr_worker);
+		mhi_process_sys_err(mhi_cntrl);
+
+	return 0;
+}
+
+/* queue disable transition work item */
+int mhi_queue_disable_transition(struct mhi_controller *mhi_cntrl,
+				 enum MHI_PM_STATE pm_state)
+{
+	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
+	unsigned long flags;
+
+	if (!item)
+		return -ENOMEM;
+
+	item->pm_state = pm_state;
+	item->state = MHI_ST_TRANSITION_DISABLE;
+	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
+	list_add_tail(&item->node, &mhi_cntrl->transition_list);
+	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);
+
+	schedule_work(&mhi_cntrl->st_worker);
 
 	return 0;
 }
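/*
 * Illustrative caller of the helper above (hypothetical function,
 * mirroring mhi_power_down() later in this patch): queue the disable
 * transition, then flush the state worker to wait for it to run.
 */
static void example_disable(struct mhi_controller *mhi_cntrl)
{
	mhi_queue_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
	flush_work(&mhi_cntrl->st_worker);
}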
@@ -753,23 +796,22 @@ void mhi_low_priority_worker(struct work_struct *work)
 		 TO_MHI_EXEC_STR(mhi_cntrl->ee));
 
 	/* check low priority event rings and process events */
-	list_for_each_entry(mhi_event, &mhi_cntrl->lp_ev_rings, node) {
-		if (MHI_IN_MISSION_MODE(mhi_cntrl->ee))
-			mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
-	}
+	list_for_each_entry(mhi_event, &mhi_cntrl->lp_ev_rings, node)
+		mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
 }
 
-void mhi_pm_sys_err_worker(struct work_struct *work)
+void mhi_process_sys_err(struct mhi_controller *mhi_cntrl)
 {
-	struct mhi_controller *mhi_cntrl = container_of(work,
-							struct mhi_controller,
-							syserr_worker);
+	/*
+	 * if controller supports rddm, we do not process sys error state,
+	 * instead we will jump directly to rddm state
+	 */
+	if (mhi_cntrl->rddm_image) {
+		MHI_LOG("Controller supports RDDM, skipping SYS_ERR_PROCESS\n");
+		return;
+	}
 
-	MHI_LOG("Enter with pm_state:%s MHI_STATE:%s\n",
-		to_mhi_pm_state_str(mhi_cntrl->pm_state),
-		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
-
-	mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+	mhi_queue_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
 }
 
 void mhi_pm_st_worker(struct work_struct *work)
@@ -810,6 +852,9 @@ void mhi_pm_st_worker(struct work_struct *work)
 		case MHI_ST_TRANSITION_READY:
 			mhi_ready_state_transition(mhi_cntrl);
 			break;
+		case MHI_ST_TRANSITION_DISABLE:
+			mhi_pm_disable_transition(mhi_cntrl, itr->pm_state);
+			break;
 		default:
 			break;
 		}
@@ -823,6 +868,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 	u32 val;
 	enum mhi_ee current_ee;
 	enum MHI_ST_TRANSITION next_state;
+	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
 
 	MHI_LOG("Requested to power on\n");
 
@@ -838,6 +884,10 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
 	}
 
+	/* clear votes before proceeding with power up */
+	atomic_set(&mhi_dev->dev_vote, 0);
+	atomic_set(&mhi_dev->bus_vote, 0);
+
 	mutex_lock(&mhi_cntrl->pm_mutex);
 	mhi_cntrl->pm_state = MHI_PM_DISABLE;
 
@@ -926,19 +976,24 @@ EXPORT_SYMBOL(mhi_async_power_up);
 /* Transition MHI into error state and notify critical clients */
 void mhi_control_error(struct mhi_controller *mhi_cntrl)
 {
-	enum MHI_PM_STATE cur_state;
+	enum MHI_PM_STATE cur_state, transition_state;
 
 	MHI_LOG("Enter with pm_state:%s MHI_STATE:%s\n",
 		to_mhi_pm_state_str(mhi_cntrl->pm_state),
 		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
 
+	/* link is not down if device is in RDDM */
+	transition_state = (mhi_cntrl->ee == MHI_EE_RDDM) ?
+		MHI_PM_DEVICE_ERR_DETECT : MHI_PM_LD_ERR_FATAL_DETECT;
+
 	write_lock_irq(&mhi_cntrl->pm_lock);
-	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_LD_ERR_FATAL_DETECT);
+	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
 	write_unlock_irq(&mhi_cntrl->pm_lock);
 
-	if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT) {
+	/* proceed if we move to device error or are already in error state */
+	if (!MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
 		MHI_ERR("Failed to transition to state:%s from:%s\n",
-			to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
+			to_mhi_pm_state_str(transition_state),
 			to_mhi_pm_state_str(cur_state));
 		goto exit_control_error;
 	}
@@ -959,6 +1014,7 @@ EXPORT_SYMBOL(mhi_control_error);
 void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
 {
 	enum MHI_PM_STATE cur_state;
+	enum MHI_PM_STATE transition_state = MHI_PM_SHUTDOWN_PROCESS;
 
 	/* if it's not graceful shutdown, force MHI to a linkdown state */
 	if (!graceful) {
@@ -972,8 +1028,14 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
 			MHI_ERR("Failed to move to state:%s from:%s\n",
 				to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
 				to_mhi_pm_state_str(mhi_cntrl->pm_state));
+
+		transition_state = MHI_PM_SHUTDOWN_NO_ACCESS;
 	}
-	mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
+
+	mhi_queue_disable_transition(mhi_cntrl, transition_state);
+
+	MHI_LOG("Wait for shutdown to complete\n");
+	flush_work(&mhi_cntrl->st_worker);
 
 	mhi_deinit_debugfs(mhi_cntrl);
 
@@ -1002,7 +1064,7 @@ int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
 			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
 			   msecs_to_jiffies(mhi_cntrl->timeout_ms));
 
-	return (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -EIO;
+	return (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
 }
 EXPORT_SYMBOL(mhi_sync_power_up);
 
@@ -1095,10 +1157,8 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
 
 	/* notify any clients we enter lpm */
 	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
-		mutex_lock(&itr->mutex);
 		if (itr->mhi_dev)
 			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
-		mutex_unlock(&itr->mutex);
 	}
 
 	return 0;
@@ -1135,6 +1195,11 @@ int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_client)
 		return -EBUSY;
 	}
 
+	/* wait here if controller wants device to be in M2 before proceeding */
+	wait_event_timeout(mhi_cntrl->state_event,
+			   mhi_cntrl->dev_state == MHI_STATE_M2,
+			   msecs_to_jiffies(mhi_cntrl->m2_timeout_ms));
+
 	/* disable ctrl event processing */
 	tasklet_disable(&mhi_cntrl->mhi_event->task);
 
@@ -1201,10 +1266,8 @@ int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_client)
 
 	/* notify any clients we enter lpm */
 	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
-		mutex_lock(&itr->mutex);
 		if (itr->mhi_dev)
 			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
-		mutex_unlock(&itr->mutex);
 	}
 
 	return 0;
@@ -1240,10 +1303,8 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
 
 	/* notify any clients we enter lpm */
 	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
-		mutex_lock(&itr->mutex);
 		if (itr->mhi_dev)
 			mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
-		mutex_unlock(&itr->mutex);
 	}
 
 	write_lock_irq(&mhi_cntrl->pm_lock);
@@ -1316,10 +1377,8 @@ int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_client)
 	if (notify_client) {
 		list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans,
 					 node) {
-			mutex_lock(&itr->mutex);
 			if (itr->mhi_dev)
 				mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
-			mutex_unlock(&itr->mutex);
 		}
 	}
 
diff --git a/drivers/bus/mhi/devices/mhi_netdev.c b/drivers/bus/mhi/devices/mhi_netdev.c
index cfe4f82..1749f48 100644
--- a/drivers/bus/mhi/devices/mhi_netdev.c
+++ b/drivers/bus/mhi/devices/mhi_netdev.c
@@ -852,6 +852,9 @@ static int mhi_netdev_debugfs_stats_show(struct seq_file *m, void *d)
 		   mhi_netdev->abuffers, mhi_netdev->kbuffers,
 		   mhi_netdev->rbuffers);
 
+	seq_printf(m, "chaining SKBs:%s\n", (mhi_netdev->chain) ?
+		   "enabled" : "disabled");
+
 	return 0;
 }
 
@@ -866,7 +869,7 @@ static const struct file_operations debugfs_stats = {
 	.read = seq_read,
 };
 
-int mhi_netdev_debugfs_chain(void *data, u64 val)
+static int mhi_netdev_debugfs_chain(void *data, u64 val)
 {
 	struct mhi_netdev *mhi_netdev = data;
 	struct mhi_netdev *rsc_dev = mhi_netdev->rsc_dev;
diff --git a/drivers/bus/mhi/devices/mhi_satellite.c b/drivers/bus/mhi/devices/mhi_satellite.c
index 162d8a6..2462147 100644
--- a/drivers/bus/mhi/devices/mhi_satellite.c
+++ b/drivers/bus/mhi/devices/mhi_satellite.c
@@ -235,14 +235,15 @@ enum mhi_sat_state {
 	SAT_DISCONNECTED, /* rpmsg link is down */
 	SAT_FATAL_DETECT, /* device is down as fatal error was detected early */
 	SAT_ERROR, /* device is down after error or graceful shutdown */
-	SAT_DISABLED, /* set if rpmsg link goes down after device is down */
+	SAT_DISABLED, /* no further processing: wait for device removal */
 };
 
 #define MHI_SAT_ACTIVE(cntrl) (cntrl->state == SAT_RUNNING)
-#define MHI_SAT_FATAL_DETECT(cntrl) (cntrl->state == SAT_FATAL_DETECT)
+#define MHI_SAT_IN_ERROR_STATE(cntrl) (cntrl->state >= SAT_FATAL_DETECT)
 #define MHI_SAT_ALLOW_CONNECTION(cntrl) (cntrl->state == SAT_READY || \
 					 cntrl->state == SAT_DISCONNECTED)
-#define MHI_SAT_IN_ERROR_STATE(cntrl) (cntrl->state >= SAT_FATAL_DETECT)
+#define MHI_SAT_ALLOW_SYS_ERR(cntrl) (cntrl->state == SAT_RUNNING || \
+					cntrl->state == SAT_FATAL_DETECT)
 
 struct mhi_sat_cntrl {
 	struct list_head node;
@@ -462,10 +463,10 @@ static void mhi_sat_process_cmds(struct mhi_sat_cntrl *sat_cntrl,
 			code = MHI_EV_CC_SUCCESS;
 
 iommu_map_cmd_completion:
-			MHI_SAT_LOG("IOMMU MAP 0x%llx CMD processing %s\n",
-				   MHI_TRE_GET_PTR(pkt),
-				   (code == MHI_EV_CC_SUCCESS) ? "successful" :
-				   "failed");
+			MHI_SAT_LOG("IOMMU MAP 0x%llx len:%d CMD %s:%llx\n",
+				    MHI_TRE_GET_PTR(pkt), MHI_TRE_GET_SIZE(pkt),
+				    (code == MHI_EV_CC_SUCCESS) ? "successful" :
+				    "failed", iova);
 
 			pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(iova);
 			pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0(code);
@@ -503,9 +504,9 @@ static void mhi_sat_process_cmds(struct mhi_sat_cntrl *sat_cntrl,
 			if (!ret)
 				code = MHI_EV_CC_SUCCESS;
 
-			MHI_SAT_LOG("CTXT UPDATE CMD %s:%d processing %s\n",
-				buf.name, id, (code == MHI_EV_CC_SUCCESS) ?
-				"successful" : "failed");
+			MHI_SAT_LOG("CTXT UPDATE CMD %s:%d %s\n", buf.name, id,
+				    (code == MHI_EV_CC_SUCCESS) ? "successful" :
+				    "failed");
 
 			pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(0);
 			pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0(code);
@@ -532,9 +533,9 @@ static void mhi_sat_process_cmds(struct mhi_sat_cntrl *sat_cntrl,
 				code = MHI_EV_CC_SUCCESS;
 			}
 
-			MHI_SAT_LOG("START CHANNEL %d CMD processing %s\n",
-				id, (code == MHI_EV_CC_SUCCESS) ? "successful" :
-				"failure");
+			MHI_SAT_LOG("START CHANNEL %d CMD %s\n", id,
+				    (code == MHI_EV_CC_SUCCESS) ? "successful" :
+				    "failure");
 
 			pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(0);
 			pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0(code);
@@ -549,17 +550,15 @@ static void mhi_sat_process_cmds(struct mhi_sat_cntrl *sat_cntrl,
 						   SAT_CTXT_TYPE_CHAN);
 
 			MHI_SAT_ASSERT(!sat_dev,
-				"No device with given channel ID\n");
+					"No device with given channel ID\n");
 
 			MHI_SAT_ASSERT(!sat_dev->chan_started,
-				"Resetting unstarted channel!");
+					"Resetting unstarted channel!");
 
 			mhi_unprepare_from_transfer(sat_dev->mhi_dev);
 			sat_dev->chan_started = false;
 
-			MHI_SAT_LOG(
-				"RESET CHANNEL %d CMD processing successful\n",
-				id);
+			MHI_SAT_LOG("RESET CHANNEL %d CMD successful\n", id);
 
 			pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(0);
 			pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0(
@@ -788,8 +787,10 @@ static int mhi_sat_rpmsg_cb(struct rpmsg_device *rpdev, void *data, int len,
 
 	/* find controller packet was sent for */
 	sat_cntrl = find_sat_cntrl_by_id(subsys, hdr->dev_id);
-
-	MHI_SAT_ASSERT(!sat_cntrl, "Packet for unknown device!\n");
+	if (!sat_cntrl) {
+		MHI_SAT_ERR("Message for unknown device!\n");
+		return 0;
+	}
 
 	/* handle events directly regardless of controller active state */
 	if (hdr->msg_id == SAT_MSG_ID_EVT) {
@@ -893,6 +894,8 @@ static int mhi_sat_rpmsg_probe(struct rpmsg_device *rpdev)
 	if (!subsys)
 		return -EINVAL;
 
+	mutex_lock(&subsys->cntrl_mutex);
+
 	MHI_SUBSYS_LOG("Received RPMSG probe\n");
 
 	dev_set_drvdata(&rpdev->dev, subsys);
@@ -905,6 +908,8 @@ static int mhi_sat_rpmsg_probe(struct rpmsg_device *rpdev)
 		schedule_work(&sat_cntrl->connect_work);
 	spin_unlock_irq(&subsys->cntrl_lock);
 
+	mutex_unlock(&subsys->cntrl_mutex);
+
 	return 0;
 }
 
@@ -936,10 +941,15 @@ static void mhi_sat_dev_status_cb(struct mhi_device *mhi_dev,
 
 	MHI_SAT_LOG("Device fatal error detected\n");
 	spin_lock_irqsave(&sat_cntrl->state_lock, flags);
-	if (MHI_SAT_ACTIVE(sat_cntrl))
+	if (MHI_SAT_ACTIVE(sat_cntrl)) {
 		sat_cntrl->error_cookie = async_schedule(mhi_sat_error_worker,
 							 sat_cntrl);
-	sat_cntrl->state = SAT_FATAL_DETECT;
+		sat_cntrl->state = SAT_FATAL_DETECT;
+	} else {
+		/* rpmsg link down or HELLO not sent or an error occurred */
+		sat_cntrl->state = SAT_DISABLED;
+	}
+
 	spin_unlock_irqrestore(&sat_cntrl->state_lock, flags);
 }
 
@@ -964,7 +974,7 @@ static void mhi_sat_dev_remove(struct mhi_device *mhi_dev)
 
 	/* send sys_err if first device is removed */
 	spin_lock_irq(&sat_cntrl->state_lock);
-	if (MHI_SAT_ACTIVE(sat_cntrl) || MHI_SAT_FATAL_DETECT(sat_cntrl))
+	if (MHI_SAT_ALLOW_SYS_ERR(sat_cntrl))
 		send_sys_err = true;
 	sat_cntrl->state = SAT_ERROR;
 	spin_unlock_irq(&sat_cntrl->state_lock);
@@ -978,6 +988,13 @@ static void mhi_sat_dev_remove(struct mhi_device *mhi_dev)
 		return;
 	}
 
+	/*
+	 * cancel any pending work, as work may get queued if an rpmsg
+	 * probe arrives before the controller is removed
+	 */
+	cancel_work_sync(&sat_cntrl->connect_work);
+	cancel_work_sync(&sat_cntrl->process_work);
+
 	/* remove address mappings */
 	mutex_lock(&sat_cntrl->list_mutex);
 	list_for_each_entry_safe(buf, tmp, &sat_cntrl->addr_map_list, node) {
diff --git a/drivers/bus/mhi/devices/mhi_uci.c b/drivers/bus/mhi/devices/mhi_uci.c
index e31eaa4..7dbb4d7 100644
--- a/drivers/bus/mhi/devices/mhi_uci.c
+++ b/drivers/bus/mhi/devices/mhi_uci.c
@@ -45,6 +45,7 @@ struct uci_dev {
 	struct uci_chan ul_chan;
 	struct uci_chan dl_chan;
 	size_t mtu;
+	size_t actual_mtu; /* maximum size of incoming buffer */
 	int ref_count;
 	bool enabled;
 	u32 tiocm;
@@ -115,22 +116,24 @@ static int mhi_queue_inbound(struct uci_dev *uci_dev)
 	struct mhi_device *mhi_dev = uci_dev->mhi_dev;
 	int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
 	size_t mtu = uci_dev->mtu;
+	size_t actual_mtu = uci_dev->actual_mtu;
 	void *buf;
 	struct uci_buf *uci_buf;
 	int ret = -EIO, i;
 
 	for (i = 0; i < nr_trbs; i++) {
-		buf = kmalloc(mtu + sizeof(*uci_buf), GFP_KERNEL);
+		buf = kmalloc(mtu, GFP_KERNEL);
 		if (!buf)
 			return -ENOMEM;
 
-		uci_buf = buf + mtu;
+		uci_buf = buf + actual_mtu;
 		uci_buf->data = buf;
 
-		MSG_VERB("Allocated buf %d of %d size %ld\n", i, nr_trbs, mtu);
+		MSG_VERB("Allocated buf %d of %d size %zu\n", i, nr_trbs,
+			 actual_mtu);
 
-		ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu,
-					 MHI_EOT);
+		ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf,
+					 actual_mtu, MHI_EOT);
 		if (ret) {
 			kfree(buf);
 			MSG_ERR("Failed to queue buffer %d\n", i);
@@ -155,10 +158,14 @@ static long mhi_uci_ioctl(struct file *file,
 	if (cmd == TIOCMGET) {
 		spin_lock_bh(&uci_chan->lock);
 		ret = uci_dev->tiocm;
-		uci_dev->tiocm = 0;
 		spin_unlock_bh(&uci_chan->lock);
 	} else if (uci_dev->enabled) {
 		ret = mhi_ioctl(mhi_dev, cmd, arg);
+		if (!ret) {
+			spin_lock_bh(&uci_chan->lock);
+			uci_dev->tiocm = mhi_dev->tiocm;
+			spin_unlock_bh(&uci_chan->lock);
+		}
 	}
 
 	mutex_unlock(&uci_dev->mutex);
@@ -421,8 +428,8 @@ static ssize_t mhi_uci_read(struct file *file,
 
 		if (uci_dev->enabled)
 			ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE,
-						 uci_buf->data, uci_dev->mtu,
-						 MHI_EOT);
+						 uci_buf->data,
+						 uci_dev->actual_mtu, MHI_EOT);
 		else
 			ret = -ERESTARTSYS;
 
@@ -609,6 +616,7 @@ static int mhi_uci_probe(struct mhi_device *mhi_dev,
 	}
 
 	uci_dev->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu);
+	uci_dev->actual_mtu = uci_dev->mtu - sizeof(struct uci_buf);
 	mhi_device_set_devdata(mhi_dev, uci_dev);
 	uci_dev->enabled = true;
 
@@ -652,7 +660,7 @@ static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
 	}
 
 	spin_lock_irqsave(&uci_chan->lock, flags);
-	buf = mhi_result->buf_addr + uci_dev->mtu;
+	buf = mhi_result->buf_addr + uci_dev->actual_mtu;
 	buf->data = mhi_result->buf_addr;
 	buf->len = mhi_result->bytes_xferd;
 	list_add_tail(&buf->node, &uci_chan->pending);
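/*
 * Illustrative sketch (not part of the patch): a single kmalloc(mtu)
 * now holds the payload followed by the bookkeeping struct, so
 * actual_mtu = mtu - sizeof(struct uci_buf), and the struct is
 * recovered from a completed transfer as below (helper name is
 * hypothetical):
 *
 *	| payload: actual_mtu bytes | struct uci_buf |
 */
static struct uci_buf *uci_buf_of(void *payload, size_t actual_mtu)
{
	return (struct uci_buf *)((char *)payload + actual_mtu);
}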
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index e4fe954..e95b263 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -1022,10 +1022,7 @@ static int sysc_init_sysc_mask(struct sysc *ddata)
 	if (error)
 		return 0;
 
-	if (val)
-		ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
-	else
-		ddata->cfg.sysc_val = ddata->cap->sysc_mask;
+	ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
 
 	return 0;
 }
@@ -1688,7 +1685,7 @@ static int sysc_probe(struct platform_device *pdev)
 
 	error = sysc_init_dts_quirks(ddata);
 	if (error)
-		goto unprepare;
+		return error;
 
 	error = sysc_get_clocks(ddata);
 	if (error)
@@ -1696,27 +1693,27 @@ static int sysc_probe(struct platform_device *pdev)
 
 	error = sysc_map_and_check_registers(ddata);
 	if (error)
-		goto unprepare;
+		return error;
 
 	error = sysc_init_sysc_mask(ddata);
 	if (error)
-		goto unprepare;
+		return error;
 
 	error = sysc_init_idlemodes(ddata);
 	if (error)
-		goto unprepare;
+		return error;
 
 	error = sysc_init_syss_mask(ddata);
 	if (error)
-		goto unprepare;
+		return error;
 
 	error = sysc_init_pdata(ddata);
 	if (error)
-		goto unprepare;
+		return error;
 
 	error = sysc_init_resets(ddata);
 	if (error)
-		return error;
+		goto unprepare;
 
 	pm_runtime_enable(ddata->dev);
 	error = sysc_init_module(ddata);
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 3a28561..684666d 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -42,6 +42,10 @@
 #include <linux/debugfs.h>
 #include <linux/pm_qos.h>
 #include <linux/stat.h>
+#include <linux/cpumask.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/fastrpc.h>
 
 #define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
 #define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
@@ -49,6 +53,7 @@
 #define ADSP_MMAP_HEAP_ADDR 4
 #define ADSP_MMAP_REMOTE_HEAP_ADDR 8
 #define ADSP_MMAP_ADD_PAGES 0x1000
+#define ADSP_MMAP_ADD_PAGES_LLC  0x3000
 #define FASTRPC_DMAHANDLE_NOMAP (16)
 
 #define FASTRPC_ENOSUCH 39
@@ -99,7 +104,7 @@
 #define RH_CID ADSP_DOMAIN_ID
 
 #define PERF_KEYS \
-	"count:flush:map:copy:rpmsg:getargs:putargs:invalidate:invoke:tid:ptr"
+	"count:flush:map:copy:rpmsg:getargs:putargs:invalidate:invoke"
 #define FASTRPC_STATIC_HANDLE_PROCESS_GROUP (1)
 #define FASTRPC_STATIC_HANDLE_DSP_UTILITIES (2)
 #define FASTRPC_STATIC_HANDLE_LISTENER (3)
@@ -107,9 +112,12 @@
 #define FASTRPC_LATENCY_CTRL_ENB  (1)
 
 /* timeout in us for busy polling after early response from remote processor */
-#define FASTRPC_EARLY_TIMEOUT (4000)
+#define FASTRPC_POLL_TIME (4000)
 
-/* timeout for polling for completion signal after user early completion hint */
+/* timeout in us for polling without preempt */
+#define FASTRPC_POLL_TIME_WITHOUT_PREEMPT (500)
+
+/* timeout in us for polling completion signal after user early hint */
 #define FASTRPC_USER_EARLY_HINT_TIMEOUT (500)
 
 /* Early wake up poll completion number received from remote processor */
@@ -217,6 +225,11 @@ struct secure_vm {
 	int vmcount;
 };
 
+struct qos_cores {
+	int *coreno;
+	int corecount;
+};
+
 struct fastrpc_file;
 
 struct fastrpc_buf {
@@ -256,9 +269,9 @@ struct smq_invoke_ctx {
 	unsigned int *attrs;
 	struct fastrpc_mmap **maps;
 	struct fastrpc_buf *buf;
-	struct fastrpc_buf *lbuf;
 	size_t used;
 	struct fastrpc_file *fl;
+	uint32_t handle;
 	uint32_t sc;
 	struct overlap *overs;
 	struct overlap **overps;
@@ -268,10 +281,11 @@ struct smq_invoke_ctx {
 	uint64_t ctxid;
 	/* response flags from remote processor */
 	enum fastrpc_response_flags rspFlags;
-	/* user hint of completion time */
+	/* user hint of completion time in us */
 	uint32_t earlyWakeTime;
 	/* work done status flag */
 	bool isWorkDone;
+	bool pm_awake_voted;
 };
 
 struct fastrpc_ctx_lst {
@@ -361,6 +375,8 @@ struct fastrpc_apps {
 	bool legacy_remote_heap;
 	/* Unique job id for each message */
 	uint64_t jobid[NUM_CHANNELS];
+	struct wakeup_source *wake_source;
+	struct qos_cores silvercores;
 };
 
 struct fastrpc_mmap {
@@ -442,6 +458,8 @@ struct fastrpc_file {
 	/* Identifies the device (MINOR_NUM_DEV / MINOR_NUM_SECURE_DEV) */
 	int dev_minor;
 	char *debug_buf;
+	/* Flag to enable PM wake/relax voting for every remote invoke */
+	int wake_enable;
 };
 
 static struct fastrpc_apps gfa;
@@ -514,6 +532,9 @@ static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
 static int hlosvm[1] = {VMID_HLOS};
 static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
 
+static void fastrpc_pm_awake(int fl_wake_enable, bool *pm_awake_voted);
+static void fastrpc_pm_relax(bool *pm_awake_voted);
+
 static inline int64_t getnstimediff(struct timespec *start)
 {
 	int64_t ns;
@@ -566,7 +587,7 @@ static inline int64_t *getperfcounter(struct fastrpc_file *fl, int key)
 
 static inline int poll_on_early_response(struct smq_invoke_ctx *ctx)
 {
-	uint32_t ii = 0;
+	int ii, jj, err = -EIO;
 	uint32_t sc = ctx->sc;
 	struct smq_invoke_buf *list;
 	struct smq_phy_page *pages;
@@ -584,20 +605,27 @@ static inline int poll_on_early_response(struct smq_invoke_ctx *ctx)
 	crclist = (uint32_t *)(fdlist + M_FDLIST);
 	poll = (uint32_t *)(crclist + M_CRCLIST);
 
-	/* poll on memory for actual completion after receiving
+	/*
+	 * poll on memory for actual completion after receiving
 	 * early response from DSP. Return failure on timeout.
-	 **/
+	 */
 	preempt_disable();
-	for (ii = 0; ii < FASTRPC_EARLY_TIMEOUT; ii++) {
-		mem_barrier();
+	for (ii = 0, jj = 0; ii < FASTRPC_POLL_TIME; ii++, jj++) {
 		if (*poll == FASTRPC_EARLY_WAKEUP_POLL) {
-			preempt_enable_no_resched();
-			return 0;
+			err = 0;
+			break;
 		}
-		udelay(1); // busy wait for 1 us
+		if (jj == FASTRPC_POLL_TIME_WITHOUT_PREEMPT) {
+			/* limit preempt disable time with no rescheduling */
+			preempt_enable();
+			mem_barrier();
+			preempt_disable();
+			jj = 0;
+		}
+		udelay(1);
 	}
 	preempt_enable_no_resched();
-	return -EIO;
+	return err;
 }
 
 static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
@@ -633,6 +661,7 @@ static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
 			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
 				srcVM, 2, destVM, destVMperm, 1);
 		}
+		trace_fastrpc_dma_free(fl->cid, buf->phys, buf->size);
 		dma_free_attrs(fl->sctx->smmu.dev, buf->size, buf->virt,
 					buf->phys, buf->dma_attr);
 	}
@@ -830,14 +859,17 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
 				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
 
 		if (me->dev == NULL) {
-			pr_err("failed to free remote heap allocation\n");
+			pr_err("adsprpc: %s: %s: failed to free remote heap allocation\n",
+				current->comm, __func__);
 			return;
 		}
+		trace_fastrpc_dma_free(-1, map->phys, map->size);
 		if (map->phys) {
 			dma_free_attrs(me->dev, map->size, (void *)map->va,
 			(dma_addr_t)map->phys, (unsigned long)map->attr);
 		}
 	} else if (map->flags == FASTRPC_DMAHANDLE_NOMAP) {
+		trace_fastrpc_dma_unmap(fl->cid, map->phys, map->size);
 		if (!IS_ERR_OR_NULL(map->table))
 			dma_buf_unmap_attachment(map->attach, map->table,
 					DMA_BIDIRECTIONAL);
@@ -861,7 +893,7 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
 			hyp_assign_phys(map->phys, buf_page_size(map->size),
 				srcVM, 2, destVM, destVMperm, 1);
 		}
-
+		trace_fastrpc_dma_unmap(fl->cid, map->phys, map->size);
 		if (!IS_ERR_OR_NULL(map->table))
 			dma_buf_unmap_attachment(map->attach, map->table,
 					DMA_BIDIRECTIONAL);
@@ -918,6 +950,8 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
 					len, (unsigned long) map->attr));
 		if (err)
 			goto bail;
+		trace_fastrpc_dma_alloc(fl->cid, (uint64_t)region_phys, len,
+			(unsigned long)map->attr, mflags);
 		map->phys = (uintptr_t)region_phys;
 		map->size = len;
 		map->va = (uintptr_t)region_vaddr;
@@ -948,10 +982,13 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
 		if (err)
 			goto bail;
 		map->phys = sg_dma_address(map->table->sgl);
+		map->size = len;
+		trace_fastrpc_dma_map(fl->cid, fd, map->phys, map->size,
+			len, mflags, map->attach->dma_map_attrs);
 	} else {
 		if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
-			pr_info("adsprpc: buffer mapped with persist attr %x\n",
-				(unsigned int)map->attr);
+			pr_info("adsprpc: %s: buffer mapped with persist attr 0x%x\n",
+				__func__, (unsigned int)map->attr);
 			map->refs = 2;
 		}
 		VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
@@ -1023,6 +1060,8 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
 		} else {
 			map->size = buf_page_size(len);
 		}
+		trace_fastrpc_dma_map(fl->cid, fd, map->phys, map->size,
+			len, mflags, map->attach->dma_map_attrs);
 
 		vmid = fl->apps->channel[fl->cid].vmid;
 		if (!sess->smmu.enabled && !vmid) {
@@ -1113,13 +1152,16 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
 		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
 	}
 	if (err) {
-		err = -ENOMEM;
+		err = ENOMEM;
 		pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx, returned %pK\n",
 			current->comm, __func__, size, buf->virt);
 		goto bail;
 	}
 	if (fl->sctx->smmu.cb)
 		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
+	trace_fastrpc_dma_alloc(fl->cid, buf->phys, size,
+		dma_attr, (int)rflags);
+
 	vmid = fl->apps->channel[fl->cid].vmid;
 	if (vmid) {
 		int srcVM[1] = {VMID_HLOS};
@@ -1308,19 +1350,21 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
 			goto bail;
 	}
 	ctx->crc = (uint32_t *)invokefd->crc;
+	ctx->handle = invoke->handle;
 	ctx->sc = invoke->sc;
 	if (bufs) {
 		VERIFY(err, 0 == context_build_overlap(ctx));
 		if (err)
 			goto bail;
 	}
-	ctx->retval = -1;
+	ctx->retval = 0xDECAF;
 	ctx->pid = current->pid;
 	ctx->tgid = fl->tgid;
 	init_completion(&ctx->work);
 	ctx->magic = FASTRPC_CTX_MAGIC;
 	ctx->rspFlags = NORMAL_RESPONSE;
 	ctx->isWorkDone = false;
+	ctx->pm_awake_voted = false;
 
 	spin_lock(&fl->hlock);
 	hlist_add_head(&ctx->hn, &clst->pending);
@@ -1342,7 +1386,8 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
 		pr_err("adsprpc: out of context memory\n");
 		goto bail;
 	}
-
+	trace_fastrpc_context_alloc((uint64_t)ctx,
+		ctx->ctxid | fl->pd, ctx->handle, ctx->sc);
 	*po = ctx;
 bail:
 	if (ctx && err)
@@ -1366,17 +1411,6 @@ static void context_free(struct smq_invoke_ctx *ctx)
 	struct fastrpc_apps *me = &gfa;
 	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
 		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
-	spin_lock(&ctx->fl->hlock);
-	hlist_del_init(&ctx->hn);
-	spin_unlock(&ctx->fl->hlock);
-	mutex_lock(&ctx->fl->map_mutex);
-	for (i = 0; i < nbufs; ++i)
-		fastrpc_mmap_free(ctx->maps[i], 0);
-	mutex_unlock(&ctx->fl->map_mutex);
-	fastrpc_buf_free(ctx->buf, 1);
-	fastrpc_buf_free(ctx->lbuf, 1);
-	ctx->magic = 0;
-	ctx->ctxid = 0;
 
 	spin_lock(&me->ctxlock);
 	for (i = 0; i < FASTRPC_CTX_MAX; i++) {
@@ -1387,12 +1421,30 @@ static void context_free(struct smq_invoke_ctx *ctx)
 	}
 	spin_unlock(&me->ctxlock);
 
+	spin_lock(&ctx->fl->hlock);
+	hlist_del_init(&ctx->hn);
+	spin_unlock(&ctx->fl->hlock);
+
+	mutex_lock(&ctx->fl->map_mutex);
+	for (i = 0; i < nbufs; ++i)
+		fastrpc_mmap_free(ctx->maps[i], 0);
+	mutex_unlock(&ctx->fl->map_mutex);
+
+	fastrpc_buf_free(ctx->buf, 1);
+	kfree(ctx->lrpra);
+	ctx->lrpra = NULL;
+	ctx->magic = 0;
+	ctx->ctxid = 0;
+
+	trace_fastrpc_context_free((uint64_t)ctx,
+		ctx->msg.invoke.header.ctx, ctx->handle, ctx->sc);
 	kfree(ctx);
 }
 
 static void context_notify_user(struct smq_invoke_ctx *ctx,
 		int retval, uint32_t rspFlags, uint32_t earlyWakeTime)
 {
+	fastrpc_pm_awake(ctx->fl->wake_enable, &ctx->pm_awake_voted);
 	ctx->retval = retval;
 	switch (rspFlags) {
 	case NORMAL_RESPONSE:
@@ -1416,6 +1468,8 @@ static void context_notify_user(struct smq_invoke_ctx *ctx,
 		break;
 	}
 	ctx->rspFlags = (enum fastrpc_response_flags)rspFlags;
+	trace_fastrpc_context_complete(ctx->fl->cid, (uint64_t)ctx, retval,
+		ctx->msg.invoke.header.ctx, ctx->handle, ctx->sc);
 	complete(&ctx->work);
 }
 
@@ -1427,10 +1481,16 @@ static void fastrpc_notify_users(struct fastrpc_file *me)
 	spin_lock(&me->hlock);
 	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
 		ictx->isWorkDone = true;
+		trace_fastrpc_context_complete(me->cid, (uint64_t)ictx,
+			ictx->retval, ictx->msg.invoke.header.ctx,
+			ictx->handle, ictx->sc);
 		complete(&ictx->work);
 	}
 	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
 		ictx->isWorkDone = true;
+		trace_fastrpc_context_complete(me->cid, (uint64_t)ictx,
+			ictx->retval, ictx->msg.invoke.header.ctx,
+			ictx->handle, ictx->sc);
 		complete(&ictx->work);
 	}
 	spin_unlock(&me->hlock);
@@ -1446,12 +1506,18 @@ static void fastrpc_notify_users_staticpd_pdr(struct fastrpc_file *me)
 	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
 		if (ictx->msg.pid) {
 			ictx->isWorkDone = true;
+			trace_fastrpc_context_complete(me->cid, (uint64_t)ictx,
+				ictx->retval, ictx->msg.invoke.header.ctx,
+				ictx->handle, ictx->sc);
 			complete(&ictx->work);
 		}
 	}
 	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
 		if (ictx->msg.pid) {
 			ictx->isWorkDone = true;
+			trace_fastrpc_context_complete(me->cid, (uint64_t)ictx,
+				ictx->retval, ictx->msg.invoke.header.ctx,
+				ictx->handle, ictx->sc);
 			complete(&ictx->work);
 		}
 	}
@@ -1546,7 +1612,7 @@ static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
 
 static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 {
-	remote_arg64_t *rpra;
+	remote_arg64_t *rpra, *lrpra;
 	remote_arg_t *lpra = ctx->lpra;
 	struct smq_invoke_buf *list;
 	struct smq_phy_page *pages, *ipage;
@@ -1569,6 +1635,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 
 	/* calculate size of the metadata */
 	rpra = NULL;
+	lrpra = NULL;
 	list = smq_invoke_buf_start(rpra, sc);
 	pages = smq_phy_page_start(sc, list);
 	ipage = pages;
@@ -1612,11 +1679,12 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 	/* allocate new local rpra buffer */
 	lrpralen = (size_t)&list[0];
 	if (lrpralen) {
-		err = fastrpc_buf_alloc(ctx->fl, lrpralen, 0, 0, 0, &ctx->lbuf);
+		lrpra = kzalloc(lrpralen, GFP_KERNEL);
+		VERIFY(err, !IS_ERR_OR_NULL(lrpra));
 		if (err)
 			goto bail;
 	}
-	ctx->lrpra = ctx->lbuf->virt;
+	ctx->lrpra = lrpra;
 
 	/* calculate len required for copying */
 	for (oix = 0; oix < inbufs + outbufs; ++oix) {
@@ -1983,6 +2051,8 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
 		goto bail;
 	}
 	err = rpmsg_send(channel_ctx->rpdev->ept, (void *)msg, sizeof(*msg));
+	trace_fastrpc_rpmsg_send(fl->cid, (uint64_t)ctx, msg->invoke.header.ctx,
+		handle, ctx->sc, msg->invoke.page.addr, msg->invoke.page.size);
 	LOG_FASTRPC_GLINK_MSG(channel_ctx->ipc_log_ctx,
 		"sent pkt %pK (sz %d): ctx 0x%llx, handle 0x%x, sc 0x%x (rpmsg err %d)",
 		(void *)msg, sizeof(*msg),
@@ -2014,7 +2084,25 @@ static void fastrpc_init(struct fastrpc_apps *me)
 	me->channel[CDSP_DOMAIN_ID].secure = NON_SECURE_CHANNEL;
 }
 
-static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
+static inline void fastrpc_pm_awake(int fl_wake_enable, bool *pm_awake_voted)
+{
+	struct fastrpc_apps *me = &gfa;
+
+	if (!fl_wake_enable || *pm_awake_voted)
+		return;
+	__pm_stay_awake(me->wake_source);
+	*pm_awake_voted = true;
+}
+
+static inline void fastrpc_pm_relax(bool *pm_awake_voted)
+{
+	struct fastrpc_apps *me = &gfa;
+
+	if (!(*pm_awake_voted))
+		return;
+	__pm_relax(me->wake_source);
+	*pm_awake_voted = false;
+}
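/*
 * Hypothetical usage sketch mirroring fastrpc_internal_invoke(): hold
 * the wakeup vote across the send, drop it before blocking, and rely
 * on context_notify_user() to re-vote when the response arrives.
 */
static void example_vote_window(struct fastrpc_file *fl)
{
	bool pm_awake_voted = false;

	fastrpc_pm_awake(fl->wake_enable, &pm_awake_voted);
	/* ... get_args() and fastrpc_invoke_send() would run here ... */
	fastrpc_pm_relax(&pm_awake_voted);
	/* ... then sleep in fastrpc_wait_for_completion() unvoted ... */
}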
 
 static inline int fastrpc_wait_for_response(struct smq_invoke_ctx *ctx,
 						uint32_t kernel)
@@ -2023,10 +2111,9 @@ static inline int fastrpc_wait_for_response(struct smq_invoke_ctx *ctx,
 
 	if (kernel)
 		wait_for_completion(&ctx->work);
-	else {
-		interrupted =
-		wait_for_completion_interruptible(&ctx->work);
-	}
+	else
+		interrupted = wait_for_completion_interruptible(&ctx->work);
+
 	return interrupted;
 }
 
@@ -2034,12 +2121,21 @@ static void fastrpc_wait_for_completion(struct smq_invoke_ctx *ctx,
 		 int *pInterrupted, uint32_t kernel)
 {
 	int interrupted = 0, err = 0;
-	uint32_t jj;
+	int jj;
 	bool wait_resp;
 	uint32_t wTimeout = FASTRPC_USER_EARLY_HINT_TIMEOUT;
-	uint32_t wakeTime = ctx->earlyWakeTime;
+	uint32_t wakeTime = 0;
 
-	while (ctx && !ctx->isWorkDone) {
+	if (!ctx) {
+		/* This failure is not expected */
+		err = *pInterrupted = EFAULT;
+		pr_err("Error %d: adsprpc: %s: %s: ctx is NULL, cannot wait for response\n",
+					err, current->comm, __func__);
+		return;
+	}
+	wakeTime = ctx->earlyWakeTime;
+
+	do {
 		switch (ctx->rspFlags) {
 		/* try polling on completion with timeout */
 		case USER_EARLY_SIGNAL:
@@ -2070,11 +2166,11 @@ static void fastrpc_wait_for_completion(struct smq_invoke_ctx *ctx,
 
 			/* Mark job done if poll on memory successful */
 			/* Wait for completion if poll on memory times out */
-			if (!err)
+			if (!err) {
 				ctx->isWorkDone = true;
-			else if (!ctx->isWorkDone) {
-				pr_info("poll timeout ctxid 0x%llx\n",
-					 ctx->ctxid);
+			} else if (!ctx->isWorkDone) {
+				pr_info("adsprpc: %s: %s: poll timeout for handle 0x%x, sc 0x%x\n",
+				__func__, current->comm, ctx->handle, ctx->sc);
 				interrupted = fastrpc_wait_for_response(ctx,
 									kernel);
 				*pInterrupted = interrupted;
@@ -2092,11 +2188,12 @@ static void fastrpc_wait_for_completion(struct smq_invoke_ctx *ctx,
 			break;
 
 		default:
-			pr_err("adsprpc: unsupported response flags 0x%x\n",
-				 ctx->rspFlags);
+			*pInterrupted = EBADR;
+			pr_err("Error: adsprpc: %s: unsupported response flags 0x%x for handle 0x%x, sc 0x%x\n",
+			current->comm, ctx->rspFlags, ctx->handle, ctx->sc);
 			return;
 		} /* end of switch */
-	} /* end of while loop */
+	} while (!ctx->isWorkDone);
 }
 
 static void fastrpc_update_invoke_count(uint32_t handle, int64_t *perf_counter,
@@ -2123,12 +2220,12 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 {
 	struct smq_invoke_ctx *ctx = NULL;
 	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
-	int cid = fl->cid;
-	int interrupted = 0;
-	int err = 0;
+	int err = 0, interrupted = 0, cid = fl->cid;
 	struct timespec invoket = {0};
 	int64_t *perf_counter = NULL;
+	bool pm_awake_voted = false;
 
+	fastrpc_pm_awake(fl->wake_enable, &pm_awake_voted);
 	if (fl->profile) {
 		perf_counter = getperfcounter(fl, PERF_COUNT);
 		getnstimeofday(&invoket);
@@ -2146,25 +2243,28 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 		}
 	}
 
-	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS && fl->sctx != NULL);
+	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS && fl->sctx != NULL);
 	if (err) {
-		pr_err("adsprpc: ERROR: %s: user application %s domain is not set\n",
+		pr_err("adsprpc: ERROR: %s: kernel session not initialized yet for %s\n",
 			__func__, current->comm);
-		err = -EBADR;
+		err = EBADR;
 		goto bail;
 	}
 
 	if (!kernel) {
-		VERIFY(err, 0 == context_restore_interrupted(fl, inv,
-								&ctx));
+		err = context_restore_interrupted(fl, inv, &ctx);
 		if (err)
 			goto bail;
 		if (fl->sctx->smmu.faults)
 			err = FASTRPC_ENOSUCH;
 		if (err)
 			goto bail;
-		if (ctx)
+		if (ctx) {
+			trace_fastrpc_context_restore(cid, (uint64_t)ctx,
+				ctx->msg.invoke.header.ctx,
+				ctx->handle, ctx->sc);
 			goto wait;
+		}
 	}
 
 	VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
@@ -2194,14 +2294,19 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 	if (err)
 		goto bail;
  wait:
+	fastrpc_pm_relax(&pm_awake_voted);
 	fastrpc_wait_for_completion(ctx, &interrupted, kernel);
+	pm_awake_voted = ctx->pm_awake_voted;
 	VERIFY(err, 0 == (err = interrupted));
 	if (err)
 		goto bail;
 
-	VERIFY(err, ctx->isWorkDone);
-	if (err)
+	if (!ctx->isWorkDone) {
+		err = EPROTO;
+		pr_err("Error: adsprpc: %s: %s: WorkDone state is invalid for handle 0x%x, sc 0x%x\n",
+			__func__, current->comm, invoke->handle, ctx->sc);
 		goto bail;
+	}
 
 	PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
 	inv_args(ctx);
@@ -2217,8 +2322,11 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 	if (err)
 		goto bail;
  bail:
-	if (ctx && interrupted == -ERESTARTSYS)
+	if (ctx && interrupted == -ERESTARTSYS) {
+		trace_fastrpc_context_interrupt(cid, (uint64_t)ctx,
+			ctx->msg.invoke.header.ctx, ctx->handle, ctx->sc);
 		context_save_interrupted(ctx);
-	else if (ctx)
+	} else if (ctx)
 		context_free(ctx);
 	if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
@@ -2227,6 +2335,7 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 	if (fl->profile && !interrupted)
 		fastrpc_update_invoke_count(invoke->handle, perf_counter,
 						&invoket);
+	fastrpc_pm_relax(&pm_awake_voted);
 	return err;
 }
 
@@ -2576,10 +2685,11 @@ static int fastrpc_send_cpuinfo_to_dsp(struct fastrpc_file *fl)
 }
 
 static int fastrpc_get_info_from_dsp(struct fastrpc_file *fl,
-				uint32_t *dsp_attr, uint32_t dsp_attr_size,
+				uint32_t *dsp_attr_buf,
+				uint32_t dsp_attr_buf_len,
 				uint32_t domain)
 {
-	int err = 0, dsp_cap_buff_size, dsp_support = 0;
+	int err = 0, dsp_support = 0;
 	struct fastrpc_ioctl_invoke_crc ioctl;
 	remote_arg_t ra[2];
 	struct kstat sb;
@@ -2605,7 +2715,7 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_file *fl,
 		dsp_support = 0;
 		break;
 	}
-	dsp_attr[0] = dsp_support;
+	dsp_attr_buf[0] = dsp_support;
 
 	if (dsp_support == 0)
 		goto bail;
@@ -2614,11 +2724,10 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_file *fl,
 	if (err)
 		goto bail;
 
-	dsp_cap_buff_size = dsp_attr_size - sizeof(uint32_t);
-	ra[0].buf.pv = (void *)&dsp_cap_buff_size;
-	ra[0].buf.len = sizeof(dsp_cap_buff_size);
-	ra[1].buf.pv = (void *)(&dsp_attr[1]);
-	ra[1].buf.len = dsp_cap_buff_size * sizeof(uint32_t);
+	ra[0].buf.pv = (void *)&dsp_attr_buf_len;
+	ra[0].buf.len = sizeof(dsp_attr_buf_len);
+	ra[1].buf.pv = (void *)(&dsp_attr_buf[1]);
+	ra[1].buf.len = dsp_attr_buf_len * sizeof(uint32_t);
 	ioctl.inv.handle = FASTRPC_STATIC_HANDLE_DSP_UTILITIES;
 	ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 1);
 	ioctl.inv.pra = ra;
@@ -2650,7 +2759,7 @@ static int fastrpc_get_info_from_kernel(
 		 * and cache on kernel
 		 */
 		err = fastrpc_get_info_from_dsp(fl, dsp_cap->dsp_attributes,
-				sizeof(dsp_cap->dsp_attributes),
+				FASTRPC_MAX_DSP_ATTRIBUTES - 1,
 				domain);
 		if (err)
 			goto bail;
@@ -2955,6 +3064,11 @@ static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
 	VERIFY(err, cid == fl->cid);
 	if (err)
 		goto bail;
+	if (!me->channel[fl->cid].spd[session].ispdup &&
+		me->channel[fl->cid].spd[session].pdrhandle) {
+		err = -ENOTCONN;
+		goto bail;
+	}
 	if (me->channel[fl->cid].spd[session].pdrcount !=
 		me->channel[fl->cid].spd[session].prevpdrcount) {
 		err = fastrpc_mmap_remove_ssr(fl);
@@ -2964,20 +3078,10 @@ static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
 		me->channel[fl->cid].spd[session].prevpdrcount =
 				me->channel[fl->cid].spd[session].pdrcount;
 	}
-	if (!me->channel[fl->cid].spd[session].ispdup &&
-		me->channel[fl->cid].spd[session].pdrhandle) {
-		err = -ENOTCONN;
-		goto bail;
-	}
 bail:
 	return err;
 }
 
-static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
-			     size_t len, struct fastrpc_mmap **ppmap);
-
-static void fastrpc_mmap_add(struct fastrpc_mmap *map);
-
 static inline void get_fastrpc_ioctl_mmap_64(
 			struct fastrpc_ioctl_mmap_64 *mmap64,
 			struct fastrpc_ioctl_mmap *immap)
@@ -3015,14 +3119,15 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl,
 	if (err) {
 		pr_err("adsprpc: ERROR: %s: user application %s trying to unmap without initialization\n",
 			 __func__, current->comm);
-		err = -EBADR;
+		err = EBADR;
 		goto bail;
 	}
 	mutex_lock(&fl->internal_map_mutex);
 
 	spin_lock(&fl->hlock);
 	hlist_for_each_entry_safe(rbuf, n, &fl->remote_bufs, hn_rem) {
-		if (rbuf->raddr && (rbuf->flags == ADSP_MMAP_ADD_PAGES)) {
+		if (rbuf->raddr && ((rbuf->flags == ADSP_MMAP_ADD_PAGES) ||
+				    (rbuf->flags == ADSP_MMAP_ADD_PAGES_LLC))) {
 			if ((rbuf->raddr == ud->vaddrout) &&
 				(rbuf->size == ud->size)) {
 				free = rbuf;
@@ -3077,7 +3182,7 @@ static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
 	if (err) {
 		pr_err("adsprpc: ERROR: %s: user application %s trying to unmap without initialization\n",
 			__func__, current->comm);
-		err = -EBADR;
+		err = EBADR;
 		goto bail;
 	}
 	mutex_lock(&fl->map_mutex);
@@ -3110,13 +3215,14 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
 	if (err) {
 		pr_err("adsprpc: ERROR: %s: user application %s trying to map without initialization\n",
 			__func__, current->comm);
-		err = -EBADR;
+		err = EBADR;
 		goto bail;
 	}
 	mutex_lock(&fl->internal_map_mutex);
-	if (ud->flags == ADSP_MMAP_ADD_PAGES) {
+	if ((ud->flags == ADSP_MMAP_ADD_PAGES) ||
+	    (ud->flags == ADSP_MMAP_ADD_PAGES_LLC)) {
 		if (ud->vaddrin) {
-			err = -EINVAL;
+			err = EINVAL;
 			pr_err("adsprpc: %s: %s: ERROR: adding user allocated pages is not supported\n",
 					current->comm, __func__);
 			goto bail;
@@ -3125,6 +3231,8 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
 					DMA_ATTR_DELAYED_UNMAP |
 					DMA_ATTR_NO_KERNEL_MAPPING |
 					DMA_ATTR_FORCE_NON_COHERENT;
+		if (ud->flags == ADSP_MMAP_ADD_PAGES_LLC)
+			dma_attr |= DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
 		err = fastrpc_buf_alloc(fl, ud->size, dma_attr, ud->flags,
 								1, &rbuf);
 		if (err)
@@ -3187,9 +3295,12 @@ static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
 				break;
 			}
 		}
-		VERIFY(err, idx < chan->sesscount);
-		if (err)
+		if (idx >= chan->sesscount) {
+			err = EUSERS;
+			pr_err("adsprpc: ERROR %d: %s: max concurrent sessions limit (%d) already reached on %s\n",
+				err, __func__, chan->sesscount, chan->subsys);
 			goto bail;
+		}
 		chan->session[idx].smmu.faults = 0;
 	} else {
 		VERIFY(err, me->dev != NULL);
@@ -3296,27 +3407,31 @@ static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
 	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)data;
 	struct smq_invoke_rspv2 *rspv2 = NULL;
 	struct fastrpc_apps *me = &gfa;
-	uint32_t index, flags = 0, earlyWakeTime = 0;
-	int err = 0;
+	uint32_t index, rspFlags = 0, earlyWakeTime = 0;
+	int err = 0, cid = -1;
 
 	VERIFY(err, (rsp && len >= sizeof(*rsp)));
 	if (err)
 		goto bail;
 
-#if IS_ENABLED(CONFIG_ADSPRPC_DEBUG)
-	int cid = -1;
-
-	cid = get_cid_from_rpdev(rpdev);
-	if (cid >= 0 && cid < NUM_CHANNELS) {
-		LOG_FASTRPC_GLINK_MSG(gcinfo[cid].ipc_log_ctx,
-			"recvd pkt %pK (sz %d): ctx 0x%llx, retVal %d",
-			data, len, rsp->ctx, rsp->retval);
-	}
-#endif
-
 	if (len >= sizeof(struct smq_invoke_rspv2))
 		rspv2 = (struct smq_invoke_rspv2 *)data;
 
+	if (rspv2) {
+		earlyWakeTime = rspv2->earlyWakeTime;
+		rspFlags = rspv2->flags;
+	}
+	cid = get_cid_from_rpdev(rpdev);
+	trace_fastrpc_rpmsg_response(cid, rsp->ctx,
+		rsp->retval, rspFlags, earlyWakeTime);
+#if IS_ENABLED(CONFIG_ADSPRPC_DEBUG)
+	if (cid >= 0 && cid < NUM_CHANNELS) {
+		LOG_FASTRPC_GLINK_MSG(gcinfo[cid].ipc_log_ctx,
+		"recvd pkt %pK (sz %d): ctx 0x%llx, retVal %d, flags %u, earlyWake %u",
+		data, len, rsp->ctx, rsp->retval, rspFlags, earlyWakeTime);
+	}
+#endif
+
 	index = (uint32_t)((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
 	VERIFY(err, index < FASTRPC_CTX_MAX);
 	if (err)
@@ -3335,11 +3450,9 @@ static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
 		VERIFY(err, rspv2->version == FASTRPC_RSP_VERSION2);
 		if (err)
 			goto bail;
-		flags = rspv2->flags;
-		earlyWakeTime = rspv2->earlyWakeTime;
 	}
 	context_notify_user(me->ctxtable[index], rsp->retval,
-				 flags, earlyWakeTime);
+				 rspFlags, earlyWakeTime);
 bail:
 	if (err)
 		pr_err("adsprpc: ERROR: %s: invalid response (data %pK, len %d) from remote subsystem (err %d)\n",
@@ -3687,9 +3800,9 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
 
 	VERIFY(err, fl && fl->sctx && fl->cid >= 0 && fl->cid < NUM_CHANNELS);
 	if (err) {
-		pr_err("adsprpc: ERROR: %s: user application %s domain is not set\n",
+		pr_err("adsprpc: ERROR: %s: kernel session not initialized yet for %s\n",
 			__func__, current->comm);
-		err = -EBADR;
+		err = EBADR;
 		return err;
 	}
 	cid = fl->cid;
@@ -3829,8 +3942,8 @@ static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
 		fl->cid = cid;
 		fl->ssrcount = fl->apps->channel[cid].ssrcount;
 		mutex_lock(&fl->apps->channel[cid].smd_mutex);
-		VERIFY(err, 0 == (err = fastrpc_session_alloc_locked(
-				&fl->apps->channel[cid], 0, &fl->sctx)));
+		err = fastrpc_session_alloc_locked(&fl->apps->channel[cid],
+				0, &fl->sctx);
 		mutex_unlock(&fl->apps->channel[cid].smd_mutex);
 		if (err)
 			goto bail;
@@ -3848,6 +3961,9 @@ static int fastrpc_internal_control(struct fastrpc_file *fl,
 {
 	int err = 0;
 	unsigned int latency;
+	cpumask_t mask;
+	struct fastrpc_apps *me = &gfa;
+	u32 len = me->silvercores.corecount, i = 0;
 
 	VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
 	if (err)
@@ -3863,6 +3979,12 @@ static int fastrpc_internal_control(struct fastrpc_file *fl,
 		VERIFY(err, latency != 0);
 		if (err)
 			goto bail;
+		cpumask_clear(&mask);
+		for (i = 0; i < len; i++)
+			cpumask_set_cpu(me->silvercores.coreno[i], &mask);
+		fl->pm_qos_req.type = PM_QOS_REQ_AFFINE_CORES;
+		cpumask_copy(&fl->pm_qos_req.cpus_affine, &mask);
+
 		if (!fl->qos_request) {
 			pm_qos_add_request(&fl->pm_qos_req,
 				PM_QOS_CPU_DMA_LATENCY, latency);
@@ -3876,8 +3998,11 @@ static int fastrpc_internal_control(struct fastrpc_file *fl,
 	case FASTRPC_CONTROL_KALLOC:
 		cp->kalloc.kalloc_support = 1;
 		break;
+	case FASTRPC_CONTROL_WAKELOCK:
+		fl->wake_enable = cp->wp.enable;
+		break;
 	default:
-		err = -ENOTTY;
+		err = -EBADRQC;
 		break;
 	}
 bail:
@@ -3936,7 +4061,7 @@ static int fastrpc_getperf(struct fastrpc_ioctl_perf *ioctl_perf,
 				param, sizeof(*ioctl_perf));
 	if (err)
 		goto bail;
-	ioctl_perf->numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
+	ioctl_perf->numkeys = PERF_KEY_MAX;
 	if (ioctl_perf->keys) {
 		char *keys = PERF_KEYS;
 
@@ -3991,6 +4116,7 @@ static int fastrpc_control(struct fastrpc_ioctl_control *cp,
 bail:
 	return err;
 }
+
 static int fastrpc_get_dsp_info(struct fastrpc_ioctl_dsp_capabilities *dsp_cap,
 				void *param, struct fastrpc_file *fl)
 {
@@ -4200,7 +4326,7 @@ static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
 	ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
 	cid = ctx - &me->channel[0];
 	if (code == SUBSYS_BEFORE_SHUTDOWN) {
-		pr_debug("adsprpc: %s: %s subsystem is restarting\n",
+		pr_info("adsprpc: %s: %s subsystem is restarting\n",
 			__func__, gcinfo[cid].subsys);
 		mutex_lock(&me->channel[cid].smd_mutex);
 		ctx->ssrcount++;
@@ -4215,10 +4341,10 @@ static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
 				me->channel[RH_CID].ramdumpenabled = 1;
 			}
 		}
-		pr_debug("adsprpc: %s: received RAMDUMP notification for %s\n",
+		pr_info("adsprpc: %s: received RAMDUMP notification for %s\n",
 			__func__, gcinfo[cid].subsys);
 	} else if (code == SUBSYS_AFTER_POWERUP) {
-		pr_debug("adsprpc: %s: %s subsystem is up\n",
+		pr_info("adsprpc: %s: %s subsystem is up\n",
 			__func__, gcinfo[cid].subsys);
 		ctx->issubsystemup = 1;
 	}
@@ -4235,7 +4361,7 @@ static int fastrpc_pdr_notifier_cb(struct notifier_block *pdrnb,
 
 	spd = container_of(pdrnb, struct fastrpc_static_pd, pdrnb);
 	if (code == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01) {
-		pr_debug("adsprpc: %s: %s (%s) is down for PDR on %s\n",
+		pr_info("adsprpc: %s: %s (%s) is down for PDR on %s\n",
 			__func__, spd->spdname, spd->servloc_name,
 			gcinfo[spd->cid].subsys);
 		mutex_lock(&me->channel[spd->cid].smd_mutex);
@@ -4253,11 +4379,11 @@ static int fastrpc_pdr_notifier_cb(struct notifier_block *pdrnb,
 				me->channel[RH_CID].ramdumpenabled = 1;
 			}
 		}
-		pr_debug("adsprpc: %s: received %s RAMDUMP notification for %s (%s)\n",
+		pr_info("adsprpc: %s: received %s RAMDUMP notification for %s (%s)\n",
 			__func__, gcinfo[spd->cid].subsys,
 			spd->spdname, spd->servloc_name);
 	} else if (code == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
-		pr_debug("adsprpc: %s: %s (%s) is up on %s\n",
+		pr_info("adsprpc: %s: %s (%s) is up on %s\n",
 			__func__, spd->spdname, spd->servloc_name,
 			gcinfo[spd->cid].subsys);
 		spd->ispdup = 1;
@@ -4323,12 +4449,12 @@ static int fastrpc_get_service_location_notify(struct notifier_block *nb,
 	}
 
 	if (curr_state == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
-		pr_debug("adsprpc: %s: %s (%s) PDR service for %s is up\n",
+		pr_info("adsprpc: %s: %s (%s) PDR service for %s is up\n",
 			__func__, spd->servloc_name, pdr->domain_list[i].name,
 			gcinfo[spd->cid].subsys);
 		spd->ispdup = 1;
 	} else if (curr_state == SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01) {
-		pr_debug("adsprpc: %s: %s (%s) PDR service for %s is uninitialized\n",
+		pr_info("adsprpc: %s: %s (%s) PDR service for %s is uninitialized\n",
 			__func__, spd->servloc_name, pdr->domain_list[i].name,
 			gcinfo[spd->cid].subsys);
 	}
@@ -4470,6 +4596,39 @@ static void init_secure_vmid_list(struct device *dev, char *prop_name,
 	}
 }
 
+static void init_qos_cores_list(struct device *dev, char *prop_name,
+						struct qos_cores *silvercores)
+{
+	int err = 0;
+	u32 len = 0, i = 0;
+	u32 *coreslist = NULL;
+
+	if (!of_find_property(dev->of_node, prop_name, &len))
+		goto bail;
+	if (len == 0)
+		goto bail;
+	len /= sizeof(u32);
+	VERIFY(err, NULL != (coreslist = kcalloc(len, sizeof(u32),
+						 GFP_KERNEL)));
+	if (err)
+		goto bail;
+	for (i = 0; i < len; i++) {
+		err = of_property_read_u32_index(dev->of_node, prop_name, i,
+								&coreslist[i]);
+		if (err) {
+			pr_err("adsprpc: %s: failed to read QOS cores list\n",
+								 __func__);
+			goto bail;
+		}
+	}
+	silvercores->coreno = coreslist;
+	silvercores->corecount = len;
+bail:
+	if (err)
+		kfree(coreslist);
+}
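/*
 * Example (hypothetical values) of the devicetree property parsed by
 * init_qos_cores_list() above; each cell is a CPU number later added
 * to the PM QoS affinity mask in fastrpc_internal_control():
 *
 *	qcom,qos-cores = <0 1 2 3>;
 */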
+
 static void configure_secure_channels(uint32_t secure_domains)
 {
 	struct fastrpc_apps *me = &gfa;
@@ -4507,6 +4666,8 @@ static int fastrpc_probe(struct platform_device *pdev)
 					"qcom,msm-fastrpc-compute")) {
 		init_secure_vmid_list(dev, "qcom,adsp-remoteheap-vmid",
 							&gcinfo[0].rhvm);
+		init_qos_cores_list(dev, "qcom,qos-cores",
+							&me->silvercores);
 
 
 		of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
@@ -4587,7 +4748,7 @@ static int fastrpc_probe(struct platform_device *pdev)
 				__func__, ret, AUDIO_PDR_ADSP_SERVICE_NAME,
 				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME);
 		else
-			pr_debug("adsprpc: %s: service location enabled for %s (%s)\n",
+			pr_info("adsprpc: %s: service location enabled for %s (%s)\n",
 				__func__, AUDIO_PDR_ADSP_SERVICE_NAME,
 				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME);
 	}
@@ -4608,7 +4769,7 @@ static int fastrpc_probe(struct platform_device *pdev)
 				__func__, ret, SENSORS_PDR_SLPI_SERVICE_NAME,
 				SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME);
 		else
-			pr_debug("adsprpc: %s: service location enabled for %s (%s)\n",
+			pr_info("adsprpc: %s: service location enabled for %s (%s)\n",
 				__func__, SENSORS_PDR_SLPI_SERVICE_NAME,
 				SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME);
 	}
@@ -4629,7 +4790,7 @@ static int fastrpc_probe(struct platform_device *pdev)
 				__func__, ret, SENSORS_PDR_SLPI_SERVICE_NAME,
 				SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME);
 		else
-			pr_debug("adsprpc: %s: service location enabled for %s (%s)\n",
+			pr_info("adsprpc: %s: service location enabled for %s (%s)\n",
 				__func__, SENSORS_PDR_SLPI_SERVICE_NAME,
 				SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME);
 	}
@@ -4766,11 +4927,19 @@ static int __init fastrpc_device_init(void)
 
 	err = register_rpmsg_driver(&fastrpc_rpmsg_client);
 	if (err) {
-		pr_err("adsprpc: register_rpmsg_driver: failed with err %d\n",
-			err);
+		pr_err("adsprpc: %s: register_rpmsg_driver failed with err %d\n",
+			__func__, err);
 		goto device_create_bail;
 	}
 	me->rpmsg_register = 1;
+
+	me->wake_source = wakeup_source_register("adsprpc");
+	VERIFY(err, !IS_ERR_OR_NULL(me->wake_source));
+	if (err) {
+		pr_err("adsprpc: Error: %s: wakeup_source_register failed with err %ld\n",
+					__func__, PTR_ERR(me->wake_source));
+		goto device_create_bail;
+	}
 	return 0;
 device_create_bail:
 	for (i = 0; i < NUM_CHANNELS; i++) {
@@ -4821,6 +4990,8 @@ static void __exit fastrpc_device_exit(void)
 	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
 	if (me->rpmsg_register == 1)
 		unregister_rpmsg_driver(&fastrpc_rpmsg_client);
+	if (me->wake_source)
+		wakeup_source_unregister(me->wake_source);
 	debugfs_remove_recursive(debugfs_root);
 }
 
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index 571f585..c1e5af9 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -234,23 +234,32 @@ struct fastrpc_ioctl_perf {			/* kernel performance data */
 	uintptr_t keys;
 };
 
-#define FASTRPC_CONTROL_LATENCY	(1)
+enum fastrpc_control_type {
+	FASTRPC_CONTROL_LATENCY		=	1,
+	FASTRPC_CONTROL_SMMU		=	2,
+	FASTRPC_CONTROL_KALLOC		=	3,
+	FASTRPC_CONTROL_WAKELOCK	=	4,
+};
+
 struct fastrpc_ctrl_latency {
 	uint32_t enable;	/* latency control enable */
 	uint32_t latency;	/* latency request in us */
 };
 
-#define FASTRPC_CONTROL_KALLOC	(3)
 struct fastrpc_ctrl_kalloc {
 	uint32_t kalloc_support;  /* Remote memory allocation from kernel */
 };
 
-/* FASTRPC_CONTROL value 2 is reserved in user space */
+struct fastrpc_ctrl_wakelock {
+	uint32_t enable;	/* wakelock control enable */
+};
+
 struct fastrpc_ioctl_control {
 	uint32_t req;
 	union {
 		struct fastrpc_ctrl_latency lp;
 		struct fastrpc_ctrl_kalloc kalloc;
+		struct fastrpc_ctrl_wakelock wp;
 	};
 };
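The new FASTRPC_CONTROL_WAKELOCK request rides the existing req/union scheme above. A minimal userspace sketch, assuming this header is included, that the driver's FASTRPC_IOCTL_CONTROL request (defined elsewhere in this header) is the dispatch point, and that fd is an already-open handle to the fastrpc device node:

	#include <sys/ioctl.h>

	/* hypothetical helper; ioctl request name and fd origin assumed */
	static int fastrpc_enable_wakelock(int fd)
	{
		struct fastrpc_ioctl_control ctrl = {
			.req = FASTRPC_CONTROL_WAKELOCK,
			.wp  = { .enable = 1 },	/* hold a wakeup source across RPC */
		};

		return ioctl(fd, FASTRPC_IOCTL_CONTROL, &ctrl);
	}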
 
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index 66d7bd7..dba4802 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -19,6 +19,7 @@
 
 #define DIAG_SET_FEATURE_MASK(x) (feature_bytes[(x)/8] |= (1 << (x & 0x7)))
 
+#define MAX_USERSPACE_BUF_SIZ	100000
 struct diag_mask_info msg_mask;
 struct diag_mask_info msg_bt_mask;
 struct diag_mask_info log_mask;
@@ -56,6 +57,9 @@ static const struct diag_ssid_range_t msg_mask_tbl[] = {
 	{ .ssid_first = MSG_SSID_25, .ssid_last = MSG_SSID_25_LAST }
 };
 
+static int diag_save_user_msg_mask(struct diag_md_session_t *info);
+static int diag_save_user_log_mask(struct diag_md_session_t *info);
+
 static int __diag_multisim_mask_init(struct diag_mask_info *mask_info,
 	int mask_len, int subid_index);
 
@@ -222,6 +226,7 @@ static void diag_send_log_mask_update(uint8_t peripheral,
 			pr_debug("diag: In %s, invalid log_mask status\n",
 				__func__);
 			mutex_unlock(&mask->lock);
+			mutex_unlock(&mask_info->lock);
 			return;
 		}
 		if (sub_index >= 0 && preset_id > 0)
@@ -1064,7 +1069,7 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
 	uint8_t msg_mask_tbl_count = 0;
 	struct diag_ssid_range_t ssid_range;
 	struct diag_multisim_masks *ms_ptr = NULL;
-	int preset = 0;
+	int preset = 0, ret_val = 0;
 
 	mutex_lock(&driver->md_session_lock);
 	info = diag_md_session_get_pid(pid);
@@ -1187,8 +1192,7 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
 	mutex_unlock(&driver->msg_mask_lock);
 	mutex_unlock(&mask_info->lock);
 	mutex_unlock(&driver->md_session_lock);
-	if (diag_check_update(APPS_DATA, pid))
-		diag_update_userspace_clients(MSG_MASKS_TYPE);
+
 	/*
 	 * Apps processor must send the response to this command. Frame the
 	 * response.
@@ -1212,11 +1216,23 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
 		goto end;
 	if (mask_size + write_len > dest_len)
 		mask_size = dest_len - write_len;
-	memcpy(dest_buf + write_len, src_buf + header_len, mask_size);
+	if (mask_size && src_len >= header_len + mask_size)
+		memcpy(dest_buf + write_len, src_buf + header_len, mask_size);
 	write_len += mask_size;
 	for (i = 0; i < NUM_MD_SESSIONS; i++) {
-		if (i == APPS_DATA)
+		if (i == APPS_DATA) {
+			mutex_lock(&driver->md_session_lock);
+			info = diag_md_session_get_peripheral(DIAG_LOCAL_PROC,
+								APPS_DATA);
+			ret_val = diag_save_user_msg_mask(info);
+			if (ret_val < 0)
+				pr_err("diag: unable to save msg mask to update userspace clients err:%d\n",
+					ret_val);
+			mutex_unlock(&driver->md_session_lock);
+			if (diag_check_update(APPS_DATA, pid))
+				diag_update_userspace_clients(MSG_MASKS_TYPE);
 			continue;
+		}
 		if (!diag_check_update(i, pid))
 			continue;
 		if (i > NUM_PERIPHERALS)
@@ -1257,7 +1273,7 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
 	uint8_t msg_mask_tbl_count = 0;
 	uint32_t rt_mask;
 	struct diag_multisim_masks *ms_ptr = NULL;
-	int preset = 0;
+	int preset = 0, ret_val = 0;
 
 	mutex_lock(&driver->md_session_lock);
 	info = diag_md_session_get_pid(pid);
@@ -1326,8 +1342,6 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
 	mutex_unlock(&driver->msg_mask_lock);
 	mutex_unlock(&mask_info->lock);
 	mutex_unlock(&driver->md_session_lock);
-	if (diag_check_update(APPS_DATA, pid))
-		diag_update_userspace_clients(MSG_MASKS_TYPE);
 
 	/*
 	 * Apps processor must send the response to this command. Frame the
@@ -1348,8 +1362,19 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
 		write_len += header_len;
 	}
 	for (i = 0; i < NUM_MD_SESSIONS; i++) {
-		if (i == APPS_DATA)
+		if (i == APPS_DATA) {
+			mutex_lock(&driver->md_session_lock);
+			info = diag_md_session_get_peripheral(DIAG_LOCAL_PROC,
+								APPS_DATA);
+			ret_val = diag_save_user_msg_mask(info);
+			if (ret_val < 0)
+				pr_err("diag: unable to save msg mask to update userspace clients err:%d\n",
+					ret_val);
+			mutex_unlock(&driver->md_session_lock);
+			if (diag_check_update(APPS_DATA, pid))
+				diag_update_userspace_clients(MSG_MASKS_TYPE);
 			continue;
+		}
 		if (!diag_check_update(i, pid))
 			continue;
 		if (i > NUM_PERIPHERALS)
@@ -1889,7 +1914,7 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
 	int i, peripheral, write_len = 0;
 	int status = LOG_STATUS_SUCCESS;
 	int sub_index = INVALID_INDEX, read_len = 0, payload_len = 0;
-	int rsp_header_len = 0, preset = 0;
+	int rsp_header_len = 0, preset = 0, ret_val = 0;
 	uint32_t mask_size = 0;
 	struct diag_log_config_req_t *req;
 	struct diag_log_config_set_rsp_t rsp;
@@ -2031,8 +2056,6 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
 	}
 	mutex_unlock(&mask_info->lock);
 	mutex_unlock(&driver->md_session_lock);
-	if (diag_check_update(APPS_DATA, pid))
-		diag_update_userspace_clients(LOG_MASKS_TYPE);
 
 	/*
 	 * Apps processor must send the response to this command. Frame the
@@ -2077,8 +2100,19 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
 	write_len += payload_len;
 
 	for (i = 0; i < NUM_MD_SESSIONS; i++) {
-		if (i == APPS_DATA)
+		if (i == APPS_DATA) {
+			mutex_lock(&driver->md_session_lock);
+			info = diag_md_session_get_peripheral(DIAG_LOCAL_PROC,
+								APPS_DATA);
+			ret_val = diag_save_user_log_mask(info);
+			if (ret_val < 0)
+				pr_err("diag: unable to save log mask to update userspace clients err:%d\n",
+					ret_val);
+			mutex_unlock(&driver->md_session_lock);
+			if (diag_check_update(APPS_DATA, pid))
+				diag_update_userspace_clients(LOG_MASKS_TYPE);
 			continue;
+		}
 		if (!diag_check_update(i, pid))
 			continue;
 		if (i > NUM_PERIPHERALS)
@@ -2108,7 +2142,7 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
 	struct diag_log_config_rsp_sub_t rsp;
 	struct diag_log_config_rsp_sub_t *req;
 	int write_len = 0, i, peripheral;
-	int preset = 0, sub_index = INVALID_INDEX;
+	int preset = 0, sub_index = INVALID_INDEX, ret_val = 0;
 	struct diag_md_session_t *info = NULL;
 	struct diag_multisim_masks *ms_ptr = NULL;
 
@@ -2163,8 +2197,6 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
 	}
 	mask_info->status = DIAG_CTRL_MASK_ALL_DISABLED;
 	mutex_unlock(&driver->md_session_lock);
-	if (diag_check_update(APPS_DATA, pid))
-		diag_update_userspace_clients(LOG_MASKS_TYPE);
 
 	/*
 	 * Apps processor must send the response to this command. Frame the
@@ -2194,8 +2226,19 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
 		preset = req->preset_id;
 	}
 	for (i = 0; i < NUM_MD_SESSIONS; i++) {
-		if (i == APPS_DATA)
+		if (i == APPS_DATA) {
+			mutex_lock(&driver->md_session_lock);
+			info = diag_md_session_get_peripheral(DIAG_LOCAL_PROC,
+								APPS_DATA);
+			ret_val = diag_save_user_log_mask(info);
+			if (ret_val < 0)
+				pr_err("diag: unable to save log mask to update userspace clients err:%d\n",
+					ret_val);
+			mutex_unlock(&driver->md_session_lock);
+			if (diag_check_update(APPS_DATA, pid))
+				diag_update_userspace_clients(LOG_MASKS_TYPE);
 			continue;
+		}
 		if (!diag_check_update(i, pid))
 			continue;
 		if (i > NUM_PERIPHERALS)
@@ -2477,6 +2520,17 @@ static int __diag_mask_init(struct diag_mask_info *mask_info, int mask_len,
 			return -ENOMEM;
 		}
 		kmemleak_not_leak(mask_info->update_buf);
+		mask_info->update_buf_client = kzalloc(MAX_USERSPACE_BUF_SIZ,
+							GFP_KERNEL);
+		if (!mask_info->update_buf_client) {
+			kfree(mask_info->update_buf);
+			mask_info->update_buf = NULL;
+			kfree(mask_info->ptr);
+			mask_info->ptr = NULL;
+			return -ENOMEM;
+		}
+		kmemleak_not_leak(mask_info->update_buf_client);
+		mask_info->update_buf_client_len = 0;
 	}
 	return 0;
 }
@@ -2491,6 +2545,8 @@ static void __diag_mask_exit(struct diag_mask_info *mask_info)
 	mask_info->ptr = NULL;
 	kfree(mask_info->update_buf);
 	mask_info->update_buf = NULL;
+	kfree(mask_info->update_buf_client);
+	mask_info->update_buf_client = NULL;
 	mutex_unlock(&mask_info->lock);
 }
 
@@ -2626,6 +2682,7 @@ void diag_log_mask_free(struct diag_mask_info *mask_info)
 static int diag_msg_mask_init(void)
 {
 	int err = 0, i;
+	struct diag_md_session_t *session_info = NULL;
 
 	mutex_init(&msg_mask.lock);
 	err = __diag_mask_init(&msg_mask, MSG_MASK_SIZE, APPS_BUF_SIZE);
@@ -2643,6 +2700,14 @@ static int diag_msg_mask_init(void)
 		driver->max_ssid_count[i] = 0;
 	mutex_unlock(&driver->msg_mask_lock);
 
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_peripheral(DIAG_LOCAL_PROC,
+							APPS_DATA);
+	err = diag_save_user_msg_mask(session_info);
+	if (err < 0)
+		pr_err("diag: unable to save msg mask to update userspace clients err:%d\n",
+			err);
+	mutex_unlock(&driver->md_session_lock);
 	return 0;
 }
 
@@ -2830,6 +2895,8 @@ static void diag_msg_mask_exit(void)
 		ms_ptr = ms_ptr->next;
 	}
 	msg_mask.ms_ptr = NULL;
+	kfree(msg_mask.update_buf_client);
+	msg_mask.update_buf_client = NULL;
 	mutex_unlock(&driver->msg_mask_lock);
 }
 
@@ -2871,6 +2938,7 @@ static void diag_build_time_mask_exit(void)
 static int diag_log_mask_init(void)
 {
 	int err = 0, i;
+	struct diag_md_session_t *session_info = NULL;
 
 	mutex_init(&log_mask.lock);
 	err = __diag_mask_init(&log_mask, LOG_MASK_SIZE, APPS_BUF_SIZE);
@@ -2883,7 +2951,14 @@ static int diag_log_mask_init(void)
 
 	for (i = 0; i < NUM_PERIPHERALS; i++)
 		driver->num_equip_id[i] = 0;
-
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_peripheral(DIAG_LOCAL_PROC,
+							APPS_DATA);
+	err = diag_save_user_log_mask(session_info);
+	if (err < 0)
+		pr_err("diag: unable to save log mask to update userspace clients err:%d\n",
+			err);
+	mutex_unlock(&driver->md_session_lock);
 	return 0;
 }
 
@@ -2916,6 +2991,8 @@ static void diag_log_mask_exit(void)
 		ms_ptr = ms_ptr->next;
 	}
 	log_mask.ms_ptr = NULL;
+	kfree(log_mask.update_buf_client);
+	log_mask.update_buf_client = NULL;
 }
 
 static int diag_event_mask_init(void)
@@ -3023,6 +3100,59 @@ static void diag_event_mask_exit(void)
 int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
 			       struct diag_md_session_t *info)
 {
+	struct diag_mask_info *mask_info = NULL;
+	int err = 0;
+
+	if (!buf || count == 0)
+		return -EINVAL;
+	mask_info = (!info) ? &msg_mask : info->msg_mask;
+	if (!mask_info)
+		return -EIO;
+
+	if (!mask_info->ptr || !mask_info->update_buf_client) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK, mask_info->update_buf_client: %pK\n",
+			__func__, mask_info->ptr, mask_info->update_buf_client);
+		return -EINVAL;
+	}
+
+	err = copy_to_user(buf, mask_info->update_buf_client,
+				mask_info->update_buf_client_len);
+	if (err) {
+		pr_err("diag: In %s Unable to send msg masks to user space clients, err: %d\n",
+		       __func__, err);
+	}
+	return err ? -EFAULT : mask_info->update_buf_client_len;
+}
+
+int diag_copy_to_user_log_mask(char __user *buf, size_t count,
+			       struct diag_md_session_t *info)
+{
+	struct diag_mask_info *mask_info = NULL;
+	int err = 0;
+
+	if (!buf || count == 0)
+		return -EINVAL;
+	mask_info = (!info) ? &log_mask : info->log_mask;
+	if (!mask_info)
+		return -EIO;
+
+	if (!mask_info->ptr || !mask_info->update_buf_client) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK, mask_info->update_buf_client: %pK\n",
+			__func__, mask_info->ptr, mask_info->update_buf_client);
+		return -EINVAL;
+	}
+
+	err = copy_to_user(buf, mask_info->update_buf_client,
+				mask_info->update_buf_client_len);
+	if (err) {
+		pr_err("diag: In %s Unable to send log masks to user space clients, err: %d\n",
+		       __func__, err);
+	}
+	return err ? -EFAULT : mask_info->update_buf_client_len;
+}
+
+static int diag_save_user_msg_mask(struct diag_md_session_t *info)
+{
 	int i, err = 0, len = 0;
 	int copy_len = 0, total_len = 0;
 	struct diag_msg_mask_userspace_t header;
@@ -3031,9 +3161,6 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
 	unsigned char *ptr = NULL;
 	uint8_t msg_mask_tbl_count = 0;
 
-	if (!buf || count == 0)
-		return -EINVAL;
-
 	mask_info = (!info) ? &msg_mask : info->msg_mask;
 	if (!mask_info)
 		return -EIO;
@@ -3079,27 +3206,23 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
 		len += copy_len;
 		mutex_unlock(&mask->lock);
 		/* + sizeof(int) to account for data_type already in buf */
-		if (total_len + sizeof(int) + len > count) {
-			pr_err("diag: In %s, unable to send msg masks to user space, total_len: %d, count: %zu\n",
-			       __func__, total_len, count);
+		if (total_len + sizeof(int) + len > MAX_USERSPACE_BUF_SIZ) {
+			pr_err("diag: In %s, unable to save msg masks for user space, total_len: %d\n",
+			       __func__, total_len);
 			err = -ENOMEM;
 			break;
 		}
-		err = copy_to_user(buf + total_len, (void *)ptr, len);
-		if (err) {
-			pr_err("diag: In %s Unable to send msg masks to user space clients, err: %d\n",
-			       __func__, err);
-			break;
-		}
+		memcpy(mask_info->update_buf_client + total_len,
+			(void *)ptr, len);
 		total_len += len;
 	}
+	mask_info->update_buf_client_len = total_len;
 	mutex_unlock(&driver->msg_mask_lock);
 	mutex_unlock(&mask_info->lock);
 	return err ? err : total_len;
 }
 
-int diag_copy_to_user_log_mask(char __user *buf, size_t count,
-			       struct diag_md_session_t *info)
+static int diag_save_user_log_mask(struct diag_md_session_t *info)
 {
 	int i, err = 0, len = 0;
 	int copy_len = 0, total_len = 0;
@@ -3108,9 +3231,6 @@ int diag_copy_to_user_log_mask(char __user *buf, size_t count,
 	struct diag_mask_info *mask_info = NULL;
 	unsigned char *ptr = NULL;
 
-	if (!buf || count == 0)
-		return -EINVAL;
-
 	mask_info = (!info) ? &log_mask : info->log_mask;
 	if (!mask_info)
 		return -EIO;
@@ -3150,20 +3270,17 @@ int diag_copy_to_user_log_mask(char __user *buf, size_t count,
 		len += copy_len;
 		mutex_unlock(&mask->lock);
 		/* + sizeof(int) to account for data_type already in buf */
-		if (total_len + sizeof(int) + len > count) {
-			pr_err("diag: In %s, unable to send log masks to user space, total_len: %d, count: %zu\n",
-			       __func__, total_len, count);
+		if (total_len + sizeof(int) + len > MAX_USERSPACE_BUF_SIZ) {
+			pr_err("diag: In %s, unable to save log masks for user space, total_len: %d\n",
+			       __func__, total_len);
 			err = -ENOMEM;
 			break;
 		}
-		err = copy_to_user(buf + total_len, (void *)ptr, len);
-		if (err) {
-			pr_err("diag: In %s Unable to send log masks to user space clients, err: %d\n",
-			       __func__, err);
-			break;
-		}
+		memcpy(mask_info->update_buf_client + total_len,
+			(void *)ptr, len);
 		total_len += len;
 	}
+	mask_info->update_buf_client_len = total_len;
 	mutex_unlock(&mask_info->lock);
 
 	return err ? err : total_len;
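The diag_masks.c changes split mask delivery in two: diag_save_user_msg_mask()/diag_save_user_log_mask() rebuild a kernel-side snapshot (update_buf_client) under the mask locks whenever a mask changes, while diag_copy_to_user_msg_mask()/diag_copy_to_user_log_mask() now serve readers from that snapshot without walking the mask tables. A reduced sketch of the cache-then-copy pattern, with hypothetical names:

	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/mutex.h>
	#include <linux/string.h>
	#include <linux/uaccess.h>

	#define CACHE_SIZ 100000	/* mirrors MAX_USERSPACE_BUF_SIZ */

	static char mask_cache[CACHE_SIZ];
	static int mask_cache_len;
	static DEFINE_MUTEX(cache_lock);

	/* rebuild the snapshot; called whenever a mask is modified */
	static int save_mask_snapshot(const void *src, int len)
	{
		if (len < 0 || len > CACHE_SIZ)
			return -ENOMEM;
		mutex_lock(&cache_lock);
		memcpy(mask_cache, src, len);
		mask_cache_len = len;
		mutex_unlock(&cache_lock);
		return len;
	}

	/* serve a reader from the snapshot; no mask-table locks held */
	static ssize_t read_mask_snapshot(char __user *buf, size_t count)
	{
		ssize_t len;

		mutex_lock(&cache_lock);
		len = min_t(size_t, count, mask_cache_len);
		if (copy_to_user(buf, mask_cache, len))
			len = -EFAULT;
		mutex_unlock(&cache_lock);
		return len;
	}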
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index 0481222..aa77fb78 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -177,7 +177,7 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
 {
 	int i, peripheral, pid = 0;
 	uint8_t found = 0;
-	unsigned long flags;
+	unsigned long flags, flags_sec;
 	struct diag_md_info *ch = NULL;
 	struct diag_md_session_t *session_info = NULL;
 
@@ -209,6 +209,16 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
 	}
 
 	spin_lock_irqsave(&ch->lock, flags);
+	if (peripheral == APPS_DATA) {
+		spin_lock_irqsave(&driver->diagmem_lock, flags_sec);
+		if (!hdlc_data.allocated && !non_hdlc_data.allocated) {
+			spin_unlock_irqrestore(&driver->diagmem_lock,
+				flags_sec);
+			spin_unlock_irqrestore(&ch->lock, flags);
+			mutex_unlock(&driver->md_session_lock);
+			return -EINVAL;
+		}
+	}
 	for (i = 0; i < ch->num_tbl_entries && !found; i++) {
 		if (ch->tbl[i].buf != buf)
 			continue;
@@ -220,14 +230,16 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
 		ch->tbl[i].len = 0;
 		ch->tbl[i].ctx = 0;
 	}
-	spin_unlock_irqrestore(&ch->lock, flags);
 
 	if (found) {
+		if (peripheral == APPS_DATA)
+			spin_unlock_irqrestore(&driver->diagmem_lock,
+				flags_sec);
+		spin_unlock_irqrestore(&ch->lock, flags);
 		mutex_unlock(&driver->md_session_lock);
 		return -ENOMEM;
 	}
 
-	spin_lock_irqsave(&ch->lock, flags);
 	for (i = 0; i < ch->num_tbl_entries && !found; i++) {
 		if (ch->tbl[i].len == 0) {
 			ch->tbl[i].buf = buf;
@@ -237,6 +249,8 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
 			diag_ws_on_read(DIAG_WS_MUX, len);
 		}
 	}
+	if (peripheral == APPS_DATA)
+		spin_unlock_irqrestore(&driver->diagmem_lock, flags_sec);
 	spin_unlock_irqrestore(&ch->lock, flags);
 	mutex_unlock(&driver->md_session_lock);
 
diff --git a/drivers/char/diag/diag_mux.h b/drivers/char/diag/diag_mux.h
index 24d33bb..1288642 100644
--- a/drivers/char/diag/diag_mux.h
+++ b/drivers/char/diag/diag_mux.h
@@ -65,4 +65,5 @@ int diag_mux_close_peripheral(int proc, uint8_t peripheral);
 int diag_mux_open_all(struct diag_logger_t *logger);
 int diag_mux_close_all(void);
 int diag_mux_switch_logging(int proc, int *new_mode, int *peripheral_mask);
+void diag_notify_md_client(uint8_t proc, uint8_t peripheral, int data);
 #endif
diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c
index 682c7a6..772456e 100644
--- a/drivers/char/diag/diag_usb.c
+++ b/drivers/char/diag/diag_usb.c
@@ -395,14 +395,16 @@ static void diag_usb_notifier(void *priv, unsigned int event,
 
 	switch (event) {
 	case USB_DIAG_CONNECT:
-		pr_info("diag: USB channel %s: Received Connect event\n",
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: USB channel %s: Received Connect event\n",
 			usb_info->name);
 		diag_usb_event_add(usb_info, USB_DIAG_CONNECT);
 		queue_work(usb_info->usb_wq,
 			   &usb_info->event_work);
 		break;
 	case USB_DIAG_DISCONNECT:
-		pr_info("diag: USB channel %s: Received Disconnect event\n",
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: USB channel %s: Received Disconnect event\n",
 			usb_info->name);
 		diag_usb_event_add(usb_info, USB_DIAG_DISCONNECT);
 		queue_work(usb_info->usb_wq,
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 6d84722..a0e664c 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -701,6 +701,8 @@ struct diag_mask_info {
 	int mask_len;
 	uint8_t *update_buf;
 	int update_buf_len;
+	uint8_t *update_buf_client;
+	int update_buf_client_len;
 	uint8_t status;
 	struct mutex lock;
 };
@@ -798,6 +800,7 @@ struct diagchar_dev {
 	unsigned int poolsize_dci;
 	unsigned int poolsize_user;
 	spinlock_t diagmem_lock;
+	wait_queue_head_t hdlc_wait_q;
 	/* Buffers for masks */
 	struct mutex diag_cntl_mutex;
 	/* Members for Sending response */
@@ -905,6 +908,17 @@ extern struct diagchar_dev *driver;
 extern int wrap_enabled;
 extern uint16_t wrap_count;
 
+struct diag_apps_data_t {
+	void *buf;
+	uint32_t len;
+	int ctxt;
+	uint8_t allocated;
+	uint8_t flushed;
+};
+
+extern struct diag_apps_data_t hdlc_data;
+extern struct diag_apps_data_t non_hdlc_data;
+
 void diag_get_timestamp(char *time_str);
 void check_drain_timer(void);
 int diag_get_remote(int remote_info);
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 9d5417d..31769e7 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -133,14 +133,8 @@ static int timer_in_progress;
 static int diag_mask_clear_param = 1;
 module_param(diag_mask_clear_param, int, 0644);
 
-struct diag_apps_data_t {
-	void *buf;
-	uint32_t len;
-	int ctxt;
-};
-
-static struct diag_apps_data_t hdlc_data;
-static struct diag_apps_data_t non_hdlc_data;
+struct diag_apps_data_t hdlc_data;
+struct diag_apps_data_t non_hdlc_data;
 static struct mutex apps_data_mutex;
 
 #define DIAGPKT_MAX_DELAYED_RSP 0xFFFF
@@ -202,14 +196,25 @@ static void diag_drain_apps_data(struct diag_apps_data_t *data)
 	if (!data || !data->buf)
 		return;
 
+	spin_lock_irqsave(&driver->diagmem_lock, flags);
+	if (data->flushed) {
+		spin_unlock_irqrestore(&driver->diagmem_lock, flags);
+		return;
+	}
+	data->flushed = 1;
+	spin_unlock_irqrestore(&driver->diagmem_lock, flags);
 	err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
 			     data->ctxt);
-	spin_lock_irqsave(&driver->diagmem_lock, flags);
-	if (err)
+
+	if (err) {
+		spin_lock_irqsave(&driver->diagmem_lock, flags);
 		diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
-	data->buf = NULL;
-	data->len = 0;
-	spin_unlock_irqrestore(&driver->diagmem_lock, flags);
+		data->buf = NULL;
+		data->len = 0;
+		data->allocated = 0;
+		data->flushed = 0;
+		spin_unlock_irqrestore(&driver->diagmem_lock, flags);
+	}
 }
 
 void diag_update_user_client_work_fn(struct work_struct *work)
@@ -282,6 +287,7 @@ static void diag_mempool_init(void)
 	diagmem_init(driver, POOL_TYPE_DCI);
 
 	spin_lock_init(&driver->diagmem_lock);
+	init_waitqueue_head(&driver->hdlc_wait_q);
 }
 
 static void diag_mempool_exit(void)
@@ -3087,37 +3093,42 @@ static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
 	send.last = (void *)(buf + len - 1);
 	send.terminate = 1;
 
-	if (!data->buf)
+wait_for_buffer:
+	wait_event_interruptible(driver->hdlc_wait_q,
+			(data->flushed == 0));
+	spin_lock_irqsave(&driver->diagmem_lock, flags);
+	if (data->flushed) {
+		spin_unlock_irqrestore(&driver->diagmem_lock, flags);
+		goto wait_for_buffer;
+	}
+	if (!data->buf) {
 		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
 					APF_DIAG_PADDING,
 					  POOL_TYPE_HDLC);
-	if (!data->buf) {
-		ret = PKT_DROP;
-		goto fail_ret;
+		if (!data->buf) {
+			ret = PKT_DROP;
+			spin_unlock_irqrestore(&driver->diagmem_lock, flags);
+			goto fail_ret;
+		}
+		data->allocated = 1;
+		data->flushed = 0;
 	}
 
 	if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_encoded_size) {
+		data->flushed = 1;
+		spin_unlock_irqrestore(&driver->diagmem_lock, flags);
 		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
 				     data->ctxt);
 		if (err) {
 			ret = -EIO;
 			goto fail_free_buf;
 		}
-		data->buf = NULL;
-		data->len = 0;
-		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
-					APF_DIAG_PADDING,
-					  POOL_TYPE_HDLC);
-		if (!data->buf) {
-			ret = PKT_DROP;
-			goto fail_ret;
-		}
+		goto wait_for_buffer;
 	}
 
 	enc.dest = data->buf + data->len;
 	enc.dest_last = (void *)(data->buf + data->len + max_encoded_size);
 	diag_hdlc_encode(&send, &enc);
-
 	/*
 	 * This is to check if after HDLC encoding, we are still within
 	 * the limits of aggregation buffer. If not, we write out the
@@ -3126,21 +3137,32 @@ static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
 	 */
 	if ((uintptr_t)enc.dest >= (uintptr_t)(data->buf +
 					       DIAG_MAX_HDLC_BUF_SIZE)) {
+		data->flushed = 1;
+		spin_unlock_irqrestore(&driver->diagmem_lock, flags);
 		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
 				     data->ctxt);
 		if (err) {
 			ret = -EIO;
 			goto fail_free_buf;
 		}
-		data->buf = NULL;
-		data->len = 0;
+wait_for_agg_buff:
+		wait_event_interruptible(driver->hdlc_wait_q,
+			(data->flushed == 0));
+		spin_lock_irqsave(&driver->diagmem_lock, flags);
+		if (data->flushed) {
+			spin_unlock_irqrestore(&driver->diagmem_lock, flags);
+			goto wait_for_agg_buff;
+		}
 		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
 					APF_DIAG_PADDING,
 					 POOL_TYPE_HDLC);
 		if (!data->buf) {
 			ret = PKT_DROP;
+			spin_unlock_irqrestore(&driver->diagmem_lock, flags);
 			goto fail_ret;
 		}
+		data->allocated = 1;
+		data->flushed = 0;
 
 		enc.dest = data->buf + data->len;
 		enc.dest_last = (void *)(data->buf + data->len +
@@ -3154,23 +3176,27 @@ static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
 			DIAG_MAX_HDLC_BUF_SIZE;
 
 	if (pkt_type == DATA_TYPE_RESPONSE) {
+		data->flushed = 1;
+		spin_unlock_irqrestore(&driver->diagmem_lock, flags);
 		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
 				     data->ctxt);
 		if (err) {
 			ret = -EIO;
 			goto fail_free_buf;
 		}
-		data->buf = NULL;
-		data->len = 0;
+		return PKT_ALLOC;
 	}
-
+	spin_unlock_irqrestore(&driver->diagmem_lock, flags);
 	return PKT_ALLOC;
 
 fail_free_buf:
 	spin_lock_irqsave(&driver->diagmem_lock, flags);
-	diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
+	if (data->allocated)
+		diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
 	data->buf = NULL;
 	data->len = 0;
+	data->allocated = 0;
+	data->flushed = 0;
 	spin_unlock_irqrestore(&driver->diagmem_lock, flags);
 fail_ret:
 	return ret;
@@ -3196,33 +3222,36 @@ static int diag_process_apps_data_non_hdlc(unsigned char *buf, int len,
 		       __func__, buf, len);
 		return -EIO;
 	}
-
+wait_for_buffer:
+	wait_event_interruptible(driver->hdlc_wait_q,
+			(data->flushed == 0));
+	spin_lock_irqsave(&driver->diagmem_lock, flags);
+	if (data->flushed) {
+		spin_unlock_irqrestore(&driver->diagmem_lock, flags);
+		goto wait_for_buffer;
+	}
 	if (!data->buf) {
 		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
 					APF_DIAG_PADDING,
 					  POOL_TYPE_HDLC);
 		if (!data->buf) {
 			ret = PKT_DROP;
+			spin_unlock_irqrestore(&driver->diagmem_lock, flags);
 			goto fail_ret;
 		}
+		data->allocated = 1;
+		data->flushed = 0;
 	}
-
 	if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_pkt_size) {
+		data->flushed = 1;
+		spin_unlock_irqrestore(&driver->diagmem_lock, flags);
 		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
 				     data->ctxt);
 		if (err) {
 			ret = -EIO;
 			goto fail_free_buf;
 		}
-		data->buf = NULL;
-		data->len = 0;
-		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
-					APF_DIAG_PADDING,
-					  POOL_TYPE_HDLC);
-		if (!data->buf) {
-			ret = PKT_DROP;
-			goto fail_ret;
-		}
+		goto wait_for_buffer;
 	}
 
 	header.start = CONTROL_CHAR;
@@ -3235,23 +3264,27 @@ static int diag_process_apps_data_non_hdlc(unsigned char *buf, int len,
 	*(uint8_t *)(data->buf + data->len) = CONTROL_CHAR;
 	data->len += sizeof(uint8_t);
 	if (pkt_type == DATA_TYPE_RESPONSE) {
+		data->flushed = 1;
+		spin_unlock_irqrestore(&driver->diagmem_lock, flags);
 		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
 				     data->ctxt);
 		if (err) {
 			ret = -EIO;
 			goto fail_free_buf;
 		}
-		data->buf = NULL;
-		data->len = 0;
+		return PKT_ALLOC;
 	}
-
+	spin_unlock_irqrestore(&driver->diagmem_lock, flags);
 	return PKT_ALLOC;
 
 fail_free_buf:
 	spin_lock_irqsave(&driver->diagmem_lock, flags);
-	diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
+	if (data->allocated)
+		diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
 	data->buf = NULL;
 	data->len = 0;
+	data->allocated = 0;
+	data->flushed = 0;
 	spin_unlock_irqrestore(&driver->diagmem_lock, flags);
 fail_ret:
 	return ret;
@@ -3682,19 +3715,24 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
 	if (driver->data_ready[index] & MSG_MASKS_TYPE) {
 		/*Copy the type of data being passed*/
 		data_type = driver->data_ready[index] & MSG_MASKS_TYPE;
+		mutex_unlock(&driver->diagchar_mutex);
 		mutex_lock(&driver->md_session_lock);
 		session_info = diag_md_session_get_peripheral(DIAG_LOCAL_PROC,
 								APPS_DATA);
 		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
 		if (ret == -EFAULT) {
 			mutex_unlock(&driver->md_session_lock);
-			goto exit;
+			goto end;
 		}
+		if (!session_info)
+			mutex_unlock(&driver->md_session_lock);
 		write_len = diag_copy_to_user_msg_mask(buf + ret, count,
 						       session_info);
-		mutex_unlock(&driver->md_session_lock);
+		if (session_info)
+			mutex_unlock(&driver->md_session_lock);
 		if (write_len > 0)
 			ret += write_len;
+		mutex_lock(&driver->diagchar_mutex);
 		driver->data_ready[index] ^= MSG_MASKS_TYPE;
 		atomic_dec(&driver->data_ready_notif[index]);
 		goto exit;
@@ -3703,13 +3741,14 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
 	if (driver->data_ready[index] & EVENT_MASKS_TYPE) {
 		/*Copy the type of data being passed*/
 		data_type = driver->data_ready[index] & EVENT_MASKS_TYPE;
+		mutex_unlock(&driver->diagchar_mutex);
 		mutex_lock(&driver->md_session_lock);
 		session_info = diag_md_session_get_peripheral(DIAG_LOCAL_PROC,
 								APPS_DATA);
 		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
 		if (ret == -EFAULT) {
 			mutex_unlock(&driver->md_session_lock);
-			goto exit;
+			goto end;
 		}
 		if (session_info && session_info->event_mask &&
 		    session_info->event_mask->ptr) {
@@ -3718,7 +3757,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
 					session_info->event_mask->mask_len);
 			if (ret == -EFAULT) {
 				mutex_unlock(&driver->md_session_lock);
-				goto exit;
+				goto end;
 			}
 		} else {
 			COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
@@ -3726,10 +3765,11 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
 						event_mask.mask_len);
 			if (ret == -EFAULT) {
 				mutex_unlock(&driver->md_session_lock);
-				goto exit;
+				goto end;
 			}
 		}
 		mutex_unlock(&driver->md_session_lock);
+		mutex_lock(&driver->diagchar_mutex);
 		driver->data_ready[index] ^= EVENT_MASKS_TYPE;
 		atomic_dec(&driver->data_ready_notif[index]);
 		goto exit;
@@ -3738,20 +3778,24 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
 	if (driver->data_ready[index] & LOG_MASKS_TYPE) {
 		/*Copy the type of data being passed*/
 		data_type = driver->data_ready[index] & LOG_MASKS_TYPE;
+		mutex_unlock(&driver->diagchar_mutex);
 		mutex_lock(&driver->md_session_lock);
 		session_info = diag_md_session_get_peripheral(DIAG_LOCAL_PROC,
 								APPS_DATA);
 		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
 		if (ret == -EFAULT) {
 			mutex_unlock(&driver->md_session_lock);
-			goto exit;
+			goto end;
 		}
-
+		if (!session_info)
+			mutex_unlock(&driver->md_session_lock);
 		write_len = diag_copy_to_user_log_mask(buf + ret, count,
 						       session_info);
-		mutex_unlock(&driver->md_session_lock);
+		if (session_info)
+			mutex_unlock(&driver->md_session_lock);
 		if (write_len > 0)
 			ret += write_len;
+		mutex_lock(&driver->diagchar_mutex);
 		driver->data_ready[index] ^= LOG_MASKS_TYPE;
 		atomic_dec(&driver->data_ready_notif[index]);
 		goto exit;
@@ -4347,8 +4391,12 @@ static int __init diagchar_init(void)
 	driver->rsp_buf_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, TYPE_CMD);
 	hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
 	hdlc_data.len = 0;
+	hdlc_data.allocated = 0;
+	hdlc_data.flushed = 0;
 	non_hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
 	non_hdlc_data.len = 0;
+	non_hdlc_data.allocated = 0;
+	non_hdlc_data.flushed = 0;
 	mutex_init(&driver->hdlc_disable_mutex);
 	mutex_init(&driver->diagchar_mutex);
 	mutex_init(&driver->diag_notifier_mutex);
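The flushed/allocated handshake added above (and completed by the diagfwd.c write-done hunk below) serializes the single HDLC aggregation buffer between producers and the mux write-done path: a producer marks the buffer in flight before submitting it, and the write-done handler clears the flag and wakes waiters. A reduced sketch with hypothetical names; unlike the driver, which simply re-waits, this sketch propagates a signal as -ERESTARTSYS:

	#include <linux/spinlock.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(buf_wait_q);
	static DEFINE_SPINLOCK(buf_lock);
	static int buf_flushed;	/* buffer handed off, not yet consumed */

	/* producer: claim the aggregation buffer for a new write */
	static int claim_buffer(void)
	{
		unsigned long flags;

	retry:
		if (wait_event_interruptible(buf_wait_q, !buf_flushed))
			return -ERESTARTSYS;
		spin_lock_irqsave(&buf_lock, flags);
		if (buf_flushed) {
			/* lost a race with another producer; wait again */
			spin_unlock_irqrestore(&buf_lock, flags);
			goto retry;
		}
		buf_flushed = 1;	/* in flight until write done */
		spin_unlock_irqrestore(&buf_lock, flags);
		return 0;
	}

	/* consumer: called from the write-done path once data is consumed */
	static void release_buffer(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&buf_lock, flags);
		buf_flushed = 0;
		spin_unlock_irqrestore(&buf_lock, flags);
		wake_up_interruptible(&buf_wait_q);
	}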
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 950d04c..fe216df 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1044,6 +1044,7 @@ static void diag_init_apps_feature(void)
 
 	SET_APPS_FEATURE(driver, F_DIAG_EVENT_REPORT);
 	SET_APPS_FEATURE(driver, F_DIAG_HW_ACCELERATION);
+	SET_APPS_FEATURE(driver, F_DIAG_MULTI_SIM_MASK);
 }
 
 void diag_send_error_rsp(unsigned char *buf, int len,
@@ -1917,6 +1918,7 @@ static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
 	int peripheral = -1;
 	int type = -1;
 	int num = -1;
+	struct diag_apps_data_t *temp = NULL;
 
 	if (!buf || len < 0)
 		return -EINVAL;
@@ -1935,9 +1937,24 @@ static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
 			diag_ws_on_copy(DIAG_WS_MUX);
 		} else if (peripheral == APPS_DATA) {
 			spin_lock_irqsave(&driver->diagmem_lock, flags);
-			diagmem_free(driver, (unsigned char *)buf,
-				     POOL_TYPE_HDLC);
-			buf = NULL;
+			if (hdlc_data.allocated)
+				temp = &hdlc_data;
+			else if (non_hdlc_data.allocated)
+				temp = &non_hdlc_data;
+			else
+				DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"No apps data buffer is allocated to be freed\n");
+			if (temp) {
+				DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"Freeing Apps data buffer after write done hdlc.allocated: %d, non_hdlc.allocated: %d\n",
+				hdlc_data.allocated, non_hdlc_data.allocated);
+				diagmem_free(driver, temp->buf, POOL_TYPE_HDLC);
+				temp->buf = NULL;
+				temp->len = 0;
+				temp->allocated = 0;
+				temp->flushed = 0;
+				wake_up_interruptible(&driver->hdlc_wait_q);
+			}
 			spin_unlock_irqrestore(&driver->diagmem_lock, flags);
 		} else {
 			pr_err_ratelimited("diag: Invalid peripheral %d in %s, type: %d\n",
diff --git a/drivers/char/diag/diagfwd_bridge.c b/drivers/char/diag/diagfwd_bridge.c
index d2cb9b3..4ef4776 100644
--- a/drivers/char/diag/diagfwd_bridge.c
+++ b/drivers/char/diag/diagfwd_bridge.c
@@ -86,8 +86,6 @@ static int diagfwd_bridge_mux_connect(int id, int mode)
 {
 	if (id < 0 || id >= NUM_REMOTE_DEV)
 		return -EINVAL;
-	if (bridge_info[id].dev_ops && bridge_info[id].dev_ops->open)
-		bridge_info[id].dev_ops->open(id, bridge_info[id].ctxt);
 	return 0;
 }
 
@@ -195,16 +193,21 @@ int diag_remote_dev_open(int id)
 	if (id < 0 || id >= NUM_REMOTE_DEV)
 		return -EINVAL;
 	bridge_info[id].inited = 1;
-	if (bridge_info[id].type == DIAG_DATA_TYPE)
+	if (bridge_info[id].type == DIAG_DATA_TYPE) {
+		diag_notify_md_client(BRIDGE_TO_MUX(id), 0, DIAG_STATUS_OPEN);
 		return diag_mux_queue_read(BRIDGE_TO_MUX(id));
-	else if (bridge_info[id].type == DIAG_DCI_TYPE)
+	} else if (bridge_info[id].type == DIAG_DCI_TYPE) {
 		return diag_dci_send_handshake_pkt(bridge_info[id].id);
+	}
 
 	return 0;
 }
 
 void diag_remote_dev_close(int id)
 {
+	if (bridge_info[id].type == DIAG_DATA_TYPE)
+		diag_notify_md_client(BRIDGE_TO_MUX(id), 0, DIAG_STATUS_CLOSED);
+
 }
 
 int diag_remote_dev_read_done(int id, unsigned char *buf, int len)
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index 76e5601..5acb25f 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -19,11 +19,10 @@
 #include "diag_mux.h"
 
 #define FEATURE_SUPPORTED(x)	((feature_mask << (i * 8)) & (1 << x))
-
+#define DIAG_GET_MD_DEVICE_SIG_MASK(proc) (0x100000 * (1 << proc))
 /* tracks which peripheral is undergoing SSR */
 static uint16_t reg_dirty[NUM_PERIPHERALS];
 static uint8_t diag_id = DIAG_ID_APPS;
-static void diag_notify_md_client(uint8_t peripheral, int data);
 
 static void diag_mask_update_work_fn(struct work_struct *work)
 {
@@ -42,7 +41,9 @@ void diag_cntl_channel_open(struct diagfwd_info *p_info)
 		return;
 	driver->mask_update |= PERIPHERAL_MASK(p_info->peripheral);
 	queue_work(driver->cntl_wq, &driver->mask_update_work);
-	diag_notify_md_client(p_info->peripheral, DIAG_STATUS_OPEN);
+	diag_notify_md_client(DIAG_LOCAL_PROC, p_info->peripheral,
+				DIAG_STATUS_OPEN);
+
 }
 
 void diag_cntl_channel_close(struct diagfwd_info *p_info)
@@ -66,7 +67,7 @@ void diag_cntl_channel_close(struct diagfwd_info *p_info)
 	driver->stm_state[peripheral] = DISABLE_STM;
 	driver->stm_state_requested[peripheral] = DISABLE_STM;
 	reg_dirty[peripheral] = 0;
-	diag_notify_md_client(peripheral, DIAG_STATUS_CLOSED);
+	diag_notify_md_client(DIAG_LOCAL_PROC, peripheral, DIAG_STATUS_CLOSED);
 }
 
 static void diag_stm_update_work_fn(struct work_struct *work)
@@ -97,9 +98,9 @@ static void diag_stm_update_work_fn(struct work_struct *work)
 	}
 }
 
-void diag_notify_md_client(uint8_t peripheral, int data)
+void diag_notify_md_client(uint8_t proc, uint8_t peripheral, int data)
 {
-	int stat = 0, proc = DIAG_LOCAL_PROC;
+	int stat = 0;
 	struct siginfo info;
 	struct pid *pid_struct;
 	struct task_struct *result;
@@ -113,7 +114,10 @@ void diag_notify_md_client(uint8_t peripheral, int data)
 	mutex_lock(&driver->md_session_lock);
 	memset(&info, 0, sizeof(struct siginfo));
 	info.si_code = SI_QUEUE;
-	info.si_int = (PERIPHERAL_MASK(peripheral) | data);
+	info.si_int = (DIAG_GET_MD_DEVICE_SIG_MASK(proc) | data);
+	if (proc == DIAG_LOCAL_PROC)
+		info.si_int = info.si_int |
+				(PERIPHERAL_MASK(peripheral) | data);
 	info.si_signo = SIGCONT;
 
 	if (!driver->md_session_map[proc][peripheral] ||
@@ -171,7 +175,7 @@ static void process_pd_status(uint8_t *buf, uint32_t len,
 	pd_msg = (struct diag_ctrl_msg_pd_status *)buf;
 	pd = pd_msg->pd_id;
 	status = (pd_msg->status == 0) ? DIAG_STATUS_OPEN : DIAG_STATUS_CLOSED;
-	diag_notify_md_client(peripheral, status);
+	diag_notify_md_client(DIAG_LOCAL_PROC, peripheral, status);
 }
 
 static void enable_stm_feature(uint8_t peripheral)
@@ -1886,11 +1890,9 @@ int diag_send_passthru_ctrl_pkt(struct diag_hw_accel_cmd_req_t *req_params)
 		if (!P_FMASK_DIAGID_V2(i))
 			continue;
 		err = diagfwd_write(i, TYPE_CNTL, &ctrl_pkt, sizeof(ctrl_pkt));
-		if (err && err != -ENODEV) {
+		if (err && err != -ENODEV)
 			pr_err("diag: Unable to send PASSTHRU ctrl packet to peripheral %d, err: %d\n",
 				i, err);
-			return err;
-		}
 	}
 	return 0;
 }
diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c
index 27a7d1f..39eb9c1 100644
--- a/drivers/char/diag/diagfwd_mhi.c
+++ b/drivers/char/diag/diagfwd_mhi.c
@@ -777,6 +777,9 @@ static int diag_mhi_probe(struct mhi_device *mhi_dev,
 	case MHI_DEV_ID_2:
 		dev_idx = 1;
 		break;
+	case MHI_DEV_ID_3:
+		dev_idx = 1;
+		break;
 	default:
 		return 0;
 	}
diff --git a/drivers/char/diag/diagfwd_mhi.h b/drivers/char/diag/diagfwd_mhi.h
index 21485cc..ac04094 100644
--- a/drivers/char/diag/diagfwd_mhi.h
+++ b/drivers/char/diag/diagfwd_mhi.h
@@ -39,6 +39,7 @@
 /* Below mhi  device ids are from mhi controller */
 #define MHI_DEV_ID_1 0x306
 #define MHI_DEV_ID_2 0x1101
+#define MHI_DEV_ID_3 0x1103
 
 struct diag_mhi_buf_tbl_t {
 	struct list_head link;
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index aaf9e5a..0ef7cb0 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -67,7 +67,7 @@ static void add_early_randomness(struct hwrng *rng)
 	size_t size = min_t(size_t, 16, rng_buffer_size());
 
 	mutex_lock(&reading_mutex);
-	bytes_read = rng_get_data(rng, rng_buffer, size, 1);
+	bytes_read = rng_get_data(rng, rng_buffer, size, 0);
 	mutex_unlock(&reading_mutex);
 	if (bytes_read > 0)
 		add_device_randomness(rng_buffer, bytes_read);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 75e5006..006d765 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -221,6 +221,9 @@ struct smi_info {
 	 */
 	bool irq_enable_broken;
 
+	/* Is the driver in maintenance mode? */
+	bool in_maintenance_mode;
+
 	/*
 	 * Did we get an attention that we did not handle?
 	 */
@@ -1013,11 +1016,20 @@ static int ipmi_thread(void *data)
 		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
 						  &busy_until);
-		if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
+		if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
 			; /* do nothing */
-		else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
-			schedule();
-		else if (smi_result == SI_SM_IDLE) {
+		} else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
+			/*
+			 * In maintenance mode we run as fast as
+			 * possible to allow firmware updates to
+			 * complete as fast as possible, but normally
+			 * don't bang on the scheduler.
+			 */
+			if (smi_info->in_maintenance_mode)
+				schedule();
+			else
+				usleep_range(100, 200);
+		} else if (smi_result == SI_SM_IDLE) {
 			if (atomic_read(&smi_info->need_watch)) {
 				schedule_timeout_interruptible(100);
 			} else {
@@ -1025,8 +1037,9 @@ static int ipmi_thread(void *data)
 				__set_current_state(TASK_INTERRUPTIBLE);
 				schedule();
 			}
-		} else
+		} else {
 			schedule_timeout_interruptible(1);
+		}
 	}
 	return 0;
 }
@@ -1201,6 +1214,7 @@ static void set_maintenance_mode(void *send_info, bool enable)
 
 	if (!enable)
 		atomic_set(&smi_info->req_events, 0);
+	smi_info->in_maintenance_mode = enable;
 }
 
 static void shutdown_smi(void *send_info);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 7b4e4de..54b8649 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -97,6 +97,13 @@ void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
 }
 #endif
 
+static inline bool should_stop_iteration(void)
+{
+	if (need_resched())
+		cond_resched();
+	return fatal_signal_pending(current);
+}
+
 /*
  * This funcion reads the *physical* memory. The f_pos points directly to the
  * memory location.
@@ -175,6 +182,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
 		p += sz;
 		count -= sz;
 		read += sz;
+		if (should_stop_iteration())
+			break;
 	}
 	kfree(bounce);
 
@@ -251,6 +260,8 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
 		p += sz;
 		count -= sz;
 		written += sz;
+		if (should_stop_iteration())
+			break;
 	}
 
 	*ppos += written;
@@ -468,6 +479,10 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
 			read += sz;
 			low_count -= sz;
 			count -= sz;
+			if (should_stop_iteration()) {
+				count = 0;
+				break;
+			}
 		}
 	}
 
@@ -492,6 +507,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
 			buf += sz;
 			read += sz;
 			p += sz;
+			if (should_stop_iteration())
+				break;
 		}
 		free_page((unsigned long)kbuf);
 	}
@@ -544,6 +561,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
 		p += sz;
 		count -= sz;
 		written += sz;
+		if (should_stop_iteration())
+			break;
 	}
 
 	*ppos += written;
@@ -595,6 +614,8 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
 			buf += sz;
 			virtr += sz;
 			p += sz;
+			if (should_stop_iteration())
+				break;
 		}
 		free_page((unsigned long)kbuf);
 	}
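should_stop_iteration() gives the long /dev/mem and /dev/kmem copy loops above a voluntary preemption point and an early exit when the task has a fatal signal pending. A standalone usage sketch, with an illustrative loop body:

	#include <linux/kernel.h>
	#include <linux/sched.h>
	#include <linux/sched/signal.h>
	#include <linux/string.h>

	static inline bool should_stop_iteration(void)
	{
		if (need_resched())
			cond_resched();
		return fatal_signal_pending(current);
	}

	/* illustrative long copy loop with a preemption/signal check */
	static size_t copy_chunks(char *dst, const char *src, size_t count,
				  size_t chunk)
	{
		size_t done = 0;

		while (count) {
			size_t sz = min(count, chunk);

			memcpy(dst + done, src + done, sz);
			done += sz;
			count -= sz;
			if (should_stop_iteration())
				break;	/* fatal signal: stop early */
		}
		return done;
	}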
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
index be5d1ab..8390c5b 100644
--- a/drivers/char/tpm/st33zp24/i2c.c
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -33,7 +33,7 @@
 
 struct st33zp24_i2c_phy {
 	struct i2c_client *client;
-	u8 buf[TPM_BUFSIZE + 1];
+	u8 buf[ST33ZP24_BUFSIZE + 1];
 	int io_lpcpd;
 };
 
diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
index d7909ab..ff019a1 100644
--- a/drivers/char/tpm/st33zp24/spi.c
+++ b/drivers/char/tpm/st33zp24/spi.c
@@ -63,7 +63,7 @@
  * some latency byte before the answer is available (max 15).
  * We have 2048 + 1024 + 15.
  */
-#define ST33ZP24_SPI_BUFFER_SIZE (TPM_BUFSIZE + (TPM_BUFSIZE / 2) +\
+#define ST33ZP24_SPI_BUFFER_SIZE (ST33ZP24_BUFSIZE + (ST33ZP24_BUFSIZE / 2) +\
 				  MAX_SPI_LATENCY)
 
 
diff --git a/drivers/char/tpm/st33zp24/st33zp24.h b/drivers/char/tpm/st33zp24/st33zp24.h
index 6f4a419..20da0a8 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.h
+++ b/drivers/char/tpm/st33zp24/st33zp24.h
@@ -18,8 +18,8 @@
 #ifndef __LOCAL_ST33ZP24_H__
 #define __LOCAL_ST33ZP24_H__
 
-#define TPM_WRITE_DIRECTION             0x80
-#define TPM_BUFSIZE                     2048
+#define TPM_WRITE_DIRECTION	0x80
+#define ST33ZP24_BUFSIZE	2048
 
 struct st33zp24_dev {
 	struct tpm_chip *chip;
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 46caadc..0b01eb7 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -187,12 +187,13 @@ static int tpm_class_shutdown(struct device *dev)
 {
 	struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
 
+	down_write(&chip->ops_sem);
 	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
-		down_write(&chip->ops_sem);
 		tpm2_shutdown(chip, TPM2_SU_CLEAR);
 		chip->ops = NULL;
-		up_write(&chip->ops_sem);
 	}
+	chip->ops = NULL;
+	up_write(&chip->ops_sem);
 
 	return 0;
 }
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index 83a77a4..177a60e 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -39,7 +39,6 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
 {
 	struct tpm_buf tpm_buf;
 	struct tpm_readpubek_out *out;
-	ssize_t rc;
 	int i;
 	char *str = buf;
 	struct tpm_chip *chip = to_tpm_chip(dev);
@@ -47,19 +46,18 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
 
 	memset(&anti_replay, 0, sizeof(anti_replay));
 
-	rc = tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK);
-	if (rc)
-		return rc;
+	if (tpm_try_get_ops(chip))
+		return 0;
+
+	if (tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK))
+		goto out_ops;
 
 	tpm_buf_append(&tpm_buf, anti_replay, sizeof(anti_replay));
 
-	rc = tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE,
+	if (tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE,
 			      READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
-			      "attempting to read the PUBEK");
-	if (rc) {
-		tpm_buf_destroy(&tpm_buf);
-		return 0;
-	}
+			      "attempting to read the PUBEK"))
+		goto out_buf;
 
 	out = (struct tpm_readpubek_out *)&tpm_buf.data[10];
 	str +=
@@ -90,9 +88,11 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
 			str += sprintf(str, "\n");
 	}
 
-	rc = str - buf;
+out_buf:
 	tpm_buf_destroy(&tpm_buf);
-	return rc;
+out_ops:
+	tpm_put_ops(chip);
+	return str - buf;
 }
 static DEVICE_ATTR_RO(pubek);
 
@@ -106,12 +106,16 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr,
 	char *str = buf;
 	struct tpm_chip *chip = to_tpm_chip(dev);
 
-	rc = tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap,
-			"attempting to determine the number of PCRS",
-			sizeof(cap.num_pcrs));
-	if (rc)
+	if (tpm_try_get_ops(chip))
 		return 0;
 
+	if (tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap,
+		       "attempting to determine the number of PCRS",
+		       sizeof(cap.num_pcrs))) {
+		tpm_put_ops(chip);
+		return 0;
+	}
+
 	num_pcrs = be32_to_cpu(cap.num_pcrs);
 	for (i = 0; i < num_pcrs; i++) {
 		rc = tpm_pcr_read_dev(chip, i, digest);
@@ -122,6 +126,7 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr,
 			str += sprintf(str, "%02X ", digest[j]);
 		str += sprintf(str, "\n");
 	}
+	tpm_put_ops(chip);
 	return str - buf;
 }
 static DEVICE_ATTR_RO(pcrs);
@@ -129,16 +134,21 @@ static DEVICE_ATTR_RO(pcrs);
 static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
 		     char *buf)
 {
+	struct tpm_chip *chip = to_tpm_chip(dev);
+	ssize_t rc = 0;
 	cap_t cap;
-	ssize_t rc;
 
-	rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
-			"attempting to determine the permanent enabled state",
-			sizeof(cap.perm_flags));
-	if (rc)
+	if (tpm_try_get_ops(chip))
 		return 0;
 
+	if (tpm_getcap(chip, TPM_CAP_FLAG_PERM, &cap,
+		       "attempting to determine the permanent enabled state",
+		       sizeof(cap.perm_flags)))
+		goto out_ops;
+
 	rc = sprintf(buf, "%d\n", !cap.perm_flags.disable);
+out_ops:
+	tpm_put_ops(chip);
 	return rc;
 }
 static DEVICE_ATTR_RO(enabled);
@@ -146,16 +156,21 @@ static DEVICE_ATTR_RO(enabled);
 static ssize_t active_show(struct device *dev, struct device_attribute *attr,
 		    char *buf)
 {
+	struct tpm_chip *chip = to_tpm_chip(dev);
+	ssize_t rc = 0;
 	cap_t cap;
-	ssize_t rc;
 
-	rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
-			"attempting to determine the permanent active state",
-			sizeof(cap.perm_flags));
-	if (rc)
+	if (tpm_try_get_ops(chip))
 		return 0;
 
+	if (tpm_getcap(chip, TPM_CAP_FLAG_PERM, &cap,
+		       "attempting to determine the permanent active state",
+		       sizeof(cap.perm_flags)))
+		goto out_ops;
+
 	rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated);
+out_ops:
+	tpm_put_ops(chip);
 	return rc;
 }
 static DEVICE_ATTR_RO(active);
@@ -163,16 +178,21 @@ static DEVICE_ATTR_RO(active);
 static ssize_t owned_show(struct device *dev, struct device_attribute *attr,
 			  char *buf)
 {
+	struct tpm_chip *chip = to_tpm_chip(dev);
+	ssize_t rc = 0;
 	cap_t cap;
-	ssize_t rc;
 
-	rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap,
-			"attempting to determine the owner state",
-			sizeof(cap.owned));
-	if (rc)
+	if (tpm_try_get_ops(chip))
 		return 0;
 
+	if (tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap,
+		       "attempting to determine the owner state",
+		       sizeof(cap.owned)))
+		goto out_ops;
+
 	rc = sprintf(buf, "%d\n", cap.owned);
+out_ops:
+	tpm_put_ops(chip);
 	return rc;
 }
 static DEVICE_ATTR_RO(owned);
@@ -180,16 +200,21 @@ static DEVICE_ATTR_RO(owned);
 static ssize_t temp_deactivated_show(struct device *dev,
 				     struct device_attribute *attr, char *buf)
 {
+	struct tpm_chip *chip = to_tpm_chip(dev);
+	ssize_t rc = 0;
 	cap_t cap;
-	ssize_t rc;
 
-	rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap,
-			"attempting to determine the temporary state",
-			sizeof(cap.stclear_flags));
-	if (rc)
+	if (tpm_try_get_ops(chip))
 		return 0;
 
+	if (tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap,
+		       "attempting to determine the temporary state",
+		       sizeof(cap.stclear_flags)))
+		goto out_ops;
+
 	rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated);
+out_ops:
+	tpm_put_ops(chip);
 	return rc;
 }
 static DEVICE_ATTR_RO(temp_deactivated);
@@ -198,15 +223,18 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
 			 char *buf)
 {
 	struct tpm_chip *chip = to_tpm_chip(dev);
-	cap_t cap;
-	ssize_t rc;
+	ssize_t rc = 0;
 	char *str = buf;
+	cap_t cap;
 
-	rc = tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
-			"attempting to determine the manufacturer",
-			sizeof(cap.manufacturer_id));
-	if (rc)
+	if (tpm_try_get_ops(chip))
 		return 0;
+
+	if (tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
+		       "attempting to determine the manufacturer",
+		       sizeof(cap.manufacturer_id)))
+		goto out_ops;
+
 	str += sprintf(str, "Manufacturer: 0x%x\n",
 		       be32_to_cpu(cap.manufacturer_id));
 
@@ -223,20 +251,22 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
 			       cap.tpm_version_1_2.revMinor);
 	} else {
 		/* Otherwise just use TPM_STRUCT_VER */
-		rc = tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
-				"attempting to determine the 1.1 version",
-				sizeof(cap.tpm_version));
-		if (rc)
-			return 0;
+		if (tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
+			       "attempting to determine the 1.1 version",
+			       sizeof(cap.tpm_version)))
+			goto out_ops;
+
 		str += sprintf(str,
 			       "TCG version: %d.%d\nFirmware version: %d.%d\n",
 			       cap.tpm_version.Major,
 			       cap.tpm_version.Minor,
 			       cap.tpm_version.revMajor,
 			       cap.tpm_version.revMinor);
-	}
-
-	return str - buf;
+	}
+	rc = str - buf;
+out_ops:
+	tpm_put_ops(chip);
+	return rc;
 }
 static DEVICE_ATTR_RO(caps);
 
@@ -244,10 +274,12 @@ static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
 			    const char *buf, size_t count)
 {
 	struct tpm_chip *chip = to_tpm_chip(dev);
-	if (chip == NULL)
+
+	if (tpm_try_get_ops(chip))
 		return 0;
 
 	chip->ops->cancel(chip);
+	tpm_put_ops(chip);
 	return count;
 }
 static DEVICE_ATTR_WO(cancel);
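Each tpm-sysfs.c handler above now brackets hardware access with tpm_try_get_ops()/tpm_put_ops(), so chip->ops cannot be cleared underneath it (compare the tpm_class_shutdown() hunk earlier, which nulls chip->ops under ops_sem). A skeleton of the pattern, using a hypothetical attribute:

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		struct tpm_chip *chip = to_tpm_chip(dev);
		ssize_t rc = 0;

		if (tpm_try_get_ops(chip))	/* ops torn down; report empty */
			return 0;

		/* chip->ops is pinned here; safe to issue TPM commands */
		rc = sprintf(buf, "example\n");

		tpm_put_ops(chip);
		return rc;
	}
	static DEVICE_ATTR_RO(example);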
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 977fd42..3b4e967 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -26,8 +26,7 @@
 #include <linux/wait.h>
 #include "tpm.h"
 
-/* max. buffer size supported by our TPM */
-#define TPM_BUFSIZE 1260
+#define TPM_I2C_INFINEON_BUFSIZE 1260
 
 /* max. number of iterations after I2C NAK */
 #define MAX_COUNT 3
@@ -63,11 +62,13 @@ enum i2c_chip_type {
 	UNKNOWN,
 };
 
-/* Structure to store I2C TPM specific stuff */
 struct tpm_inf_dev {
 	struct i2c_client *client;
 	int locality;
-	u8 buf[TPM_BUFSIZE + sizeof(u8)]; /* max. buffer size + addr */
+	/* In addition to the data itself, the buffer must fit the 7-bit I2C
+	 * address and the direction bit.
+	 */
+	u8 buf[TPM_I2C_INFINEON_BUFSIZE + 1];
 	struct tpm_chip *chip;
 	enum i2c_chip_type chip_type;
 	unsigned int adapterlimit;
@@ -219,7 +220,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
 		.buf = tpm_dev.buf
 	};
 
-	if (len > TPM_BUFSIZE)
+	if (len > TPM_I2C_INFINEON_BUFSIZE)
 		return -EINVAL;
 
 	if (!tpm_dev.client->adapter->algo->master_xfer)
@@ -527,8 +528,8 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
 	u8 retries = 0;
 	u8 sts = TPM_STS_GO;
 
-	if (len > TPM_BUFSIZE)
-		return -E2BIG;	/* command is too long for our tpm, sorry */
+	if (len > TPM_I2C_INFINEON_BUFSIZE)
+		return -E2BIG;
 
 	if (request_locality(chip, 0) < 0)
 		return -EBUSY;
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index b8defdf..2803080 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -35,14 +35,12 @@
 #include "tpm.h"
 
 /* I2C interface offsets */
-#define TPM_STS                0x00
-#define TPM_BURST_COUNT        0x01
-#define TPM_DATA_FIFO_W        0x20
-#define TPM_DATA_FIFO_R        0x40
-#define TPM_VID_DID_RID        0x60
-/* TPM command header size */
-#define TPM_HEADER_SIZE        10
-#define TPM_RETRY      5
+#define TPM_STS			0x00
+#define TPM_BURST_COUNT		0x01
+#define TPM_DATA_FIFO_W		0x20
+#define TPM_DATA_FIFO_R		0x40
+#define TPM_VID_DID_RID		0x60
+#define TPM_I2C_RETRIES		5
 /*
  * I2C bus device maximum buffer size w/o counting I2C address or command
  * i.e. max size required for I2C write is 34 = addr, command, 32 bytes data
@@ -292,7 +290,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 		dev_err(dev, "%s() count < header size\n", __func__);
 		return -EIO;
 	}
-	for (retries = 0; retries < TPM_RETRY; retries++) {
+	for (retries = 0; retries < TPM_I2C_RETRIES; retries++) {
 		if (retries > 0) {
 			/* if this is not the first trial, set responseRetry */
 			i2c_nuvoton_write_status(client,
diff --git a/drivers/clk/actions/owl-common.c b/drivers/clk/actions/owl-common.c
index 61c1071..e9be34b 100644
--- a/drivers/clk/actions/owl-common.c
+++ b/drivers/clk/actions/owl-common.c
@@ -67,16 +67,17 @@ int owl_clk_probe(struct device *dev, struct clk_hw_onecell_data *hw_clks)
 	struct clk_hw *hw;
 
 	for (i = 0; i < hw_clks->num; i++) {
+		const char *name;
 
 		hw = hw_clks->hws[i];
-
 		if (IS_ERR_OR_NULL(hw))
 			continue;
 
+		name = hw->init->name;
 		ret = devm_clk_hw_register(dev, hw);
 		if (ret) {
 			dev_err(dev, "Couldn't register clock %d - %s\n",
-				i, hw->init->name);
+				i, name);
 			return ret;
 		}
 	}
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 3348136..1131524 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -153,6 +153,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
 			continue;
 
 		div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
+		if (div > GENERATED_MAX_DIV + 1)
+			div = GENERATED_MAX_DIV + 1;
 
 		clk_generated_best_diff(req, parent, parent_rate, div,
 					&best_diff, &best_rate);
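Without the clamp added above, a low requested rate can yield a divider the generated-clock hardware cannot program: for example, a 400 MHz parent and a 1 MHz request give DIV_ROUND_CLOSEST(400000000, 1000000) = 400, so the candidate is capped at GENERATED_MAX_DIV + 1 before clk_generated_best_diff() scores it.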
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index c813c27..2f97a84 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -27,6 +27,10 @@
 
 #define MOR_KEY_MASK		(0xff << 16)
 
+#define clk_main_parent_select(s)	(((s) & \
+					(AT91_PMC_MOSCEN | \
+					AT91_PMC_OSCBYPASS)) ? 1 : 0)
+
 struct clk_main_osc {
 	struct clk_hw hw;
 	struct regmap *regmap;
@@ -119,7 +123,7 @@ static int clk_main_osc_is_prepared(struct clk_hw *hw)
 
 	regmap_read(regmap, AT91_PMC_SR, &status);
 
-	return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN);
+	return (status & AT91_PMC_MOSCS) && clk_main_parent_select(tmp);
 }
 
 static const struct clk_ops main_osc_ops = {
@@ -530,7 +534,7 @@ static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw)
 
 	regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
 
-	return status & AT91_PMC_MOSCEN ? 1 : 0;
+	return clk_main_parent_select(status);
 }
 
 static const struct clk_ops sam9x5_main_ops = {
@@ -572,7 +576,7 @@ at91_clk_register_sam9x5_main(struct regmap *regmap,
 	clkmain->hw.init = &init;
 	clkmain->regmap = regmap;
 	regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
-	clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0;
+	clkmain->parent = clk_main_parent_select(status);
 
 	hw = &clkmain->hw;
 	ret = clk_hw_register(NULL, &clkmain->hw);
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 3a1812f..8abc5c8 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -610,7 +610,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
 		.guts_compat = "fsl,qoriq-device-config-1.0",
 		.init_periph = p5020_init_periph,
 		.cmux_groups = {
-			&p2041_cmux_grp1, &p2041_cmux_grp2
+			&p5020_cmux_grp1, &p5020_cmux_grp2
 		},
 		.cmux_to_group = {
 			0, 1, -1
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 0934d37..4080d4e 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -255,7 +255,7 @@ MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
  * This requires of_device_id table.  At the same time this will not change the
  * actual *device* matching so do not add .of_match_table.
  */
-static const struct of_device_id s2mps11_dt_match[] = {
+static const struct of_device_id s2mps11_dt_match[] __used = {
 	{
 		.compatible = "samsung,s2mps11-clk",
 		.data = (void *)S2MPS11X,
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index bd73664..cd46b1d 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -433,3 +433,11 @@
 	  BENGAL devices.
 	  Say Y if you want to support display devices and functionality such as
 	  splash screen.
+
+config SM_DEBUGCC_BENGAL
+	tristate "BENGAL Debug Clock Controller"
+	select SM_GCC_BENGAL
+	help
+	  Support for the debug clock controller on Qualcomm Technologies, Inc.
+	  BENGAL devices.
+	  Say Y if you want to support the clock measurement functionality.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 3e58b7b..dbf60b9 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -53,6 +53,7 @@
 obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
 obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
 obj-$(CONFIG_SM_CAMCC_LITO) += camcc-lito.o
+obj-$(CONFIG_SM_DEBUGCC_BENGAL) += debugcc-bengal.o
 obj-$(CONFIG_SM_DEBUGCC_LITO) += debugcc-lito.o
 obj-$(CONFIG_SM_DISPCC_BENGAL) += dispcc-bengal.o
 obj-$(CONFIG_SM_DISPCC_LITO) += dispcc-lito.o
diff --git a/drivers/clk/qcom/camcc-kona.c b/drivers/clk/qcom/camcc-kona.c
index f15be8e..c661977 100644
--- a/drivers/clk/qcom/camcc-kona.c
+++ b/drivers/clk/qcom/camcc-kona.c
@@ -37,7 +37,7 @@
 	.ib = _ib,				\
 }
 
-static DEFINE_VDD_REGULATORS(vdd_mm, VDD_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_mm, VDD_NUM_MM, 1, vdd_corner);
 static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner);
 
 static struct msm_bus_vectors clk_debugfs_vectors[] = {
@@ -377,9 +377,9 @@ static struct clk_alpha_pll_postdiv cam_cc_pll2_out_main = {
 };
 
 static const struct alpha_pll_config cam_cc_pll3_config = {
-	.l = 0xF,
+	.l = 0x24,
 	.cal_l = 0x44,
-	.alpha = 0xA000,
+	.alpha = 0x7555,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x029A699C,
@@ -432,9 +432,9 @@ static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
 };
 
 static const struct alpha_pll_config cam_cc_pll4_config = {
-	.l = 0xF,
+	.l = 0x24,
 	.cal_l = 0x44,
-	.alpha = 0xA000,
+	.alpha = 0x7555,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x029A699C,
@@ -876,16 +876,19 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
 };
 
 static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
-	F(150000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
-	F(200000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
-	F(250000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
-	F(300000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(350000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
-	F(425000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
 	F(475000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
-	F(525000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
 	F(576000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
-	F(630000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(720000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src_kona_v2[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(350000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(475000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(576000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
 	F(680000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
 	{ }
 };
@@ -943,16 +946,19 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
 };
 
 static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
-	F(150000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
-	F(200000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
-	F(250000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
-	F(300000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(350000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
-	F(425000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
 	F(475000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
-	F(525000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
 	F(576000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
-	F(630000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(720000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src_kona_v2[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(350000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(475000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(576000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
 	F(680000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
 	{ }
 };
@@ -2714,7 +2720,9 @@ static void cam_cc_kona_fixup_konav2(struct regmap *regmap)
 	cam_cc_bps_clk_src.freq_tbl = ftbl_cam_cc_bps_clk_src_kona_v2;
 	cam_cc_fd_core_clk_src.freq_tbl = ftbl_cam_cc_fd_core_clk_src_kona_v2;
 	cam_cc_icp_clk_src.freq_tbl = ftbl_cam_cc_fd_core_clk_src_kona_v2;
+	cam_cc_ife_0_clk_src.freq_tbl = ftbl_cam_cc_ife_0_clk_src_kona_v2;
 	cam_cc_ife_0_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 680000000;
+	cam_cc_ife_1_clk_src.freq_tbl = ftbl_cam_cc_ife_1_clk_src_kona_v2;
 	cam_cc_ife_1_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 680000000;
 	cam_cc_ife_lite_clk_src.freq_tbl = ftbl_cam_cc_ife_lite_clk_src_kona_v2;
 	cam_cc_jpeg_clk_src.freq_tbl = ftbl_cam_cc_bps_clk_src_kona_v2;
diff --git a/drivers/clk/qcom/camcc-lito.c b/drivers/clk/qcom/camcc-lito.c
index 125e082..d4ff02f 100644
--- a/drivers/clk/qcom/camcc-lito.c
+++ b/drivers/clk/qcom/camcc-lito.c
@@ -24,7 +24,7 @@
 #include "clk-rcg.h"
 #include "clk-regmap.h"
 #include "common.h"
-#include "vdd-level.h"
+#include "vdd-level-lito.h"
 
 static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
 static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner);
@@ -324,7 +324,7 @@ static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
 	},
 };
 
-static const struct alpha_pll_config cam_cc_pll2_config = {
+static struct alpha_pll_config cam_cc_pll2_config = {
 	.l = 0x32,
 	.cal_l = 0x32,
 	.alpha = 0x0,
@@ -2319,6 +2319,7 @@ static const struct qcom_cc_desc cam_cc_lito_desc = {
 
 static const struct of_device_id cam_cc_lito_match_table[] = {
 	{ .compatible = "qcom,lito-camcc" },
+	{ .compatible = "qcom,lito-camcc-v2" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, cam_cc_lito_match_table);
@@ -2361,6 +2362,14 @@ static int cam_cc_lito_probe(struct platform_device *pdev)
 
 	clk_lucid_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
 	clk_lucid_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,lito-camcc-v2")) {
+		cam_cc_pll2_config.config_ctl_val = 0x38200920;
+		cam_cc_pll2_config.config_ctl_hi_val = 0x15002001;
+		cam_cc_pll2_config.config_ctl_hi1_val = 0x80000000;
+		cam_cc_pll2_config.test_ctl_val = 0x00000000;
+	}
+
 	clk_zonda_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
 	clk_lucid_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
 	clk_lucid_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index f6fd675..4ac844f 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -844,12 +844,16 @@ static void clk_alpha_pll_list_registers(struct seq_file *f, struct clk_hw *hw)
 	int size, i, val;
 
 	static struct clk_register_data data[] = {
-		{"PLL_MODE", 0x0},
-		{"PLL_L_VAL", 0x4},
-		{"PLL_ALPHA_VAL", 0x8},
-		{"PLL_ALPHA_VAL_U", 0xC},
-		{"PLL_USER_CTL", 0x10},
-		{"PLL_CONFIG_CTL", 0x18},
+		{"PLL_MODE", PLL_OFF_MODE},
+		{"PLL_L_VAL", PLL_OFF_L_VAL},
+		{"PLL_ALPHA_VAL", PLL_OFF_ALPHA_VAL},
+		{"PLL_ALPHA_VAL_U", PLL_OFF_ALPHA_VAL_U},
+		{"PLL_TEST_CTL", PLL_OFF_TEST_CTL},
+		{"PLL_TEST_CTL_U", PLL_OFF_TEST_CTL_U},
+		{"PLL_USER_CTL", PLL_OFF_USER_CTL},
+		{"PLL_USER_CTL_U", PLL_OFF_USER_CTL_U},
+		{"PLL_CONFIG_CTL", PLL_OFF_CONFIG_CTL},
+		{"PLL_STATUS", PLL_OFF_STATUS},
 	};
 
 	static struct clk_register_data data1[] = {
@@ -859,13 +863,14 @@ static void clk_alpha_pll_list_registers(struct seq_file *f, struct clk_hw *hw)
 	size = ARRAY_SIZE(data);
 
 	for (i = 0; i < size; i++) {
-		regmap_read(pll->clkr.regmap, pll->offset + data[i].offset,
-					&val);
+		regmap_read(pll->clkr.regmap,
+				pll->offset + pll->regs[data[i].offset], &val);
 		clock_debug_output(f, false,
 				"%20s: 0x%.8x\n", data[i].name, val);
 	}
 
-	regmap_read(pll->clkr.regmap, pll->offset + data[0].offset, &val);
+	regmap_read(pll->clkr.regmap, pll->offset +
+					pll->regs[data[0].offset], &val);
 
 	if (val & PLL_FSM_ENA) {
 		regmap_read(pll->clkr.regmap, pll->clkr.enable_reg +
@@ -1125,18 +1130,18 @@ static void clk_zonda_pll_list_registers(struct seq_file *f, struct clk_hw *hw)
 	int size, i, val;
 
 	static struct clk_register_data pll_regs[] = {
-		{"PLL_MODE", 0x0},
-		{"PLL_L_VAL", 0x4},
-		{"PLL_ALPHA_VAL", 0x8},
-		{"PLL_USER_CTL", 0xC},
-		{"PLL_CONFIG_CTL", 0x10},
-		{"PLL_CONFIG_CTL_U", 0x14},
-		{"PLL_CONFIG_CTL_U1", 0x18},
-		{"PLL_TEST_CTL", 0x1C},
-		{"PLL_TEST_CTL_U", 0x20},
-		{"PLL_TEST_CTL_U1", 0x24},
-		{"PLL_OPMODE", 0x28},
-		{"PLL_STATUS", 0x38},
+		{"PLL_MODE", PLL_OFF_MODE},
+		{"PLL_L_VAL", PLL_OFF_L_VAL},
+		{"PLL_ALPHA_VAL", PLL_OFF_ALPHA_VAL},
+		{"PLL_USER_CTL", PLL_OFF_USER_CTL},
+		{"PLL_CONFIG_CTL", PLL_OFF_CONFIG_CTL},
+		{"PLL_CONFIG_CTL_U", PLL_OFF_CONFIG_CTL_U},
+		{"PLL_CONFIG_CTL_U1", PLL_OFF_CONFIG_CTL_U1},
+		{"PLL_TEST_CTL", PLL_OFF_TEST_CTL},
+		{"PLL_TEST_CTL_U", PLL_OFF_TEST_CTL_U},
+		{"PLL_TEST_CTL_U1", PLL_OFF_TEST_CTL_U1},
+		{"PLL_OPMODE", PLL_OFF_OPMODE},
+		{"PLL_STATUS", PLL_OFF_STATUS},
 	};
 
 	static struct clk_register_data pll_vote_reg = {
@@ -1146,13 +1151,14 @@ static void clk_zonda_pll_list_registers(struct seq_file *f, struct clk_hw *hw)
 	size = ARRAY_SIZE(pll_regs);
 
 	for (i = 0; i < size; i++) {
-		regmap_read(pll->clkr.regmap, pll->offset + pll_regs[i].offset,
-					&val);
+		regmap_read(pll->clkr.regmap, pll->offset +
+					pll->regs[pll_regs[i].offset], &val);
 		clock_debug_output(f, false,
 				"%20s: 0x%.8x\n", pll_regs[i].name, val);
 	}
 
-	regmap_read(pll->clkr.regmap, pll->offset + pll_vote_reg.offset, &val);
+	regmap_read(pll->clkr.regmap, pll->offset +
+					pll->regs[pll_regs[0].offset], &val);
 
 	if (val & PLL_FSM_ENA) {
 		regmap_read(pll->clkr.regmap, pll->clkr.enable_reg +
@@ -1859,7 +1865,8 @@ static int alpha_pll_lucid_set_rate(struct clk_hw *hw, unsigned long rate,
 	udelay(1);
 	regmap_read(pll->clkr.regmap, PLL_MODE(pll), &regval);
 	if (!(regval & ALPHA_PLL_ACK_LATCH)) {
-		WARN(1, "PLL latch failed. Output may be unstable!\n");
+		WARN_CLK(hw->core, clk_hw_get_name(hw), 1,
+				"PLL latch failed. Output may be unstable!\n");
 		return -EINVAL;
 	}
 
@@ -1894,20 +1901,20 @@ static void clk_alpha_pll_lucid_list_registers(struct seq_file *f,
 	int size, i, val;
 
 	static struct clk_register_data data[] = {
-		{"PLL_MODE", 0x0},
-		{"PLL_L_VAL", 0x4},
-		{"PLL_CAL_L_VAL", 0x8},
-		{"PLL_USER_CTL", 0x0c},
-		{"PLL_USER_CTL_U", 0x10},
-		{"PLL_USER_CTL_U1", 0x14},
-		{"PLL_CONFIG_CTL", 0x18},
-		{"PLL_CONFIG_CTL_U", 0x1c},
-		{"PLL_CONFIG_CTL_U1", 0x20},
-		{"PLL_TEST_CTL", 0x24},
-		{"PLL_TEST_CTL_U", 0x28},
-		{"PLL_TEST_CTL_U1", 0x2C},
-		{"PLL_STATUS", 0x30},
-		{"PLL_ALPHA_VAL", 0x40},
+		{"PLL_MODE", PLL_OFF_MODE},
+		{"PLL_L_VAL", PLL_OFF_L_VAL},
+		{"PLL_CAL_L_VAL", PLL_OFF_CAL_L_VAL},
+		{"PLL_USER_CTL", PLL_OFF_USER_CTL},
+		{"PLL_USER_CTL_U", PLL_OFF_USER_CTL_U},
+		{"PLL_USER_CTL_U1", PLL_OFF_USER_CTL_U1},
+		{"PLL_CONFIG_CTL", PLL_OFF_CONFIG_CTL},
+		{"PLL_CONFIG_CTL_U", PLL_OFF_CONFIG_CTL_U},
+		{"PLL_CONFIG_CTL_U1", PLL_OFF_CONFIG_CTL_U1},
+		{"PLL_TEST_CTL", PLL_OFF_TEST_CTL},
+		{"PLL_TEST_CTL_U", PLL_OFF_TEST_CTL_U},
+		{"PLL_TEST_CTL_U1", PLL_OFF_TEST_CTL_U1},
+		{"PLL_STATUS", PLL_OFF_STATUS},
+		{"PLL_ALPHA_VAL", PLL_OFF_ALPHA_VAL},
 	};
 
 	static struct clk_register_data data1[] = {
@@ -1917,13 +1924,14 @@ static void clk_alpha_pll_lucid_list_registers(struct seq_file *f,
 	size = ARRAY_SIZE(data);
 
 	for (i = 0; i < size; i++) {
-		regmap_read(pll->clkr.regmap, pll->offset + data[i].offset,
-					&val);
+		regmap_read(pll->clkr.regmap, pll->offset +
+					pll->regs[data[i].offset], &val);
 		clock_debug_output(f, false,
 				"%20s: 0x%.8x\n", data[i].name, val);
 	}
 
-	regmap_read(pll->clkr.regmap, pll->offset + data[0].offset, &val);
+	regmap_read(pll->clkr.regmap, pll->offset +
+					pll->regs[data[0].offset], &val);
 
 	if (val & PLL_FSM_ENA) {
 		regmap_read(pll->clkr.regmap, pll->clkr.enable_reg +
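A sketch of the per-variant offset lookup the debug dumps switch to above: the symbolic PLL_OFF_* index is translated through the PLL's own regs[] table instead of hard-coded offsets that only fit one register layout. The enum and the differing ALPHA_VAL offsets (0x8 default vs 0x40 lucid) are taken from the hunks above; the table contents and base address are otherwise illustrative.

#include <stdio.h>

enum { PLL_OFF_MODE, PLL_OFF_L_VAL, PLL_OFF_ALPHA_VAL, PLL_OFF_MAX };

static const unsigned int default_regs[PLL_OFF_MAX] = {
	[PLL_OFF_MODE] = 0x0,
	[PLL_OFF_L_VAL] = 0x4,
	[PLL_OFF_ALPHA_VAL] = 0x8,
};

static const unsigned int lucid_regs[PLL_OFF_MAX] = {
	[PLL_OFF_MODE] = 0x0,
	[PLL_OFF_L_VAL] = 0x4,
	[PLL_OFF_ALPHA_VAL] = 0x40,
};

/* what the patched regmap_read() calls now compute per variant */
static unsigned int pll_reg_addr(const unsigned int *regs,
				 unsigned int base, int idx)
{
	return base + regs[idx];
}

int main(void)
{
	printf("default ALPHA_VAL: 0x%x\n",
	       pll_reg_addr(default_regs, 0x1000, PLL_OFF_ALPHA_VAL));
	printf("lucid   ALPHA_VAL: 0x%x\n",
	       pll_reg_addr(lucid_regs, 0x1000, PLL_OFF_ALPHA_VAL));
	return 0;
}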
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 685ecdd..9c5a800 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -19,6 +19,7 @@ enum {
 };
 
 enum {
+	PLL_OFF_MODE,
 	PLL_OFF_L_VAL,
 	PLL_OFF_CAL_L_VAL,
 	PLL_OFF_ALPHA_VAL,
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 850c02a..d1bdfe3 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2016, Linaro Limited
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2019, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -23,10 +23,15 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/soc/qcom/smd-rpm.h>
+#include <soc/qcom/rpm-smd.h>
+#include <linux/clk.h>
 
 #include <dt-bindings/clock/qcom,rpmcc.h>
 #include <dt-bindings/mfd/qcom-rpm.h>
 
+#include "clk-voter.h"
+#include "clk-debug.h"
+
 #define QCOM_RPM_KEY_SOFTWARE_ENABLE			0x6e657773
 #define QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY	0x62636370
 #define QCOM_RPM_SMD_KEY_RATE				0x007a484b
@@ -37,6 +42,8 @@
 #define __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, stat_id,  \
 			     key)					      \
 	static struct clk_smd_rpm _platform##_##_active;		      \
+	static unsigned long _name##_##last_active_set_vote;		      \
+	static unsigned long _name##_##last_sleep_set_vote;		      \
 	static struct clk_smd_rpm _platform##_##_name = {		      \
 		.rpm_res_type = (type),					      \
 		.rpm_clk_id = (r_id),					      \
@@ -44,9 +51,12 @@
 		.rpm_key = (key),					      \
 		.peer = &_platform##_##_active,				      \
 		.rate = INT_MAX,					      \
+		.last_active_set_vote = &_name##_##last_active_set_vote,      \
+		.last_sleep_set_vote = &_name##_##last_sleep_set_vote,	      \
 		.hw.init = &(struct clk_init_data){			      \
 			.ops = &clk_smd_rpm_ops,			      \
 			.name = #_name,					      \
+			.flags = CLK_ENABLE_HAND_OFF,			      \
 			.parent_names = (const char *[]){ "xo_board" },       \
 			.num_parents = 1,				      \
 		},							      \
@@ -59,9 +69,12 @@
 		.rpm_key = (key),					      \
 		.peer = &_platform##_##_name,				      \
 		.rate = INT_MAX,					      \
+		.last_active_set_vote = &_name##_##last_active_set_vote,      \
+		.last_sleep_set_vote = &_name##_##last_sleep_set_vote,	      \
 		.hw.init = &(struct clk_init_data){			      \
 			.ops = &clk_smd_rpm_ops,			      \
 			.name = #_active,				      \
+			.flags = CLK_ENABLE_HAND_OFF,			      \
 			.parent_names = (const char *[]){ "xo_board" },	      \
 			.num_parents = 1,				      \
 		},							      \
@@ -70,6 +83,8 @@
 #define __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, r_id,    \
 				    stat_id, r, key)			      \
 	static struct clk_smd_rpm _platform##_##_active;		      \
+	static unsigned long _name##_##last_active_set_vote;		      \
+	static unsigned long _name##_##last_sleep_set_vote;		      \
 	static struct clk_smd_rpm _platform##_##_name = {		      \
 		.rpm_res_type = (type),					      \
 		.rpm_clk_id = (r_id),					      \
@@ -78,9 +93,12 @@
 		.branch = true,						      \
 		.peer = &_platform##_##_active,				      \
 		.rate = (r),						      \
+		.last_active_set_vote = &_name##_##last_active_set_vote,      \
+		.last_sleep_set_vote = &_name##_##last_sleep_set_vote,	      \
 		.hw.init = &(struct clk_init_data){			      \
 			.ops = &clk_smd_rpm_branch_ops,			      \
 			.name = #_name,					      \
+			.flags = CLK_ENABLE_HAND_OFF,			      \
 			.parent_names = (const char *[]){ "xo_board" },	      \
 			.num_parents = 1,				      \
 		},							      \
@@ -94,9 +112,12 @@
 		.branch = true,						      \
 		.peer = &_platform##_##_name,				      \
 		.rate = (r),						      \
+		.last_active_set_vote = &_name##_##last_active_set_vote,      \
+		.last_sleep_set_vote = &_name##_##last_sleep_set_vote,	      \
 		.hw.init = &(struct clk_init_data){			      \
 			.ops = &clk_smd_rpm_branch_ops,			      \
 			.name = #_active,				      \
+			.flags = CLK_ENABLE_HAND_OFF,			      \
 			.parent_names = (const char *[]){ "xo_board" },	      \
 			.num_parents = 1,				      \
 		},							      \
@@ -137,7 +158,8 @@ struct clk_smd_rpm {
 	struct clk_smd_rpm *peer;
 	struct clk_hw hw;
 	unsigned long rate;
-	struct qcom_smd_rpm *rpm;
+	unsigned long *last_active_set_vote;
+	unsigned long *last_sleep_set_vote;
 };
 
 struct clk_smd_rpm_req {
@@ -148,72 +170,76 @@ struct clk_smd_rpm_req {
 
 struct rpm_cc {
 	struct qcom_rpm *rpm;
-	struct clk_smd_rpm **clks;
-	size_t num_clks;
+	struct clk_onecell_data data;
+	struct clk *clks[];
 };
 
 struct rpm_smd_clk_desc {
-	struct clk_smd_rpm **clks;
+	struct clk_hw **clks;
+	size_t num_rpm_clks;
 	size_t num_clks;
 };
 
 static DEFINE_MUTEX(rpm_smd_clk_lock);
 
-static int clk_smd_rpm_handoff(struct clk_smd_rpm *r)
+static int clk_smd_rpm_prepare(struct clk_hw *hw);
+
+static int clk_smd_rpm_handoff(struct clk_hw *hw)
 {
-	int ret;
-	struct clk_smd_rpm_req req = {
-		.key = cpu_to_le32(r->rpm_key),
-		.nbytes = cpu_to_le32(sizeof(u32)),
-		.value = cpu_to_le32(r->branch ? 1 : INT_MAX),
-	};
-
-	ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
-				 r->rpm_res_type, r->rpm_clk_id, &req,
-				 sizeof(req));
-	if (ret)
-		return ret;
-	ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
-				 r->rpm_res_type, r->rpm_clk_id, &req,
-				 sizeof(req));
-	if (ret)
-		return ret;
-
-	return 0;
+	return clk_smd_rpm_prepare(hw);
 }
 
 static int clk_smd_rpm_set_rate_active(struct clk_smd_rpm *r,
-				       unsigned long rate)
+					uint32_t rate)
 {
-	struct clk_smd_rpm_req req = {
+	int ret = 0;
+	struct msm_rpm_kvp req = {
 		.key = cpu_to_le32(r->rpm_key),
-		.nbytes = cpu_to_le32(sizeof(u32)),
-		.value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
+		.data = (void *)&rate,
+		.length = sizeof(rate),
 	};
 
-	return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
-				  r->rpm_res_type, r->rpm_clk_id, &req,
-				  sizeof(req));
+	if (*r->last_active_set_vote == rate)
+		return ret;
+
+	ret = msm_rpm_send_message(QCOM_SMD_RPM_ACTIVE_STATE, r->rpm_res_type,
+			r->rpm_clk_id, &req, 1);
+	if (ret)
+		return ret;
+
+	*r->last_active_set_vote = rate;
+
+	return ret;
 }
 
 static int clk_smd_rpm_set_rate_sleep(struct clk_smd_rpm *r,
-				      unsigned long rate)
+					uint32_t rate)
 {
-	struct clk_smd_rpm_req req = {
+	int ret = 0;
+	struct msm_rpm_kvp req = {
 		.key = cpu_to_le32(r->rpm_key),
-		.nbytes = cpu_to_le32(sizeof(u32)),
-		.value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
+		.data = (void *)&rate,
+		.length = sizeof(rate),
 	};
 
-	return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
-				  r->rpm_res_type, r->rpm_clk_id, &req,
-				  sizeof(req));
+	if (*r->last_sleep_set_vote == rate)
+		return ret;
+
+	ret = msm_rpm_send_message(QCOM_SMD_RPM_SLEEP_STATE, r->rpm_res_type,
+			r->rpm_clk_id, &req, 1);
+	if (ret)
+		return ret;
+
+	*r->last_sleep_set_vote = rate;
+
+	return ret;
 }
 
 static void to_active_sleep(struct clk_smd_rpm *r, unsigned long rate,
 			    unsigned long *active, unsigned long *sleep)
 {
-	*active = rate;
+	/* Convert the rate (Hz) to kHz */
+	*active = DIV_ROUND_UP(rate, 1000);
 
 	/*
 	 * Active-only clocks don't care what the rate is during sleep. So,
@@ -231,17 +257,17 @@ static int clk_smd_rpm_prepare(struct clk_hw *hw)
 	struct clk_smd_rpm *peer = r->peer;
 	unsigned long this_rate = 0, this_sleep_rate = 0;
 	unsigned long peer_rate = 0, peer_sleep_rate = 0;
-	unsigned long active_rate, sleep_rate;
+	uint32_t active_rate, sleep_rate;
 	int ret = 0;
 
 	mutex_lock(&rpm_smd_clk_lock);
 
-	/* Don't send requests to the RPM if the rate has not been set. */
-	if (!r->rate)
-		goto out;
-
 	to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);
 
+	/* Don't send requests to the RPM if the rate has not been set. */
+	if (this_rate == 0)
+		goto out;
+
 	/* Take peer clock's rate into account only if it's enabled. */
 	if (peer->enabled)
 		to_active_sleep(peer, peer->rate,
@@ -279,13 +305,13 @@ static void clk_smd_rpm_unprepare(struct clk_hw *hw)
 	struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
 	struct clk_smd_rpm *peer = r->peer;
 	unsigned long peer_rate = 0, peer_sleep_rate = 0;
-	unsigned long active_rate, sleep_rate;
+	uint32_t active_rate, sleep_rate;
 	int ret;
 
 	mutex_lock(&rpm_smd_clk_lock);
 
 	if (!r->rate)
-		goto out;
+		goto enable;
 
 	/* Take peer clock's rate into account only if it's enabled. */
 	if (peer->enabled)
@@ -302,6 +328,7 @@ static void clk_smd_rpm_unprepare(struct clk_hw *hw)
 	if (ret)
 		goto out;
 
+enable:
 	r->enabled = false;
 
 out:
@@ -313,7 +340,7 @@ static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
 {
 	struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
 	struct clk_smd_rpm *peer = r->peer;
-	unsigned long active_rate, sleep_rate;
+	uint32_t active_rate, sleep_rate;
 	unsigned long this_rate = 0, this_sleep_rate = 0;
 	unsigned long peer_rate = 0, peer_sleep_rate = 0;
 	int ret = 0;
@@ -372,33 +399,62 @@ static unsigned long clk_smd_rpm_recalc_rate(struct clk_hw *hw,
 	return r->rate;
 }
 
-static int clk_smd_rpm_enable_scaling(struct qcom_smd_rpm *rpm)
+static int clk_smd_rpm_enable_scaling(void)
 {
-	int ret;
-	struct clk_smd_rpm_req req = {
+	int ret = 0;
+	uint32_t value = cpu_to_le32(1);
+	struct msm_rpm_kvp req = {
 		.key = cpu_to_le32(QCOM_RPM_SMD_KEY_ENABLE),
-		.nbytes = cpu_to_le32(sizeof(u32)),
-		.value = cpu_to_le32(1),
+		.data = (void *)&value,
+		.length = sizeof(value),
 	};
 
-	ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_SLEEP_STATE,
-				 QCOM_SMD_RPM_MISC_CLK,
-				 QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
+	ret = msm_rpm_send_message(QCOM_SMD_RPM_SLEEP_STATE,
+			QCOM_SMD_RPM_MISC_CLK,
+			QCOM_RPM_SCALING_ENABLE_ID, &req, 1);
 	if (ret) {
 		pr_err("RPM clock scaling (sleep set) not enabled!\n");
 		return ret;
 	}
 
-	ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
-				 QCOM_SMD_RPM_MISC_CLK,
-				 QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
+	ret = msm_rpm_send_message(QCOM_SMD_RPM_ACTIVE_STATE,
+			QCOM_SMD_RPM_MISC_CLK,
+			QCOM_RPM_SCALING_ENABLE_ID, &req, 1);
 	if (ret) {
 		pr_err("RPM clock scaling (active set) not enabled!\n");
 		return ret;
 	}
 
 	pr_debug("%s: RPM clock scaling is enabled\n", __func__);
-	return 0;
+	return ret;
+}
+
+static int clk_vote_bimc(struct clk_hw *hw, uint32_t rate)
+{
+	int ret = 0;
+	struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+	struct msm_rpm_kvp req = {
+		.key = r->rpm_key,
+		.data = (void *)&rate,
+		.length = sizeof(rate),
+	};
+
+	ret = msm_rpm_send_message(QCOM_SMD_RPM_ACTIVE_STATE,
+		r->rpm_res_type, r->rpm_clk_id, &req, 1);
+	if (ret < 0) {
+		if (ret != -EPROBE_DEFER)
+			WARN(1, "BIMC vote not sent!\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static int clk_smd_rpm_is_enabled(struct clk_hw *hw)
+{
+	struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+
+	return r->enabled;
 }
 
 static const struct clk_ops clk_smd_rpm_ops = {
@@ -407,11 +463,17 @@ static const struct clk_ops clk_smd_rpm_ops = {
 	.set_rate	= clk_smd_rpm_set_rate,
 	.round_rate	= clk_smd_rpm_round_rate,
 	.recalc_rate	= clk_smd_rpm_recalc_rate,
+	.is_enabled	= clk_smd_rpm_is_enabled,
+	.debug_init	= clk_debug_measure_add,
 };
 
 static const struct clk_ops clk_smd_rpm_branch_ops = {
 	.prepare	= clk_smd_rpm_prepare,
 	.unprepare	= clk_smd_rpm_unprepare,
+	.round_rate	= clk_smd_rpm_round_rate,
+	.recalc_rate	= clk_smd_rpm_recalc_rate,
+	.is_enabled	= clk_smd_rpm_is_enabled,
+	.debug_init	= clk_debug_measure_add,
 };
 
 /* msm8916 */
@@ -428,35 +490,36 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk2_pin, bb_clk2_a_pin, 2);
 DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk1_pin, rf_clk1_a_pin, 4);
 DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk2_pin, rf_clk2_a_pin, 5);
 
-static struct clk_smd_rpm *msm8916_clks[] = {
-	[RPM_SMD_PCNOC_CLK]		= &msm8916_pcnoc_clk,
-	[RPM_SMD_PCNOC_A_CLK]		= &msm8916_pcnoc_a_clk,
-	[RPM_SMD_SNOC_CLK]		= &msm8916_snoc_clk,
-	[RPM_SMD_SNOC_A_CLK]		= &msm8916_snoc_a_clk,
-	[RPM_SMD_BIMC_CLK]		= &msm8916_bimc_clk,
-	[RPM_SMD_BIMC_A_CLK]		= &msm8916_bimc_a_clk,
-	[RPM_SMD_QDSS_CLK]		= &msm8916_qdss_clk,
-	[RPM_SMD_QDSS_A_CLK]		= &msm8916_qdss_a_clk,
-	[RPM_SMD_BB_CLK1]		= &msm8916_bb_clk1,
-	[RPM_SMD_BB_CLK1_A]		= &msm8916_bb_clk1_a,
-	[RPM_SMD_BB_CLK2]		= &msm8916_bb_clk2,
-	[RPM_SMD_BB_CLK2_A]		= &msm8916_bb_clk2_a,
-	[RPM_SMD_RF_CLK1]		= &msm8916_rf_clk1,
-	[RPM_SMD_RF_CLK1_A]		= &msm8916_rf_clk1_a,
-	[RPM_SMD_RF_CLK2]		= &msm8916_rf_clk2,
-	[RPM_SMD_RF_CLK2_A]		= &msm8916_rf_clk2_a,
-	[RPM_SMD_BB_CLK1_PIN]		= &msm8916_bb_clk1_pin,
-	[RPM_SMD_BB_CLK1_A_PIN]		= &msm8916_bb_clk1_a_pin,
-	[RPM_SMD_BB_CLK2_PIN]		= &msm8916_bb_clk2_pin,
-	[RPM_SMD_BB_CLK2_A_PIN]		= &msm8916_bb_clk2_a_pin,
-	[RPM_SMD_RF_CLK1_PIN]		= &msm8916_rf_clk1_pin,
-	[RPM_SMD_RF_CLK1_A_PIN]		= &msm8916_rf_clk1_a_pin,
-	[RPM_SMD_RF_CLK2_PIN]		= &msm8916_rf_clk2_pin,
-	[RPM_SMD_RF_CLK2_A_PIN]		= &msm8916_rf_clk2_a_pin,
+static struct clk_hw *msm8916_clks[] = {
+	[RPM_SMD_PCNOC_CLK]		= &msm8916_pcnoc_clk.hw,
+	[RPM_SMD_PCNOC_A_CLK]		= &msm8916_pcnoc_a_clk.hw,
+	[RPM_SMD_SNOC_CLK]		= &msm8916_snoc_clk.hw,
+	[RPM_SMD_SNOC_A_CLK]		= &msm8916_snoc_a_clk.hw,
+	[RPM_SMD_BIMC_CLK]		= &msm8916_bimc_clk.hw,
+	[RPM_SMD_BIMC_A_CLK]		= &msm8916_bimc_a_clk.hw,
+	[RPM_SMD_QDSS_CLK]		= &msm8916_qdss_clk.hw,
+	[RPM_SMD_QDSS_A_CLK]		= &msm8916_qdss_a_clk.hw,
+	[RPM_SMD_BB_CLK1]		= &msm8916_bb_clk1.hw,
+	[RPM_SMD_BB_CLK1_A]		= &msm8916_bb_clk1_a.hw,
+	[RPM_SMD_BB_CLK2]		= &msm8916_bb_clk2.hw,
+	[RPM_SMD_BB_CLK2_A]		= &msm8916_bb_clk2_a.hw,
+	[RPM_SMD_RF_CLK1]		= &msm8916_rf_clk1.hw,
+	[RPM_SMD_RF_CLK1_A]		= &msm8916_rf_clk1_a.hw,
+	[RPM_SMD_RF_CLK2]		= &msm8916_rf_clk2.hw,
+	[RPM_SMD_RF_CLK2_A]		= &msm8916_rf_clk2_a.hw,
+	[RPM_SMD_BB_CLK1_PIN]		= &msm8916_bb_clk1_pin.hw,
+	[RPM_SMD_BB_CLK1_A_PIN]		= &msm8916_bb_clk1_a_pin.hw,
+	[RPM_SMD_BB_CLK2_PIN]		= &msm8916_bb_clk2_pin.hw,
+	[RPM_SMD_BB_CLK2_A_PIN]		= &msm8916_bb_clk2_a_pin.hw,
+	[RPM_SMD_RF_CLK1_PIN]		= &msm8916_rf_clk1_pin.hw,
+	[RPM_SMD_RF_CLK1_A_PIN]		= &msm8916_rf_clk1_a_pin.hw,
+	[RPM_SMD_RF_CLK2_PIN]		= &msm8916_rf_clk2_pin.hw,
+	[RPM_SMD_RF_CLK2_A_PIN]		= &msm8916_rf_clk2_a_pin.hw,
 };
 
 static const struct rpm_smd_clk_desc rpm_clk_msm8916 = {
 	.clks = msm8916_clks,
+	.num_rpm_clks = RPM_SMD_RF_CLK2_A_PIN,
 	.num_clks = ARRAY_SIZE(msm8916_clks),
 };
 
@@ -483,51 +546,52 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a0_pin, cxo_a0_a_pin, 4);
 DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a1_pin, cxo_a1_a_pin, 5);
 DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a2_pin, cxo_a2_a_pin, 6);
 
-static struct clk_smd_rpm *msm8974_clks[] = {
-	[RPM_SMD_PNOC_CLK]		= &msm8974_pnoc_clk,
-	[RPM_SMD_PNOC_A_CLK]		= &msm8974_pnoc_a_clk,
-	[RPM_SMD_SNOC_CLK]		= &msm8974_snoc_clk,
-	[RPM_SMD_SNOC_A_CLK]		= &msm8974_snoc_a_clk,
-	[RPM_SMD_CNOC_CLK]		= &msm8974_cnoc_clk,
-	[RPM_SMD_CNOC_A_CLK]		= &msm8974_cnoc_a_clk,
-	[RPM_SMD_MMSSNOC_AHB_CLK]	= &msm8974_mmssnoc_ahb_clk,
-	[RPM_SMD_MMSSNOC_AHB_A_CLK]	= &msm8974_mmssnoc_ahb_a_clk,
-	[RPM_SMD_BIMC_CLK]		= &msm8974_bimc_clk,
-	[RPM_SMD_BIMC_A_CLK]		= &msm8974_bimc_a_clk,
-	[RPM_SMD_OCMEMGX_CLK]		= &msm8974_ocmemgx_clk,
-	[RPM_SMD_OCMEMGX_A_CLK]		= &msm8974_ocmemgx_a_clk,
-	[RPM_SMD_QDSS_CLK]		= &msm8974_qdss_clk,
-	[RPM_SMD_QDSS_A_CLK]		= &msm8974_qdss_a_clk,
-	[RPM_SMD_CXO_D0]		= &msm8974_cxo_d0,
-	[RPM_SMD_CXO_D0_A]		= &msm8974_cxo_d0_a,
-	[RPM_SMD_CXO_D1]		= &msm8974_cxo_d1,
-	[RPM_SMD_CXO_D1_A]		= &msm8974_cxo_d1_a,
-	[RPM_SMD_CXO_A0]		= &msm8974_cxo_a0,
-	[RPM_SMD_CXO_A0_A]		= &msm8974_cxo_a0_a,
-	[RPM_SMD_CXO_A1]		= &msm8974_cxo_a1,
-	[RPM_SMD_CXO_A1_A]		= &msm8974_cxo_a1_a,
-	[RPM_SMD_CXO_A2]		= &msm8974_cxo_a2,
-	[RPM_SMD_CXO_A2_A]		= &msm8974_cxo_a2_a,
-	[RPM_SMD_DIFF_CLK]		= &msm8974_diff_clk,
-	[RPM_SMD_DIFF_A_CLK]		= &msm8974_diff_a_clk,
-	[RPM_SMD_DIV_CLK1]		= &msm8974_div_clk1,
-	[RPM_SMD_DIV_A_CLK1]		= &msm8974_div_a_clk1,
-	[RPM_SMD_DIV_CLK2]		= &msm8974_div_clk2,
-	[RPM_SMD_DIV_A_CLK2]		= &msm8974_div_a_clk2,
-	[RPM_SMD_CXO_D0_PIN]		= &msm8974_cxo_d0_pin,
-	[RPM_SMD_CXO_D0_A_PIN]		= &msm8974_cxo_d0_a_pin,
-	[RPM_SMD_CXO_D1_PIN]		= &msm8974_cxo_d1_pin,
-	[RPM_SMD_CXO_D1_A_PIN]		= &msm8974_cxo_d1_a_pin,
-	[RPM_SMD_CXO_A0_PIN]		= &msm8974_cxo_a0_pin,
-	[RPM_SMD_CXO_A0_A_PIN]		= &msm8974_cxo_a0_a_pin,
-	[RPM_SMD_CXO_A1_PIN]		= &msm8974_cxo_a1_pin,
-	[RPM_SMD_CXO_A1_A_PIN]		= &msm8974_cxo_a1_a_pin,
-	[RPM_SMD_CXO_A2_PIN]		= &msm8974_cxo_a2_pin,
-	[RPM_SMD_CXO_A2_A_PIN]		= &msm8974_cxo_a2_a_pin,
+static struct clk_hw *msm8974_clks[] = {
+	[RPM_SMD_PNOC_CLK]		= &msm8974_pnoc_clk.hw,
+	[RPM_SMD_PNOC_A_CLK]		= &msm8974_pnoc_a_clk.hw,
+	[RPM_SMD_SNOC_CLK]		= &msm8974_snoc_clk.hw,
+	[RPM_SMD_SNOC_A_CLK]		= &msm8974_snoc_a_clk.hw,
+	[RPM_SMD_CNOC_CLK]		= &msm8974_cnoc_clk.hw,
+	[RPM_SMD_CNOC_A_CLK]		= &msm8974_cnoc_a_clk.hw,
+	[RPM_SMD_MMSSNOC_AHB_CLK]	= &msm8974_mmssnoc_ahb_clk.hw,
+	[RPM_SMD_MMSSNOC_AHB_A_CLK]	= &msm8974_mmssnoc_ahb_a_clk.hw,
+	[RPM_SMD_BIMC_CLK]		= &msm8974_bimc_clk.hw,
+	[RPM_SMD_BIMC_A_CLK]		= &msm8974_bimc_a_clk.hw,
+	[RPM_SMD_OCMEMGX_CLK]		= &msm8974_ocmemgx_clk.hw,
+	[RPM_SMD_OCMEMGX_A_CLK]		= &msm8974_ocmemgx_a_clk.hw,
+	[RPM_SMD_QDSS_CLK]		= &msm8974_qdss_clk.hw,
+	[RPM_SMD_QDSS_A_CLK]		= &msm8974_qdss_a_clk.hw,
+	[RPM_SMD_CXO_D0]		= &msm8974_cxo_d0.hw,
+	[RPM_SMD_CXO_D0_A]		= &msm8974_cxo_d0_a.hw,
+	[RPM_SMD_CXO_D1]		= &msm8974_cxo_d1.hw,
+	[RPM_SMD_CXO_D1_A]		= &msm8974_cxo_d1_a.hw,
+	[RPM_SMD_CXO_A0]		= &msm8974_cxo_a0.hw,
+	[RPM_SMD_CXO_A0_A]		= &msm8974_cxo_a0_a.hw,
+	[RPM_SMD_CXO_A1]		= &msm8974_cxo_a1.hw,
+	[RPM_SMD_CXO_A1_A]		= &msm8974_cxo_a1_a.hw,
+	[RPM_SMD_CXO_A2]		= &msm8974_cxo_a2.hw,
+	[RPM_SMD_CXO_A2_A]		= &msm8974_cxo_a2_a.hw,
+	[RPM_SMD_DIFF_CLK]		= &msm8974_diff_clk.hw,
+	[RPM_SMD_DIFF_A_CLK]		= &msm8974_diff_a_clk.hw,
+	[RPM_SMD_DIV_CLK1]		= &msm8974_div_clk1.hw,
+	[RPM_SMD_DIV_A_CLK1]		= &msm8974_div_a_clk1.hw,
+	[RPM_SMD_DIV_CLK2]		= &msm8974_div_clk2.hw,
+	[RPM_SMD_DIV_A_CLK2]		= &msm8974_div_a_clk2.hw,
+	[RPM_SMD_CXO_D0_PIN]		= &msm8974_cxo_d0_pin.hw,
+	[RPM_SMD_CXO_D0_A_PIN]		= &msm8974_cxo_d0_a_pin.hw,
+	[RPM_SMD_CXO_D1_PIN]		= &msm8974_cxo_d1_pin.hw,
+	[RPM_SMD_CXO_D1_A_PIN]		= &msm8974_cxo_d1_a_pin.hw,
+	[RPM_SMD_CXO_A0_PIN]		= &msm8974_cxo_a0_pin.hw,
+	[RPM_SMD_CXO_A0_A_PIN]		= &msm8974_cxo_a0_a_pin.hw,
+	[RPM_SMD_CXO_A1_PIN]		= &msm8974_cxo_a1_pin.hw,
+	[RPM_SMD_CXO_A1_A_PIN]		= &msm8974_cxo_a1_a_pin.hw,
+	[RPM_SMD_CXO_A2_PIN]		= &msm8974_cxo_a2_pin.hw,
+	[RPM_SMD_CXO_A2_A_PIN]		= &msm8974_cxo_a2_a_pin.hw,
 };
 
 static const struct rpm_smd_clk_desc rpm_clk_msm8974 = {
 	.clks = msm8974_clks,
+	.num_rpm_clks = RPM_SMD_CXO_A2_A_PIN,
 	.num_clks = ARRAY_SIZE(msm8974_clks),
 };
 
@@ -559,150 +623,369 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, bb_clk2_pin, bb_clk2_a_pin, 2);
 DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, rf_clk1_pin, rf_clk1_a_pin, 4);
 DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, rf_clk2_pin, rf_clk2_a_pin, 5);
 
-static struct clk_smd_rpm *msm8996_clks[] = {
-	[RPM_SMD_PCNOC_CLK] = &msm8996_pcnoc_clk,
-	[RPM_SMD_PCNOC_A_CLK] = &msm8996_pcnoc_a_clk,
-	[RPM_SMD_SNOC_CLK] = &msm8996_snoc_clk,
-	[RPM_SMD_SNOC_A_CLK] = &msm8996_snoc_a_clk,
-	[RPM_SMD_CNOC_CLK] = &msm8996_cnoc_clk,
-	[RPM_SMD_CNOC_A_CLK] = &msm8996_cnoc_a_clk,
-	[RPM_SMD_BIMC_CLK] = &msm8996_bimc_clk,
-	[RPM_SMD_BIMC_A_CLK] = &msm8996_bimc_a_clk,
-	[RPM_SMD_MMAXI_CLK] = &msm8996_mmssnoc_axi_rpm_clk,
-	[RPM_SMD_MMAXI_A_CLK] = &msm8996_mmssnoc_axi_rpm_a_clk,
-	[RPM_SMD_IPA_CLK] = &msm8996_ipa_clk,
-	[RPM_SMD_IPA_A_CLK] = &msm8996_ipa_a_clk,
-	[RPM_SMD_CE1_CLK] = &msm8996_ce1_clk,
-	[RPM_SMD_CE1_A_CLK] = &msm8996_ce1_a_clk,
-	[RPM_SMD_AGGR1_NOC_CLK] = &msm8996_aggre1_noc_clk,
-	[RPM_SMD_AGGR1_NOC_A_CLK] = &msm8996_aggre1_noc_a_clk,
-	[RPM_SMD_AGGR2_NOC_CLK] = &msm8996_aggre2_noc_clk,
-	[RPM_SMD_AGGR2_NOC_A_CLK] = &msm8996_aggre2_noc_a_clk,
-	[RPM_SMD_QDSS_CLK] = &msm8996_qdss_clk,
-	[RPM_SMD_QDSS_A_CLK] = &msm8996_qdss_a_clk,
-	[RPM_SMD_BB_CLK1] = &msm8996_bb_clk1,
-	[RPM_SMD_BB_CLK1_A] = &msm8996_bb_clk1_a,
-	[RPM_SMD_BB_CLK2] = &msm8996_bb_clk2,
-	[RPM_SMD_BB_CLK2_A] = &msm8996_bb_clk2_a,
-	[RPM_SMD_RF_CLK1] = &msm8996_rf_clk1,
-	[RPM_SMD_RF_CLK1_A] = &msm8996_rf_clk1_a,
-	[RPM_SMD_RF_CLK2] = &msm8996_rf_clk2,
-	[RPM_SMD_RF_CLK2_A] = &msm8996_rf_clk2_a,
-	[RPM_SMD_LN_BB_CLK] = &msm8996_ln_bb_clk,
-	[RPM_SMD_LN_BB_A_CLK] = &msm8996_ln_bb_a_clk,
-	[RPM_SMD_DIV_CLK1] = &msm8996_div_clk1,
-	[RPM_SMD_DIV_A_CLK1] = &msm8996_div_clk1_a,
-	[RPM_SMD_DIV_CLK2] = &msm8996_div_clk2,
-	[RPM_SMD_DIV_A_CLK2] = &msm8996_div_clk2_a,
-	[RPM_SMD_DIV_CLK3] = &msm8996_div_clk3,
-	[RPM_SMD_DIV_A_CLK3] = &msm8996_div_clk3_a,
-	[RPM_SMD_BB_CLK1_PIN] = &msm8996_bb_clk1_pin,
-	[RPM_SMD_BB_CLK1_A_PIN] = &msm8996_bb_clk1_a_pin,
-	[RPM_SMD_BB_CLK2_PIN] = &msm8996_bb_clk2_pin,
-	[RPM_SMD_BB_CLK2_A_PIN] = &msm8996_bb_clk2_a_pin,
-	[RPM_SMD_RF_CLK1_PIN] = &msm8996_rf_clk1_pin,
-	[RPM_SMD_RF_CLK1_A_PIN] = &msm8996_rf_clk1_a_pin,
-	[RPM_SMD_RF_CLK2_PIN] = &msm8996_rf_clk2_pin,
-	[RPM_SMD_RF_CLK2_A_PIN] = &msm8996_rf_clk2_a_pin,
+static struct clk_hw *msm8996_clks[] = {
+	[RPM_SMD_PCNOC_CLK] = &msm8996_pcnoc_clk.hw,
+	[RPM_SMD_PCNOC_A_CLK] = &msm8996_pcnoc_a_clk.hw,
+	[RPM_SMD_SNOC_CLK] = &msm8996_snoc_clk.hw,
+	[RPM_SMD_SNOC_A_CLK] = &msm8996_snoc_a_clk.hw,
+	[RPM_SMD_CNOC_CLK] = &msm8996_cnoc_clk.hw,
+	[RPM_SMD_CNOC_A_CLK] = &msm8996_cnoc_a_clk.hw,
+	[RPM_SMD_BIMC_CLK] = &msm8996_bimc_clk.hw,
+	[RPM_SMD_BIMC_A_CLK] = &msm8996_bimc_a_clk.hw,
+	[RPM_SMD_MMAXI_CLK] = &msm8996_mmssnoc_axi_rpm_clk.hw,
+	[RPM_SMD_MMAXI_A_CLK] = &msm8996_mmssnoc_axi_rpm_a_clk.hw,
+	[RPM_SMD_IPA_CLK] = &msm8996_ipa_clk.hw,
+	[RPM_SMD_IPA_A_CLK] = &msm8996_ipa_a_clk.hw,
+	[RPM_SMD_CE1_CLK] = &msm8996_ce1_clk.hw,
+	[RPM_SMD_CE1_A_CLK] = &msm8996_ce1_a_clk.hw,
+	[RPM_SMD_AGGR1_NOC_CLK] = &msm8996_aggre1_noc_clk.hw,
+	[RPM_SMD_AGGR1_NOC_A_CLK] = &msm8996_aggre1_noc_a_clk.hw,
+	[RPM_SMD_AGGR2_NOC_CLK] = &msm8996_aggre2_noc_clk.hw,
+	[RPM_SMD_AGGR2_NOC_A_CLK] = &msm8996_aggre2_noc_a_clk.hw,
+	[RPM_SMD_QDSS_CLK] = &msm8996_qdss_clk.hw,
+	[RPM_SMD_QDSS_A_CLK] = &msm8996_qdss_a_clk.hw,
+	[RPM_SMD_BB_CLK1] = &msm8996_bb_clk1.hw,
+	[RPM_SMD_BB_CLK1_A] = &msm8996_bb_clk1_a.hw,
+	[RPM_SMD_BB_CLK2] = &msm8996_bb_clk2.hw,
+	[RPM_SMD_BB_CLK2_A] = &msm8996_bb_clk2_a.hw,
+	[RPM_SMD_RF_CLK1] = &msm8996_rf_clk1.hw,
+	[RPM_SMD_RF_CLK1_A] = &msm8996_rf_clk1_a.hw,
+	[RPM_SMD_RF_CLK2] = &msm8996_rf_clk2.hw,
+	[RPM_SMD_RF_CLK2_A] = &msm8996_rf_clk2_a.hw,
+	[RPM_SMD_LN_BB_CLK] = &msm8996_ln_bb_clk.hw,
+	[RPM_SMD_LN_BB_CLK_A] = &msm8996_ln_bb_a_clk.hw,
+	[RPM_SMD_DIV_CLK1] = &msm8996_div_clk1.hw,
+	[RPM_SMD_DIV_A_CLK1] = &msm8996_div_clk1_a.hw,
+	[RPM_SMD_DIV_CLK2] = &msm8996_div_clk2.hw,
+	[RPM_SMD_DIV_A_CLK2] = &msm8996_div_clk2_a.hw,
+	[RPM_SMD_DIV_CLK3] = &msm8996_div_clk3.hw,
+	[RPM_SMD_DIV_A_CLK3] = &msm8996_div_clk3_a.hw,
+	[RPM_SMD_BB_CLK1_PIN] = &msm8996_bb_clk1_pin.hw,
+	[RPM_SMD_BB_CLK1_A_PIN] = &msm8996_bb_clk1_a_pin.hw,
+	[RPM_SMD_BB_CLK2_PIN] = &msm8996_bb_clk2_pin.hw,
+	[RPM_SMD_BB_CLK2_A_PIN] = &msm8996_bb_clk2_a_pin.hw,
+	[RPM_SMD_RF_CLK1_PIN] = &msm8996_rf_clk1_pin.hw,
+	[RPM_SMD_RF_CLK1_A_PIN] = &msm8996_rf_clk1_a_pin.hw,
+	[RPM_SMD_RF_CLK2_PIN] = &msm8996_rf_clk2_pin.hw,
+	[RPM_SMD_RF_CLK2_A_PIN] = &msm8996_rf_clk2_a_pin.hw,
 };
 
 static const struct rpm_smd_clk_desc rpm_clk_msm8996 = {
 	.clks = msm8996_clks,
+	.num_rpm_clks = RPM_SMD_RF_CLK2_A_PIN,
 	.num_clks = ARRAY_SIZE(msm8996_clks),
 };
 
+/* bengal */
+DEFINE_CLK_SMD_RPM_BRANCH(bengal, bi_tcxo, bi_tcxo_ao,
+					QCOM_SMD_RPM_MISC_CLK, 0, 19200000);
+DEFINE_CLK_SMD_RPM(bengal, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
+DEFINE_CLK_SMD_RPM(bengal, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
+DEFINE_CLK_SMD_RPM(bengal, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
+DEFINE_CLK_SMD_RPM_BRANCH(bengal, qdss_clk, qdss_a_clk,
+					QCOM_SMD_RPM_MISC_CLK, 1, 19200000);
+DEFINE_CLK_SMD_RPM(bengal, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
+DEFINE_CLK_SMD_RPM(bengal, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0);
+DEFINE_CLK_SMD_RPM(bengal, qup_clk, qup_a_clk, QCOM_SMD_RPM_QUP_CLK, 0);
+DEFINE_CLK_SMD_RPM(bengal, mmnrt_clk, mmnrt_a_clk, QCOM_SMD_RPM_MMXI_CLK, 0);
+DEFINE_CLK_SMD_RPM(bengal, mmrt_clk, mmrt_a_clk, QCOM_SMD_RPM_MMXI_CLK, 1);
+DEFINE_CLK_SMD_RPM(bengal, snoc_periph_clk, snoc_periph_a_clk,
+						QCOM_SMD_RPM_BUS_CLK, 0);
+DEFINE_CLK_SMD_RPM(bengal, snoc_lpass_clk, snoc_lpass_a_clk,
+						QCOM_SMD_RPM_BUS_CLK, 5);
+
+/* SMD_XO_BUFFER */
+DEFINE_CLK_SMD_RPM_XO_BUFFER(bengal, rf_clk1, rf_clk1_a, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(bengal, rf_clk2, rf_clk2_a, 5);
+
+/* Voter clocks */
+static DEFINE_CLK_VOTER(snoc_msmbus_clk, snoc_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(bimc_msmbus_clk, bimc_clk, LONG_MAX);
+
+static DEFINE_CLK_VOTER(snoc_msmbus_a_clk, snoc_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(bimc_msmbus_a_clk, bimc_a_clk, LONG_MAX);
+
+static DEFINE_CLK_VOTER(mcd_ce1_clk, ce1_clk, 85710000);
+static DEFINE_CLK_VOTER(qcedev_ce1_clk, ce1_clk, 85710000);
+static DEFINE_CLK_VOTER(qcrypto_ce1_clk, ce1_clk, 85710000);
+static DEFINE_CLK_VOTER(qseecom_ce1_clk, ce1_clk, 85710000);
+static DEFINE_CLK_VOTER(scm_ce1_clk, ce1_clk, 85710000);
+
+static DEFINE_CLK_VOTER(cnoc_msmbus_clk, cnoc_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(cnoc_msmbus_a_clk, cnoc_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(cnoc_keepalive_a_clk, cnoc_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(snoc_keepalive_a_clk, snoc_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(vfe_mmrt_msmbus_clk, mmrt_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(vfe_mmrt_msmbus_a_clk, mmrt_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(mdp_mmrt_msmbus_clk, mmrt_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(mdp_mmrt_msmbus_a_clk, mmrt_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(cpp_mmnrt_msmbus_clk, mmnrt_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(cpp_mmnrt_msmbus_a_clk, mmnrt_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(jpeg_mmnrt_msmbus_clk, mmnrt_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(jpeg_mmnrt_msmbus_a_clk, mmnrt_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(venus_mmnrt_msmbus_clk, mmnrt_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(venus_mmnrt_msmbus_a_clk, mmnrt_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(arm9_mmnrt_msmbus_clk, mmnrt_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(arm9_mmnrt_msmbus_a_clk, mmnrt_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(qup0_msmbus_snoc_periph_clk, snoc_periph_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(qup0_msmbus_snoc_periph_a_clk, snoc_periph_a_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(qup1_msmbus_snoc_periph_clk, snoc_periph_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(qup1_msmbus_snoc_periph_a_clk, snoc_periph_a_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(dap_msmbus_snoc_periph_clk, snoc_periph_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(dap_msmbus_snoc_periph_a_clk, snoc_periph_a_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(sdc1_msmbus_snoc_periph_clk, snoc_periph_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(sdc1_msmbus_snoc_periph_a_clk, snoc_periph_a_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(sdc2_msmbus_snoc_periph_clk, snoc_periph_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(sdc2_msmbus_snoc_periph_a_clk, snoc_periph_a_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(crypto_msmbus_snoc_periph_clk, snoc_periph_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(crypto_msmbus_snoc_periph_a_clk, snoc_periph_a_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(sdc1_slv_msmbus_snoc_periph_clk, snoc_periph_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(sdc1_slv_msmbus_snoc_periph_a_clk, snoc_periph_a_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(sdc2_slv_msmbus_snoc_periph_clk, snoc_periph_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(sdc2_slv_msmbus_snoc_periph_a_clk, snoc_periph_a_clk,
+								LONG_MAX);
+
+/* Branch Voter clocks */
+static DEFINE_CLK_BRANCH_VOTER(bi_tcxo_otg_clk, bi_tcxo);
+static DEFINE_CLK_BRANCH_VOTER(bi_tcxo_pil_pronto_clk, bi_tcxo);
+static DEFINE_CLK_BRANCH_VOTER(bi_tcxo_pil_mss_clk, bi_tcxo);
+static DEFINE_CLK_BRANCH_VOTER(bi_tcxo_wlan_clk, bi_tcxo);
+static DEFINE_CLK_BRANCH_VOTER(bi_tcxo_pil_lpass_clk, bi_tcxo);
+static DEFINE_CLK_BRANCH_VOTER(bi_tcxo_pil_cdsp_clk, bi_tcxo);
+
+static struct clk_hw *bengal_clks[] = {
+	[RPM_SMD_XO_CLK_SRC] = &bengal_bi_tcxo.hw,
+	[RPM_SMD_XO_A_CLK_SRC] = &bengal_bi_tcxo_ao.hw,
+	[RPM_SMD_SNOC_CLK] = &bengal_snoc_clk.hw,
+	[RPM_SMD_SNOC_A_CLK] = &bengal_snoc_a_clk.hw,
+	[RPM_SMD_BIMC_CLK] = &bengal_bimc_clk.hw,
+	[RPM_SMD_BIMC_A_CLK] = &bengal_bimc_a_clk.hw,
+	[RPM_SMD_QDSS_CLK] = &bengal_qdss_clk.hw,
+	[RPM_SMD_QDSS_A_CLK] = &bengal_qdss_a_clk.hw,
+	[RPM_SMD_RF_CLK1] = &bengal_rf_clk1.hw,
+	[RPM_SMD_RF_CLK1_A] = &bengal_rf_clk1_a.hw,
+	[RPM_SMD_RF_CLK2] = &bengal_rf_clk2.hw,
+	[RPM_SMD_RF_CLK2_A] = &bengal_rf_clk2_a.hw,
+	[RPM_SMD_CNOC_CLK] = &bengal_cnoc_clk.hw,
+	[RPM_SMD_CNOC_A_CLK] = &bengal_cnoc_a_clk.hw,
+	[RPM_SMD_IPA_CLK] = &bengal_ipa_clk.hw,
+	[RPM_SMD_IPA_A_CLK] = &bengal_ipa_a_clk.hw,
+	[RPM_SMD_QUP_CLK] = &bengal_qup_clk.hw,
+	[RPM_SMD_QUP_A_CLK] = &bengal_qup_a_clk.hw,
+	[RPM_SMD_MMRT_CLK] = &bengal_mmrt_clk.hw,
+	[RPM_SMD_MMRT_A_CLK] = &bengal_mmrt_a_clk.hw,
+	[RPM_SMD_MMNRT_CLK] = &bengal_mmnrt_clk.hw,
+	[RPM_SMD_MMNRT_A_CLK] = &bengal_mmnrt_a_clk.hw,
+	[RPM_SMD_SNOC_PERIPH_CLK] = &bengal_snoc_periph_clk.hw,
+	[RPM_SMD_SNOC_PERIPH_A_CLK] = &bengal_snoc_periph_a_clk.hw,
+	[RPM_SMD_SNOC_LPASS_CLK] = &bengal_snoc_lpass_clk.hw,
+	[RPM_SMD_SNOC_LPASS_A_CLK] = &bengal_snoc_lpass_a_clk.hw,
+	[RPM_SMD_CE1_CLK] = &bengal_ce1_clk.hw,
+	[RPM_SMD_CE1_A_CLK] = &bengal_ce1_a_clk.hw,
+	[CNOC_MSMBUS_CLK] = &cnoc_msmbus_clk.hw,
+	[CNOC_MSMBUS_A_CLK] = &cnoc_msmbus_a_clk.hw,
+	[SNOC_KEEPALIVE_A_CLK] = &snoc_keepalive_a_clk.hw,
+	[CNOC_KEEPALIVE_A_CLK] = &cnoc_keepalive_a_clk.hw,
+	[SNOC_MSMBUS_CLK] = &snoc_msmbus_clk.hw,
+	[SNOC_MSMBUS_A_CLK] = &snoc_msmbus_a_clk.hw,
+	[BIMC_MSMBUS_CLK] = &bimc_msmbus_clk.hw,
+	[BIMC_MSMBUS_A_CLK] = &bimc_msmbus_a_clk.hw,
+	[CPP_MMNRT_MSMBUS_CLK] = &cpp_mmnrt_msmbus_clk.hw,
+	[CPP_MMNRT_MSMBUS_A_CLK] = &cpp_mmnrt_msmbus_a_clk.hw,
+	[JPEG_MMNRT_MSMBUS_CLK] = &jpeg_mmnrt_msmbus_clk.hw,
+	[JPEG_MMNRT_MSMBUS_A_CLK] = &jpeg_mmnrt_msmbus_a_clk.hw,
+	[VENUS_MMNRT_MSMBUS_CLK] = &venus_mmnrt_msmbus_clk.hw,
+	[VENUS_MMNRT_MSMBUS_A_CLK] = &venus_mmnrt_msmbus_a_clk.hw,
+	[ARM9_MMNRT_MSMBUS_CLK] = &arm9_mmnrt_msmbus_clk.hw,
+	[ARM9_MMNRT_MSMBUS_A_CLK] = &arm9_mmnrt_msmbus_a_clk.hw,
+	[VFE_MMRT_MSMBUS_CLK] = &vfe_mmrt_msmbus_clk.hw,
+	[VFE_MMRT_MSMBUS_A_CLK] = &vfe_mmrt_msmbus_a_clk.hw,
+	[MDP_MMRT_MSMBUS_CLK] = &mdp_mmrt_msmbus_clk.hw,
+	[MDP_MMRT_MSMBUS_A_CLK] = &mdp_mmrt_msmbus_a_clk.hw,
+	[QUP0_MSMBUS_SNOC_PERIPH_CLK] = &qup0_msmbus_snoc_periph_clk.hw,
+	[QUP0_MSMBUS_SNOC_PERIPH_A_CLK] = &qup0_msmbus_snoc_periph_a_clk.hw,
+	[QUP1_MSMBUS_SNOC_PERIPH_CLK] = &qup1_msmbus_snoc_periph_clk.hw,
+	[QUP1_MSMBUS_SNOC_PERIPH_A_CLK] = &qup1_msmbus_snoc_periph_a_clk.hw,
+	[DAP_MSMBUS_SNOC_PERIPH_CLK] = &dap_msmbus_snoc_periph_clk.hw,
+	[DAP_MSMBUS_SNOC_PERIPH_A_CLK] = &dap_msmbus_snoc_periph_a_clk.hw,
+	[SDC1_MSMBUS_SNOC_PERIPH_CLK] = &sdc1_msmbus_snoc_periph_clk.hw,
+	[SDC1_MSMBUS_SNOC_PERIPH_A_CLK] = &sdc1_msmbus_snoc_periph_a_clk.hw,
+	[SDC2_MSMBUS_SNOC_PERIPH_CLK] = &sdc2_msmbus_snoc_periph_clk.hw,
+	[SDC2_MSMBUS_SNOC_PERIPH_A_CLK] = &sdc2_msmbus_snoc_periph_a_clk.hw,
+	[CRYPTO_MSMBUS_SNOC_PERIPH_CLK] = &crypto_msmbus_snoc_periph_clk.hw,
+	[CRYPTO_MSMBUS_SNOC_PERIPH_A_CLK] =
+				&crypto_msmbus_snoc_periph_a_clk.hw,
+	[SDC1_SLV_MSMBUS_SNOC_PERIPH_CLK] =
+				&sdc1_slv_msmbus_snoc_periph_clk.hw,
+	[SDC1_SLV_MSMBUS_SNOC_PERIPH_A_CLK] =
+				&sdc1_slv_msmbus_snoc_periph_a_clk.hw,
+	[SDC2_SLV_MSMBUS_SNOC_PERIPH_CLK] =
+				&sdc2_slv_msmbus_snoc_periph_clk.hw,
+	[SDC2_SLV_MSMBUS_SNOC_PERIPH_A_CLK] =
+				&sdc2_slv_msmbus_snoc_periph_a_clk.hw,
+	[MCD_CE1_CLK] = &mcd_ce1_clk.hw,
+	[QCEDEV_CE1_CLK] = &qcedev_ce1_clk.hw,
+	[QCRYPTO_CE1_CLK] = &qcrypto_ce1_clk.hw,
+	[QSEECOM_CE1_CLK] = &qseecom_ce1_clk.hw,
+	[SCM_CE1_CLK] = &scm_ce1_clk.hw,
+	[CXO_SMD_OTG_CLK] = &bi_tcxo_otg_clk.hw,
+	[CXO_SMD_PIL_PRONTO_CLK] = &bi_tcxo_pil_pronto_clk.hw,
+	[CXO_SMD_PIL_MSS_CLK] = &bi_tcxo_pil_mss_clk.hw,
+	[CXO_SMD_WLAN_CLK] = &bi_tcxo_wlan_clk.hw,
+	[CXO_SMD_PIL_LPASS_CLK] = &bi_tcxo_pil_lpass_clk.hw,
+	[CXO_SMD_PIL_CDSP_CLK] = &bi_tcxo_pil_cdsp_clk.hw,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_bengal = {
+	.clks = bengal_clks,
+	.num_rpm_clks = RPM_SMD_CE1_A_CLK,
+	.num_clks = ARRAY_SIZE(bengal_clks),
+};
+
 static const struct of_device_id rpm_smd_clk_match_table[] = {
 	{ .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
 	{ .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 },
 	{ .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 },
+	{ .compatible = "qcom,rpmcc-bengal", .data = &rpm_clk_bengal},
 	{ }
 };
 MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table);
 
-static struct clk_hw *qcom_smdrpm_clk_hw_get(struct of_phandle_args *clkspec,
-					     void *data)
-{
-	struct rpm_cc *rcc = data;
-	unsigned int idx = clkspec->args[0];
-
-	if (idx >= rcc->num_clks) {
-		pr_err("%s: invalid index %u\n", __func__, idx);
-		return ERR_PTR(-EINVAL);
-	}
-
-	return rcc->clks[idx] ? &rcc->clks[idx]->hw : ERR_PTR(-ENOENT);
-}
-
 static int rpm_smd_clk_probe(struct platform_device *pdev)
 {
+	struct clk **clks;
+	struct clk *clk;
 	struct rpm_cc *rcc;
-	int ret;
+	struct clk_onecell_data *data;
+	int ret, is_bengal;
 	size_t num_clks, i;
-	struct qcom_smd_rpm *rpm;
-	struct clk_smd_rpm **rpm_smd_clks;
+	struct clk_hw **hw_clks;
 	const struct rpm_smd_clk_desc *desc;
 
-	rpm = dev_get_drvdata(pdev->dev.parent);
-	if (!rpm) {
-		dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
-		return -ENODEV;
+	is_bengal = of_device_is_compatible(pdev->dev.of_node,
+						"qcom,rpmcc-bengal");
+	if (is_bengal) {
+		ret = clk_vote_bimc(&bengal_bimc_clk.hw, INT_MAX);
+		if (ret < 0)
+			return ret;
 	}
 
 	desc = of_device_get_match_data(&pdev->dev);
 	if (!desc)
 		return -EINVAL;
 
-	rpm_smd_clks = desc->clks;
+	hw_clks = desc->clks;
 	num_clks = desc->num_clks;
 
-	rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc), GFP_KERNEL);
+	rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc) + sizeof(*clks) * num_clks,
+			   GFP_KERNEL);
 	if (!rcc)
 		return -ENOMEM;
 
-	rcc->clks = rpm_smd_clks;
-	rcc->num_clks = num_clks;
+	clks = rcc->clks;
+	data = &rcc->data;
+	data->clks = clks;
+	data->clk_num = num_clks;
 
-	for (i = 0; i < num_clks; i++) {
-		if (!rpm_smd_clks[i])
+	for (i = 0; i <= desc->num_rpm_clks; i++) {
+		if (!hw_clks[i]) {
+			clks[i] = ERR_PTR(-ENOENT);
 			continue;
+		}
 
-		rpm_smd_clks[i]->rpm = rpm;
-
-		ret = clk_smd_rpm_handoff(rpm_smd_clks[i]);
+		ret = clk_smd_rpm_handoff(hw_clks[i]);
 		if (ret)
 			goto err;
 	}
 
-	ret = clk_smd_rpm_enable_scaling(rpm);
-	if (ret)
-		goto err;
-
-	for (i = 0; i < num_clks; i++) {
-		if (!rpm_smd_clks[i])
+	for (i = (desc->num_rpm_clks + 1); i < num_clks; i++) {
+		if (!hw_clks[i]) {
+			clks[i] = ERR_PTR(-ENOENT);
 			continue;
+		}
 
-		ret = devm_clk_hw_register(&pdev->dev, &rpm_smd_clks[i]->hw);
+		ret = voter_clk_handoff(hw_clks[i]);
 		if (ret)
 			goto err;
 	}
 
-	ret = devm_of_clk_add_hw_provider(&pdev->dev, qcom_smdrpm_clk_hw_get,
-				     rcc);
+	ret = clk_smd_rpm_enable_scaling();
 	if (ret)
 		goto err;
 
+	for (i = 0; i < num_clks; i++) {
+		if (!hw_clks[i]) {
+			clks[i] = ERR_PTR(-ENOENT);
+			continue;
+		}
+
+		clk = devm_clk_register(&pdev->dev, hw_clks[i]);
+		if (IS_ERR(clk)) {
+			ret = PTR_ERR(clk);
+			goto err;
+		}
+
+		clks[i] = clk;
+	}
+
+	ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
+				  data);
+	if (ret)
+		goto err;
+
+	if (is_bengal) {
+		/*
+		 * Keep an active vote on CXO in case no other driver
+		 * votes for it.
+		 */
+		clk_prepare_enable(bengal_bi_tcxo_ao.hw.clk);
+
+		/* Hold an active set vote for the cnoc_keepalive_a_clk */
+		clk_set_rate(cnoc_keepalive_a_clk.hw.clk, 19200000);
+		clk_prepare_enable(cnoc_keepalive_a_clk.hw.clk);
+
+		/* Hold an active set vote for the snoc_keepalive_a_clk */
+		clk_set_rate(snoc_keepalive_a_clk.hw.clk, 19200000);
+		clk_prepare_enable(snoc_keepalive_a_clk.hw.clk);
+	}
+
+	dev_info(&pdev->dev, "Registered RPM clocks\n");
+
 	return 0;
 err:
 	dev_err(&pdev->dev, "Error registering SMD clock driver (%d)\n", ret);
 	return ret;
 }
 
+static int rpm_smd_clk_remove(struct platform_device *pdev)
+{
+	of_clk_del_provider(pdev->dev.of_node);
+	return 0;
+}
+
 static struct platform_driver rpm_smd_clk_driver = {
 	.driver = {
 		.name = "qcom-clk-smd-rpm",
 		.of_match_table = rpm_smd_clk_match_table,
 	},
 	.probe = rpm_smd_clk_probe,
+	.remove = rpm_smd_clk_remove,
 };
 
 static int __init rpm_smd_clk_init(void)
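A sketch of the duplicate-vote filtering the reworked set-rate paths add: a message goes out only when the requested rate differs from the last rate voted for that set, sparing a round-trip to the RPM. The send routine is a hypothetical stub standing in for msm_rpm_send_message(); per the hunks above, the rate has already been converted to kHz by to_active_sleep().

#include <stdio.h>
#include <stdint.h>

static unsigned long last_active_set_vote;

static int send_active_vote(uint32_t rate_khz)
{
	printf("RPM active-set vote: %u kHz\n", rate_khz);
	return 0;
}

static int set_rate_active(uint32_t rate_khz)
{
	int ret;

	if (last_active_set_vote == rate_khz)
		return 0;	/* identical vote already in place: skip */

	ret = send_active_vote(rate_khz);
	if (ret)
		return ret;

	last_active_set_vote = rate_khz;
	return 0;
}

int main(void)
{
	set_rate_active(19200);		/* sent */
	set_rate_active(19200);		/* filtered out */
	set_rate_active(100000);	/* sent */
	return 0;
}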
diff --git a/drivers/clk/qcom/clk-voter.c b/drivers/clk/qcom/clk-voter.c
index 1ffc5e5..b8f585f 100644
--- a/drivers/clk/qcom/clk-voter.c
+++ b/drivers/clk/qcom/clk-voter.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, 2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/clk.h>
@@ -126,6 +126,16 @@ static unsigned long voter_clk_recalc_rate(struct clk_hw *hw,
 	return v->rate;
 }
 
+int voter_clk_handoff(struct clk_hw *hw)
+{
+	struct clk_voter *v = to_clk_voter(hw);
+
+	v->enabled = true;
+
+	return 0;
+}
+EXPORT_SYMBOL(voter_clk_handoff);
+
 const struct clk_ops clk_ops_voter = {
 	.prepare = voter_clk_prepare,
 	.unprepare = voter_clk_unprepare,
diff --git a/drivers/clk/qcom/clk-voter.h b/drivers/clk/qcom/clk-voter.h
index b9a36f6..57333bb 100644
--- a/drivers/clk/qcom/clk-voter.h
+++ b/drivers/clk/qcom/clk-voter.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, 2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __QCOM_CLK_VOTER_H__
@@ -27,6 +27,7 @@ extern const struct clk_ops clk_ops_voter;
 		.hw.init = &(struct clk_init_data){			   \
 			.ops = &clk_ops_voter,				   \
 			.name = #clk_name,				   \
+			.flags = CLK_ENABLE_HAND_OFF,			   \
 			.parent_names = (const char *[]){ #_parent_name }, \
 			.num_parents = 1,				   \
 		},							   \
@@ -38,4 +39,6 @@ extern const struct clk_ops clk_ops_voter;
 #define DEFINE_CLK_BRANCH_VOTER(clk_name, _parent_name) \
 	 __DEFINE_CLK_VOTER(clk_name, _parent_name, 1000, 1)
 
+int voter_clk_handoff(struct clk_hw *hw);
+
 #endif
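A pared-down sketch of the split hand-off the reworked probe performs: table indices up to num_rpm_clks are RPM-backed clocks handed off through clk_smd_rpm_handoff(), while the remaining entries are local voter clocks that only need voter_clk_handoff() to seed their enabled state. The types, stubs, and clock names below are hypothetical; only the index split mirrors the probe above.

#include <stddef.h>
#include <stdio.h>

struct hw_stub {
	const char *name;
	int enabled;
};

static int rpm_handoff(struct hw_stub *hw)
{
	hw->enabled = 1;	/* stands in for the RPM prepare request */
	printf("rpm handoff:   %s\n", hw->name);
	return 0;
}

static int voter_handoff(struct hw_stub *hw)
{
	hw->enabled = 1;	/* mirrors voter_clk_handoff() above */
	printf("voter handoff: %s\n", hw->name);
	return 0;
}

int main(void)
{
	struct hw_stub xo = { "bi_tcxo" }, snoc = { "snoc_clk" };
	struct hw_stub keepalive = { "snoc_keepalive_a_clk" };
	struct hw_stub *clks[] = { &xo, &snoc, &keepalive };
	size_t num_rpm_clks = 1;	/* last RPM-backed index */
	size_t num_clks = 3, i;

	for (i = 0; i <= num_rpm_clks; i++)
		if (clks[i] && rpm_handoff(clks[i]))
			return 1;
	for (i = num_rpm_clks + 1; i < num_clks; i++)
		if (clks[i] && voter_handoff(clks[i]))
			return 1;
	return 0;
}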
diff --git a/drivers/clk/qcom/debugcc-bengal.c b/drivers/clk/qcom/debugcc-bengal.c
new file mode 100644
index 0000000..c1cb3dc
--- /dev/null
+++ b/drivers/clk/qcom/debugcc-bengal.c
@@ -0,0 +1,535 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#include "clk-debug.h"
+#include "common.h"
+
+static struct measure_clk_data debug_mux_priv = {
+	.ctl_reg = 0x62038,
+	.status_reg = 0x6203C,
+	.xo_div4_cbcr = 0x28008,
+};
+
+static const char *const cpu_cc_debug_mux_parent_names[] = {
+	"perfcl_clk",
+	"pwrcl_clk",
+};
+
+static int cpu_cc_debug_mux_sels[] = {
+	0x1,		/* perfcl_clk */
+	0x0,		/* pwrcl_clk */
+};
+
+static int apss_cc_debug_mux_pre_divs[] = {
+	0x8,		/* perfcl_clk */
+	0x4,		/* pwrcl_clk */
+};
+
+static struct clk_debug_mux cpu_cc_debug_mux = {
+	.priv = &debug_mux_priv,
+	.debug_offset = 0x0,
+	.post_div_offset = 0x0,
+	.cbcr_offset = U32_MAX,
+	.src_sel_mask = 0x3FF00,
+	.src_sel_shift = 8,
+	.post_div_mask = 0xF0000000,
+	.post_div_shift = 28,
+	.post_div_val = 1,
+	.mux_sels = cpu_cc_debug_mux_sels,
+	.pre_div_vals = apss_cc_debug_mux_pre_divs,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu_cc_debug_mux",
+		.ops = &clk_debug_mux_ops,
+		.parent_names = cpu_cc_debug_mux_parent_names,
+		.num_parents = ARRAY_SIZE(cpu_cc_debug_mux_parent_names),
+		.flags = CLK_IS_MEASURE,
+	},
+};
+
+static const char *const disp_cc_debug_mux_parent_names[] = {
+	"disp_cc_mdss_ahb_clk",
+	"disp_cc_mdss_byte0_clk",
+	"disp_cc_mdss_byte0_intf_clk",
+	"disp_cc_mdss_esc0_clk",
+	"disp_cc_mdss_mdp_clk",
+	"disp_cc_mdss_mdp_lut_clk",
+	"disp_cc_mdss_non_gdsc_ahb_clk",
+	"disp_cc_mdss_pclk0_clk",
+	"disp_cc_mdss_rot_clk",
+	"disp_cc_mdss_vsync_clk",
+	"disp_cc_sleep_clk",
+	"disp_cc_xo_clk",
+};
+
+static int disp_cc_debug_mux_sels[] = {
+	0x1A,		/* disp_cc_mdss_ahb_clk */
+	0x12,		/* disp_cc_mdss_byte0_clk */
+	0x13,		/* disp_cc_mdss_byte0_intf_clk */
+	0x14,		/* disp_cc_mdss_esc0_clk */
+	0xE,		/* disp_cc_mdss_mdp_clk */
+	0x10,		/* disp_cc_mdss_mdp_lut_clk */
+	0x1B,		/* disp_cc_mdss_non_gdsc_ahb_clk */
+	0xD,		/* disp_cc_mdss_pclk0_clk */
+	0xF,		/* disp_cc_mdss_rot_clk */
+	0x11,		/* disp_cc_mdss_vsync_clk */
+	0x24,		/* disp_cc_sleep_clk */
+	0x23,		/* disp_cc_xo_clk */
+};
+
+static struct clk_debug_mux disp_cc_debug_mux = {
+	.priv = &debug_mux_priv,
+	.debug_offset = 0x7000,
+	.post_div_offset = 0x5008,
+	.cbcr_offset = 0x500C,
+	.src_sel_mask = 0xFF,
+	.src_sel_shift = 0,
+	.post_div_mask = 0x3,
+	.post_div_shift = 0,
+	.post_div_val = 4,
+	.mux_sels = disp_cc_debug_mux_sels,
+	.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_debug_mux",
+		.ops = &clk_debug_mux_ops,
+		.parent_names = disp_cc_debug_mux_parent_names,
+		.num_parents = ARRAY_SIZE(disp_cc_debug_mux_parent_names),
+		.flags = CLK_IS_MEASURE,
+	},
+};
+
+static const char *const gcc_debug_mux_parent_names[] = {
+	"cpu_cc_debug_mux",
+	"disp_cc_debug_mux",
+	"gcc_ahb2phy_csi_clk",
+	"gcc_ahb2phy_usb_clk",
+	"gcc_apc_vs_clk",
+	"gcc_bimc_gpu_axi_clk",
+	"gcc_boot_rom_ahb_clk",
+	"gcc_cam_throttle_nrt_clk",
+	"gcc_cam_throttle_rt_clk",
+	"gcc_camera_ahb_clk",
+	"gcc_camera_xo_clk",
+	"gcc_camss_axi_clk",
+	"gcc_camss_camnoc_atb_clk",
+	"gcc_camss_camnoc_nts_xo_clk",
+	"gcc_camss_cci_0_clk",
+	"gcc_camss_cphy_0_clk",
+	"gcc_camss_cphy_1_clk",
+	"gcc_camss_cphy_2_clk",
+	"gcc_camss_csi0phytimer_clk",
+	"gcc_camss_csi1phytimer_clk",
+	"gcc_camss_csi2phytimer_clk",
+	"gcc_camss_mclk0_clk",
+	"gcc_camss_mclk1_clk",
+	"gcc_camss_mclk2_clk",
+	"gcc_camss_mclk3_clk",
+	"gcc_camss_nrt_axi_clk",
+	"gcc_camss_ope_ahb_clk",
+	"gcc_camss_ope_clk",
+	"gcc_camss_rt_axi_clk",
+	"gcc_camss_tfe_0_clk",
+	"gcc_camss_tfe_0_cphy_rx_clk",
+	"gcc_camss_tfe_0_csid_clk",
+	"gcc_camss_tfe_1_clk",
+	"gcc_camss_tfe_1_cphy_rx_clk",
+	"gcc_camss_tfe_1_csid_clk",
+	"gcc_camss_tfe_2_clk",
+	"gcc_camss_tfe_2_cphy_rx_clk",
+	"gcc_camss_tfe_2_csid_clk",
+	"gcc_camss_top_ahb_clk",
+	"gcc_cfg_noc_usb3_prim_axi_clk",
+	"gcc_cpuss_ahb_clk",
+	"gcc_cpuss_gnoc_clk",
+	"gcc_cpuss_throttle_core_clk",
+	"gcc_cpuss_throttle_xo_clk",
+	"gcc_disp_ahb_clk",
+	"gcc_disp_gpll0_div_clk_src",
+	"gcc_disp_hf_axi_clk",
+	"gcc_disp_throttle_core_clk",
+	"gcc_disp_xo_clk",
+	"gcc_gp1_clk",
+	"gcc_gp2_clk",
+	"gcc_gp3_clk",
+	"gcc_gpu_cfg_ahb_clk",
+	"gcc_gpu_gpll0_clk_src",
+	"gcc_gpu_gpll0_div_clk_src",
+	"gcc_gpu_memnoc_gfx_clk",
+	"gcc_gpu_snoc_dvm_gfx_clk",
+	"gcc_gpu_throttle_core_clk",
+	"gcc_gpu_throttle_xo_clk",
+	"gcc_mss_vs_clk",
+	"gcc_pdm2_clk",
+	"gcc_pdm_ahb_clk",
+	"gcc_pdm_xo4_clk",
+	"gcc_prng_ahb_clk",
+	"gcc_qmip_camera_nrt_ahb_clk",
+	"gcc_qmip_camera_rt_ahb_clk",
+	"gcc_qmip_cpuss_cfg_ahb_clk",
+	"gcc_qmip_disp_ahb_clk",
+	"gcc_qmip_gpu_cfg_ahb_clk",
+	"gcc_qmip_video_vcodec_ahb_clk",
+	"gcc_qupv3_wrap0_core_2x_clk",
+	"gcc_qupv3_wrap0_core_clk",
+	"gcc_qupv3_wrap_0_m_ahb_clk",
+	"gcc_qupv3_wrap_0_s_ahb_clk",
+	"gcc_sdcc1_ahb_clk",
+	"gcc_sdcc1_apps_clk",
+	"gcc_sdcc1_ice_core_clk",
+	"gcc_sdcc2_ahb_clk",
+	"gcc_sdcc2_apps_clk",
+	"gcc_sys_noc_cpuss_ahb_clk",
+	"gcc_sys_noc_ufs_phy_axi_clk",
+	"gcc_sys_noc_usb3_prim_axi_clk",
+	"gcc_ufs_phy_ahb_clk",
+	"gcc_ufs_phy_axi_clk",
+	"gcc_ufs_phy_ice_core_clk",
+	"gcc_ufs_phy_phy_aux_clk",
+	"gcc_ufs_phy_rx_symbol_0_clk",
+	"gcc_ufs_phy_tx_symbol_0_clk",
+	"gcc_ufs_phy_unipro_core_clk",
+	"gcc_usb30_prim_master_clk",
+	"gcc_usb30_prim_mock_utmi_clk",
+	"gcc_usb30_prim_sleep_clk",
+	"gcc_usb3_prim_phy_com_aux_clk",
+	"gcc_usb3_prim_phy_pipe_clk",
+	"gcc_vcodec0_axi_clk",
+	"gcc_vdda_vs_clk",
+	"gcc_vddcx_vs_clk",
+	"gcc_vddmx_vs_clk",
+	"gcc_venus_ahb_clk",
+	"gcc_venus_ctl_axi_clk",
+	"gcc_video_ahb_clk",
+	"gcc_video_axi0_clk",
+	"gcc_video_throttle_core_clk",
+	"gcc_video_vcodec0_sys_clk",
+	"gcc_video_venus_ctl_clk",
+	"gcc_video_xo_clk",
+	"gcc_vs_ctrl_ahb_clk",
+	"gcc_vs_ctrl_clk",
+	"gcc_wcss_vs_clk",
+	"gpu_cc_debug_mux",
+	"mc_cc_debug_mux",
+};
+
+static int gcc_debug_mux_sels[] = {
+	0xAF,		/* cpu_cc_debug_mux */
+	0x42,		/* disp_cc_debug_mux */
+	0x63,		/* gcc_ahb2phy_csi_clk */
+	0x64,		/* gcc_ahb2phy_usb_clk */
+	0xC3,		/* gcc_apc_vs_clk */
+	0x90,		/* gcc_bimc_gpu_axi_clk */
+	0x76,		/* gcc_boot_rom_ahb_clk */
+	0x4C,		/* gcc_cam_throttle_nrt_clk */
+	0x4B,		/* gcc_cam_throttle_rt_clk */
+	0x37,		/* gcc_camera_ahb_clk */
+	0x3F,		/* gcc_camera_xo_clk */
+	0x136,		/* gcc_camss_axi_clk */
+	0x138,		/* gcc_camss_camnoc_atb_clk */
+	0x139,		/* gcc_camss_camnoc_nts_xo_clk */
+	0x134,		/* gcc_camss_cci_0_clk */
+	0x128,		/* gcc_camss_cphy_0_clk */
+	0x129,		/* gcc_camss_cphy_1_clk */
+	0x12A,		/* gcc_camss_cphy_2_clk */
+	0x11A,		/* gcc_camss_csi0phytimer_clk */
+	0x11B,		/* gcc_camss_csi1phytimer_clk */
+	0x11C,		/* gcc_camss_csi2phytimer_clk */
+	0x11D,		/* gcc_camss_mclk0_clk */
+	0x11E,		/* gcc_camss_mclk1_clk */
+	0x11F,		/* gcc_camss_mclk2_clk */
+	0x120,		/* gcc_camss_mclk3_clk */
+	0x13A,		/* gcc_camss_nrt_axi_clk */
+	0x133,		/* gcc_camss_ope_ahb_clk */
+	0x131,		/* gcc_camss_ope_clk */
+	0x13C,		/* gcc_camss_rt_axi_clk */
+	0x121,		/* gcc_camss_tfe_0_clk */
+	0x125,		/* gcc_camss_tfe_0_cphy_rx_clk */
+	0x12B,		/* gcc_camss_tfe_0_csid_clk */
+	0x122,		/* gcc_camss_tfe_1_clk */
+	0x126,		/* gcc_camss_tfe_1_cphy_rx_clk */
+	0x12D,		/* gcc_camss_tfe_1_csid_clk */
+	0x123,		/* gcc_camss_tfe_2_clk */
+	0x127,		/* gcc_camss_tfe_2_cphy_rx_clk */
+	0x12F,		/* gcc_camss_tfe_2_csid_clk */
+	0x135,		/* gcc_camss_top_ahb_clk */
+	0x1D,		/* gcc_cfg_noc_usb3_prim_axi_clk */
+	0xA9,		/* gcc_cpuss_ahb_clk */
+	0xAA,		/* gcc_cpuss_gnoc_clk */
+	0xB2,		/* gcc_cpuss_throttle_core_clk */
+	0xB1,		/* gcc_cpuss_throttle_xo_clk */
+	0x38,		/* gcc_disp_ahb_clk */
+	0x47,		/* gcc_disp_gpll0_div_clk_src */
+	0x3D,		/* gcc_disp_hf_axi_clk */
+	0x49,		/* gcc_disp_throttle_core_clk */
+	0x40,		/* gcc_disp_xo_clk */
+	0xBA,		/* gcc_gp1_clk */
+	0xBB,		/* gcc_gp2_clk */
+	0xBC,		/* gcc_gp3_clk */
+	0xE5,		/* gcc_gpu_cfg_ahb_clk */
+	0xEB,		/* gcc_gpu_gpll0_clk_src */
+	0xEC,		/* gcc_gpu_gpll0_div_clk_src */
+	0xE8,		/* gcc_gpu_memnoc_gfx_clk */
+	0xEA,		/* gcc_gpu_snoc_dvm_gfx_clk */
+	0xEF,		/* gcc_gpu_throttle_core_clk */
+	0xEE,		/* gcc_gpu_throttle_xo_clk */
+	0xC2,		/* gcc_mss_vs_clk */
+	0x73,		/* gcc_pdm2_clk */
+	0x71,		/* gcc_pdm_ahb_clk */
+	0x72,		/* gcc_pdm_xo4_clk */
+	0x74,		/* gcc_prng_ahb_clk */
+	0x3A,		/* gcc_qmip_camera_nrt_ahb_clk */
+	0x48,		/* gcc_qmip_camera_rt_ahb_clk */
+	0xB0,		/* gcc_qmip_cpuss_cfg_ahb_clk */
+	0x3B,		/* gcc_qmip_disp_ahb_clk */
+	0xED,		/* gcc_qmip_gpu_cfg_ahb_clk */
+	0x39,		/* gcc_qmip_video_vcodec_ahb_clk */
+	0x6A,		/* gcc_qupv3_wrap0_core_2x_clk */
+	0x69,		/* gcc_qupv3_wrap0_core_clk */
+	0x67,		/* gcc_qupv3_wrap_0_m_ahb_clk */
+	0x68,		/* gcc_qupv3_wrap_0_s_ahb_clk */
+	0xF3,		/* gcc_sdcc1_ahb_clk */
+	0xF2,		/* gcc_sdcc1_apps_clk */
+	0xF4,		/* gcc_sdcc1_ice_core_clk */
+	0x66,		/* gcc_sdcc2_ahb_clk */
+	0x65,		/* gcc_sdcc2_apps_clk */
+	0x9,		/* gcc_sys_noc_cpuss_ahb_clk */
+	0x19,		/* gcc_sys_noc_ufs_phy_axi_clk */
+	0x18,		/* gcc_sys_noc_usb3_prim_axi_clk */
+	0x111,		/* gcc_ufs_phy_ahb_clk */
+	0x110,		/* gcc_ufs_phy_axi_clk */
+	0x117,		/* gcc_ufs_phy_ice_core_clk */
+	0x118,		/* gcc_ufs_phy_phy_aux_clk */
+	0x113,		/* gcc_ufs_phy_rx_symbol_0_clk */
+	0x112,		/* gcc_ufs_phy_tx_symbol_0_clk */
+	0x116,		/* gcc_ufs_phy_unipro_core_clk */
+	0x5C,		/* gcc_usb30_prim_master_clk */
+	0x5E,		/* gcc_usb30_prim_mock_utmi_clk */
+	0x5D,		/* gcc_usb30_prim_sleep_clk */
+	0x5F,		/* gcc_usb3_prim_phy_com_aux_clk */
+	0x60,		/* gcc_usb3_prim_phy_pipe_clk */
+	0x142,		/* gcc_vcodec0_axi_clk */
+	0xBF,		/* gcc_vdda_vs_clk */
+	0xBD,		/* gcc_vddcx_vs_clk */
+	0xBE,		/* gcc_vddmx_vs_clk */
+	0x143,		/* gcc_venus_ahb_clk */
+	0x141,		/* gcc_venus_ctl_axi_clk */
+	0x36,		/* gcc_video_ahb_clk */
+	0x3C,		/* gcc_video_axi0_clk */
+	0x4A,		/* gcc_video_throttle_core_clk */
+	0x13F,		/* gcc_video_vcodec0_sys_clk */
+	0x13D,		/* gcc_video_venus_ctl_clk */
+	0x3E,		/* gcc_video_xo_clk */
+	0xC1,		/* gcc_vs_ctrl_ahb_clk */
+	0xC0,		/* gcc_vs_ctrl_clk */
+	0xC4,		/* gcc_wcss_vs_clk */
+	0xE7,		/* gpu_cc_debug_mux */
+	0x9E,		/* mc_cc_debug_mux */
+};
+
+static struct clk_debug_mux gcc_debug_mux = {
+	.priv = &debug_mux_priv,
+	.debug_offset = 0x62000,
+	.post_div_offset = 0x30000,
+	.cbcr_offset = 0x30004,
+	.src_sel_mask = 0x3FF,
+	.src_sel_shift = 0,
+	.post_div_mask = 0xF,
+	.post_div_shift = 0,
+	.post_div_val = 1,
+	.mux_sels = gcc_debug_mux_sels,
+	.hw.init = &(struct clk_init_data){
+		.name = "gcc_debug_mux",
+		.ops = &clk_debug_mux_ops,
+		.parent_names = gcc_debug_mux_parent_names,
+		.num_parents = ARRAY_SIZE(gcc_debug_mux_parent_names),
+		.flags = CLK_IS_MEASURE,
+	},
+};
+
+static const char *const gpu_cc_debug_mux_parent_names[] = {
+	"gpu_cc_ahb_clk",
+	"gpu_cc_crc_ahb_clk",
+	"gpu_cc_cx_gfx3d_clk",
+	"gpu_cc_cx_gfx3d_slv_clk",
+	"gpu_cc_cx_gmu_clk",
+	"gpu_cc_cx_snoc_dvm_clk",
+	"gpu_cc_cxo_aon_clk",
+	"gpu_cc_cxo_clk",
+	"gpu_cc_gx_cxo_clk",
+	"gpu_cc_gx_gfx3d_clk",
+	"gpu_cc_sleep_clk",
+};
+
+static int gpu_cc_debug_mux_sels[] = {
+	0x10,		/* gpu_cc_ahb_clk */
+	0x11,		/* gpu_cc_crc_ahb_clk */
+	0x1A,		/* gpu_cc_cx_gfx3d_clk */
+	0x1B,		/* gpu_cc_cx_gfx3d_slv_clk */
+	0x18,		/* gpu_cc_cx_gmu_clk */
+	0x15,		/* gpu_cc_cx_snoc_dvm_clk */
+	0xA,		/* gpu_cc_cxo_aon_clk */
+	0x19,		/* gpu_cc_cxo_clk */
+	0xE,		/* gpu_cc_gx_cxo_clk */
+	0xB,		/* gpu_cc_gx_gfx3d_clk */
+	0x16,		/* gpu_cc_sleep_clk */
+};
+
+static struct clk_debug_mux gpu_cc_debug_mux = {
+	.priv = &debug_mux_priv,
+	.debug_offset = 0x1568,
+	.post_div_offset = 0x10FC,
+	.cbcr_offset = 0x1100,
+	.src_sel_mask = 0xFF,
+	.src_sel_shift = 0,
+	.post_div_mask = 0x3,
+	.post_div_shift = 0,
+	.post_div_val = 2,
+	.mux_sels = gpu_cc_debug_mux_sels,
+	.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_debug_mux",
+		.ops = &clk_debug_mux_ops,
+		.parent_names = gpu_cc_debug_mux_parent_names,
+		.num_parents = ARRAY_SIZE(gpu_cc_debug_mux_parent_names),
+		.flags = CLK_IS_MEASURE,
+	},
+};
+
+static const char *const mc_cc_debug_mux_parent_names[] = {
+	"measure_only_mccc_clk",
+};
+
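+/*
+ * The MC debug mux has a single parent and no select or post-divider
+ * programming; only period_offset is set.
+ */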
+static struct clk_debug_mux mc_cc_debug_mux = {
+	.period_offset = 0x50,
+	.hw.init = &(struct clk_init_data){
+		.name = "mc_cc_debug_mux",
+		.ops = &clk_debug_mux_ops,
+		.parent_names = mc_cc_debug_mux_parent_names,
+		.num_parents = ARRAY_SIZE(mc_cc_debug_mux_parent_names),
+		.flags = CLK_IS_MEASURE,
+	},
+};
+
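+/*
+ * Pair each debug mux with the DT property naming its regmap; probe
+ * resolves these via map_debug_bases() before registering the mux.
+ */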
+static struct mux_regmap_names mux_list[] = {
+	{ .mux = &cpu_cc_debug_mux, .regmap_name = "qcom,cpucc" },
+	{ .mux = &disp_cc_debug_mux, .regmap_name = "qcom,dispcc" },
+	{ .mux = &gcc_debug_mux, .regmap_name = "qcom,gcc" },
+	{ .mux = &gpu_cc_debug_mux, .regmap_name = "qcom,gpucc" },
+	{ .mux = &mc_cc_debug_mux, .regmap_name = "qcom,mccc" },
+};
+
+static struct clk_dummy measure_only_mccc_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "measure_only_mccc_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
+static struct clk_dummy perfcl_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "perfcl_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
+static struct clk_dummy pwrcl_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "pwrcl_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
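+/* Dummy placeholder clocks, registered alongside the muxes in probe. */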
+static struct clk_hw *debugcc_bengal_hws[] = {
+	&measure_only_mccc_clk.hw,
+	&perfcl_clk.hw,
+	&pwrcl_clk.hw,
+};
+
+static const struct of_device_id clk_debug_match_table[] = {
+	{ .compatible = "qcom,bengal-debugcc" },
+	{ }
+};
+
+static int clk_debug_bengal_probe(struct platform_device *pdev)
+{
+	struct clk *clk;
+	int i, ret;
+
+	BUILD_BUG_ON(ARRAY_SIZE(disp_cc_debug_mux_parent_names) !=
+		     ARRAY_SIZE(disp_cc_debug_mux_sels));
+	BUILD_BUG_ON(ARRAY_SIZE(gcc_debug_mux_parent_names) !=
+		     ARRAY_SIZE(gcc_debug_mux_sels));
+	BUILD_BUG_ON(ARRAY_SIZE(gpu_cc_debug_mux_parent_names) !=
+		     ARRAY_SIZE(gpu_cc_debug_mux_sels));
+
+	clk = devm_clk_get(&pdev->dev, "xo_clk_src");
+	if (IS_ERR(clk)) {
+		if (PTR_ERR(clk) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get xo clock\n");
+		return PTR_ERR(clk);
+	}
+
+	debug_mux_priv.cxo = clk;
+
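+	/*
+	 * Map each mux's register base and register it.  A -EBADR return
+	 * from map_debug_bases() is treated as an absent base and the mux
+	 * is skipped instead of failing the probe.
+	 */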
+	for (i = 0; i < ARRAY_SIZE(mux_list); i++) {
+		ret = map_debug_bases(pdev, mux_list[i].regmap_name,
+				      mux_list[i].mux);
+		if (ret == -EBADR)
+			continue;
+		else if (ret)
+			return ret;
+
+		clk = devm_clk_register(&pdev->dev, &mux_list[i].mux->hw);
+		if (IS_ERR(clk)) {
+			dev_err(&pdev->dev, "Unable to register %s, err:(%ld)\n",
+				mux_list[i].mux->hw.init->name, PTR_ERR(clk));
+			return PTR_ERR(clk);
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(debugcc_bengal_hws); i++) {
+		clk = devm_clk_register(&pdev->dev, debugcc_bengal_hws[i]);
+		if (IS_ERR(clk)) {
+			dev_err(&pdev->dev, "Unable to register %s, err:(%ld)\n",
+				debugcc_bengal_hws[i]->init->name, PTR_ERR(clk));
+			return PTR_ERR(clk);
+		}
+	}
+
+	ret = clk_debug_measure_register(&gcc_debug_mux.hw);
+	if (ret)
+		dev_err(&pdev->dev, "Could not register the measure clock\n");
+
+	return ret;
+}
+
+static struct platform_driver clk_debug_driver = {
+	.probe = clk_debug_bengal_probe,
+	.driver = {
+		.name = "debugcc-bengal",
+		.of_match_table = clk_debug_match_table,
+	},
+};
+
+static int __init clk_debug_bengal_init(void)
+{
+	return platform_driver_register(&clk_debug_driver);
+}
+fs_initcall(clk_debug_bengal_init);
+
+MODULE_DESCRIPTION("QTI DEBUG CC BENGAL Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/dispcc-bengal.c b/drivers/clk/qcom/dispcc-bengal.c
index 4f48c64..8c1d82c 100644
--- a/drivers/clk/qcom/dispcc-bengal.c
+++ b/drivers/clk/qcom/dispcc-bengal.c
@@ -78,7 +78,7 @@ static const struct parent_map disp_cc_parent_map_3[] = {
 
 static const char * const disp_cc_parent_names_3[] = {
 	"bi_tcxo",
-	"gpll0_out_main",
+	"gcc_disp_gpll0_div_clk_src",
 	"core_bi_pll_test_se",
 };
 
@@ -118,7 +118,9 @@ static const struct alpha_pll_config disp_cc_pll0_config = {
 	.vco_val = 0x2 << 20,
 	.vco_mask = GENMASK(21, 20),
 	.main_output_mask = BIT(0),
-	.config_ctl_val = 0x40008529,
+	.config_ctl_val = 0x4001055b,
+	.test_ctl_hi1_val = 0x1,
+	.test_ctl_hi_mask = 0x1,
 };
 
 static struct clk_alpha_pll disp_cc_pll0 = {
diff --git a/drivers/clk/qcom/dispcc-kona.c b/drivers/clk/qcom/dispcc-kona.c
index 7ce8f52..5eb750c 100644
--- a/drivers/clk/qcom/dispcc-kona.c
+++ b/drivers/clk/qcom/dispcc-kona.c
@@ -31,7 +31,7 @@
 
 #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
 
-static DEFINE_VDD_REGULATORS(vdd_mm, VDD_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_mm, VDD_NUM_MM, 1, vdd_corner);
 
 #define MSM_BUS_VECTOR(_src, _dst, _ab, _ib)	\
 {						\
@@ -282,7 +282,7 @@ static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
 		.parent_names =
 			(const char *[]){ "disp_cc_mdss_byte0_clk_src" },
 		.num_parents = 1,
-		.ops = &clk_regmap_div_ro_ops,
+		.ops = &clk_regmap_div_ops,
 	},
 };
 
@@ -296,7 +296,7 @@ static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
 		.parent_names =
 			(const char *[]){ "disp_cc_mdss_byte1_clk_src" },
 		.num_parents = 1,
-		.ops = &clk_regmap_div_ro_ops,
+		.ops = &clk_regmap_div_ops,
 	},
 };
 
diff --git a/drivers/clk/qcom/dispcc-lito.c b/drivers/clk/qcom/dispcc-lito.c
index 35a7d34..0d4f4b7 100644
--- a/drivers/clk/qcom/dispcc-lito.c
+++ b/drivers/clk/qcom/dispcc-lito.c
@@ -25,7 +25,7 @@
 #include "common.h"
 #include "gdsc.h"
 #include "reset.h"
-#include "vdd-level.h"
+#include "vdd-level-lito.h"
 
 static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
 
diff --git a/drivers/clk/qcom/gcc-bengal.c b/drivers/clk/qcom/gcc-bengal.c
index b2cad01..ba18207 100644
--- a/drivers/clk/qcom/gcc-bengal.c
+++ b/drivers/clk/qcom/gcc-bengal.c
@@ -445,7 +445,9 @@ static const struct alpha_pll_config gpll10_config = {
 	.vco_val = 0x1 << 20,
 	.vco_mask = GENMASK(21, 20),
 	.main_output_mask = BIT(0),
-	.config_ctl_val = 0x40008529,
+	.config_ctl_val = 0x4001055b,
+	.test_ctl_hi1_val = 0x1,
+	.test_ctl_hi_mask = 0x1,
 };
 
 static struct clk_alpha_pll gpll10 = {
@@ -499,7 +501,9 @@ static const struct alpha_pll_config gpll11_config = {
 	.alpha_en_mask = BIT(24),
 	.vco_val = 0x2 << 20,
 	.vco_mask = GENMASK(21, 20),
-	.config_ctl_val = 0x40008529,
+	.config_ctl_val = 0x4001055b,
+	.test_ctl_hi1_val = 0x1,
+	.test_ctl_hi_mask = 0x1,
 };
 
 static struct clk_alpha_pll gpll11 = {
@@ -697,7 +701,7 @@ static struct clk_alpha_pll_postdiv gpll7_out_main = {
 /* 800MHz configuration */
 static const struct alpha_pll_config gpll8_config = {
 	.l = 0x29,
-	.alpha = 0xAA000000,
+	.alpha = 0xAAAAAAAA,
 	.alpha_hi = 0xAA,
 	.alpha_en_mask = BIT(24),
 	.vco_val = 0x2 << 20,
@@ -706,7 +710,9 @@ static const struct alpha_pll_config gpll8_config = {
 	.early_output_mask = BIT(3),
 	.post_div_val = 0x1 << 8,
 	.post_div_mask = GENMASK(11, 8),
-	.config_ctl_val = 0x40008529,
+	.config_ctl_val = 0x4001055b,
+	.test_ctl_hi1_val = 0x1,
+	.test_ctl_hi_mask = 0x1,
 };
 
 static struct clk_alpha_pll gpll8 = {
@@ -760,7 +766,9 @@ static const struct alpha_pll_config gpll9_config = {
 	.post_div_val = 0x1 << 8,
 	.post_div_mask = GENMASK(9, 8),
 	.main_output_mask = BIT(0),
-	.config_ctl_val = 0x000040C9,
+	.config_ctl_val = 0x00004289,
+	.test_ctl_mask = GENMASK(31, 0),
+	.test_ctl_val = 0x08000000,
 };
 
 static struct clk_alpha_pll gpll9 = {
@@ -1102,7 +1110,19 @@ static struct clk_rcg2 gcc_camss_ope_clk_src = {
 
 static const struct freq_tbl ftbl_gcc_camss_tfe_0_clk_src[] = {
 	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(128000000, P_GPLL10_OUT_MAIN, 9, 0, 0),
+	F(135529412, P_GPLL10_OUT_MAIN, 8.5, 0, 0),
+	F(144000000, P_GPLL10_OUT_MAIN, 8, 0, 0),
+	F(153600000, P_GPLL10_OUT_MAIN, 7.5, 0, 0),
+	F(164571429, P_GPLL10_OUT_MAIN, 7, 0, 0),
+	F(177230769, P_GPLL10_OUT_MAIN, 6.5, 0, 0),
+	F(192000000, P_GPLL10_OUT_MAIN, 6, 0, 0),
+	F(209454545, P_GPLL10_OUT_MAIN, 5.5, 0, 0),
+	F(230400000, P_GPLL10_OUT_MAIN, 5, 0, 0),
 	F(256000000, P_GPLL10_OUT_MAIN, 4.5, 0, 0),
+	F(288000000, P_GPLL10_OUT_MAIN, 4, 0, 0),
+	F(329142857, P_GPLL10_OUT_MAIN, 3.5, 0, 0),
+	F(384000000, P_GPLL10_OUT_MAIN, 3, 0, 0),
 	F(460800000, P_GPLL10_OUT_MAIN, 2.5, 0, 0),
 	F(576000000, P_GPLL10_OUT_MAIN, 2, 0, 0),
 	{ }
@@ -1133,6 +1153,8 @@ static struct clk_rcg2 gcc_camss_tfe_0_clk_src = {
 
 static const struct freq_tbl ftbl_gcc_camss_tfe_0_csid_clk_src[] = {
 	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(120000000, P_GPLL0_OUT_EARLY, 5, 0, 0),
+	F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
 	F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
 	F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
 	F(426400000, P_GPLL3_OUT_EARLY, 2.5, 0, 0),
@@ -1834,7 +1856,7 @@ static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
 };
 
 static const struct freq_tbl ftbl_gcc_video_venus_clk_src[] = {
-	F(133000000, P_GPLL11_OUT_MAIN, 4.5, 0, 0),
+	F(133333333, P_GPLL11_OUT_MAIN, 4.5, 0, 0),
 	F(240000000, P_GPLL11_OUT_MAIN, 2.5, 0, 0),
 	F(300000000, P_GPLL11_OUT_MAIN, 2, 0, 0),
 	F(384000000, P_GPLL11_OUT_MAIN, 2, 0, 0),
@@ -2568,6 +2590,19 @@ static struct clk_branch gcc_disp_ahb_clk = {
 	},
 };
 
+static struct clk_regmap_div gcc_disp_gpll0_clk_src = {
+	.reg = 0x17058,
+	.shift = 0,
+	.width = 2,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "gcc_disp_gpll0_clk_src",
+		.parent_names =
+			(const char *[]){ "gpll0" },
+		.num_parents = 1,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
 static struct clk_branch gcc_disp_gpll0_div_clk_src = {
 	.halt_check = BRANCH_HALT_DELAY,
 	.clkr = {
@@ -2576,7 +2611,7 @@ static struct clk_branch gcc_disp_gpll0_div_clk_src = {
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_disp_gpll0_div_clk_src",
 			.parent_names = (const char *[]){
-				"gpll0",
+				"gcc_disp_gpll0_clk_src",
 			},
 			.num_parents = 1,
 			.flags = CLK_SET_RATE_PARENT,
@@ -3253,6 +3288,19 @@ static struct clk_branch gcc_sys_noc_usb3_prim_axi_clk = {
 	},
 };
 
+static struct clk_branch gcc_ufs_clkref_clk = {
+	.halt_reg = 0x8c000,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8c000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_clkref_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
 static struct clk_branch gcc_ufs_phy_ahb_clk = {
 	.halt_reg = 0x45014,
 	.halt_check = BRANCH_HALT,
@@ -3666,6 +3714,7 @@ static struct clk_regmap *gcc_bengal_clocks[] = {
 	[GCC_CPUSS_THROTTLE_CORE_CLK] = &gcc_cpuss_throttle_core_clk.clkr,
 	[GCC_CPUSS_THROTTLE_XO_CLK] = &gcc_cpuss_throttle_xo_clk.clkr,
 	[GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
+	[GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
 	[GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
 	[GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
 	[GCC_DISP_THROTTLE_CORE_CLK] = &gcc_disp_throttle_core_clk.clkr,
@@ -3722,6 +3771,7 @@ static struct clk_regmap *gcc_bengal_clocks[] = {
 	[GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
 	[GCC_SYS_NOC_UFS_PHY_AXI_CLK] = &gcc_sys_noc_ufs_phy_axi_clk.clkr,
 	[GCC_SYS_NOC_USB3_PRIM_AXI_CLK] = &gcc_sys_noc_usb3_prim_axi_clk.clkr,
+	[GCC_UFS_CLKREF_CLK] = &gcc_ufs_clkref_clk.clkr,
 	[GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
 	[GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
 	[GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
@@ -3782,6 +3832,8 @@ static const struct qcom_reset_map gcc_bengal_resets[] = {
 	[GCC_UFS_PHY_BCR] = { 0x45000 },
 	[GCC_USB30_PRIM_BCR] = { 0x1a000 },
 	[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x1d000 },
+	[GCC_USB3PHY_PHY_PRIM_SP0_BCR] = { 0x1b008 },
+	[GCC_USB3_PHY_PRIM_SP0_BCR] = { 0x1b000 },
 	[GCC_VCODEC0_BCR] = { 0x58094 },
 	[GCC_VENUS_BCR] = { 0x58078 },
 	[GCC_VIDEO_INTERFACE_BCR] = { 0x6e000 },
diff --git a/drivers/clk/qcom/gcc-kona.c b/drivers/clk/qcom/gcc-kona.c
index 87bd776..f786292 100644
--- a/drivers/clk/qcom/gcc-kona.c
+++ b/drivers/clk/qcom/gcc-kona.c
@@ -35,7 +35,7 @@
 
 static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
 static DEFINE_VDD_REGULATORS(vdd_cx_ao, VDD_NUM, 1, vdd_corner);
-static DEFINE_VDD_REGULATORS(vdd_mm, VDD_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_mm, VDD_NUM_MM, 1, vdd_corner);
 
 enum {
 	P_BI_TCXO,
@@ -292,6 +292,8 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_cpuss_ahb_clk_src",
 		.parent_names = gcc_parent_names_0_ao,
@@ -322,6 +324,8 @@ static struct clk_rcg2 gcc_gp1_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_1,
 	.freq_tbl = ftbl_gcc_gp1_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_gp1_clk_src",
 		.parent_names = gcc_parent_names_1,
@@ -344,6 +348,8 @@ static struct clk_rcg2 gcc_gp2_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_1,
 	.freq_tbl = ftbl_gcc_gp1_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_gp2_clk_src",
 		.parent_names = gcc_parent_names_1,
@@ -366,6 +372,8 @@ static struct clk_rcg2 gcc_gp3_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_1,
 	.freq_tbl = ftbl_gcc_gp1_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_gp3_clk_src",
 		.parent_names = gcc_parent_names_1,
@@ -394,6 +402,8 @@ static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_2,
 	.freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_pcie_0_aux_clk_src",
 		.parent_names = gcc_parent_names_2,
@@ -413,6 +423,8 @@ static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_2,
 	.freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_pcie_1_aux_clk_src",
 		.parent_names = gcc_parent_names_2,
@@ -432,6 +444,8 @@ static struct clk_rcg2 gcc_pcie_2_aux_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_2,
 	.freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_pcie_2_aux_clk_src",
 		.parent_names = gcc_parent_names_2,
@@ -457,6 +471,8 @@ static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_pcie_phy_refgen_clk_src",
 		.parent_names = gcc_parent_names_0_ao,
@@ -484,6 +500,8 @@ static struct clk_rcg2 gcc_pdm2_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_pdm2_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_pdm2_clk_src",
 		.parent_names = gcc_parent_names_0,
@@ -539,6 +557,8 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
 };
 
@@ -562,6 +582,8 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
 };
 
@@ -601,6 +623,8 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
 };
 
@@ -624,6 +648,8 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
 };
 
@@ -647,6 +673,8 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
 };
 
@@ -670,6 +698,8 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
 };
 
@@ -693,6 +723,8 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
 };
 
@@ -716,6 +748,8 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
 };
 
@@ -739,6 +773,8 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
 };
 
@@ -762,6 +798,8 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
 };
 
@@ -785,6 +823,8 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
 };
 
@@ -808,6 +848,8 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
 };
 
@@ -831,6 +873,8 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
 };
 
@@ -854,6 +898,8 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
 };
 
@@ -877,6 +923,8 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
 };
 
@@ -900,6 +948,8 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
 };
 
@@ -923,6 +973,8 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init,
 };
 
@@ -946,6 +998,8 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init,
 };
 
@@ -969,6 +1023,8 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
 };
 
@@ -992,6 +1048,8 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
 };
 
@@ -1011,6 +1069,8 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_4,
 	.freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_sdcc2_apps_clk_src",
 		.parent_names = gcc_parent_names_4,
@@ -1042,6 +1102,8 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_sdcc4_apps_clk_src",
 		.parent_names = gcc_parent_names_0,
@@ -1068,6 +1130,8 @@ static struct clk_rcg2 gcc_tsif_ref_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_5,
 	.freq_tbl = ftbl_gcc_tsif_ref_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_tsif_ref_clk_src",
 		.parent_names = gcc_parent_names_5,
@@ -1318,6 +1382,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
 	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_usb30_prim_master_clk_src",
 		.parent_names = gcc_parent_names_0,
@@ -1341,6 +1406,8 @@ static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_usb30_prim_mock_utmi_clk_src",
 		.parent_names = gcc_parent_names_0,
@@ -1361,6 +1428,7 @@ static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
 	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_usb30_sec_master_clk_src",
 		.parent_names = gcc_parent_names_0,
@@ -1384,6 +1452,8 @@ static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_usb30_sec_mock_utmi_clk_src",
 		.parent_names = gcc_parent_names_0,
@@ -1403,6 +1473,8 @@ static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_2,
 	.freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_usb3_prim_phy_aux_clk_src",
 		.parent_names = gcc_parent_names_2,
@@ -1422,6 +1494,8 @@ static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_2,
 	.freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+	.enable_safe_config = true,
+	.flags = HW_CLK_CTRL_MODE,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_usb3_sec_phy_aux_clk_src",
 		.parent_names = gcc_parent_names_2,
diff --git a/drivers/clk/qcom/gcc-lito.c b/drivers/clk/qcom/gcc-lito.c
index f5ec81f..2d420ef9 100644
--- a/drivers/clk/qcom/gcc-lito.c
+++ b/drivers/clk/qcom/gcc-lito.c
@@ -26,7 +26,7 @@
 #include "clk-regmap.h"
 #include "common.h"
 #include "reset.h"
-#include "vdd-level.h"
+#include "vdd-level-lito.h"
 
 #define GCC_NPU_MISC				0x4d110
 #define GCC_GPU_MISC				0x71028
@@ -1366,13 +1366,12 @@ static struct clk_branch gcc_npu_axi_clk = {
 
 static struct clk_branch gcc_npu_bwmon2_axi_clk = {
 	.halt_reg = 0x7000c,
-	.halt_check = BRANCH_HALT,
+	.halt_check = BRANCH_HALT_DELAY,
 	.clkr = {
 		.enable_reg = 0x7000c,
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_npu_bwmon2_axi_clk",
-			.flags = CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -1380,13 +1379,12 @@ static struct clk_branch gcc_npu_bwmon2_axi_clk = {
 
 static struct clk_branch gcc_npu_bwmon_axi_clk = {
 	.halt_reg = 0x70008,
-	.halt_check = BRANCH_HALT,
+	.halt_check = BRANCH_HALT_DELAY,
 	.clkr = {
 		.enable_reg = 0x70008,
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_npu_bwmon_axi_clk",
-			.flags = CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
 	},
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 3bf11a6..ada3e4a 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -647,7 +647,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
 		.name = "gcc_sdcc2_apps_clk_src",
 		.parent_names = gcc_parent_names_10,
 		.num_parents = 5,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_floor_ops,
 	},
 };
 
@@ -671,7 +671,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
 		.name = "gcc_sdcc4_apps_clk_src",
 		.parent_names = gcc_parent_names_0,
 		.num_parents = 4,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_floor_ops,
 	},
 };
 
diff --git a/drivers/clk/qcom/gpucc-bengal.c b/drivers/clk/qcom/gpucc-bengal.c
index 91f62b2..939568d0 100644
--- a/drivers/clk/qcom/gpucc-bengal.c
+++ b/drivers/clk/qcom/gpucc-bengal.c
@@ -53,8 +53,8 @@ static const char * const gpu_cc_parent_names_0[] = {
 	"bi_tcxo",
 	"gpu_cc_pll0_out_main",
 	"gpu_cc_pll1_out_main",
-	"gpll0_out_main",
-	"gpll0_out_main_div",
+	"gcc_gpu_gpll0_clk_src",
+	"gcc_gpu_gpll0_div_clk_src",
 	"core_bi_pll_test_se",
 };
 
@@ -74,7 +74,7 @@ static const char * const gpu_cc_parent_names_1[] = {
 	"gpu_cc_pll0_out_aux2",
 	"gpu_cc_pll1_out_aux",
 	"gpu_cc_pll1_out_aux2",
-	"gpll0_out_main",
+	"gpll0",
 	"core_bi_pll_test_se",
 };
 
@@ -93,7 +93,9 @@ static const struct alpha_pll_config gpu_cc_pll0_config = {
 	.main_output_mask = BIT(0),
 	.aux_output_mask = BIT(1),
 	.aux2_output_mask = BIT(2),
-	.config_ctl_val = 0x40008529,
+	.config_ctl_val = 0x4001055b,
+	.test_ctl_hi1_val = 0x1,
+	.test_ctl_hi_mask = 0x1,
 };
 
 /* 532MHz configuration */
@@ -149,7 +151,9 @@ static const struct alpha_pll_config gpu_cc_pll1_config = {
 	.vco_mask = GENMASK(21, 20),
 	.main_output_mask = BIT(0),
 	.aux_output_mask = BIT(1),
-	.config_ctl_val = 0x40008529,
+	.config_ctl_val = 0x4001055b,
+	.test_ctl_hi1_val = 0x1,
+	.test_ctl_hi_mask = 0x1,
 };
 
 static struct clk_alpha_pll gpu_cc_pll1 = {
@@ -195,7 +199,7 @@ static struct clk_alpha_pll_postdiv gpu_cc_pll1_out_aux = {
 };
 
 static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
-	F(200000000, P_GPLL0_OUT_MAIN, 1.5, 0, 0),
+	F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
 	{ }
 };
 
@@ -360,12 +364,13 @@ static struct clk_branch gpu_cc_cxo_clk = {
 
 static struct clk_branch gpu_cc_gx_cxo_clk = {
 	.halt_reg = 0x1060,
-	.halt_check = BRANCH_HALT,
+	.halt_check = BRANCH_HALT_DELAY,
 	.clkr = {
 		.enable_reg = 0x1060,
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gpu_cc_gx_cxo_clk",
+			.flags = CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
 	},
diff --git a/drivers/clk/qcom/gpucc-lito.c b/drivers/clk/qcom/gpucc-lito.c
index 310d4bb..156fe4f 100644
--- a/drivers/clk/qcom/gpucc-lito.c
+++ b/drivers/clk/qcom/gpucc-lito.c
@@ -26,7 +26,7 @@
 #include "clk-regmap.h"
 #include "common.h"
 #include "reset.h"
-#include "vdd-level.h"
+#include "vdd-level-lito.h"
 
 static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
 static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner);
diff --git a/drivers/clk/qcom/npucc-kona.c b/drivers/clk/qcom/npucc-kona.c
index 8f16096..7972284 100644
--- a/drivers/clk/qcom/npucc-kona.c
+++ b/drivers/clk/qcom/npucc-kona.c
@@ -120,12 +120,12 @@ static const u32 crc_reg_val[] = {
 };
 
 static struct alpha_pll_config npu_cc_pll0_config = {
-	.l = 0x14,
+	.l = 0x1F,
 	.cal_l = 0x44,
-	.alpha = 0xD555,
+	.alpha = 0x4000,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
-	.config_ctl_hi1_val = 0x029A699C,
+	.config_ctl_hi1_val = 0x329A699C,
 	.user_ctl_val = 0x00000000,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
@@ -184,7 +184,7 @@ static struct alpha_pll_config npu_cc_pll1_config = {
 	.alpha = 0x2000,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
-	.config_ctl_hi1_val = 0x029A699C,
+	.config_ctl_hi1_val = 0x329A699C,
 	.user_ctl_val = 0x00000000,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
@@ -235,12 +235,12 @@ static struct clk_alpha_pll_postdiv npu_cc_pll1_out_even = {
 };
 
 static struct alpha_pll_config npu_q6ss_pll_config = {
-	.l = 0xD,
+	.l = 0xF,
 	.cal_l = 0x44,
-	.alpha = 0x555,
+	.alpha = 0xA000,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
-	.config_ctl_hi1_val = 0x029A699C,
+	.config_ctl_hi1_val = 0x329A699C,
 	.user_ctl_val = 0x00000000,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
@@ -282,7 +282,6 @@ static struct clk_fixed_factor npu_cc_crc_div = {
 };
 
 static const struct freq_tbl ftbl_npu_cc_cal_hm0_clk_src[] = {
-	F(200000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
 	F(300000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
 	F(466000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
 	F(533000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
@@ -291,6 +290,16 @@ static const struct freq_tbl ftbl_npu_cc_cal_hm0_clk_src[] = {
 	{ }
 };
 
+static const struct freq_tbl ftbl_npu_cc_cal_hm0_clk_src_kona_v2[] = {
+	F(300000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(406000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(533000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(730000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(920000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(1000000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 npu_cc_cal_hm1_clk_src = {
 	.cmd_rcgr = 0x1140,
 	.mnd_width = 0,
@@ -308,7 +317,6 @@ static struct clk_rcg2 npu_cc_cal_hm1_clk_src = {
 		.vdd_class = &vdd_cx,
 		.num_rate_max = VDD_NUM,
 		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_MIN] = 200000000,
 			[VDD_LOWER] = 300000000,
 			[VDD_LOW] = 466000000,
 			[VDD_LOW_L1] = 533000000,
@@ -335,7 +343,6 @@ static struct clk_rcg2 npu_cc_cal_hm0_clk_src = {
 			.vdd_class = &vdd_cx,
 			.num_rate_max = VDD_NUM,
 			.rate_max = (unsigned long[VDD_NUM]) {
-				[VDD_MIN] = 200000000,
 				[VDD_LOWER] = 300000000,
 				[VDD_LOW] = 466000000,
 				[VDD_LOW_L1] = 533000000,
@@ -352,7 +359,6 @@ static struct clk_rcg2 npu_cc_cal_hm0_clk_src = {
 };
 
 static const struct freq_tbl ftbl_npu_cc_core_clk_src[] = {
-	F(60000000, P_GCC_NPU_GPLL0_DIV_CLK, 5, 0, 0),
 	F(100000000, P_GCC_NPU_GPLL0_DIV_CLK, 3, 0, 0),
 	F(200000000, P_GCC_NPU_GPLL0_CLK, 3, 0, 0),
 	F(333333333, P_NPU_CC_PLL1_OUT_EVEN, 4.5, 0, 0),
@@ -378,7 +384,6 @@ static struct clk_rcg2 npu_cc_core_clk_src = {
 		.vdd_class = &vdd_cx,
 		.num_rate_max = VDD_NUM,
 		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_MIN] = 60000000,
 			[VDD_LOWER] = 100000000,
 			[VDD_LOW] = 200000000,
 			[VDD_LOW_L1] = 333333333,
@@ -388,7 +393,6 @@ static struct clk_rcg2 npu_cc_core_clk_src = {
 };
 
 static const struct freq_tbl ftbl_npu_cc_lmh_clk_src[] = {
-	F(60000000, P_GCC_NPU_GPLL0_DIV_CLK, 5, 0, 0),
 	F(100000000, P_GCC_NPU_GPLL0_DIV_CLK, 3, 0, 0),
 	F(200000000, P_GCC_NPU_GPLL0_CLK, 3, 0, 0),
 	F(214285714, P_NPU_CC_PLL1_OUT_EVEN, 7, 0, 0),
@@ -413,7 +417,6 @@ static struct clk_rcg2 npu_cc_lmh_clk_src = {
 		.vdd_class = &vdd_cx,
 		.num_rate_max = VDD_NUM,
 		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_MIN] = 60000000,
 			[VDD_LOWER] = 100000000,
 			[VDD_LOW] = 200000000,
 			[VDD_LOW_L1] = 214285714,
@@ -441,7 +444,6 @@ static struct clk_rcg2 npu_cc_xo_clk_src = {
 };
 
 static const struct freq_tbl ftbl_npu_dsp_core_clk_src[] = {
-	F(250000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
 	F(300000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
 	F(400000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
 	F(500000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
@@ -467,7 +469,6 @@ static struct clk_rcg2 npu_dsp_core_clk_src = {
 		.vdd_class = &vdd_cx,
 		.num_rate_max = VDD_NUM,
 		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_MIN] = 250000000,
 			[VDD_LOWER] = 300000000,
 			[VDD_LOW] = 400000000,
 			[VDD_LOW_L1] = 500000000,
@@ -1168,10 +1169,45 @@ static const struct qcom_cc_desc npu_qdsp6ss_pll_kona_desc = {
 
 static const struct of_device_id npu_cc_kona_match_table[] = {
 	{ .compatible = "qcom,npucc-kona" },
+	{ .compatible = "qcom,npucc-kona-v2" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, npu_cc_kona_match_table);
 
+static void npu_cc_kona_fixup_konav2(struct regmap *regmap)
+{
+	npu_cc_cal_hm0_clk_src.freq_tbl = ftbl_npu_cc_cal_hm0_clk_src_kona_v2;
+	npu_cc_cal_hm0_clk_src.clkr.hw.init->rate_max[VDD_LOW] = 406000000;
+	npu_cc_cal_hm0_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 730000000;
+	npu_cc_cal_hm0_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL_L1] =
+		850000000;
+	npu_cc_cal_hm0_clk_src.clkr.hw.init->rate_max[VDD_HIGH] = 920000000;
+	npu_cc_cal_hm0_clk_src.clkr.hw.init->rate_max[VDD_HIGH_L1] = 1000000000;
+	npu_cc_cal_hm1_clk_src.freq_tbl = ftbl_npu_cc_cal_hm0_clk_src_kona_v2;
+	npu_cc_cal_hm1_clk_src.clkr.hw.init->rate_max[VDD_LOW] = 406000000;
+	npu_cc_cal_hm1_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 730000000;
+	npu_cc_cal_hm1_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL_L1] =
+		850000000;
+	npu_cc_cal_hm1_clk_src.clkr.hw.init->rate_max[VDD_HIGH] = 920000000;
+	npu_cc_cal_hm1_clk_src.clkr.hw.init->rate_max[VDD_HIGH_L1] = 1000000000;
+}
+
+static int npu_cc_kona_fixup(struct platform_device *pdev,
+	struct regmap *regmap)
+{
+	const char *compat = NULL;
+	int compatlen = 0;
+
+	compat = of_get_property(pdev->dev.of_node, "compatible", &compatlen);
+	if (!compat || (compatlen <= 0))
+		return -EINVAL;
+
+	if (!strcmp(compat, "qcom,npucc-kona-v2"))
+		npu_cc_kona_fixup_konav2(regmap);
+
+	return 0;
+}
+
 static int npu_clocks_kona_probe(struct platform_device *pdev,
 				 const struct qcom_cc_desc *desc)
 {
@@ -1203,6 +1239,10 @@ static int npu_clocks_kona_probe(struct platform_device *pdev,
 				ret);
 			return ret;
 		}
+
+		ret = npu_cc_kona_fixup(pdev, regmap);
+		if (ret)
+			return ret;
 	} else if (!strcmp("qdsp6ss_pll", desc->config->name)) {
 		clk_lucid_pll_configure(&npu_q6ss_pll, regmap,
 					&npu_q6ss_pll_config);
diff --git a/drivers/clk/qcom/npucc-lito.c b/drivers/clk/qcom/npucc-lito.c
index e2604f5..5a56ca9 100644
--- a/drivers/clk/qcom/npucc-lito.c
+++ b/drivers/clk/qcom/npucc-lito.c
@@ -26,7 +26,7 @@
 #include "clk-regmap.h"
 #include "common.h"
 #include "reset.h"
-#include "vdd-level.h"
+#include "vdd-level-lito.h"
 
 static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
 
@@ -283,10 +283,11 @@ static struct clk_fixed_factor npu_cc_crc_div = {
 
 static const struct freq_tbl ftbl_npu_cc_cal_hm0_clk_src[] = {
 	F(200000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
-	F(300000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
-	F(518400000, P_NPU_CC_CRC_DIV, 1, 0, 0),
-	F(633600000, P_NPU_CC_CRC_DIV, 1, 0, 0),
-	F(825600000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(230000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(422000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(557000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(729000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(844000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
 	F(1000000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
 	{ }
 };
@@ -308,11 +309,12 @@ static struct clk_rcg2 npu_cc_cal_hm0_clk_src = {
 		.num_rate_max = VDD_NUM,
 		.rate_max = (unsigned long[VDD_NUM]) {
 			[VDD_MIN] = 200000000,
-			[VDD_LOWER] = 300000000,
-			[VDD_LOW] = 518400000,
-			[VDD_LOW_L1] = 633600000,
-			[VDD_NOMINAL] = 825600000,
-			[VDD_HIGH] = 1000000000},
+			[VDD_LOWER] = 230000000,
+			[VDD_LOW] = 422000000,
+			[VDD_LOW_L1] = 557000000,
+			[VDD_NOMINAL] = 729000000,
+			[VDD_HIGH] = 844000000,
+			[VDD_HIGH_L1] = 1000000000},
 	},
 };
 
diff --git a/drivers/clk/qcom/vdd-level-lito.h b/drivers/clk/qcom/vdd-level-lito.h
new file mode 100644
index 0000000..b968de1
--- /dev/null
+++ b/drivers/clk/qcom/vdd-level-lito.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_CLK_QCOM_VDD_LEVEL_LITO_H
+#define __DRIVERS_CLK_QCOM_VDD_LEVEL_LITO_H
+
+#include <linux/regulator/consumer.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
+
+enum vdd_levels {
+	VDD_NONE,
+	VDD_MIN,		/* MIN SVS */
+	VDD_LOWER,		/* SVS2 */
+	VDD_LOW,		/* SVS */
+	VDD_LOW_L1,		/* SVSL1 */
+	VDD_NOMINAL,		/* NOM */
+	VDD_HIGH,		/* TURBO */
+	VDD_HIGH_L1,		/* TURBO_L1 */
+	VDD_NUM,
+};
+
+static int vdd_corner[] = {
+	[VDD_NONE]    = 0,
+	[VDD_MIN]     = RPMH_REGULATOR_LEVEL_MIN_SVS,
+	[VDD_LOWER]   = RPMH_REGULATOR_LEVEL_LOW_SVS,
+	[VDD_LOW]     = RPMH_REGULATOR_LEVEL_SVS,
+	[VDD_LOW_L1]  = RPMH_REGULATOR_LEVEL_SVS_L1,
+	[VDD_NOMINAL] = RPMH_REGULATOR_LEVEL_NOM,
+	[VDD_HIGH]    = RPMH_REGULATOR_LEVEL_TURBO,
+	[VDD_HIGH_L1] = RPMH_REGULATOR_LEVEL_TURBO_L1,
+};
+
+#endif
diff --git a/drivers/clk/qcom/vdd-level.h b/drivers/clk/qcom/vdd-level.h
index 17b3a0f..3c0595a 100644
--- a/drivers/clk/qcom/vdd-level.h
+++ b/drivers/clk/qcom/vdd-level.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __DRIVERS_CLK_QCOM_VDD_LEVEL_H
@@ -16,7 +16,10 @@ enum vdd_levels {
 	VDD_LOW,		/* SVS */
 	VDD_LOW_L1,		/* SVSL1 */
 	VDD_NOMINAL,		/* NOM */
+	VDD_NOMINAL_L1,		/* NOM L1 */
 	VDD_HIGH,		/* TURBO */
+	VDD_HIGH_L1,		/* TURBO L1 */
+	VDD_NUM_MM = VDD_HIGH_L1,	/* MM rail tables stop at TURBO */
 	VDD_NUM,
 };
 
@@ -27,7 +30,9 @@ static int vdd_corner[] = {
 	[VDD_LOW]     = RPMH_REGULATOR_LEVEL_SVS,
 	[VDD_LOW_L1]  = RPMH_REGULATOR_LEVEL_SVS_L1,
 	[VDD_NOMINAL] = RPMH_REGULATOR_LEVEL_NOM,
+	[VDD_NOMINAL_L1] = RPMH_REGULATOR_LEVEL_NOM_L1,
 	[VDD_HIGH]    = RPMH_REGULATOR_LEVEL_TURBO,
+	[VDD_HIGH_L1] = RPMH_REGULATOR_LEVEL_TURBO_L1,
 };
 
 #endif
diff --git a/drivers/clk/qcom/videocc-kona.c b/drivers/clk/qcom/videocc-kona.c
index a18ba3b..4616556 100644
--- a/drivers/clk/qcom/videocc-kona.c
+++ b/drivers/clk/qcom/videocc-kona.c
@@ -37,7 +37,7 @@
 	.ib = _ib,				\
 }
 
-static DEFINE_VDD_REGULATORS(vdd_mm, VDD_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_mm, VDD_NUM_MM, 1, vdd_corner);
 static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner);
 
 static struct msm_bus_vectors clk_debugfs_vectors[] = {
diff --git a/drivers/clk/qcom/videocc-lito.c b/drivers/clk/qcom/videocc-lito.c
index 6619bb4..03b63f8 100644
--- a/drivers/clk/qcom/videocc-lito.c
+++ b/drivers/clk/qcom/videocc-lito.c
@@ -28,7 +28,7 @@
 #include "common.h"
 #include "gdsc.h"
 #include "reset.h"
-#include "vdd-level.h"
+#include "vdd-level-lito.h"
 
 static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
 
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index e82adcb..45d94fb 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -341,7 +341,8 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
 		return;
 
 	pd->name = np->name;
-	pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
+	pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
+		    GENPD_FLAG_ACTIVE_WAKEUP;
 	pd->attach_dev = cpg_mstp_attach_dev;
 	pd->detach_dev = cpg_mstp_detach_dev;
 	pm_genpd_init(pd, &pm_domain_always_on_gov, false);
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index f4b013e..d7a2ad6 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -514,7 +514,8 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev,
 
 	genpd = &pd->genpd;
 	genpd->name = np->name;
-	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
+	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
+		       GENPD_FLAG_ACTIVE_WAKEUP;
 	genpd->attach_dev = cpg_mssr_attach_dev;
 	genpd->detach_dev = cpg_mssr_detach_dev;
 	pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
@@ -535,17 +536,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
 	unsigned int reg = id / 32;
 	unsigned int bit = id % 32;
 	u32 bitmask = BIT(bit);
-	unsigned long flags;
-	u32 value;
 
 	dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
 
 	/* Reset module */
-	spin_lock_irqsave(&priv->rmw_lock, flags);
-	value = readl(priv->base + SRCR(reg));
-	value |= bitmask;
-	writel(value, priv->base + SRCR(reg));
-	spin_unlock_irqrestore(&priv->rmw_lock, flags);
+	writel(bitmask, priv->base + SRCR(reg));
 
 	/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
 	udelay(35);
@@ -562,16 +557,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
 	unsigned int reg = id / 32;
 	unsigned int bit = id % 32;
 	u32 bitmask = BIT(bit);
-	unsigned long flags;
-	u32 value;
 
 	dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
 
-	spin_lock_irqsave(&priv->rmw_lock, flags);
-	value = readl(priv->base + SRCR(reg));
-	value |= bitmask;
-	writel(value, priv->base + SRCR(reg));
-	spin_unlock_irqrestore(&priv->rmw_lock, flags);
+	writel(bitmask, priv->base + SRCR(reg));
 	return 0;
 }
 
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index 026a26b..dbec842 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -61,10 +61,8 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
 	u32 delay_num = 0;
 
 	/* See the comment for rockchip_mmc_set_phase below */
-	if (!rate) {
-		pr_err("%s: invalid clk rate\n", __func__);
+	if (!rate)
 		return -EINVAL;
-	}
 
 	raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
 
diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c
index d8f9efa..25351d6 100644
--- a/drivers/clk/sirf/clk-common.c
+++ b/drivers/clk/sirf/clk-common.c
@@ -298,9 +298,10 @@ static u8 dmn_clk_get_parent(struct clk_hw *hw)
 {
 	struct clk_dmn *clk = to_dmnclk(hw);
 	u32 cfg = clkc_readl(clk->regofs);
+	const char *name = clk_hw_get_name(hw);
 
 	/* parent of io domain can only be pll3 */
-	if (strcmp(hw->init->name, "io") == 0)
+	if (strcmp(name, "io") == 0)
 		return 4;
 
 	WARN_ON((cfg & (BIT(3) - 1)) > 4);
@@ -312,9 +313,10 @@ static int dmn_clk_set_parent(struct clk_hw *hw, u8 parent)
 {
 	struct clk_dmn *clk = to_dmnclk(hw);
 	u32 cfg = clkc_readl(clk->regofs);
+	const char *name = clk_hw_get_name(hw);
 
 	/* parent of io domain can only be pll3 */
-	if (strcmp(hw->init->name, "io") == 0)
+	if (strcmp(name, "io") == 0)
 		return -EINVAL;
 
 	cfg &= ~(BIT(3) - 1);
@@ -354,7 +356,8 @@ static long dmn_clk_round_rate(struct clk_hw *hw, unsigned long rate,
 {
 	unsigned long fin;
 	unsigned ratio, wait, hold;
-	unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
+	const char *name = clk_hw_get_name(hw);
+	unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
 
 	fin = *parent_rate;
 	ratio = fin / rate;
@@ -376,7 +379,8 @@ static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate,
 	struct clk_dmn *clk = to_dmnclk(hw);
 	unsigned long fin;
 	unsigned ratio, wait, hold, reg;
-	unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
+	const char *name = clk_hw_get_name(hw);
+	unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
 
 	fin = parent_rate;
 	ratio = fin / rate;
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index 568f59b..e7c877d 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -37,7 +37,7 @@ static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk,
 	if (socfpgaclk->fixed_div) {
 		div = socfpgaclk->fixed_div;
 	} else {
-		if (!socfpgaclk->bypass_reg)
+		if (socfpgaclk->hw.reg)
 			div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
 	}
 
diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
index 8789247..bad8099 100644
--- a/drivers/clk/sprd/Kconfig
+++ b/drivers/clk/sprd/Kconfig
@@ -2,6 +2,7 @@
 	tristate "Clock support for Spreadtrum SoCs"
 	depends on ARCH_SPRD || COMPILE_TEST
 	default ARCH_SPRD
+	select REGMAP_MMIO
 
 if SPRD_COMMON_CLK
 
diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c
index e038b044..8bdab1c 100644
--- a/drivers/clk/sprd/common.c
+++ b/drivers/clk/sprd/common.c
@@ -71,16 +71,17 @@ int sprd_clk_probe(struct device *dev, struct clk_hw_onecell_data *clkhw)
 	struct clk_hw *hw;
 
 	for (i = 0; i < clkhw->num; i++) {
+		const char *name;
 
 		hw = clkhw->hws[i];
-
 		if (!hw)
 			continue;
 
+		name = hw->init->name;
 		ret = devm_clk_hw_register(dev, hw);
 		if (ret) {
 			dev_err(dev, "Couldn't register clock %d - %s\n",
-				i, hw->init->name);
+				i, name);
 			return ret;
 		}
 	}
diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c
index 36b4402..640270f 100644
--- a/drivers/clk/sprd/pll.c
+++ b/drivers/clk/sprd/pll.c
@@ -136,6 +136,7 @@ static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll,
 					 k2 + refin * nint * CLK_PLL_1M;
 	}
 
+	kfree(cfg);
 	return rate;
 }
 
@@ -222,6 +223,7 @@ static int _sprd_pll_set_rate(const struct sprd_pll *pll,
 	if (!ret)
 		udelay(pll->udelay);
 
+	kfree(cfg);
 	return ret;
 }
 
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index ac12f26..9e3f408 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -499,6 +499,9 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
 		[CLK_MMC1]		= &mmc1_clk.common.hw,
 		[CLK_MMC1_SAMPLE]	= &mmc1_sample_clk.common.hw,
 		[CLK_MMC1_OUTPUT]	= &mmc1_output_clk.common.hw,
+		[CLK_MMC2]		= &mmc2_clk.common.hw,
+		[CLK_MMC2_SAMPLE]	= &mmc2_sample_clk.common.hw,
+		[CLK_MMC2_OUTPUT]	= &mmc2_output_clk.common.hw,
 		[CLK_CE]		= &ce_clk.common.hw,
 		[CLK_SPI0]		= &spi0_clk.common.hw,
 		[CLK_USB_PHY0]		= &usb_phy0_clk.common.hw,
diff --git a/drivers/clk/tegra/clk-audio-sync.c b/drivers/clk/tegra/clk-audio-sync.c
index 92d04ce..53cdc0e 100644
--- a/drivers/clk/tegra/clk-audio-sync.c
+++ b/drivers/clk/tegra/clk-audio-sync.c
@@ -55,7 +55,7 @@ const struct clk_ops tegra_clk_sync_source_ops = {
 };
 
 struct clk *tegra_clk_register_sync_source(const char *name,
-		unsigned long rate, unsigned long max_rate)
+					   unsigned long max_rate)
 {
 	struct tegra_clk_sync_source *sync;
 	struct clk_init_data init;
@@ -67,7 +67,6 @@ struct clk *tegra_clk_register_sync_source(const char *name,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	sync->rate = rate;
 	sync->max_rate = max_rate;
 
 	init.ops = &tegra_clk_sync_source_ops;
diff --git a/drivers/clk/tegra/clk-tegra-audio.c b/drivers/clk/tegra/clk-tegra-audio.c
index b37cae7..02dd648 100644
--- a/drivers/clk/tegra/clk-tegra-audio.c
+++ b/drivers/clk/tegra/clk-tegra-audio.c
@@ -49,8 +49,6 @@ struct tegra_sync_source_initdata {
 #define SYNC(_name) \
 	{\
 		.name		= #_name,\
-		.rate		= 24000000,\
-		.max_rate	= 24000000,\
 		.clk_id		= tegra_clk_ ## _name,\
 	}
 
@@ -176,7 +174,7 @@ static void __init tegra_audio_sync_clk_init(void __iomem *clk_base,
 void __init tegra_audio_clk_init(void __iomem *clk_base,
 			void __iomem *pmc_base, struct tegra_clk *tegra_clks,
 			struct tegra_audio_clk_info *audio_info,
-			unsigned int num_plls)
+			unsigned int num_plls, unsigned long sync_max_rate)
 {
 	struct clk *clk;
 	struct clk **dt_clk;
@@ -221,8 +219,7 @@ void __init tegra_audio_clk_init(void __iomem *clk_base,
 		if (!dt_clk)
 			continue;
 
-		clk = tegra_clk_register_sync_source(data->name,
-					data->rate, data->max_rate);
+		clk = tegra_clk_register_sync_source(data->name, sync_max_rate);
 		*dt_clk = clk;
 	}
 
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 1824f01..625d110 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -1190,6 +1190,13 @@ static struct tegra_clk_init_table init_table[] __initdata = {
 	{ TEGRA114_CLK_XUSB_FALCON_SRC, TEGRA114_CLK_PLL_P, 204000000, 0 },
 	{ TEGRA114_CLK_XUSB_HOST_SRC, TEGRA114_CLK_PLL_P, 102000000, 0 },
 	{ TEGRA114_CLK_VDE, TEGRA114_CLK_CLK_MAX, 600000000, 0 },
+	{ TEGRA114_CLK_SPDIF_IN_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+	{ TEGRA114_CLK_I2S0_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+	{ TEGRA114_CLK_I2S1_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+	{ TEGRA114_CLK_I2S2_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+	{ TEGRA114_CLK_I2S3_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+	{ TEGRA114_CLK_I2S4_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+	{ TEGRA114_CLK_VIMCLK_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
 	/* must be the last entry */
 	{ TEGRA114_CLK_CLK_MAX, TEGRA114_CLK_CLK_MAX, 0, 0 },
 };
@@ -1362,7 +1369,7 @@ static void __init tegra114_clock_init(struct device_node *np)
 	tegra114_periph_clk_init(clk_base, pmc_base);
 	tegra_audio_clk_init(clk_base, pmc_base, tegra114_clks,
 			     tegra114_audio_plls,
-			     ARRAY_SIZE(tegra114_audio_plls));
+			     ARRAY_SIZE(tegra114_audio_plls), 24000000);
 	tegra_pmc_clk_init(pmc_base, tegra114_clks);
 	tegra_super_clk_gen4_init(clk_base, pmc_base, tegra114_clks,
 					&pll_x_params);
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index b6cf28c..df0018f 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1291,6 +1291,13 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
 	{ TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1 },
 	{ TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0 },
 	{ TEGRA124_CLK_VIC03, TEGRA124_CLK_PLL_C3, 0, 0 },
+	{ TEGRA124_CLK_SPDIF_IN_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+	{ TEGRA124_CLK_I2S0_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+	{ TEGRA124_CLK_I2S1_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+	{ TEGRA124_CLK_I2S2_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+	{ TEGRA124_CLK_I2S3_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+	{ TEGRA124_CLK_I2S4_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+	{ TEGRA124_CLK_VIMCLK_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
 	/* must be the last entry */
 	{ TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0 },
 };
@@ -1455,7 +1462,7 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np)
 	tegra124_periph_clk_init(clk_base, pmc_base);
 	tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks,
 			     tegra124_audio_plls,
-			     ARRAY_SIZE(tegra124_audio_plls));
+			     ARRAY_SIZE(tegra124_audio_plls), 24576000);
 	tegra_pmc_clk_init(pmc_base, tegra124_clks);
 
 	/* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 4e1bc23..080bfa2 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -3369,6 +3369,15 @@ static struct tegra_clk_init_table init_table[] __initdata = {
 	{ TEGRA210_CLK_SOC_THERM, TEGRA210_CLK_PLL_P, 51000000, 0 },
 	{ TEGRA210_CLK_CCLK_G, TEGRA210_CLK_CLK_MAX, 0, 1 },
 	{ TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 },
+	{ TEGRA210_CLK_SPDIF_IN_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+	{ TEGRA210_CLK_I2S0_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+	{ TEGRA210_CLK_I2S1_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+	{ TEGRA210_CLK_I2S2_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+	{ TEGRA210_CLK_I2S3_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+	{ TEGRA210_CLK_I2S4_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+	{ TEGRA210_CLK_VIMCLK_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+	{ TEGRA210_CLK_HDA, TEGRA210_CLK_PLL_P, 51000000, 0 },
+	{ TEGRA210_CLK_HDA2CODEC_2X, TEGRA210_CLK_PLL_P, 48000000, 0 },
 	/* This MUST be the last entry. */
 	{ TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 },
 };
@@ -3562,7 +3571,7 @@ static void __init tegra210_clock_init(struct device_node *np)
 	tegra210_periph_clk_init(clk_base, pmc_base);
 	tegra_audio_clk_init(clk_base, pmc_base, tegra210_clks,
 			     tegra210_audio_plls,
-			     ARRAY_SIZE(tegra210_audio_plls));
+			     ARRAY_SIZE(tegra210_audio_plls), 24576000);
 	tegra_pmc_clk_init(pmc_base, tegra210_clks);
 
 	/* For Tegra210, PLLD is the only source for DSIA & DSIB */
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index acfe661..e0aaecd 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1267,6 +1267,13 @@ static struct tegra_clk_init_table init_table[] __initdata = {
 	{ TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0 },
 	{ TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 },
 	{ TEGRA30_CLK_VDE, TEGRA30_CLK_CLK_MAX, 600000000, 0 },
+	{ TEGRA30_CLK_SPDIF_IN_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+	{ TEGRA30_CLK_I2S0_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+	{ TEGRA30_CLK_I2S1_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+	{ TEGRA30_CLK_I2S2_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+	{ TEGRA30_CLK_I2S3_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+	{ TEGRA30_CLK_I2S4_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+	{ TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
 	/* must be the last entry */
 	{ TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
 };
@@ -1344,7 +1351,7 @@ static void __init tegra30_clock_init(struct device_node *np)
 	tegra30_periph_clk_init();
 	tegra_audio_clk_init(clk_base, pmc_base, tegra30_clks,
 			     tegra30_audio_plls,
-			     ARRAY_SIZE(tegra30_audio_plls));
+			     ARRAY_SIZE(tegra30_audio_plls), 24000000);
 	tegra_pmc_clk_init(pmc_base, tegra30_clks);
 
 	tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA30_CLK_CLK_MAX);
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index d2c3a01..09bccbb 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -41,7 +41,7 @@ extern const struct clk_ops tegra_clk_sync_source_ops;
 extern int *periph_clk_enb_refcnt;
 
 struct clk *tegra_clk_register_sync_source(const char *name,
-		unsigned long fixed_rate, unsigned long max_rate);
+					   unsigned long max_rate);
 
 /**
  * struct tegra_clk_frac_div - fractional divider clock
@@ -796,7 +796,7 @@ void tegra_register_devclks(struct tegra_devclk *dev_clks, int num);
 void tegra_audio_clk_init(void __iomem *clk_base,
 			void __iomem *pmc_base, struct tegra_clk *tegra_clks,
 			struct tegra_audio_clk_info *audio_info,
-			unsigned int num_plls);
+			unsigned int num_plls, unsigned long sync_max_rate);
 
 void tegra_periph_clk_init(void __iomem *clk_base, void __iomem *pmc_base,
 			struct tegra_clk *tegra_clks,
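
A note on the Tegra hunks above: tegra_clk_register_sync_source() no longer takes a fixed rate; registration passes only the per-SoC ceiling, and the running sync rates are set generically through each SoC's init table. A minimal sketch of the new pattern (values taken from the Tegra210 hunks; illustrative only):

	/* register with just the ceiling ... */
	clk = tegra_clk_register_sync_source("i2s0_sync", 24576000);

	/* ... and let the init table set the running rate */
	{ TEGRA210_CLK_I2S0_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
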
diff --git a/drivers/clk/zte/clk-zx296718.c b/drivers/clk/zte/clk-zx296718.c
index 354dd50..8dfb852 100644
--- a/drivers/clk/zte/clk-zx296718.c
+++ b/drivers/clk/zte/clk-zx296718.c
@@ -567,6 +567,7 @@ static int __init top_clocks_init(struct device_node *np)
 {
 	void __iomem *reg_base;
 	int i, ret;
+	const char *name;
 
 	reg_base = of_iomap(np, 0);
 	if (!reg_base) {
@@ -576,11 +577,10 @@ static int __init top_clocks_init(struct device_node *np)
 
 	for (i = 0; i < ARRAY_SIZE(zx296718_pll_clk); i++) {
 		zx296718_pll_clk[i].reg_base += (uintptr_t)reg_base;
+		name = zx296718_pll_clk[i].hw.init->name;
 		ret = clk_hw_register(NULL, &zx296718_pll_clk[i].hw);
-		if (ret) {
-			pr_warn("top clk %s init error!\n",
-				zx296718_pll_clk[i].hw.init->name);
-		}
+		if (ret)
+			pr_warn("top clk %s init error!\n", name);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(top_ffactor_clk); i++) {
@@ -588,11 +588,10 @@ static int __init top_clocks_init(struct device_node *np)
 			top_hw_onecell_data.hws[top_ffactor_clk[i].id] =
 					&top_ffactor_clk[i].factor.hw;
 
+		name = top_ffactor_clk[i].factor.hw.init->name;
 		ret = clk_hw_register(NULL, &top_ffactor_clk[i].factor.hw);
-		if (ret) {
-			pr_warn("top clk %s init error!\n",
-				top_ffactor_clk[i].factor.hw.init->name);
-		}
+		if (ret)
+			pr_warn("top clk %s init error!\n", name);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(top_mux_clk); i++) {
@@ -601,11 +600,10 @@ static int __init top_clocks_init(struct device_node *np)
 					&top_mux_clk[i].mux.hw;
 
 		top_mux_clk[i].mux.reg += (uintptr_t)reg_base;
+		name = top_mux_clk[i].mux.hw.init->name;
 		ret = clk_hw_register(NULL, &top_mux_clk[i].mux.hw);
-		if (ret) {
-			pr_warn("top clk %s init error!\n",
-				top_mux_clk[i].mux.hw.init->name);
-		}
+		if (ret)
+			pr_warn("top clk %s init error!\n", name);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(top_gate_clk); i++) {
@@ -614,11 +612,10 @@ static int __init top_clocks_init(struct device_node *np)
 					&top_gate_clk[i].gate.hw;
 
 		top_gate_clk[i].gate.reg += (uintptr_t)reg_base;
+		name = top_gate_clk[i].gate.hw.init->name;
 		ret = clk_hw_register(NULL, &top_gate_clk[i].gate.hw);
-		if (ret) {
-			pr_warn("top clk %s init error!\n",
-				top_gate_clk[i].gate.hw.init->name);
-		}
+		if (ret)
+			pr_warn("top clk %s init error!\n", name);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(top_div_clk); i++) {
@@ -627,11 +624,10 @@ static int __init top_clocks_init(struct device_node *np)
 					&top_div_clk[i].div.hw;
 
 		top_div_clk[i].div.reg += (uintptr_t)reg_base;
+		name = top_div_clk[i].div.hw.init->name;
 		ret = clk_hw_register(NULL, &top_div_clk[i].div.hw);
-		if (ret) {
-			pr_warn("top clk %s init error!\n",
-				top_div_clk[i].div.hw.init->name);
-		}
+		if (ret)
+			pr_warn("top clk %s init error!\n", name);
 	}
 
 	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
@@ -757,6 +753,7 @@ static int __init lsp0_clocks_init(struct device_node *np)
 {
 	void __iomem *reg_base;
 	int i, ret;
+	const char *name;
 
 	reg_base = of_iomap(np, 0);
 	if (!reg_base) {
@@ -770,11 +767,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
 					&lsp0_mux_clk[i].mux.hw;
 
 		lsp0_mux_clk[i].mux.reg += (uintptr_t)reg_base;
+		name = lsp0_mux_clk[i].mux.hw.init->name;
 		ret = clk_hw_register(NULL, &lsp0_mux_clk[i].mux.hw);
-		if (ret) {
-			pr_warn("lsp0 clk %s init error!\n",
-				lsp0_mux_clk[i].mux.hw.init->name);
-		}
+		if (ret)
+			pr_warn("lsp0 clk %s init error!\n", name);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(lsp0_gate_clk); i++) {
@@ -783,11 +779,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
 					&lsp0_gate_clk[i].gate.hw;
 
 		lsp0_gate_clk[i].gate.reg += (uintptr_t)reg_base;
+		name = lsp0_gate_clk[i].gate.hw.init->name;
 		ret = clk_hw_register(NULL, &lsp0_gate_clk[i].gate.hw);
-		if (ret) {
-			pr_warn("lsp0 clk %s init error!\n",
-				lsp0_gate_clk[i].gate.hw.init->name);
-		}
+		if (ret)
+			pr_warn("lsp0 clk %s init error!\n", name);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(lsp0_div_clk); i++) {
@@ -796,11 +791,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
 					&lsp0_div_clk[i].div.hw;
 
 		lsp0_div_clk[i].div.reg += (uintptr_t)reg_base;
+		name = lsp0_div_clk[i].div.hw.init->name;
 		ret = clk_hw_register(NULL, &lsp0_div_clk[i].div.hw);
-		if (ret) {
-			pr_warn("lsp0 clk %s init error!\n",
-				lsp0_div_clk[i].div.hw.init->name);
-		}
+		if (ret)
+			pr_warn("lsp0 clk %s init error!\n", name);
 	}
 
 	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
@@ -865,6 +859,7 @@ static int __init lsp1_clocks_init(struct device_node *np)
 {
 	void __iomem *reg_base;
 	int i, ret;
+	const char *name;
 
 	reg_base = of_iomap(np, 0);
 	if (!reg_base) {
@@ -878,11 +873,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
 					&lsp0_mux_clk[i].mux.hw;
 
 		lsp1_mux_clk[i].mux.reg += (uintptr_t)reg_base;
+		name = lsp1_mux_clk[i].mux.hw.init->name;
 		ret = clk_hw_register(NULL, &lsp1_mux_clk[i].mux.hw);
-		if (ret) {
-			pr_warn("lsp1 clk %s init error!\n",
-				lsp1_mux_clk[i].mux.hw.init->name);
-		}
+		if (ret)
+			pr_warn("lsp1 clk %s init error!\n", name);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(lsp1_gate_clk); i++) {
@@ -891,11 +885,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
 					&lsp1_gate_clk[i].gate.hw;
 
 		lsp1_gate_clk[i].gate.reg += (uintptr_t)reg_base;
+		name = lsp1_gate_clk[i].gate.hw.init->name;
 		ret = clk_hw_register(NULL, &lsp1_gate_clk[i].gate.hw);
-		if (ret) {
-			pr_warn("lsp1 clk %s init error!\n",
-				lsp1_gate_clk[i].gate.hw.init->name);
-		}
+		if (ret)
+			pr_warn("lsp1 clk %s init error!\n", name);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(lsp1_div_clk); i++) {
@@ -904,11 +897,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
 					&lsp1_div_clk[i].div.hw;
 
 		lsp1_div_clk[i].div.reg += (uintptr_t)reg_base;
+		name = lsp1_div_clk[i].div.hw.init->name;
 		ret = clk_hw_register(NULL, &lsp1_div_clk[i].div.hw);
-		if (ret) {
-			pr_warn("lsp1 clk %s init error!\n",
-				lsp1_div_clk[i].div.hw.init->name);
-		}
+		if (ret)
+			pr_warn("lsp1 clk %s init error!\n", name);
 	}
 
 	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
@@ -982,6 +974,7 @@ static int __init audio_clocks_init(struct device_node *np)
 {
 	void __iomem *reg_base;
 	int i, ret;
+	const char *name;
 
 	reg_base = of_iomap(np, 0);
 	if (!reg_base) {
@@ -995,11 +988,10 @@ static int __init audio_clocks_init(struct device_node *np)
 					&audio_mux_clk[i].mux.hw;
 
 		audio_mux_clk[i].mux.reg += (uintptr_t)reg_base;
+		name = audio_mux_clk[i].mux.hw.init->name;
 		ret = clk_hw_register(NULL, &audio_mux_clk[i].mux.hw);
-		if (ret) {
-			pr_warn("audio clk %s init error!\n",
-				audio_mux_clk[i].mux.hw.init->name);
-		}
+		if (ret)
+			pr_warn("audio clk %s init error!\n", name);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(audio_adiv_clk); i++) {
@@ -1008,11 +1000,10 @@ static int __init audio_clocks_init(struct device_node *np)
 					&audio_adiv_clk[i].hw;
 
 		audio_adiv_clk[i].reg_base += (uintptr_t)reg_base;
+		name = audio_adiv_clk[i].hw.init->name;
 		ret = clk_hw_register(NULL, &audio_adiv_clk[i].hw);
-		if (ret) {
-			pr_warn("audio clk %s init error!\n",
-				audio_adiv_clk[i].hw.init->name);
-		}
+		if (ret)
+			pr_warn("audio clk %s init error!\n", name);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(audio_div_clk); i++) {
@@ -1021,11 +1012,10 @@ static int __init audio_clocks_init(struct device_node *np)
 					&audio_div_clk[i].div.hw;
 
 		audio_div_clk[i].div.reg += (uintptr_t)reg_base;
+		name = audio_div_clk[i].div.hw.init->name;
 		ret = clk_hw_register(NULL, &audio_div_clk[i].div.hw);
-		if (ret) {
-			pr_warn("audio clk %s init error!\n",
-				audio_div_clk[i].div.hw.init->name);
-		}
+		if (ret)
+			pr_warn("audio clk %s init error!\n", name);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(audio_gate_clk); i++) {
@@ -1034,11 +1024,10 @@ static int __init audio_clocks_init(struct device_node *np)
 					&audio_gate_clk[i].gate.hw;
 
 		audio_gate_clk[i].gate.reg += (uintptr_t)reg_base;
+		name = audio_gate_clk[i].gate.hw.init->name;
 		ret = clk_hw_register(NULL, &audio_gate_clk[i].gate.hw);
-		if (ret) {
-			pr_warn("audio clk %s init error!\n",
-				audio_gate_clk[i].gate.hw.init->name);
-		}
+		if (ret)
+			pr_warn("audio clk %s init error!\n", name);
 	}
 
 	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
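
All of the zx296718 hunks above apply one pattern: capture init->name before calling clk_hw_register(). Per the upstream rationale, the clk core is moving toward guaranteeing that clk_hw::init is NULL once registration returns, so dereferencing it in the error path would be a NULL pointer access. Schematically (hw stands for any of the arrays touched above):

	const char *name = hw->init->name;	/* still valid here */
	ret = clk_hw_register(NULL, hw);	/* may clear hw->init */
	if (ret)
		pr_warn("clk %s init error!\n", name);
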
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ecaf191..136c9c2 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2260,6 +2260,10 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 			CPUFREQ_ADJUST, new_policy);
 
+	/* adjust if necessary - hardware incompatibility */
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+			CPUFREQ_INCOMPATIBLE, new_policy);
+
 	/*
 	 * verify the cpu speed can be set within this limit, which might be
 	 * different to the first one
@@ -2616,14 +2620,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 }
 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
 
-/*
- * Stop cpufreq at shutdown to make sure it isn't holding any locks
- * or mutexes when secondary CPUs are halted.
- */
-static struct syscore_ops cpufreq_syscore_ops = {
-	.shutdown = cpufreq_suspend,
-};
-
 struct kobject *cpufreq_global_kobject;
 EXPORT_SYMBOL(cpufreq_global_kobject);
 
@@ -2635,8 +2631,6 @@ static int __init cpufreq_core_init(void)
 	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
 	BUG_ON(!cpufreq_global_kobject);
 
-	register_syscore_ops(&cpufreq_syscore_ops);
-
 	return 0;
 }
 module_param(off, int, 0444);
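
The cpufreq hunk reintroduces the CPUFREQ_INCOMPATIBLE notifier stage, giving drivers a hook to clamp a policy for hardware reasons after the generic CPUFREQ_ADJUST pass, and removes the syscore shutdown hook that invoked cpufreq_suspend(). A hedged sketch of a consumer of the new stage; the 1.5 GHz ceiling is a made-up example:

	static int hw_limit_notifier(struct notifier_block *nb,
				     unsigned long event, void *data)
	{
		struct cpufreq_policy *policy = data;

		if (event == CPUFREQ_INCOMPATIBLE)
			/* cap to a board-specific safe ceiling */
			cpufreq_verify_within_limits(policy, 0, 1500000);

		return NOTIFY_OK;
	}
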
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 032efe8..117960f 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -5,33 +5,47 @@
 
 #include <linux/cpufreq.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/pm_opp.h>
 #include <linux/energy_model.h>
 #include <linux/sched.h>
 #include <linux/cpu_cooling.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/dcvsh.h>
+
 #define LUT_MAX_ENTRIES			40U
 #define CORE_COUNT_VAL(val)		(((val) & (GENMASK(18, 16))) >> 16)
 #define LUT_ROW_SIZE			32
 #define CLK_HW_DIV			2
+#define GT_IRQ_STATUS			BIT(2)
+#define MAX_FN_SIZE			12
+#define LIMITS_POLLING_DELAY_MS		10
 
 #define CYCLE_CNTR_OFFSET(c, m, acc_count)				\
 			(acc_count ? ((c - cpumask_first(m) + 1) * 4) : 0)
+
 enum {
 	REG_ENABLE,
 	REG_FREQ_LUT_TABLE,
 	REG_VOLT_LUT_TABLE,
 	REG_PERF_STATE,
 	REG_CYCLE_CNTR,
+	REG_DOMAIN_STATE,
+	REG_INTR_EN,
+	REG_INTR_CLR,
+	REG_INTR_STATUS,
 
 	REG_ARRAY_SIZE,
 };
 
 static unsigned int lut_row_size = LUT_ROW_SIZE;
+static unsigned int lut_max_entries = LUT_MAX_ENTRIES;
 static bool accumulative_counter;
 
 struct cpufreq_qcom {
@@ -41,6 +55,14 @@ struct cpufreq_qcom {
 	unsigned int max_cores;
 	unsigned long xo_rate;
 	unsigned long cpu_hw_rate;
+	unsigned long dcvsh_freq_limit;
+	struct delayed_work freq_poll_work;
+	struct mutex dcvsh_lock;
+	struct device_attribute freq_limit_attr;
+	int dcvsh_irq;
+	char dcvsh_irq_name[MAX_FN_SIZE];
+	bool is_irq_enabled;
+	bool is_irq_requested;
 };
 
 struct cpufreq_counter {
@@ -63,11 +85,95 @@ static const u16 cpufreq_qcom_epss_std_offsets[REG_ARRAY_SIZE] = {
 	[REG_VOLT_LUT_TABLE]	= 0x200,
 	[REG_PERF_STATE]	= 0x320,
 	[REG_CYCLE_CNTR]	= 0x3c4,
+	[REG_DOMAIN_STATE]	= 0x020,
+	[REG_INTR_EN]		= 0x304,
+	[REG_INTR_CLR]		= 0x308,
+	[REG_INTR_STATUS]	= 0x30C,
 };
 
 static struct cpufreq_counter qcom_cpufreq_counter[NR_CPUS];
 static struct cpufreq_qcom *qcom_freq_domain_map[NR_CPUS];
 
+static unsigned int qcom_cpufreq_hw_get(unsigned int cpu);
+
+static ssize_t dcvsh_freq_limit_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cpufreq_qcom *c = container_of(attr, struct cpufreq_qcom,
+						freq_limit_attr);
+	return snprintf(buf, PAGE_SIZE, "%lu\n", c->dcvsh_freq_limit);
+}
+
+static unsigned long limits_mitigation_notify(struct cpufreq_qcom *c)
+{
+	unsigned long freq;
+
+	freq = readl_relaxed(c->reg_bases[REG_DOMAIN_STATE]) &
+				GENMASK(7, 0);
+	freq = DIV_ROUND_CLOSEST_ULL(freq * c->xo_rate, 1000);
+
+	sched_update_cpu_freq_min_max(&c->related_cpus, 0, freq);
+	trace_dcvsh_freq(cpumask_first(&c->related_cpus), freq);
+	c->dcvsh_freq_limit = freq;
+
+	return freq;
+}
+
+static void limits_dcvsh_poll(struct work_struct *work)
+{
+	struct cpufreq_qcom *c = container_of(work, struct cpufreq_qcom,
+						freq_poll_work.work);
+	unsigned long freq_limit, dcvsh_freq;
+	u32 regval, cpu;
+
+	mutex_lock(&c->dcvsh_lock);
+
+	cpu = cpumask_first(&c->related_cpus);
+
+	freq_limit = limits_mitigation_notify(c);
+
+	dcvsh_freq = qcom_cpufreq_hw_get(cpu);
+
+	if (freq_limit != dcvsh_freq) {
+		mod_delayed_work(system_highpri_wq, &c->freq_poll_work,
+				msecs_to_jiffies(LIMITS_POLLING_DELAY_MS));
+	} else {
+		regval = readl_relaxed(c->reg_bases[REG_INTR_CLR]);
+		regval |= GT_IRQ_STATUS;
+		writel_relaxed(regval, c->reg_bases[REG_INTR_CLR]);
+
+		c->is_irq_enabled = true;
+		enable_irq(c->dcvsh_irq);
+	}
+
+	mutex_unlock(&c->dcvsh_lock);
+}
+
+static irqreturn_t dcvsh_handle_isr(int irq, void *data)
+{
+	struct cpufreq_qcom *c = data;
+	u32 regval;
+
+	regval = readl_relaxed(c->reg_bases[REG_INTR_STATUS]);
+	if (!(regval & GT_IRQ_STATUS))
+		return IRQ_HANDLED;
+
+	mutex_lock(&c->dcvsh_lock);
+
+	if (c->is_irq_enabled) {
+		c->is_irq_enabled = false;
+		disable_irq_nosync(c->dcvsh_irq);
+		limits_mitigation_notify(c);
+		mod_delayed_work(system_highpri_wq, &c->freq_poll_work,
+				msecs_to_jiffies(LIMITS_POLLING_DELAY_MS));
+
+	}
+
+	mutex_unlock(&c->dcvsh_lock);
+
+	return IRQ_HANDLED;
+}
+
 static u64 qcom_cpufreq_get_cpu_cycle_counter(int cpu)
 {
 	struct cpufreq_counter *cpu_counter;
@@ -129,7 +235,7 @@ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
 	c = policy->driver_data;
 
 	index = readl_relaxed(c->reg_bases[REG_PERF_STATE]);
-	index = min(index, LUT_MAX_ENTRIES - 1);
+	index = min(index, lut_max_entries - 1);
 
 	return policy->freq_table[index].frequency;
 }
@@ -183,6 +289,26 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
 
 	em_register_perf_domain(policy->cpus, ret, &em_cb);
 
+	if (c->dcvsh_irq > 0 && !c->is_irq_requested) {
+		snprintf(c->dcvsh_irq_name, sizeof(c->dcvsh_irq_name),
+					"dcvsh-irq-%d", policy->cpu);
+		ret = devm_request_threaded_irq(cpu_dev, c->dcvsh_irq, NULL,
+			dcvsh_handle_isr, IRQF_TRIGGER_HIGH | IRQF_ONESHOT |
+			IRQF_NO_SUSPEND, c->dcvsh_irq_name, c);
+		if (ret) {
+			dev_err(cpu_dev, "Failed to register irq, err %d\n", ret);
+			return ret;
+		}
+
+		c->is_irq_requested = true;
+		c->is_irq_enabled = true;
+		c->freq_limit_attr.attr.name = "dcvsh_freq_limit";
+		c->freq_limit_attr.show = dcvsh_freq_limit_show;
+		c->freq_limit_attr.attr.mode = 0444;
+		c->dcvsh_freq_limit = U32_MAX;
+		device_create_file(cpu_dev, &c->freq_limit_attr);
+	}
+
 	return 0;
 }
 
@@ -241,9 +367,10 @@ static int qcom_cpufreq_hw_read_lut(struct platform_device *pdev,
 	struct device *dev = &pdev->dev;
 	void __iomem *base_freq, *base_volt;
 	u32 data, src, lval, i, core_count, prev_cc, prev_freq, cur_freq, volt;
+	u32 vc;
 	unsigned long cpu;
 
-	c->table = devm_kcalloc(dev, LUT_MAX_ENTRIES + 1,
+	c->table = devm_kcalloc(dev, lut_max_entries + 1,
 				sizeof(*c->table), GFP_KERNEL);
 	if (!c->table)
 		return -ENOMEM;
@@ -251,7 +378,7 @@ static int qcom_cpufreq_hw_read_lut(struct platform_device *pdev,
 	base_freq = c->reg_bases[REG_FREQ_LUT_TABLE];
 	base_volt = c->reg_bases[REG_VOLT_LUT_TABLE];
 
-	for (i = 0; i < LUT_MAX_ENTRIES; i++) {
+	for (i = 0; i < lut_max_entries; i++) {
 		data = readl_relaxed(base_freq + i * lut_row_size);
 		src = (data & GENMASK(31, 30)) >> 30;
 		lval = data & GENMASK(7, 0);
@@ -259,6 +386,7 @@ static int qcom_cpufreq_hw_read_lut(struct platform_device *pdev,
 
 		data = readl_relaxed(base_volt + i * lut_row_size);
 		volt = (data & GENMASK(11, 0)) * 1000;
+		vc = data & GENMASK(21, 16);
 
 		if (src)
 			c->table[i].frequency = c->xo_rate * lval / 1000;
@@ -385,6 +513,15 @@ static int qcom_cpu_resources_init(struct platform_device *pdev,
 		return ret;
 	}
 
+	if (of_find_property(dev->of_node, "interrupts", NULL)) {
+		c->dcvsh_irq = of_irq_get(dev->of_node, index);
+		if (c->dcvsh_irq > 0) {
+			mutex_init(&c->dcvsh_lock);
+			INIT_DEFERRABLE_WORK(&c->freq_poll_work,
+					limits_dcvsh_poll);
+		}
+	}
+
 	for_each_cpu(cpu_r, &c->related_cpus)
 		qcom_freq_domain_map[cpu_r] = c;
 
@@ -419,6 +556,9 @@ static int qcom_resources_init(struct platform_device *pdev)
 	of_property_read_u32(pdev->dev.of_node, "qcom,lut-row-size",
 			      &lut_row_size);
 
+	of_property_read_u32(pdev->dev.of_node, "qcom,lut-max-entries",
+			      &lut_max_entries);
+
 	for_each_possible_cpu(cpu) {
 		cpu_np = of_cpu_device_node_get(cpu);
 		if (!cpu_np) {
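
In the qcom-cpufreq-hw hunks, the hardware raises the dcvsh interrupt when a cluster is thermally throttled; the threaded handler masks the IRQ, publishes the limit, and hands off to a delayed work that polls until the delivered frequency matches the limit before re-arming the IRQ. The limit itself is decoded from the domain-state register, as in limits_mitigation_notify() (sketch, not a full listing):

	lval = readl_relaxed(base[REG_DOMAIN_STATE]) & GENMASK(7, 0);
	freq = DIV_ROUND_CLOSEST_ULL(lval * c->xo_rate, 1000);	/* kHz */
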
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 3a01799..e7801c2 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -47,7 +47,6 @@
 #define SCLK_HZ (32768)
 #define PSCI_POWER_STATE(reset) (reset << 30)
 #define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
-#define BIAS_HYST (bias_hyst * NSEC_PER_MSEC)
 
 enum {
 	MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
@@ -83,9 +82,6 @@ struct lpm_cluster *lpm_root_node;
 static bool lpm_prediction = true;
 module_param_named(lpm_prediction, lpm_prediction, bool, 0664);
 
-static uint32_t bias_hyst;
-module_param_named(bias_hyst, bias_hyst, uint, 0664);
-
 struct lpm_history {
 	uint32_t resi[MAXSAMPLES];
 	int mode[MAXSAMPLES];
@@ -617,36 +613,23 @@ static void clear_predict_history(void)
 
 static void update_history(struct cpuidle_device *dev, int idx);
 
-static inline bool is_cpu_biased(int cpu, uint64_t *bias_time)
-{
-	u64 now = sched_clock();
-	u64 last = sched_get_cpu_last_busy_time(cpu);
-	u64 diff = 0;
-
-	if (!last)
-		return false;
-
-	diff = now - last;
-	if (diff < BIAS_HYST) {
-		*bias_time = BIAS_HYST - diff;
-		return true;
-	}
-
-	return false;
-}
-
 static inline bool lpm_disallowed(s64 sleep_us, int cpu, struct lpm_cpu *pm_cpu)
 {
 	uint64_t bias_time = 0;
 
-	if (sleep_disabled && !cpu_isolated(cpu))
+	if (cpu_isolated(cpu))
+		goto out;
+
+	if (sleep_disabled)
 		return true;
 
-	if (is_cpu_biased(cpu, &bias_time) && (!cpu_isolated(cpu))) {
+	bias_time = sched_lpm_disallowed_time(cpu);
+	if (bias_time) {
 		pm_cpu->bias = bias_time;
 		return true;
 	}
 
+out:
 	if (sleep_us < 0)
 		return true;
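
The lpm-levels hunk drops the driver-local bias bookkeeping (the bias_hyst module parameter and the sched_clock() arithmetic in is_cpu_biased()) in favor of a single scheduler query, and lets isolated CPUs skip both checks via the new out label. Shape of the remaining check:

	bias_time = sched_lpm_disallowed_time(cpu);	/* ns left, 0 = none */
	if (bias_time) {
		pm_cpu->bias = bias_time;
		return true;	/* veto low-power modes for now */
	}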
 
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index a408edd..edacf9b 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -509,6 +509,7 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
 			       const bool is_qi, int era)
 {
 	u32 geniv, moveiv;
+	u32 *wait_cmd;
 
 	/* Note: Context registers are saved. */
 	init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
@@ -604,6 +605,14 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
 
 	/* Will read cryptlen */
 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/*
+	 * Wait for IV transfer (ofifo -> class2) to finish before starting
+	 * ciphertext transfer (ofifo -> external memory).
+	 */
+	wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP);
+	set_jump_tgt_here(desc, wait_cmd);
+
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
 			     FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index a917af5..05516b0 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -12,7 +12,7 @@
 #define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
 #define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
 #define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 8 * CAAM_CMD_SZ)
 #define DESC_QI_AEAD_ENC_LEN		(DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
 #define DESC_QI_AEAD_DEC_LEN		(DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
 #define DESC_QI_AEAD_GIVENC_LEN		(DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
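
The two caam hunks belong together: cnstr_shdsc_aead_givencap() gains a JUMP that waits on JUMP_COND_NIFP for the IV transfer (ofifo -> class2) to finish before the ciphertext store may start, and the descriptor length budget grows by exactly that one command word (7 -> 8 * CAAM_CMD_SZ). The jump's target is set to the immediately following command, so the descriptor stalls on the condition and then falls through:

	wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP);
	set_jump_tgt_here(desc, wait_cmd);	/* stall here until NIFP clears */
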
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
index 6183f91..ea901bc 100644
--- a/drivers/crypto/cavium/zip/zip_main.c
+++ b/drivers/crypto/cavium/zip/zip_main.c
@@ -593,6 +593,7 @@ static const struct file_operations zip_stats_fops = {
 	.owner = THIS_MODULE,
 	.open  = zip_stats_open,
 	.read  = seq_read,
+	.release = single_release,
 };
 
 static int zip_clear_open(struct inode *inode, struct file *file)
@@ -604,6 +605,7 @@ static const struct file_operations zip_clear_fops = {
 	.owner = THIS_MODULE,
 	.open  = zip_clear_open,
 	.read  = seq_read,
+	.release = single_release,
 };
 
 static int zip_regs_open(struct inode *inode, struct file *file)
@@ -615,6 +617,7 @@ static const struct file_operations zip_regs_fops = {
 	.owner = THIS_MODULE,
 	.open  = zip_regs_open,
 	.read  = seq_read,
+	.release = single_release,
 };
 
 /* Root directory for thunderx_zip debugfs entry */
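
The cavium/zip hunks plug a seq_file leak: any open handler built on single_open() must pair it with single_release() in its file_operations, since the default release does not free the seq_file state. A minimal sketch of the pairing (show callback name assumed):

	static int zip_stats_open(struct inode *inode, struct file *file)
	{
		return single_open(file, zip_show_stats, NULL);
	}

	static const struct file_operations zip_stats_fops = {
		.owner   = THIS_MODULE,
		.open    = zip_stats_open,
		.read    = seq_read,
		.release = single_release,	/* frees what single_open() set up */
	};
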
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 9b6d897..b8c94a0 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -543,6 +543,10 @@ int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
 	unsigned long flags;
 	unsigned int i;
 
+	/* If there's no device there's nothing to do */
+	if (!ccp)
+		return 0;
+
 	spin_lock_irqsave(&ccp->cmd_lock, flags);
 
 	ccp->suspending = 1;
@@ -567,6 +571,10 @@ int ccp_dev_resume(struct sp_device *sp)
 	unsigned long flags;
 	unsigned int i;
 
+	/* If there's no device there's nothing to do */
+	if (!ccp)
+		return 0;
+
 	spin_lock_irqsave(&ccp->cmd_lock, flags);
 
 	ccp->suspending = 0;
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 0669033..aa6b45b 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -227,7 +227,7 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
 			/* In case of payload authentication failure, MUST NOT
 			 * reveal the decrypted message --> zero its memory.
 			 */
-			cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
+			cc_zero_sgl(areq->dst, areq->cryptlen);
 			err = -EBADMSG;
 		}
 	} else { /*ENCRYPT*/
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 1ff229c..186a253 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -364,7 +364,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
 	rc = cc_ivgen_init(new_drvdata);
 	if (rc) {
 		dev_err(dev, "cc_ivgen_init failed\n");
-		goto post_power_mgr_err;
+		goto post_buf_mgr_err;
 	}
 
 	/* Allocate crypto algs */
@@ -387,6 +387,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
 		goto post_hash_err;
 	}
 
+	/* All set, we can allow autosuspend */
+	cc_pm_go(new_drvdata);
+
 	/* If we got here and FIPS mode is enabled
 	 * it means all FIPS test passed, so let TEE
 	 * know we're good.
@@ -401,8 +404,6 @@ static int init_cc_resources(struct platform_device *plat_dev)
 	cc_cipher_free(new_drvdata);
 post_ivgen_err:
 	cc_ivgen_fini(new_drvdata);
-post_power_mgr_err:
-	cc_pm_fini(new_drvdata);
 post_buf_mgr_err:
 	 cc_buffer_mgr_fini(new_drvdata);
 post_req_mgr_err:
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
index 09f708f..bac278d 100644
--- a/drivers/crypto/ccree/cc_fips.c
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -21,7 +21,13 @@ static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata)
 	u32 reg;
 
 	reg = cc_ioread(drvdata, CC_REG(GPR_HOST));
-	return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK));
+	/* Did the TEE report status? */
+	if (reg & CC_FIPS_SYNC_TEE_STATUS)
+		/* Yes. Is it OK? */
+		return (reg & CC_FIPS_SYNC_MODULE_OK);
+
+	/* No. It's either not in use or will be reported later */
+	return true;
 }
 
 /*
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 79fc0a3..638082d 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -103,20 +103,19 @@ int cc_pm_put_suspend(struct device *dev)
 
 int cc_pm_init(struct cc_drvdata *drvdata)
 {
-	int rc = 0;
 	struct device *dev = drvdata_to_dev(drvdata);
 
 	/* must come before enabling to avoid redundant suspends */
 	pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
 	pm_runtime_use_autosuspend(dev);
 	/* activate the PM module */
-	rc = pm_runtime_set_active(dev);
-	if (rc)
-		return rc;
-	/* enable the PM module*/
-	pm_runtime_enable(dev);
+	return pm_runtime_set_active(dev);
+}
 
-	return rc;
+/* enable the PM module */
+void cc_pm_go(struct cc_drvdata *drvdata)
+{
+	pm_runtime_enable(drvdata_to_dev(drvdata));
 }
 
 void cc_pm_fini(struct cc_drvdata *drvdata)
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index 020a540..907a6db 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -16,6 +16,7 @@
 extern const struct dev_pm_ops ccree_pm;
 
 int cc_pm_init(struct cc_drvdata *drvdata);
+void cc_pm_go(struct cc_drvdata *drvdata);
 void cc_pm_fini(struct cc_drvdata *drvdata);
 int cc_pm_suspend(struct device *dev);
 int cc_pm_resume(struct device *dev);
@@ -29,6 +30,8 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata)
 	return 0;
 }
 
+static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
+
 static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
 
 static inline int cc_pm_suspend(struct device *dev)
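
The ccree hunks split runtime-PM bring-up into two phases: cc_pm_init() only configures autosuspend and marks the device active, while the new cc_pm_go() performs pm_runtime_enable() and is called at the very end of init_cc_resources(), so the device cannot autosuspend while queues and algorithms are still being registered. Call order, schematically:

	rc = cc_pm_init(drvdata);	/* autosuspend delay + set_active */
	if (rc)
		goto err;
	/* ... request mgr, buffer mgr, ivgen, ciphers, hashes ... */
	cc_pm_go(drvdata);		/* all set: allow autosuspend */
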
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index cdc4f9a..db2983c 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -215,17 +215,18 @@ static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
 			    dma_addr_t psec_sgl, struct sec_dev_info *info)
 {
 	struct sec_hw_sgl *sgl_current, *sgl_next;
+	dma_addr_t sgl_next_dma;
 
-	if (!hw_sgl)
-		return;
 	sgl_current = hw_sgl;
-	while (sgl_current->next) {
+	while (sgl_current) {
 		sgl_next = sgl_current->next;
-		dma_pool_free(info->hw_sgl_pool, sgl_current,
-			      sgl_current->next_sgl);
+		sgl_next_dma = sgl_current->next_sgl;
+
+		dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);
+
 		sgl_current = sgl_next;
+		psec_sgl = sgl_next_dma;
 	}
-	dma_pool_free(info->hw_sgl_pool, hw_sgl, psec_sgl);
 }
 
 static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
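
The hisilicon sec hunk fixes dma_pool_free() being passed the wrong bus address: each chunk must be freed with its own DMA handle, which is recorded in the previous chunk's next_sgl field (the head's handle comes from the caller as psec_sgl). Generic shape of the corrected walk, with placeholder names:

	dma_addr_t dma = head_dma, next_dma;

	for (cur = head; cur; cur = next) {
		next = cur->next;
		next_dma = cur->next_sgl;	/* handle of the next chunk */
		dma_pool_free(pool, cur, dma);	/* free with cur's own handle */
		dma = next_dma;
	}
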
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index 020c2bf..dac0203 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -57,6 +57,7 @@
 
 #define ICE_CRYPTO_CXT_FDE 1
 #define ICE_CRYPTO_CXT_FBE 2
+#define ICE_INSTANCE_TYPE_LENGTH 12
 
 static int ice_fde_flag;
 
@@ -654,37 +655,33 @@ static int register_ice_device(struct ice_device *ice_dev)
 	unsigned int baseminor = 0;
 	unsigned int count = 1;
 	struct device *class_dev;
-	int is_sdcc_ice = !strcmp(ice_dev->ice_instance_type, "sdcc");
-	int is_ufscard_ice = !strcmp(ice_dev->ice_instance_type, "ufscard");
+	char ice_type[ICE_INSTANCE_TYPE_LENGTH];
+
+	if (!strcmp(ice_dev->ice_instance_type, "sdcc"))
+		strlcpy(ice_type, QCOM_SDCC_ICE_DEV, sizeof(ice_type));
+	else if (!strcmp(ice_dev->ice_instance_type, "ufscard"))
+		strlcpy(ice_type, QCOM_UFS_CARD_ICE_DEV, sizeof(ice_type));
+	else
+		strlcpy(ice_type, QCOM_UFS_ICE_DEV, sizeof(ice_type));
 
 	rc = alloc_chrdev_region(&ice_dev->device_no, baseminor, count,
-			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
-				QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
+			ice_type);
 	if (rc < 0) {
 		pr_err("alloc_chrdev_region failed %d for %s\n", rc,
-			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
-				QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
+			ice_type);
 		return rc;
 	}
-	ice_dev->driver_class = class_create(THIS_MODULE,
-			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
-				QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
+	ice_dev->driver_class = class_create(THIS_MODULE, ice_type);
 	if (IS_ERR(ice_dev->driver_class)) {
 		rc = -ENOMEM;
-		pr_err("class_create failed %d for %s\n", rc,
-			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
-				QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
+		pr_err("class_create failed %d for %s\n", rc, ice_type);
 		goto exit_unreg_chrdev_region;
 	}
 	class_dev = device_create(ice_dev->driver_class, NULL,
-					ice_dev->device_no, NULL,
-			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
-				QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
+					ice_dev->device_no, NULL, ice_type);
 
 	if (!class_dev) {
-		pr_err("class_device_create failed %d for %s\n", rc,
-			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
-				QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
+		pr_err("class_device_create failed %d for %s\n", rc, ice_type);
 		rc = -ENOMEM;
 		goto exit_destroy_class;
 	}
@@ -694,9 +691,7 @@ static int register_ice_device(struct ice_device *ice_dev)
 
 	rc = cdev_add(&ice_dev->cdev, MKDEV(MAJOR(ice_dev->device_no), 0), 1);
 	if (rc < 0) {
-		pr_err("cdev_add failed %d for %s\n", rc,
-			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
-				QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
+		pr_err("cdev_add failed %d for %s\n", rc, ice_type);
 		goto exit_destroy_device;
 	}
 	return  0;
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index a9bcfbb..d00c6f5 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -3227,12 +3227,17 @@ static void _aead_aes_fb_stage1_ahash_complete(
 		unsigned char *tmp;
 
 		tmp = kmalloc(ctx->authsize, GFP_KERNEL);
+		if (!tmp) {
+			err = -ENOMEM;
+			goto ret;
+		}
 		scatterwalk_map_and_copy(tmp, rctx->fb_aes_src,
 			req->cryptlen - ctx->authsize, ctx->authsize, 0);
 		if (memcmp(rctx->fb_ahash_digest, tmp, ctx->authsize) != 0)
 			err = -EBADMSG;
 		kfree(tmp);
 	}
+ret:
 	if (err)
 		_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
 	else {
@@ -3359,6 +3364,10 @@ static int _qcrypto_aead_aes_192_fallback(struct aead_request *req,
 			unsigned char *tmp;
 
 			tmp = kmalloc(ctx->authsize, GFP_KERNEL);
+			if (!tmp) {
+				rc = -ENOMEM;
+				goto ret;
+			}
 			/* compare icv */
 			scatterwalk_map_and_copy(tmp,
 				src, req->cryptlen - ctx->authsize,
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 5c4c0a2..d78f8d5 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -95,7 +95,7 @@ struct service_hndl {
 
 static inline int get_current_node(void)
 {
-	return topology_physical_package_id(smp_processor_id());
+	return topology_physical_package_id(raw_smp_processor_id());
 }
 
 int adf_service_register(struct service_hndl *service);
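
The qat hunk swaps smp_processor_id() for raw_smp_processor_id(): the former warns when used in preemptible context, which is where get_current_node() runs. The result is only a NUMA placement hint, so a momentarily stale CPU number is harmless, e.g.:

	/* hypothetical caller: best-effort node-local allocation */
	buf = kzalloc_node(size, GFP_KERNEL, get_current_node());
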
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 41b288b..634ae48 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -959,11 +959,13 @@ static void talitos_sg_unmap(struct device *dev,
 
 static void ipsec_esp_unmap(struct device *dev,
 			    struct talitos_edesc *edesc,
-			    struct aead_request *areq)
+			    struct aead_request *areq, bool encrypt)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
 	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
 	unsigned int ivsize = crypto_aead_ivsize(aead);
+	unsigned int authsize = crypto_aead_authsize(aead);
+	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
 	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
 	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
 
@@ -972,7 +974,7 @@ static void ipsec_esp_unmap(struct device *dev,
 					 DMA_FROM_DEVICE);
 	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
 
-	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
+	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, cryptlen,
 			 areq->assoclen);
 
 	if (edesc->dma_len)
@@ -983,7 +985,7 @@ static void ipsec_esp_unmap(struct device *dev,
 		unsigned int dst_nents = edesc->dst_nents ? : 1;
 
 		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
-				   areq->assoclen + areq->cryptlen - ivsize);
+				   areq->assoclen + cryptlen - ivsize);
 	}
 }
 
@@ -1005,7 +1007,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
 
 	edesc = container_of(desc, struct talitos_edesc, desc);
 
-	ipsec_esp_unmap(dev, edesc, areq);
+	ipsec_esp_unmap(dev, edesc, areq, true);
 
 	/* copy the generated ICV to dst */
 	if (edesc->icv_ool) {
@@ -1039,7 +1041,7 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
 
 	edesc = container_of(desc, struct talitos_edesc, desc);
 
-	ipsec_esp_unmap(dev, edesc, req);
+	ipsec_esp_unmap(dev, edesc, req, false);
 
 	if (!err) {
 		char icvdata[SHA512_DIGEST_SIZE];
@@ -1085,7 +1087,7 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
 
 	edesc = container_of(desc, struct talitos_edesc, desc);
 
-	ipsec_esp_unmap(dev, edesc, req);
+	ipsec_esp_unmap(dev, edesc, req, false);
 
 	/* check ICV auth status */
 	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
@@ -1188,6 +1190,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
  * fill in and submit ipsec_esp descriptor
  */
 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+		     bool encrypt,
 		     void (*callback)(struct device *dev,
 				      struct talitos_desc *desc,
 				      void *context, int error))
@@ -1197,7 +1200,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *dev = ctx->dev;
 	struct talitos_desc *desc = &edesc->desc;
-	unsigned int cryptlen = areq->cryptlen;
+	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
 	unsigned int ivsize = crypto_aead_ivsize(aead);
 	int tbl_off = 0;
 	int sg_count, ret;
@@ -1324,7 +1327,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 
 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
 	if (ret != -EINPROGRESS) {
-		ipsec_esp_unmap(dev, edesc, areq);
+		ipsec_esp_unmap(dev, edesc, areq, encrypt);
 		kfree(edesc);
 	}
 	return ret;
@@ -1438,9 +1441,10 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
 	unsigned int authsize = crypto_aead_authsize(authenc);
 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 	unsigned int ivsize = crypto_aead_ivsize(authenc);
+	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
 
 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
-				   iv, areq->assoclen, areq->cryptlen,
+				   iv, areq->assoclen, cryptlen,
 				   authsize, ivsize, icv_stashing,
 				   areq->base.flags, encrypt);
 }
@@ -1459,7 +1463,7 @@ static int aead_encrypt(struct aead_request *req)
 	/* set encrypt */
 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
 
-	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
+	return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
 }
 
 static int aead_decrypt(struct aead_request *req)
@@ -1471,14 +1475,13 @@ static int aead_decrypt(struct aead_request *req)
 	struct talitos_edesc *edesc;
 	void *icvdata;
 
-	req->cryptlen -= authsize;
-
 	/* allocate extended descriptor */
 	edesc = aead_edesc_alloc(req, req->iv, 1, false);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
-	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
+	if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
+	    (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
 	    ((!edesc->src_nents && !edesc->dst_nents) ||
 	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
 
@@ -1489,7 +1492,8 @@ static int aead_decrypt(struct aead_request *req)
 
 		/* reset integrity check result bits */
 
-		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
+		return ipsec_esp(edesc, req, false,
+				 ipsec_esp_decrypt_hwauth_done);
 	}
 
 	/* Have to check the ICV with software */
@@ -1505,7 +1509,7 @@ static int aead_decrypt(struct aead_request *req)
 	sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
 			   req->assoclen + req->cryptlen - authsize);
 
-	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
+	return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
 }
 
 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
@@ -1538,6 +1542,18 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
 	return 0;
 }
 
+static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
+				  const u8 *key, unsigned int keylen)
+{
+	if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
+	    keylen == AES_KEYSIZE_256)
+		return ablkcipher_setkey(cipher, key, keylen);
+
+	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+	return -EINVAL;
+}
+
 static void common_nonsnoop_unmap(struct device *dev,
 				  struct talitos_edesc *edesc,
 				  struct ablkcipher_request *areq)
@@ -1660,6 +1676,14 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
 	struct talitos_edesc *edesc;
+	unsigned int blocksize =
+			crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+
+	if (!areq->nbytes)
+		return 0;
+
+	if (areq->nbytes % blocksize)
+		return -EINVAL;
 
 	/* allocate extended descriptor */
 	edesc = ablkcipher_edesc_alloc(areq, true);
@@ -1677,6 +1701,14 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
 	struct talitos_edesc *edesc;
+	unsigned int blocksize =
+			crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+
+	if (!areq->nbytes)
+		return 0;
+
+	if (areq->nbytes % blocksize)
+		return -EINVAL;
 
 	/* allocate extended descriptor */
 	edesc = ablkcipher_edesc_alloc(areq, false);
@@ -2705,6 +2737,7 @@ static struct talitos_alg_template driver_algs[] = {
 				.min_keysize = AES_MIN_KEY_SIZE,
 				.max_keysize = AES_MAX_KEY_SIZE,
 				.ivsize = AES_BLOCK_SIZE,
+				.setkey = ablkcipher_aes_setkey,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2715,13 +2748,13 @@ static struct talitos_alg_template driver_algs[] = {
 		.alg.crypto = {
 			.cra_name = "ctr(aes)",
 			.cra_driver_name = "ctr-aes-talitos",
-			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_blocksize = 1,
 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
 				     CRYPTO_ALG_ASYNC,
 			.cra_ablkcipher = {
 				.min_keysize = AES_MIN_KEY_SIZE,
 				.max_keysize = AES_MAX_KEY_SIZE,
-				.ivsize = AES_BLOCK_SIZE,
+				.setkey = ablkcipher_aes_setkey,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
@@ -3091,6 +3124,7 @@ static int talitos_remove(struct platform_device *ofdev)
 			break;
 		case CRYPTO_ALG_TYPE_AEAD:
 			crypto_unregister_aead(&t_alg->algt.alg.aead);
+			break;
 		case CRYPTO_ALG_TYPE_AHASH:
 			crypto_unregister_ahash(&t_alg->algt.alg.hash);
 			break;
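
The talitos hunks make the decrypt path stop mutating req->cryptlen and instead derive the payload length locally wherever it is needed, add the break that was missing after the AEAD case in talitos_remove() (it fell through into the ahash unregister), reject AES keys of invalid length via ablkcipher_aes_setkey(), and refuse empty or misaligned ablkcipher requests up front. The recurring length computation:

	/* on decrypt the ICV trails the payload and is part of cryptlen;
	 * on encrypt it has not been appended yet */
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
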
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 43fd8aa..bc43003 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -238,6 +238,14 @@
 	  driver votes on this interface to request a particular
 	  memory latency QoS level.
 
+config DEVFREQ_GOV_STATICMAP
+	tristate "Device driver for static voting of DDR freq based on a clock rate change"
+	depends on ARCH_QCOM
+	help
+	  Clock-notifier-based governor for device-to-DDR bandwidth voting.
+	  This governor casts a DDR bandwidth vote, looked up from a static
+	  map, whenever the device's clock rate changes.
+
 source "drivers/devfreq/event/Kconfig"
 
 endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index e106e9c..c511cf0 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -24,6 +24,7 @@
 obj-$(CONFIG_QCOM_DEVFREQ_DEVBW)		+= devfreq_devbw.o
 obj-$(CONFIG_DEVFREQ_SIMPLE_DEV)	+= devfreq_simple_dev.o
 obj-$(CONFIG_ARM_QCOM_DEVFREQ_QOSLAT)	+= devfreq_qcom_qoslat.o
+obj-$(CONFIG_DEVFREQ_GOV_STATICMAP)	+= governor_staticmap.o
 
 # DEVFREQ Event Drivers
 obj-$(CONFIG_PM_DEVFREQ_EVENT)		+= event/
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
index 84874a1..09d0547 100644
--- a/drivers/devfreq/bimc-bwmon.c
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -193,7 +193,7 @@ void mon_clear(struct bwmon *m, bool clear_all, enum mon_reg_type type)
 	mb();
 }
 
-#define	SAMPLE_WIN_LIM	0xFFFFF
+#define	SAMPLE_WIN_LIM	0xFFFFFF
 static __always_inline
 void mon_set_hw_sampling_window(struct bwmon *m, unsigned int sample_ms,
 				enum mon_reg_type type)
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 2ebf83f..97d57c3 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -859,7 +859,7 @@ int devfreq_resume_device(struct devfreq *devfreq)
 		return -EINVAL;
 
 	mutex_lock(&devfreq->event_lock);
-	if (!devfreq->governor) {
+	if (!devfreq->governor || !devfreq->dev_suspended) {
 		mutex_unlock(&devfreq->event_lock);
 		return 0;
 	}
@@ -1147,14 +1147,16 @@ static ssize_t polling_interval_store(struct device *dev,
 	unsigned int value;
 	int ret;
 
-	if (!df->governor)
-		return -EINVAL;
-
 	ret = sscanf(buf, "%u", &value);
 	if (ret != 1)
 		return -EINVAL;
 
 	mutex_lock(&df->event_lock);
+	if (!df->governor || df->dev_suspended) {
+		dev_warn(dev, "device suspended, operation not allowed\n");
+		mutex_unlock(&df->event_lock);
+		return -EINVAL;
+	}
 	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
 	ret = count;
 	mutex_unlock(&df->event_lock);
@@ -1176,6 +1178,11 @@ static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
 		return -EINVAL;
 
 	mutex_lock(&df->event_lock);
+	if (df->dev_suspended) {
+		dev_warn(dev, "device suspended, min freq not allowed\n");
+		mutex_unlock(&df->event_lock);
+		return -EINVAL;
+	}
 	mutex_lock(&df->lock);
 	max = df->max_freq;
 	if (value && max && value > max) {
@@ -1213,6 +1220,11 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
 		return -EINVAL;
 
 	mutex_lock(&df->event_lock);
+	if (df->dev_suspended) {
+		mutex_unlock(&df->event_lock);
+		dev_warn(dev, "device suspended, max freq not allowed\n");
+		return -EINVAL;
+	}
 	mutex_lock(&df->lock);
 	min = df->min_freq;
 	if (value && min && value < min) {
diff --git a/drivers/devfreq/devfreq_qcom_qoslat.c b/drivers/devfreq/devfreq_qcom_qoslat.c
index 3c73985..a751541 100644
--- a/drivers/devfreq/devfreq_qcom_qoslat.c
+++ b/drivers/devfreq/devfreq_qcom_qoslat.c
@@ -27,6 +27,9 @@ struct qoslat_data {
 	unsigned int			qos_level;
 };
 
+#define QOS_LEVEL_OFF	1
+#define QOS_LEVEL_ON	2
+
 #define MAX_MSG_LEN	96
 static int update_qos_level(struct device *dev, struct qoslat_data *d)
 {
@@ -35,7 +38,7 @@ static int update_qos_level(struct device *dev, struct qoslat_data *d)
 	char *qos_msg = "off";
 	int ret;
 
-	if (d->qos_level)
+	if (d->qos_level == QOS_LEVEL_ON)
 		qos_msg = "on";
 
 	snprintf(mbox_msg, MAX_MSG_LEN, "{class: ddr, perfmode: %s}", qos_msg);
@@ -106,7 +109,7 @@ static int devfreq_qcom_qoslat_probe(struct platform_device *pdev)
 		dev_err(dev, "Failed to get mailbox channel: %d\n", ret);
 		return ret;
 	}
-	d->qos_level = 0;
+	d->qos_level = QOS_LEVEL_OFF;
 
 	p = &d->profile;
 	p->target = dev_target;
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index c25658b..24a9658 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -194,11 +194,10 @@ static void exynos_bus_exit(struct device *dev)
 	if (ret < 0)
 		dev_warn(dev, "failed to disable the devfreq-event devices\n");
 
-	if (bus->regulator)
-		regulator_disable(bus->regulator);
-
 	dev_pm_opp_of_remove_table(dev);
 	clk_disable_unprepare(bus->clk);
+	if (bus->regulator)
+		regulator_disable(bus->regulator);
 }
 
 /*
@@ -386,6 +385,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
 	struct exynos_bus *bus;
 	int ret, max_state;
 	unsigned long min_freq, max_freq;
+	bool passive = false;
 
 	if (!np) {
 		dev_err(dev, "failed to find devicetree node\n");
@@ -399,27 +399,27 @@ static int exynos_bus_probe(struct platform_device *pdev)
 	bus->dev = &pdev->dev;
 	platform_set_drvdata(pdev, bus);
 
-	/* Parse the device-tree to get the resource information */
-	ret = exynos_bus_parse_of(np, bus);
-	if (ret < 0)
-		return ret;
-
 	profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
-	if (!profile) {
-		ret = -ENOMEM;
-		goto err;
-	}
+	if (!profile)
+		return -ENOMEM;
 
 	node = of_parse_phandle(dev->of_node, "devfreq", 0);
 	if (node) {
 		of_node_put(node);
-		goto passive;
+		passive = true;
 	} else {
 		ret = exynos_bus_parent_parse_of(np, bus);
+		if (ret < 0)
+			return ret;
 	}
 
+	/* Parse the device-tree to get the resource information */
+	ret = exynos_bus_parse_of(np, bus);
 	if (ret < 0)
-		goto err;
+		goto err_reg;
+
+	if (passive)
+		goto passive;
 
 	/* Initialize the struct profile and governor data for parent device */
 	profile->polling_ms = 50;
@@ -510,6 +510,9 @@ static int exynos_bus_probe(struct platform_device *pdev)
 err:
 	dev_pm_opp_of_remove_table(dev);
 	clk_disable_unprepare(bus->clk);
+err_reg:
+	if (!passive)
+		regulator_disable(bus->regulator);
 
 	return ret;
 }
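
The exynos-bus rework makes teardown undo probe in reverse order, and gives the probe error path an err_reg label so a failure after the regulator is enabled (parent devices only) releases it as well:

	/* exit path now mirrors bring-up in reverse */
	dev_pm_opp_of_remove_table(dev);
	clk_disable_unprepare(bus->clk);
	if (bus->regulator)
		regulator_disable(bus->regulator);
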
diff --git a/drivers/devfreq/governor_bw_hwmon.c b/drivers/devfreq/governor_bw_hwmon.c
index 7d45781..c5d21cc 100644
--- a/drivers/devfreq/governor_bw_hwmon.c
+++ b/drivers/devfreq/governor_bw_hwmon.c
@@ -40,6 +40,7 @@ struct hwmon_node {
 	unsigned int hyst_trigger_count;
 	unsigned int hyst_length;
 	unsigned int idle_mbps;
+	unsigned int use_ab;
 	unsigned int mbps_zones[NUM_MBPS_ZONES];
 
 	unsigned long prev_ab;
@@ -163,6 +164,9 @@ static DEVICE_ATTR(__attr, 0644, show_list_##__attr, store_list_##__attr)
 #define MIN_MS	10U
 #define MAX_MS	500U
 
+#define SAMPLE_MIN_MS	1U
+#define SAMPLE_MAX_MS	50U
+
 /* Returns MBps of read/writes for the sampling window. */
 static unsigned long bytes_to_mbps(unsigned long long bytes, unsigned int us)
 {
@@ -459,8 +463,10 @@ static unsigned long get_bw_and_set_irq(struct hwmon_node *node,
 	}
 
 	node->prev_ab = new_bw;
-	if (ab)
+	if (ab && node->use_ab)
 		*ab = roundup(new_bw, node->bw_step);
+	else if (ab)
+		*ab = 0;
 
 	*freq = (new_bw * 100) / io_percent;
 	trace_bw_hwmon_update(dev_name(node->hw->df->dev.parent),
@@ -590,11 +596,9 @@ static int gov_start(struct devfreq *df)
 	struct bw_hwmon *hw;
 	struct devfreq_dev_status stat;
 
-	mutex_lock(&df->lock);
 	node = find_hwmon_node(df);
 	if (!node) {
 		dev_err(dev, "Unable to find HW monitor!\n");
-		mutex_unlock(&df->lock);
 		return -ENODEV;
 	}
 	hw = node->hw;
@@ -619,7 +623,6 @@ static int gov_start(struct devfreq *df)
 	if (ret)
 		goto err_sysfs;
 
-	mutex_unlock(&df->lock);
 	return 0;
 
 err_sysfs:
@@ -629,7 +632,6 @@ static int gov_start(struct devfreq *df)
 	node->orig_data = NULL;
 	hw->df = NULL;
 	node->dev_ab = NULL;
-	mutex_unlock(&df->lock);
 	return ret;
 }
 
@@ -638,7 +640,6 @@ static void gov_stop(struct devfreq *df)
 	struct hwmon_node *node = df->data;
 	struct bw_hwmon *hw = node->hw;
 
-	mutex_lock(&df->lock);
 	sysfs_remove_group(&df->dev.kobj, node->attr_grp);
 	stop_monitor(df, true);
 	df->data = node->orig_data;
@@ -653,7 +654,6 @@ static void gov_stop(struct devfreq *df)
 	if (node->dev_ab)
 		*node->dev_ab = 0;
 	node->dev_ab = NULL;
-	mutex_unlock(&df->lock);
 }
 
 static int gov_suspend(struct devfreq *df)
@@ -689,11 +689,6 @@ static int gov_resume(struct devfreq *df)
 	if (!node->hw->resume_hwmon)
 		return -EPERM;
 
-	if (!node->resume_freq) {
-		dev_warn(df->dev.parent, "Governor already resumed!\n");
-		return -EBUSY;
-	}
-
 	mutex_lock(&df->lock);
 	update_devfreq(df);
 	mutex_unlock(&df->lock);
@@ -710,7 +705,7 @@ static int devfreq_bw_hwmon_get_freq(struct devfreq *df,
 	struct hwmon_node *node = df->data;
 
 	/* Suspend/resume sequence */
-	if (!node->mon_started) {
+	if (node && !node->mon_started) {
 		*freq = node->resume_freq;
 		*node->dev_ab = node->resume_ab;
 		return 0;
@@ -761,11 +756,42 @@ static ssize_t throttle_adj_show(struct device *dev,
 
 static DEVICE_ATTR_RW(throttle_adj);
 
+static ssize_t sample_ms_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct devfreq *df = to_devfreq(dev);
+	struct hwmon_node *hw = df->data;
+	int ret;
+	unsigned int val;
+
+	ret = kstrtouint(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	val = max(val, SAMPLE_MIN_MS);
+	val = min(val, SAMPLE_MAX_MS);
+	if (val > df->profile->polling_ms)
+		return -EINVAL;
+
+	hw->sample_ms = val;
+	return count;
+}
+
+static ssize_t sample_ms_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct devfreq *df = to_devfreq(dev);
+	struct hwmon_node *node = df->data;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", node->sample_ms);
+}
+
+static DEVICE_ATTR_RW(sample_ms);
+
 gov_attr(guard_band_mbps, 0U, 2000U);
 gov_attr(decay_rate, 0U, 100U);
-gov_attr(io_percent, 1U, 100U);
+gov_attr(io_percent, 1U, 400U);
 gov_attr(bw_step, 50U, 1000U);
-gov_attr(sample_ms, 1U, 50U);
 gov_attr(up_scale, 0U, 500U);
 gov_attr(up_thres, 1U, 100U);
 gov_attr(down_thres, 0U, 90U);
@@ -774,6 +800,7 @@ gov_attr(hist_memory, 0U, 90U);
 gov_attr(hyst_trigger_count, 0U, 90U);
 gov_attr(hyst_length, 0U, 90U);
 gov_attr(idle_mbps, 0U, 2000U);
+gov_attr(use_ab, 0U, 1U);
 gov_list_attr(mbps_zones, NUM_MBPS_ZONES, 0U, UINT_MAX);
 
 static struct attribute *dev_attr[] = {
@@ -790,6 +817,7 @@ static struct attribute *dev_attr[] = {
 	&dev_attr_hyst_trigger_count.attr,
 	&dev_attr_hyst_length.attr,
 	&dev_attr_idle_mbps.attr,
+	&dev_attr_use_ab.attr,
 	&dev_attr_mbps_zones.attr,
 	&dev_attr_throttle_adj.attr,
 	NULL,
@@ -832,7 +860,13 @@ static int devfreq_bw_hwmon_ev_handler(struct devfreq *df,
 		break;
 
 	case DEVFREQ_GOV_INTERVAL:
+		node = df->data;
 		sample_ms = *(unsigned int *)data;
+		if (sample_ms < node->sample_ms) {
+			ret = -EINVAL;
+			goto out;
+		}
+
 		sample_ms = max(MIN_MS, sample_ms);
 		sample_ms = min(MAX_MS, sample_ms);
 		/*
@@ -841,8 +875,12 @@ static int devfreq_bw_hwmon_ev_handler(struct devfreq *df,
 		 * stop/start the delayed workqueue while the interval update
 		 * is happening.
 		 */
-		node = df->data;
 		hw = node->hw;
+
+		mutex_lock(&node->mon_lock);
+		node->mon_started = false;
+		mutex_unlock(&node->mon_lock);
+
 		hw->suspend_hwmon(hw);
 		devfreq_interval_update(df, &sample_ms);
 		ret = hw->resume_hwmon(hw);
@@ -851,6 +889,9 @@ static int devfreq_bw_hwmon_ev_handler(struct devfreq *df,
 				"Unable to resume HW monitor (%d)\n", ret);
 			goto out;
 		}
+		mutex_lock(&node->mon_lock);
+		node->mon_started = true;
+		mutex_unlock(&node->mon_lock);
 		break;
 
 	case DEVFREQ_GOV_SUSPEND:
@@ -933,6 +974,7 @@ int register_bw_hwmon(struct device *dev, struct bw_hwmon *hwmon)
 	node->hyst_trigger_count = 3;
 	node->hyst_length = 0;
 	node->idle_mbps = 400;
+	node->use_ab = 1;
 	node->mbps_zones[0] = 0;
 	node->hw = hwmon;
 
diff --git a/drivers/devfreq/governor_gpubw_mon.c b/drivers/devfreq/governor_gpubw_mon.c
index c07f6a1..f305c75 100644
--- a/drivers/devfreq/governor_gpubw_mon.c
+++ b/drivers/devfreq/governor_gpubw_mon.c
@@ -100,10 +100,11 @@ static int devfreq_gpubw_get_target(struct devfreq *df,
 
 	/*
 	 * If there's a new high watermark, update the cutoffs and send the
-	 * FAST hint.  Otherwise check the current value against the current
+	 * FAST hint, provided that we are using a floating watermark.
+	 * Otherwise check the current value against the current
 	 * cutoffs.
 	 */
-	if (norm_max_cycles > priv->bus.max) {
+	if (norm_max_cycles > priv->bus.max && priv->bus.floating) {
 		_update_cutoff(priv, norm_max_cycles);
 		bus_profile->flag = DEVFREQ_FLAG_FAST_HINT;
 	} else {
@@ -224,10 +225,11 @@ static int devfreq_gpubw_event_handler(struct devfreq *devfreq,
 	case DEVFREQ_GOV_SUSPEND:
 		{
 			struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
-
-			priv->bus.total_time = 0;
-			priv->bus.gpu_time = 0;
-			priv->bus.ram_time = 0;
+			if (priv) {
+				priv->bus.total_time = 0;
+				priv->bus.gpu_time = 0;
+				priv->bus.ram_time = 0;
+			}
 		}
 		break;
 	default:
diff --git a/drivers/devfreq/governor_msm_adreno_tz.c b/drivers/devfreq/governor_msm_adreno_tz.c
index ae4bfc2..fabdc81 100644
--- a/drivers/devfreq/governor_msm_adreno_tz.c
+++ b/drivers/devfreq/governor_msm_adreno_tz.c
@@ -141,6 +141,8 @@ void compute_work_load(struct devfreq_dev_status *stats,
 		struct devfreq_msm_adreno_tz_data *priv,
 		struct devfreq *devfreq)
 {
+	u64 busy;
+
 	spin_lock(&sample_lock);
 	/*
 	 * Keep collecting the stats till the client
@@ -148,8 +150,10 @@ void compute_work_load(struct devfreq_dev_status *stats,
 	 * is done when the entry is read
 	 */
 	acc_total += stats->total_time;
-	acc_relative_busy += (stats->busy_time * stats->current_frequency) /
-				devfreq->profile->freq_table[0];
+	busy = (u64)stats->busy_time * stats->current_frequency;
+	do_div(busy, devfreq->profile->freq_table[0]);
+	acc_relative_busy += busy;
+
 	spin_unlock(&sample_lock);
 }
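
The rewrite avoids two 32-bit hazards at once: busy_time * current_frequency can overflow an unsigned long on 32-bit builds, and a plain 64-bit division would need compiler helper routines. Widening to u64 and using do_div(), which divides a u64 in place by a 32-bit divisor and returns the remainder, handles both. A self-contained sketch:

#include <linux/kernel.h>
#include <asm/div64.h>

static u64 scale_busy(u32 busy_time, u32 cur_freq, u32 max_freq)
{
	u64 busy = (u64)busy_time * cur_freq;	/* widen before multiplying */

	do_div(busy, max_freq);	/* u64 / u32; remainder is returned, ignored here */
	return busy;
}
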
 
diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
index 3bc29ac..8cfb697 100644
--- a/drivers/devfreq/governor_passive.c
+++ b/drivers/devfreq/governor_passive.c
@@ -152,7 +152,6 @@ static int devfreq_passive_notifier_call(struct notifier_block *nb,
 static int devfreq_passive_event_handler(struct devfreq *devfreq,
 				unsigned int event, void *data)
 {
-	struct device *dev = devfreq->dev.parent;
 	struct devfreq_passive_data *p_data
 			= (struct devfreq_passive_data *)devfreq->data;
 	struct devfreq *parent = (struct devfreq *)p_data->parent;
@@ -168,12 +167,12 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq,
 			p_data->this = devfreq;
 
 		nb->notifier_call = devfreq_passive_notifier_call;
-		ret = devm_devfreq_register_notifier(dev, parent, nb,
+		ret = devfreq_register_notifier(parent, nb,
 					DEVFREQ_TRANSITION_NOTIFIER);
 		break;
 	case DEVFREQ_GOV_STOP:
-		devm_devfreq_unregister_notifier(dev, parent, nb,
-					DEVFREQ_TRANSITION_NOTIFIER);
+		WARN_ON(devfreq_unregister_notifier(parent, nb,
+					DEVFREQ_TRANSITION_NOTIFIER));
 		break;
 	default:
 		break;
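
Dropping the devm_ notifier helpers is the point of this hunk: device-managed resources are released only when the parent device is unbound, while a governor may be started and stopped many times in between, so each GOV_START registration must be undone explicitly at GOV_STOP. A hedged sketch of the balanced shape:

#include <linux/devfreq.h>

static int passive_event(struct devfreq *parent, struct notifier_block *nb,
			 unsigned int event)
{
	switch (event) {
	case DEVFREQ_GOV_START:
		return devfreq_register_notifier(parent, nb,
						 DEVFREQ_TRANSITION_NOTIFIER);
	case DEVFREQ_GOV_STOP:
		/* must mirror GOV_START; devm_ cleanup would run far too late */
		return devfreq_unregister_notifier(parent, nb,
						   DEVFREQ_TRANSITION_NOTIFIER);
	}
	return 0;
}
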
diff --git a/drivers/devfreq/governor_staticmap.c b/drivers/devfreq/governor_staticmap.c
new file mode 100644
index 0000000..34ab108
--- /dev/null
+++ b/drivers/devfreq/governor_staticmap.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "governor-static-map: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include "governor.h"
+
+struct core_dev_map {
+	unsigned int core_mhz;
+	unsigned int target_freq;
+};
+
+struct static_map_gov {
+	struct device		*dev;
+	struct device_node	*of_node;
+	struct clk		*dev_clk;
+	unsigned long		dev_clk_cur_freq;
+	struct notifier_block	clock_change_nb;
+	struct core_dev_map	*freq_map;
+	struct devfreq_governor	*gov;
+	struct devfreq		*df;
+	bool			mon_started;
+	struct list_head	list;
+	void			*orig_data;
+	unsigned long		resume_freq;
+};
+
+static LIST_HEAD(static_map_list);
+static DEFINE_MUTEX(static_map_lock);
+static DEFINE_MUTEX(state_lock);
+static int static_use_cnt;
+
+static struct static_map_gov *find_static_map_node(struct devfreq *df)
+{
+	struct static_map_gov *node, *found = NULL;
+
+	mutex_lock(&static_map_lock);
+	list_for_each_entry(node, &static_map_list, list)
+		if (node->of_node == df->dev.parent->of_node) {
+			found = node;
+			break;
+		}
+	mutex_unlock(&static_map_lock);
+
+	return found;
+}
+
+static unsigned long core_to_dev_freq(struct static_map_gov *d,
+		unsigned long coref)
+{
+	struct core_dev_map *map = d->freq_map;
+	unsigned long freq = 0;
+
+	if (!map || !coref)
+		goto out;
+
+	/* Start with the first non-zero freq map entry */
+	map++;
+	while (map->core_mhz && map->core_mhz != coref)
+		map++;
+	if (!map->core_mhz)
+		map--;
+	freq = map->target_freq;
+
+out:
+	pr_debug("core freq: %lu -> target: %lu\n", coref, freq);
+	return freq;
+}
+
+#define NUM_COLS	2
+static struct core_dev_map *init_core_dev_map(struct device *dev,
+					struct device_node *of_node,
+					char *prop_name)
+{
+	int len, nf, i, j;
+	u32 data;
+	struct core_dev_map *tbl;
+	int ret;
+
+	if (!of_node)
+		of_node = dev->of_node;
+
+	if (!of_find_property(of_node, prop_name, &len))
+		return NULL;
+	len /= sizeof(data);
+
+	if (len % NUM_COLS || len == 0)
+		return NULL;
+	nf = len / NUM_COLS;
+
+	tbl = devm_kzalloc(dev, (nf + 1) * sizeof(struct core_dev_map),
+			GFP_KERNEL);
+	if (!tbl)
+		return NULL;
+
+	for (i = 0, j = 0; i < nf; i++, j += 2) {
+		ret = of_property_read_u32_index(of_node, prop_name, j,
+				&data);
+		if (ret) {
+			dev_err(dev,
+				"Couldn't read the core-dev freq table %d\n",
+									ret);
+			return NULL;
+		}
+		tbl[i].core_mhz = data;
+
+		ret = of_property_read_u32_index(of_node, prop_name, j + 1,
+				&data);
+		if (ret) {
+			dev_err(dev,
+				"Couldn't read the core-dev freq table %d\n",
+									ret);
+			return NULL;
+		}
+		tbl[i].target_freq = data;
+		pr_debug("Entry%d DEV:%u, Target:%u\n", i, tbl[i].core_mhz,
+				tbl[i].target_freq);
+	}
+	tbl[i].core_mhz = 0;
+
+	return tbl;
+}
+
+static int devfreq_static_map_get_freq(struct devfreq *df,
+					unsigned long *freq)
+{
+	struct static_map_gov *gov_node = df->data;
+
+	*freq = core_to_dev_freq(gov_node, gov_node->dev_clk_cur_freq);
+
+	return 0;
+}
+
+static int devfreq_clock_change_notify_cb(struct notifier_block *nb,
+				       unsigned long action, void *ptr)
+{
+	struct clk_notifier_data *data = ptr;
+	struct static_map_gov *d;
+	int ret;
+
+	if (action != POST_RATE_CHANGE)
+		return NOTIFY_OK;
+
+	mutex_lock(&state_lock);
+	d = container_of(nb, struct static_map_gov, clock_change_nb);
+
+	mutex_lock(&d->df->lock);
+	d->dev_clk_cur_freq = data->new_rate;
+	if (IS_ERR_VALUE(d->dev_clk_cur_freq)) {
+		mutex_unlock(&d->df->lock);
+		mutex_unlock(&state_lock);
+		return d->dev_clk_cur_freq;
+	}
+	d->dev_clk_cur_freq = d->dev_clk_cur_freq / 1000;
+
+	ret = update_devfreq(d->df);
+	if (ret)
+		dev_err(d->dev,
+			"Unable to update freq on request %d\n", ret);
+	mutex_unlock(&d->df->lock);
+	mutex_unlock(&state_lock);
+
+	return 0;
+}
+
+static int devfreq_static_map_ev_handler(struct devfreq *df,
+					unsigned int event, void *data)
+{
+	int ret = 0;
+	struct static_map_gov *gov_node;
+
+	mutex_lock(&state_lock);
+	gov_node = find_static_map_node(df);
+	if (!gov_node) {
+		mutex_unlock(&state_lock);
+		dev_err(df->dev.parent,
+				"Unable to find static map governor!\n");
+		return -ENODEV;
+	}
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		gov_node->clock_change_nb.notifier_call =
+						devfreq_clock_change_notify_cb;
+		gov_node->orig_data = df->data;
+		gov_node->df = df;
+		df->data = gov_node;
+		ret = clk_notifier_register(gov_node->dev_clk,
+					&gov_node->clock_change_nb);
+		if (ret) {
+			dev_err(df->dev.parent,
+				"Failed to register clock change notifier %d\n",
+									ret);
+		}
+		break;
+	case DEVFREQ_GOV_STOP:
+		ret = clk_notifier_unregister(gov_node->dev_clk,
+						&gov_node->clock_change_nb);
+		if (ret) {
+			dev_err(df->dev.parent,
+				"Failed to unregister clock change notifier %d\n",
+									ret);
+		}
+		df->data = gov_node->orig_data;
+		gov_node->orig_data = NULL;
+		break;
+	case DEVFREQ_GOV_SUSPEND:
+		ret = clk_notifier_unregister(gov_node->dev_clk,
+						&gov_node->clock_change_nb);
+		if (ret) {
+			dev_err(df->dev.parent,
+				"Failed to unregister clk notifier %d\n", ret);
+		}
+		mutex_lock(&df->lock);
+		gov_node->resume_freq = gov_node->dev_clk_cur_freq;
+		gov_node->dev_clk_cur_freq = 0;
+		update_devfreq(df);
+		mutex_unlock(&df->lock);
+		break;
+	case DEVFREQ_GOV_RESUME:
+		ret = clk_notifier_register(gov_node->dev_clk,
+						&gov_node->clock_change_nb);
+		if (ret) {
+			dev_err(df->dev.parent,
+				"Failed to register clock change notifier %d\n",
+									ret);
+		}
+		mutex_lock(&df->lock);
+		gov_node->dev_clk_cur_freq = gov_node->resume_freq;
+		update_devfreq(df);
+		gov_node->resume_freq = 0;
+		mutex_unlock(&df->lock);
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&state_lock);
+	return ret;
+}
+
+static struct devfreq_governor devfreq_gov_static_map = {
+	.name = "static_map",
+	.get_target_freq = devfreq_static_map_get_freq,
+	.event_handler = devfreq_static_map_ev_handler,
+};
+
+static int gov_static_map_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct static_map_gov *d;
+	int ret;
+	const char *dev_clk_name;
+
+	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+	d->dev = dev;
+
+	ret = of_property_read_string(dev->of_node, "qcom,dev_clk",
+							&dev_clk_name);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Failed to read device clock name %d\n", ret);
+		return ret;
+	}
+	d->dev_clk = devm_clk_get(dev, dev_clk_name);
+	if (IS_ERR(d->dev_clk))
+		return PTR_ERR(d->dev_clk);
+
+	d->of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
+	if (!d->of_node) {
+		dev_err(dev, "Couldn't find a target device.\n");
+		ret = -ENODEV;
+		return ret;
+	}
+
+	d->freq_map = init_core_dev_map(dev, NULL, "qcom,core-dev-table");
+	if (!d->freq_map) {
+		dev_err(dev, "Couldn't find the core-dev freq table!\n");
+		return -EINVAL;
+	}
+	mutex_lock(&static_map_lock);
+	list_add_tail(&d->list, &static_map_list);
+	mutex_unlock(&static_map_lock);
+
+	mutex_lock(&state_lock);
+	d->gov = &devfreq_gov_static_map;
+	if (!static_use_cnt)
+		ret = devfreq_add_governor(&devfreq_gov_static_map);
+	if (ret)
+		dev_err(dev, "Failed to add governor %d\n", ret);
+	if (!ret)
+		static_use_cnt++;
+	mutex_unlock(&state_lock);
+
+	return ret;
+}
+
+static const struct of_device_id static_map_match_table[] = {
+	{ .compatible = "qcom,static-map"},
+	{}
+};
+
+static struct platform_driver gov_static_map_driver = {
+	.probe = gov_static_map_probe,
+	.driver = {
+		.name = "static-map",
+		.of_match_table = static_map_match_table,
+		.suppress_bind_attrs = true,
+	},
+};
+
+module_platform_driver(gov_static_map_driver);
+MODULE_DESCRIPTION("STATIC MAP GOVERNOR FOR DDR");
+MODULE_LICENSE("GPL v2");
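
For reference, core_to_dev_freq() above expects the qcom,core-dev-table property to carry <core-MHz, target-freq> pairs, which init_core_dev_map() copies into a zero-terminated array. A self-contained sketch of the lookup convention, with invented frequencies (note the driver itself starts its walk at the second entry, per its "first non-zero freq map entry" comment):

#include <stdio.h>

struct core_dev_map {
	unsigned int core_mhz;
	unsigned int target_freq;
};

/* Invented values; the real pairs come from qcom,core-dev-table. */
static struct core_dev_map example_map[] = {
	{  300,  762 },
	{  768, 1720 },
	{ 1440, 2086 },
	{    0,    0 },	/* a zero core_mhz terminates the table */
};

static unsigned int lookup(struct core_dev_map *map, unsigned int coref)
{
	if (!coref)
		return 0;
	while (map->core_mhz && map->core_mhz != coref)
		map++;
	if (!map->core_mhz)	/* no exact match: fall back to the last entry */
		map--;
	return map->target_freq;
}

int main(void)
{
	printf("%u\n", lookup(example_map, 768));	/* 1720 */
	printf("%u\n", lookup(example_map, 9999));	/* 2086, the fallback */
	return 0;
}
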
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index c59d2ee..0676807 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -486,11 +486,11 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
 {
 	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
 	struct dev_pm_opp *opp;
-	unsigned long rate = *freq * KHZ;
+	unsigned long rate;
 
-	opp = devfreq_recommended_opp(dev, &rate, flags);
+	opp = devfreq_recommended_opp(dev, freq, flags);
 	if (IS_ERR(opp)) {
-		dev_err(dev, "Failed to find opp for %lu KHz\n", *freq);
+		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
 		return PTR_ERR(opp);
 	}
 	rate = dev_pm_opp_get_freq(opp);
@@ -499,8 +499,6 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
 	clk_set_min_rate(tegra->emc_clock, rate);
 	clk_set_rate(tegra->emc_clock, 0);
 
-	*freq = rate;
-
 	return 0;
 }
 
@@ -510,7 +508,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
 	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
 	struct tegra_devfreq_device *actmon_dev;
 
-	stat->current_frequency = tegra->cur_freq;
+	stat->current_frequency = tegra->cur_freq * KHZ;
 
 	/* To be used by the tegra governor */
 	stat->private_data = tegra;
@@ -565,7 +563,7 @@ static int tegra_governor_get_target(struct devfreq *devfreq,
 		target_freq = max(target_freq, dev->target_freq);
 	}
 
-	*freq = target_freq;
+	*freq = target_freq * KHZ;
 
 	return 0;
 }
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 53c1d6d..81ba4eb 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -141,17 +141,14 @@ static void timeline_fence_release(struct dma_fence *fence)
 {
 	struct sync_pt *pt = dma_fence_to_sync_pt(fence);
 	struct sync_timeline *parent = dma_fence_parent(fence);
+	unsigned long flags;
 
+	spin_lock_irqsave(fence->lock, flags);
 	if (!list_empty(&pt->link)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(fence->lock, flags);
-		if (!list_empty(&pt->link)) {
-			list_del(&pt->link);
-			rb_erase(&pt->node, &parent->pt_tree);
-		}
-		spin_unlock_irqrestore(fence->lock, flags);
+		list_del(&pt->link);
+		rb_erase(&pt->node, &parent->pt_tree);
 	}
+	spin_unlock_irqrestore(fence->lock, flags);
 
 	sync_timeline_put(parent);
 	dma_fence_free(fence);
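
The sw_sync hunk removes a double-checked lock: testing list_empty() outside the lock races with a concurrent removal, so the check and the list_del() now happen atomically under the fence lock. The general shape:

#include <linux/list.h>
#include <linux/spinlock.h>

static void remove_if_queued(struct list_head *node, spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (!list_empty(node))	/* checked and acted on in one critical section */
		list_del(node);
	spin_unlock_irqrestore(lock, flags);
}
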
@@ -274,7 +271,8 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
 				p = &parent->rb_left;
 			} else {
 				if (dma_fence_get_rcu(&other->base)) {
-					dma_fence_put(&pt->base);
+					sync_timeline_put(obj);
+					kfree(pt);
 					pt = other;
 					goto unlock;
 				}
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 2b11d96..9d782cc 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -898,8 +898,10 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
 		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
 
 	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-	if (rc)
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to set DMA mask\n");
 		return rc;
+	}
 
 	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
 	if (!od)
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index a410657..012584c 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -125,9 +125,9 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
 	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
 					chain_node) {
 		pr_debug("\tcookie: %d slot: %d busy: %d "
-			"this_desc: %#x next_desc: %#x ack: %d\n",
+			"this_desc: %#x next_desc: %#llx ack: %d\n",
 			iter->async_tx.cookie, iter->idx, busy,
-			iter->async_tx.phys, iop_desc_get_next_desc(iter),
+			iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),
 			async_tx_test_ack(&iter->async_tx));
 		prefetch(_iter);
 		prefetch(&_iter->async_tx);
@@ -315,9 +315,9 @@ iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
 				int i;
 				dev_dbg(iop_chan->device->common.dev,
 					"allocated slot: %d "
-					"(desc %p phys: %#x) slots_per_op %d\n",
+					"(desc %p phys: %#llx) slots_per_op %d\n",
 					iter->idx, iter->hw_desc,
-					iter->async_tx.phys, slots_per_op);
+					(u64)iter->async_tx.phys, slots_per_op);
 
 				/* pre-ack all but the last descriptor */
 				if (num_slots != slots_per_op)
@@ -525,7 +525,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 		return NULL;
 	BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
 
-	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
+	dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
 		__func__, len);
 
 	spin_lock_bh(&iop_chan->lock);
@@ -558,7 +558,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 
 	dev_dbg(iop_chan->device->common.dev,
-		"%s src_cnt: %d len: %u flags: %lx\n",
+		"%s src_cnt: %d len: %zu flags: %lx\n",
 		__func__, src_cnt, len, flags);
 
 	spin_lock_bh(&iop_chan->lock);
@@ -591,7 +591,7 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
 	if (unlikely(!len))
 		return NULL;
 
-	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
+	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
 		__func__, src_cnt, len);
 
 	spin_lock_bh(&iop_chan->lock);
@@ -629,7 +629,7 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 
 	dev_dbg(iop_chan->device->common.dev,
-		"%s src_cnt: %d len: %u flags: %lx\n",
+		"%s src_cnt: %d len: %zu flags: %lx\n",
 		__func__, src_cnt, len, flags);
 
 	if (dmaf_p_disabled_continue(flags))
@@ -692,7 +692,7 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 		return NULL;
 	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 
-	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
+	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
 		__func__, src_cnt, len);
 
 	spin_lock_bh(&iop_chan->lock);
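
These are straight format-string fixes: dma_addr_t may be 32 or 64 bits depending on the configuration, so it is cast to u64 for %llx, and size_t takes %zu. Kernel printk also offers %pad, which prints a dma_addr_t at its native width via a pointer and may be the tidier choice:

#include <linux/printk.h>
#include <linux/types.h>

static void dump_desc(dma_addr_t phys, size_t len)
{
	/* %pad takes a pointer to dma_addr_t and prints at native width */
	pr_debug("desc phys: %pad len: %zu\n", &phys, len);
}
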
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 31e9fc1..c23b191 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -447,6 +447,83 @@ struct gpi_dev {
 	struct dentry *dentry;
 };
 
+static struct gpi_dev *gpi_dev_dbg;
+
+struct reg_info {
+	char *name;
+	u32 offset;
+	u32 val;
+};
+
+static const struct reg_info gpi_debug_ev_cntxt[] = {
+	{ "CONFIG", CNTXT_0_CONFIG },
+	{ "R_LENGTH", CNTXT_1_R_LENGTH },
+	{ "BASE_LSB", CNTXT_2_RING_BASE_LSB },
+	{ "BASE_MSB", CNTXT_3_RING_BASE_MSB },
+	{ "RP_LSB", CNTXT_4_RING_RP_LSB },
+	{ "RP_MSB", CNTXT_5_RING_RP_MSB },
+	{ "WP_LSB", CNTXT_6_RING_WP_LSB },
+	{ "WP_MSB", CNTXT_7_RING_WP_MSB },
+	{ "INT_MOD", CNTXT_8_RING_INT_MOD },
+	{ "INTVEC", CNTXT_9_RING_INTVEC },
+	{ "MSI_LSB", CNTXT_10_RING_MSI_LSB },
+	{ "MSI_MSB", CNTXT_11_RING_MSI_MSB },
+	{ "RP_UPDATE_LSB", CNTXT_12_RING_RP_UPDATE_LSB },
+	{ "RP_UPDATE_MSB", CNTXT_13_RING_RP_UPDATE_MSB },
+	{ NULL },
+};
+
+static const struct reg_info gpi_debug_ch_cntxt[] = {
+	{ "CONFIG", CNTXT_0_CONFIG },
+	{ "R_LENGTH", CNTXT_1_R_LENGTH },
+	{ "BASE_LSB", CNTXT_2_RING_BASE_LSB },
+	{ "BASE_MSB", CNTXT_3_RING_BASE_MSB },
+	{ "RP_LSB", CNTXT_4_RING_RP_LSB },
+	{ "RP_MSB", CNTXT_5_RING_RP_MSB },
+	{ "WP_LSB", CNTXT_6_RING_WP_LSB },
+	{ "WP_MSB", CNTXT_7_RING_WP_MSB },
+	{ NULL },
+};
+
+static const struct reg_info gpi_debug_regs[] = {
+	{ "DEBUG_PC", GPI_DEBUG_PC_FOR_DEBUG },
+	{ "SW_RF_10", GPI_DEBUG_SW_RF_n_READ(10) },
+	{ "SW_RF_11", GPI_DEBUG_SW_RF_n_READ(11) },
+	{ "SW_RF_12", GPI_DEBUG_SW_RF_n_READ(12) },
+	{ "SW_RF_21", GPI_DEBUG_SW_RF_n_READ(21) },
+	{ NULL },
+};
+
+static const struct reg_info gpi_debug_qsb_regs[] = {
+	{ "QSB_LOG_SEL", GPI_DEBUG_QSB_LOG_SEL },
+	{ "QSB_LOG_CLR", GPI_DEBUG_QSB_LOG_CLR },
+	{ "QSB_LOG_ERR_TRNS_ID", GPI_DEBUG_QSB_LOG_ERR_TRNS_ID },
+	{ "QSB_LOG_0", GPI_DEBUG_QSB_LOG_0 },
+	{ "QSB_LOG_1", GPI_DEBUG_QSB_LOG_1 },
+	{ "QSB_LOG_2", GPI_DEBUG_QSB_LOG_2 },
+	{ "LAST_MISC_ID_0", GPI_DEBUG_QSB_LOG_LAST_MISC_ID(0) },
+	{ "LAST_MISC_ID_1", GPI_DEBUG_QSB_LOG_LAST_MISC_ID(1) },
+	{ "LAST_MISC_ID_2", GPI_DEBUG_QSB_LOG_LAST_MISC_ID(2) },
+	{ "LAST_MISC_ID_3", GPI_DEBUG_QSB_LOG_LAST_MISC_ID(3) },
+	{ NULL },
+};
+
+struct gpi_reg_table {
+	u64 timestamp;
+	struct reg_info *ev_cntxt_info;
+	struct reg_info *chan[MAX_CHANNELS_PER_GPII];
+	struct reg_info *gpi_debug_regs;
+	struct reg_info *gpii_cntxt;
+	struct reg_info *gpi_debug_qsb_regs;
+	u32 ev_scratch_0;
+	u32 ch_scratch_0[MAX_CHANNELS_PER_GPII];
+	void *ev_ring;
+	u32 ev_ring_len;
+	void *ch_ring[MAX_CHANNELS_PER_GPII];
+	u32 ch_ring_len[MAX_CHANNELS_PER_GPII];
+	u32 error_log;
+};
+
 struct gpii_chan {
 	struct virt_dma_chan vc;
 	u32 chid;
@@ -501,6 +578,9 @@ struct gpii {
 	atomic_t dbg_index;
 	char label[GPI_LABEL_SIZE];
 	struct dentry *dentry;
+	struct gpi_reg_table dbg_reg_table;
+	bool reg_table_dump;
+	u32 dbg_gpi_irq_cnt;
 };
 
 struct gpi_desc {
@@ -610,6 +690,156 @@ static inline void gpi_write_reg_field(struct gpii *gpii,
 	gpi_write_reg(gpii, addr, val);
 }
 
+static void gpi_dump_debug_reg(struct gpii *gpii)
+{
+	struct gpi_reg_table *dbg_reg_table = &gpii->dbg_reg_table;
+	struct reg_info *reg_info;
+	int chan;
+	const gfp_t gfp = GFP_ATOMIC;
+	const struct reg_info gpii_cntxt[] = {
+		{ "TYPE_IRQ", GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS
+					(gpii->gpii_id) },
+		{ "TYPE_IRQ_MSK", GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
+					(gpii->gpii_id) },
+		{ "CH_IRQ", GPI_GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS
+					(gpii->gpii_id) },
+		{ "EV_IRQ", GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS
+					(gpii->gpii_id) },
+		{ "CH_IRQ_MSK", GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
+					(gpii->gpii_id) },
+		{ "EV_IRQ_MSK", GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
+					(gpii->gpii_id) },
+		{ "IEOB_IRQ", GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_OFFS
+					(gpii->gpii_id) },
+		{ "IEOB_IRQ_MSK", GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
+					(gpii->gpii_id) },
+		{ "GLOB_IRQ", GPI_GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS
+					(gpii->gpii_id) },
+		{ NULL },
+	};
+
+	dbg_reg_table->timestamp = sched_clock();
+
+	if (!dbg_reg_table->gpii_cntxt) {
+		dbg_reg_table->gpii_cntxt = kzalloc(sizeof(gpii_cntxt), gfp);
+		if (!dbg_reg_table->gpii_cntxt)
+			return;
+		memcpy((void *)dbg_reg_table->gpii_cntxt, (void *)gpii_cntxt,
+		       sizeof(gpii_cntxt));
+	}
+
+	/* log gpii cntxt */
+	reg_info = dbg_reg_table->gpii_cntxt;
+	for (; reg_info->name; reg_info++)
+		reg_info->val = readl_relaxed(gpii->regs + reg_info->offset);
+
+	if (!dbg_reg_table->ev_cntxt_info) {
+		dbg_reg_table->ev_cntxt_info =
+			kzalloc(sizeof(gpi_debug_ev_cntxt), gfp);
+		if (!dbg_reg_table->ev_cntxt_info)
+			return;
+		memcpy((void *)dbg_reg_table->ev_cntxt_info,
+			(void *)gpi_debug_ev_cntxt, sizeof(gpi_debug_ev_cntxt));
+	}
+
+	/* log ev cntxt */
+	reg_info = dbg_reg_table->ev_cntxt_info;
+	for (; reg_info->name; reg_info++)
+		reg_info->val = readl_relaxed(gpii->ev_cntxt_base_reg +
+					      reg_info->offset);
+
+	/* dump channel cntxt registers */
+	for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
+		if (!dbg_reg_table->chan[chan]) {
+			dbg_reg_table->chan[chan] =
+				kzalloc(sizeof(gpi_debug_ch_cntxt), gfp);
+			if (!dbg_reg_table->chan[chan])
+				return;
+			memcpy((void *)dbg_reg_table->chan[chan],
+				(void *)gpi_debug_ch_cntxt,
+				sizeof(gpi_debug_ch_cntxt));
+		}
+		reg_info = dbg_reg_table->chan[chan];
+		for (; reg_info->name; reg_info++)
+			reg_info->val =
+			readl_relaxed(
+			gpii->gpii_chan[chan].ch_cntxt_base_reg +
+							reg_info->offset);
+	}
+
+	if (!dbg_reg_table->gpi_debug_regs) {
+		dbg_reg_table->gpi_debug_regs =
+			kzalloc(sizeof(gpi_debug_regs), gfp);
+		if (!dbg_reg_table->gpi_debug_regs)
+			return;
+		memcpy((void *)dbg_reg_table->gpi_debug_regs,
+			(void *)gpi_debug_regs, sizeof(gpi_debug_regs));
+	}
+
+	/* log debug register */
+	reg_info = dbg_reg_table->gpi_debug_regs;
+	for (; reg_info->name; reg_info++)
+		reg_info->val = readl_relaxed(gpii->gpi_dev->regs +
+					reg_info->offset);
+
+	if (!dbg_reg_table->gpi_debug_qsb_regs) {
+		dbg_reg_table->gpi_debug_qsb_regs =
+			kzalloc(sizeof(gpi_debug_qsb_regs), gfp);
+		if (!dbg_reg_table->gpi_debug_qsb_regs)
+			return;
+		memcpy((void *)dbg_reg_table->gpi_debug_qsb_regs,
+			(void *)gpi_debug_qsb_regs,
+				sizeof(gpi_debug_qsb_regs));
+	}
+
+	/* log QSB register */
+	reg_info = dbg_reg_table->gpi_debug_qsb_regs;
+	for (; reg_info->name; reg_info++)
+		reg_info->val = readl_relaxed(gpii->gpi_dev->regs +
+					reg_info->offset);
+
+	/* dump scratch registers */
+	dbg_reg_table->ev_scratch_0 = readl_relaxed(gpii->regs +
+			GPI_GPII_n_CNTXT_SCRATCH_0_OFFS(gpii->gpii_id));
+	for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++)
+		dbg_reg_table->ch_scratch_0[chan] = readl_relaxed(gpii->regs +
+				GPI_GPII_n_CH_k_SCRATCH_0_OFFS(gpii->gpii_id,
+						gpii->gpii_chan[chan].chid));
+
+	/* Copy the ev ring */
+	if (!dbg_reg_table->ev_ring) {
+		dbg_reg_table->ev_ring_len = gpii->ev_ring.len;
+		dbg_reg_table->ev_ring =
+				kzalloc(dbg_reg_table->ev_ring_len, gfp);
+		if (!dbg_reg_table->ev_ring)
+			return;
+	}
+	memcpy(dbg_reg_table->ev_ring, gpii->ev_ring.base,
+		dbg_reg_table->ev_ring_len);
+
+	/* Copy Transfer rings */
+	for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
+		struct gpii_chan *gpii_chan = &gpii->gpii_chan[chan];
+
+		if (!dbg_reg_table->ch_ring[chan]) {
+			dbg_reg_table->ch_ring_len[chan] =
+					gpii_chan->ch_ring.len;
+			dbg_reg_table->ch_ring[chan] =
+				kzalloc(dbg_reg_table->ch_ring_len[chan], gfp);
+			if (!dbg_reg_table->ch_ring[chan])
+				return;
+		}
+
+		memcpy(dbg_reg_table->ch_ring[chan], gpii_chan->ch_ring.base,
+		       dbg_reg_table->ch_ring_len[chan]);
+	}
+
+	dbg_reg_table->error_log = readl_relaxed(gpii->regs +
+				GPI_GPII_n_ERROR_LOG_OFFS(gpii->gpii_id));
+
+	GPII_ERR(gpii, GPI_DBG_COMMON, "Debug register dump complete\n");
+}
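
The dump added here is table-driven: each register group is an array of {name, offset} entries terminated by a NULL name, lazily duplicated once into the per-gpii table and then refreshed by a readl loop on every dump. The core of the pattern, reduced to a sketch:

#include <linux/io.h>
#include <linux/types.h>

struct reg_snapshot {
	char *name;
	u32 offset;
	u32 val;
};

/* Walk a NULL-name-terminated table, capturing each register once. */
static void snapshot_regs(void __iomem *base, struct reg_snapshot *tbl)
{
	for (; tbl->name; tbl++)
		tbl->val = readl_relaxed(base + tbl->offset);
}
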
+
 static void gpi_disable_interrupts(struct gpii *gpii)
 {
 	struct {
@@ -996,6 +1226,37 @@ static void gpi_process_ch_ctrl_irq(struct gpii *gpii)
 	}
 }
 
+/* Process GPI general error interrupts */
+static void gpi_process_gen_err_irq(struct gpii *gpii)
+{
+	u32 gpii_id = gpii->gpii_id;
+	u32 offset = GPI_GPII_n_CNTXT_GPII_IRQ_STTS_OFFS(gpii_id);
+	u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
+	u32 chid;
+	struct gpii_chan *gpii_chan;
+
+	/* Log the raw status; it is cleared below */
+	GPII_ERR(gpii, GPI_DBG_COMMON, "irq_stts:0x%x\n", irq_stts);
+
+	/* Notify the client about error */
+	for (chid = 0, gpii_chan = gpii->gpii_chan;
+	     chid < MAX_CHANNELS_PER_GPII; chid++, gpii_chan++)
+		if (gpii_chan->client_info.callback)
+			gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_FW_ERROR,
+					      irq_stts);
+
+	/* Clear the register */
+	offset = GPI_GPII_n_CNTXT_GPII_IRQ_CLR_OFFS(gpii_id);
+	gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
+
+	gpii->dbg_gpi_irq_cnt++;
+
+	if (!gpii->reg_table_dump) {
+		gpi_dump_debug_reg(gpii);
+		gpii->reg_table_dump = true;
+	}
+}
+
 /* processing gpi level error interrupts */
 static void gpi_process_glob_err_irq(struct gpii *gpii)
 {
@@ -1151,6 +1412,7 @@ static irqreturn_t gpi_handle_irq(int irq, void *data)
 		if (type) {
 			GPII_CRITIC(gpii, GPI_DBG_COMMON,
 				 "Unhandled interrupt status:0x%x\n", type);
+			gpi_process_gen_err_irq(gpii);
 			goto exit_irq;
 		}
 		offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
@@ -2544,6 +2806,9 @@ static int gpi_probe(struct platform_device *pdev)
 	if (!gpi_dev)
 		return -ENOMEM;
 
+	/* Keep a global handle for debug register dumps */
+	gpi_dev_dbg = gpi_dev;
+
 	gpi_dev->dev = &pdev->dev;
 	gpi_dev->klog_lvl = DEFAULT_KLOG_LVL;
 	gpi_dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
diff --git a/drivers/dma/qcom/msm_gpi_mmio.h b/drivers/dma/qcom/msm_gpi_mmio.h
index acd74df..46ed27e 100644
--- a/drivers/dma/qcom/msm_gpi_mmio.h
+++ b/drivers/dma/qcom/msm_gpi_mmio.h
@@ -216,3 +216,15 @@ enum CNTXT_OFFS {
 #define GPI_GPII_n_CH_k_SCRATCH_3_OFFS(n, k) \
 	(0x2006C + (0x4000 * (n)) + (0x80 * (k)))
 
+/* Debug registers */
+#define GPI_DEBUG_PC_FOR_DEBUG (0x5048)
+#define GPI_DEBUG_SW_RF_n_READ(n) (0x5100 + (0x4 * (n)))
+
+/* GPI_DEBUG_QSB registers */
+#define GPI_DEBUG_QSB_LOG_SEL (0x5050)
+#define GPI_DEBUG_QSB_LOG_CLR (0x5058)
+#define GPI_DEBUG_QSB_LOG_ERR_TRNS_ID (0x5060)
+#define GPI_DEBUG_QSB_LOG_0 (0x5064)
+#define GPI_DEBUG_QSB_LOG_1 (0x5068)
+#define GPI_DEBUG_QSB_LOG_2 (0x506C)
+#define GPI_DEBUG_QSB_LOG_LAST_MISC_ID(n) (0x5070 + (0x4 * (n)))
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index f4edfc5..3d55405 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -142,7 +142,7 @@ enum d40_events {
  * when the DMA hw is powered off.
  * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
  */
-static u32 d40_backup_regs[] = {
+static __maybe_unused u32 d40_backup_regs[] = {
 	D40_DREG_LCPA,
 	D40_DREG_LCLA,
 	D40_DREG_PRMSE,
@@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = {
 
 #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
 
-static u32 d40_backup_regs_chan[] = {
+static __maybe_unused u32 d40_backup_regs_chan[] = {
 	D40_CHAN_REG_SSCFG,
 	D40_CHAN_REG_SSELT,
 	D40_CHAN_REG_SSPTR,
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 06dd172..8c3c3e5 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1376,7 +1376,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
 
 	chan = &dmadev->chan[id];
 	if (!chan) {
-		dev_err(chan2dev(chan), "MDMA channel not initialized\n");
+		dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n");
 		goto exit;
 	}
 
diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
index 9272b17..6574cb5 100644
--- a/drivers/dma/ti/dma-crossbar.c
+++ b/drivers/dma/ti/dma-crossbar.c
@@ -395,8 +395,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
 
 		ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
 						 nelm * 2);
-		if (ret)
+		if (ret) {
+			kfree(rsv_events);
 			return ret;
+		}
 
 		for (i = 0; i < nelm; i++) {
 			ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index ceabdea..982631d 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -2273,9 +2273,6 @@ static int edma_probe(struct platform_device *pdev)
 
 	ecc->default_queue = info->default_queue;
 
-	for (i = 0; i < ecc->num_slots; i++)
-		edma_write_slot(ecc, i, &dummy_paramset);
-
 	if (info->rsv) {
 		/* Set the reserved slots in inuse list */
 		rsv_slots = info->rsv->rsv_slots;
@@ -2288,6 +2285,12 @@ static int edma_probe(struct platform_device *pdev)
 		}
 	}
 
+	for (i = 0; i < ecc->num_slots; i++) {
+		/* Reset only unused - not reserved - paRAM slots */
+		if (!test_bit(i, ecc->slot_inuse))
+			edma_write_slot(ecc, i, &dummy_paramset);
+	}
+
 	/* Clear the xbar mapped channels in unused list */
 	xbar_chans = info->xbar_chans;
 	if (xbar_chans) {
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index a4a931d..c192bdc 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1237,7 +1237,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
 	if (src_icg) {
 		d->ccr |= CCR_SRC_AMODE_DBLIDX;
 		d->ei = 1;
-		d->fi = src_icg;
+		d->fi = src_icg + 1;
 	} else if (xt->src_inc) {
 		d->ccr |= CCR_SRC_AMODE_POSTINC;
 		d->fi = 0;
@@ -1252,7 +1252,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
 	if (dst_icg) {
 		d->ccr |= CCR_DST_AMODE_DBLIDX;
 		sg->ei = 1;
-		sg->fi = dst_icg;
+		sg->fi = dst_icg + 1;
 	} else if (xt->dst_inc) {
 		d->ccr |= CCR_DST_AMODE_POSTINC;
 		sg->fi = 0;
@@ -1543,8 +1543,10 @@ static int omap_dma_probe(struct platform_device *pdev)
 
 		rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
 				      IRQF_SHARED, "omap-dma-engine", od);
-		if (rc)
+		if (rc) {
+			omap_dma_free(od);
 			return rc;
+		}
 	}
 
 	if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index d7c3507..f9cf1a7 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -501,6 +501,46 @@
 	  Support for error detection and correction on the
           TI SoCs.
 
+config EDAC_CORTEX_ARM64
+	depends on ARM64
+	bool "ARM Cortex A CPUs L1/L2 Caches"
+	help
+	  Support for error detection and correction on the ARM Cortex-A53
+	  and Cortex-A57 CPUs. When debugging issues related to stability
+	  and overall system health, you should probably say 'Y' here.
+
+config EDAC_CORTEX_ARM64_PANIC_ON_CE
+	depends on EDAC_CORTEX_ARM64
+	bool "Panic on correctable errors"
+	help
+	  Forcibly cause a kernel panic if a correctable error (CE) is
+	  detected, even though the error is (by definition) correctable and
+	  would otherwise result in no adverse system effects. This can reduce
+	  debugging times on hardware which may be operating at voltages or
+	  frequencies outside normal specification.
+
+	  For production builds, you should definitely say 'N' here.
+
+config EDAC_CORTEX_ARM64_DBE_IRQ_ONLY
+	depends on EDAC_CORTEX_ARM64
+	bool "Only check for parity errors when an irq is generated"
+	help
+	  On ARM64, a parity error raises an interrupt but may also
+	  trigger a data abort. With this option, EDAC errors are
+	  checked for only when that interrupt fires.
+	  If unsure, say 'N'.
+
+config EDAC_CORTEX_ARM64_PANIC_ON_UE
+	depends on EDAC_CORTEX_ARM64
+	bool "Panic on uncorrectable errors"
+	help
+	  Forcibly cause a kernel panic if an uncorrectable error (UE) is
+	  detected. This can reduce debugging times on hardware which may be
+	  operating at voltages or frequencies outside normal specification.
+
+	  For production builds, you should probably say 'N' here.
+
 config EDAC_QCOM
 	tristate "QCOM EDAC Controller"
 	depends on ARCH_QCOM && QCOM_LLCC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 210521a..f1b01d5 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -79,3 +79,4 @@
 obj-$(CONFIG_EDAC_XGENE)		+= xgene_edac.o
 obj-$(CONFIG_EDAC_TI)			+= ti_edac.o
 obj-$(CONFIG_EDAC_QCOM)			+= qcom_edac.o
+obj-$(CONFIG_EDAC_CORTEX_ARM64)		+= cortex_arm64_edac.o
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 5762c3c..56de378 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -1956,6 +1956,7 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
 	struct altr_arria10_edac *edac = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int irq = irq_desc_get_irq(desc);
+	unsigned long bits;
 
 	dberr = (irq == edac->db_irq) ? 1 : 0;
 	sm_offset = dberr ? A10_SYSMGR_ECC_INTSTAT_DERR_OFST :
@@ -1965,7 +1966,8 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
 
 	regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
 
-	for_each_set_bit(bit, (unsigned long *)&irq_status, 32) {
+	bits = irq_status;
+	for_each_set_bit(bit, &bits, 32) {
 		irq = irq_linear_revmap(edac->domain, dberr * 32 + bit);
 		if (irq)
 			generic_handle_irq(irq);
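
The widening fix matters on 64-bit: for_each_set_bit() operates on unsigned long words, so casting the address of a u32 makes it read 8 bytes, 4 of which are out of bounds. Copying the status into a real unsigned long first keeps the access in range. In sketch form:

#include <linux/bitops.h>
#include <linux/printk.h>

static void handle_status(u32 irq_status)
{
	unsigned long bits = irq_status;	/* for_each_set_bit() wants unsigned long */
	int bit;

	for_each_set_bit(bit, &bits, 32)
		pr_debug("source %d pending\n", bit);
}
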
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index e2addb2..94265e43 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2501,13 +2501,6 @@ static void decode_umc_error(int node_id, struct mce *m)
 		goto log_error;
 	}
 
-	if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
-		err.err_code = ERR_NORM_ADDR;
-		goto log_error;
-	}
-
-	error_address_to_page_and_offset(sys_addr, &err);
-
 	if (!(m->status & MCI_STATUS_SYNDV)) {
 		err.err_code = ERR_SYND;
 		goto log_error;
@@ -2524,6 +2517,13 @@ static void decode_umc_error(int node_id, struct mce *m)
 
 	err.csrow = m->synd & 0x7;
 
+	if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
+		err.err_code = ERR_NORM_ADDR;
+		goto log_error;
+	}
+
+	error_address_to_page_and_offset(sys_addr, &err);
+
 log_error:
 	__log_ecc_error(mci, &err, ecc_type);
 }
@@ -3101,12 +3101,15 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
 static inline void
 f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
 {
-	u8 i, ecc_en = 1, cpk_en = 1;
+	u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
 
 	for (i = 0; i < NUM_UMCS; i++) {
 		if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
 			ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
 			cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
+
+			dev_x4  &= !!(pvt->umc[i].dimm_cfg & BIT(6));
+			dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
 		}
 	}
 
@@ -3114,8 +3117,15 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
 	if (ecc_en) {
 		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
 
-		if (cpk_en)
+		if (!cpk_en)
+			return;
+
+		if (dev_x4)
 			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+		else if (dev_x16)
+			mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
+		else
+			mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
 	}
 }
 
diff --git a/drivers/edac/cortex_arm64_edac.c b/drivers/edac/cortex_arm64_edac.c
new file mode 100644
index 0000000..555d085
--- /dev/null
+++ b/drivers/edac/cortex_arm64_edac.c
@@ -0,0 +1,1000 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/edac.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/perf_event.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/percpu.h>
+#include <linux/msm_rtb.h>
+
+#include <asm/cputype.h>
+#include <asm/esr.h>
+
+#include "edac_mc.h"
+#include "edac_device.h"
+
+#define A53_CPUMERRSR_FATAL(a)	((a) & (1LL << 63))
+#define A53_CPUMERRSR_OTHER(a)	(((a) >> 40) & 0xff)
+#define A53_CPUMERRSR_REPT(a)	(((a) >> 32) & 0xff)
+#define A53_CPUMERRSR_VALID(a)	((a) & (1 << 31))
+#define A53_CPUMERRSR_RAMID(a)	(((a) >> 24) & 0x7f)
+#define A53_CPUMERRSR_CPUID(a)	(((a) >> 18) & 0x07)
+#define A53_CPUMERRSR_ADDR(a)	((a) & 0xfff)
+
+#define A53_L2MERRSR_FATAL(a)	((a) & (1LL << 63))
+#define A53_L2MERRSR_OTHER(a)	(((a) >> 40) & 0xff)
+#define A53_L2MERRSR_REPT(a)	(((a) >> 32) & 0xff)
+#define A53_L2MERRSR_VALID(a)	((a) & (1 << 31))
+#define A53_L2MERRSR_RAMID(a)	(((a) >> 24) & 0x7f)
+#define A53_L2MERRSR_CPUID(a)	(((a) >> 18) & 0x0f)
+#define A53_L2MERRSR_INDEX(a)	(((a) >> 3) & 0x3fff)
+
+#define A57_CPUMERRSR_FATAL(a)	((a) & (1LL << 63))
+#define A57_CPUMERRSR_OTHER(a)	(((a) >> 40) & 0xff)
+#define A57_CPUMERRSR_REPT(a)	(((a) >> 32) & 0xff)
+#define A57_CPUMERRSR_VALID(a)	((a) & (1 << 31))
+#define A57_CPUMERRSR_RAMID(a)	(((a) >> 24) & 0x7f)
+#define A57_CPUMERRSR_BANK(a)	(((a) >> 18) & 0x1f)
+#define A57_CPUMERRSR_INDEX(a)	((a) & 0x1ffff)
+
+#define A57_L2MERRSR_FATAL(a)	((a) & (1LL << 63))
+#define A57_L2MERRSR_OTHER(a)	(((a) >> 40) & 0xff)
+#define A57_L2MERRSR_REPT(a)	(((a) >> 32) & 0xff)
+#define A57_L2MERRSR_VALID(a)	((a) & (1 << 31))
+#define A57_L2MERRSR_RAMID(a)	(((a) >> 24) & 0x7f)
+#define A57_L2MERRSR_CPUID(a)	(((a) >> 18) & 0x0f)
+#define A57_L2MERRSR_INDEX(a)	((a) & 0x1ffff)
+
+#define KRYO2XX_GOLD_L2MERRSR_FATAL(a)	((a) & (1LL << 63))
+#define KRYO2XX_GOLD_L2MERRSR_OTHER(a)	(((a) >> 40) & 0x3f)
+#define KRYO2XX_GOLD_L2MERRSR_REPT(a)	(((a) >> 32) & 0x3f)
+#define KRYO2XX_GOLD_L2MERRSR_VALID(a)	((a) & (1 << 31))
+#define KRYO2XX_GOLD_L2MERRSR_RAMID(a)	((a) & (1 << 24))
+#define KRYO2XX_GOLD_L2MERRSR_WAY(a)	(((a) >> 18) & 0x0f)
+#define KRYO2XX_GOLD_L2MERRSR_INDEX(a)	(((a) >> 3) & 0x3fff)
+
+#define L2ECTLR_INT_ERR		(1 << 30)
+#define L2ECTLR_EXT_ERR		(1 << 29)
+
+#define ESR_SERROR(a)	((a) >> ESR_ELx_EC_SHIFT == ESR_ELx_EC_SERROR)
+#define ESR_VALID(a)	((a) & BIT(24))
+#define ESR_L2_DBE(a) (ESR_SERROR(a) && ESR_VALID(a) && \
+			(((a) & 0x00C00003) == 0x1))
+
+#define CCI_IMPRECISEERROR_REG	0x10
+
+#define L1_CACHE		0
+#define L2_CACHE		1
+#define CCI			2
+
+#define A53_L1_CE			0
+#define A53_L1_UE			1
+#define A53_L2_CE			2
+#define A53_L2_UE			3
+#define A57_L1_CE			4
+#define A57_L1_UE			5
+#define A57_L2_CE			6
+#define A57_L2_UE			7
+#define L2_EXT_UE			8
+#define CCI_UE				9
+#define KRYO2XX_SILVER_L1_CE		10
+#define KRYO2XX_SILVER_L1_UE		11
+#define KRYO2XX_SILVER_L2_CE		12
+#define KRYO2XX_SILVER_L2_UE		13
+#define KRYO2XX_GOLD_L2_CE		14
+#define KRYO2XX_GOLD_L2_UE		15
+
+#ifdef CONFIG_EDAC_CORTEX_ARM64_PANIC_ON_UE
+#define ARM64_ERP_PANIC_ON_UE 1
+#else
+#define ARM64_ERP_PANIC_ON_UE 0
+#endif
+
+#ifdef CONFIG_EDAC_CORTEX_ARM64_PANIC_ON_CE
+static int panic_on_ce = 1;
+#else
+static int panic_on_ce;
+#endif
+module_param(panic_on_ce, int, 0);
+
+#define EDAC_CPU	"arm64"
+
+enum error_type {
+	SBE,
+	DBE,
+};
+
+static const char * const err_name[] = {
+	"Single-bit",
+	"Double-bit",
+};
+
+struct erp_drvdata {
+	struct edac_device_ctl_info *edev_ctl;
+	void __iomem *cci_base;
+	struct notifier_block nb_pm;
+	struct notifier_block nb_panic;
+	struct work_struct work;
+	struct perf_event *memerr_counters[NR_CPUS];
+};
+
+static struct erp_drvdata *panic_handler_drvdata;
+static struct erp_drvdata *drv;
+struct erp_local_data {
+	struct erp_drvdata *drv;
+	enum error_type err;
+};
+
+#define MEM_ERROR_EVENT		0x1A
+
+struct errors_edac {
+	const char * const msg;
+	void (*func)(struct edac_device_ctl_info *edac_dev,
+			int inst_nr, int block_nr, const char *msg);
+};
+
+static const struct errors_edac errors[] = {
+	{"A53 L1 Correctable Error", edac_device_handle_ce },
+	{"A53 L1 Uncorrectable Error", edac_device_handle_ue },
+	{"A53 L2 Correctable Error", edac_device_handle_ce },
+	{"A53 L2 Uncorrectable Error", edac_device_handle_ue },
+	{"A57 L1 Correctable Error", edac_device_handle_ce },
+	{"A57 L1 Uncorrectable Error", edac_device_handle_ue },
+	{"A57 L2 Correctable Error", edac_device_handle_ce },
+	{"A57 L2 Uncorrectable Error", edac_device_handle_ue },
+	{"L2 External Error", edac_device_handle_ue },
+	{"CCI Error", edac_device_handle_ue },
+	{"Kryo2xx Silver L1 Correctable Error", edac_device_handle_ce },
+	{"Kryo2xx Silver L1 Uncorrectable Error", edac_device_handle_ue },
+	{"Kryo2xx Silver L2 Correctable Error", edac_device_handle_ce },
+	{"Kryo2xx Silver L2 Uncorrectable Error", edac_device_handle_ue },
+	{"Kryo2xx Gold L2 Correctable Error", edac_device_handle_ce },
+	{"Kryo2xx Gold L2 Uncorrectable Error", edac_device_handle_ue },
+};
+
+#define read_l2merrsr_el1 ({						\
+	u64 __val;							\
+	asm("mrs %0, s3_1_c15_c2_3" : "=r" (__val));			\
+	__val;								\
+})
+
+#define read_l2ectlr_el1 ({						\
+	u32 __val;							\
+	asm("mrs %0, s3_1_c11_c0_3" : "=r" (__val));			\
+	__val;								\
+})
+
+#define read_cpumerrsr_el1 ({						\
+	u64 __val;							\
+	asm("mrs %0, s3_1_c15_c2_2" : "=r" (__val));			\
+	__val;								\
+})
+
+#define read_esr_el1 ({							\
+	u64 __val;							\
+	asm("mrs %0, esr_el1" : "=r" (__val));				\
+	__val;								\
+})
+
+#define write_l2merrsr_el1(val) ({					\
+	asm("msr s3_1_c15_c2_3, %0" : : "r" (val));			\
+})
+
+#define write_l2ectlr_el1(val) ({					\
+	asm("msr s3_1_c11_c0_3, %0" : : "r" (val));			\
+})
+
+#define write_cpumerrsr_el1(val) ({					\
+	asm("msr s3_1_c15_c2_2, %0" : : "r" (val));			\
+})
+
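
These wrappers issue mrs/msr on IMPLEMENTATION DEFINED registers by their raw s<op0>_<op1>_c<crn>_c<crm>_<op2> encodings (CPUMERRSR_EL1 and L2MERRSR_EL1 on A53/A57-class cores). On kernels that provide the sysreg_s helpers (an assumption; this driver open-codes the asm), the same read could be written as:

#include <asm/sysreg.h>
#include <linux/types.h>

/* s3_1_c15_c2_3 is the IMPLEMENTATION DEFINED L2MERRSR_EL1 encoding */
#define SYS_L2MERRSR_EL1	sys_reg(3, 1, 15, 2, 3)

static inline u64 l2merrsr_read(void)
{
	return read_sysreg_s(SYS_L2MERRSR_EL1);
}
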
+static void kryo2xx_print_error_state_regs(void)
+{
+	u64 l2merrsr;
+	u64 cpumerrsr;
+	u32 esr_el1;
+	u32 l2ectlr;
+
+	cpumerrsr = read_cpumerrsr_el1;
+	l2merrsr = read_l2merrsr_el1;
+	esr_el1 = read_esr_el1;
+	l2ectlr = read_l2ectlr_el1;
+
+	/* store data in uncached rtb logs */
+	uncached_logk_pc(LOGK_READL, __builtin_return_address(0),
+				(void *)cpumerrsr);
+	uncached_logk_pc(LOGK_READL, __builtin_return_address(0),
+				(void *)l2merrsr);
+	uncached_logk_pc(LOGK_READL, __builtin_return_address(0),
+				(void *)((u64)esr_el1));
+	uncached_logk_pc(LOGK_READL, __builtin_return_address(0),
+				(void *)((u64)l2ectlr));
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "CPUMERRSR value = %#llx\n",
+								cpumerrsr);
+	edac_printk(KERN_CRIT, EDAC_CPU, "L2MERRSR value = %#llx\n", l2merrsr);
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "ESR value = %#x\n", esr_el1);
+	edac_printk(KERN_CRIT, EDAC_CPU, "L2ECTLR value = %#x\n", l2ectlr);
+	if (ESR_L2_DBE(esr_el1))
+		edac_printk(KERN_CRIT, EDAC_CPU,
+			"Double bit error on dirty L2 cacheline\n");
+}
+
+static void kryo2xx_gold_print_error_state_regs(void)
+{
+	u64 l2merrsr;
+	u32 esr_el1;
+	u32 l2ectlr;
+
+	l2merrsr = read_l2merrsr_el1;
+	esr_el1 = read_esr_el1;
+	l2ectlr = read_l2ectlr_el1;
+
+	uncached_logk_pc(LOGK_READL, __builtin_return_address(0),
+				(void *)l2merrsr);
+	uncached_logk_pc(LOGK_READL, __builtin_return_address(0),
+				(void *)((u64)esr_el1));
+	uncached_logk_pc(LOGK_READL, __builtin_return_address(0),
+				(void *)((u64)l2ectlr));
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "L2MERRSR value = %#llx\n", l2merrsr);
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "ESR value = %#x\n", esr_el1);
+	edac_printk(KERN_CRIT, EDAC_CPU, "L2ECTLR value = %#x\n", l2ectlr);
+	if (ESR_L2_DBE(esr_el1))
+		edac_printk(KERN_CRIT, EDAC_CPU,
+			"Double bit error on dirty L2 cacheline\n");
+}
+
+static void kryo2xx_silver_parse_cpumerrsr(struct erp_local_data *ed)
+{
+	u64 cpumerrsr;
+	int cpuid;
+
+	cpumerrsr = read_cpumerrsr_el1;
+
+	if (!A53_CPUMERRSR_VALID(cpumerrsr))
+		return;
+
+	if (A53_CPUMERRSR_FATAL(cpumerrsr))
+		ed->err = DBE;
+
+	edac_printk(KERN_CRIT, EDAC_CPU,
+			"Kryo2xx Silver CPU%d L1 %s Error detected\n",
+			smp_processor_id(), err_name[ed->err]);
+
+	kryo2xx_print_error_state_regs();
+	if (ed->err == DBE)
+		edac_printk(KERN_CRIT, EDAC_CPU, "Fatal error\n");
+
+	cpuid = A53_CPUMERRSR_CPUID(cpumerrsr);
+
+	switch (A53_CPUMERRSR_RAMID(cpumerrsr)) {
+	case 0x0:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L1 Instruction tag RAM way is %d\n", cpuid);
+		break;
+	case 0x1:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L1 Instruction data RAM bank is %d\n", cpuid);
+		break;
+	case 0x8:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L1 Data tag RAM cpu %d way is %d\n",
+				cpuid / 4, cpuid % 4);
+		break;
+	case 0x9:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L1 Data data RAM cpu %d way is %d\n",
+				cpuid / 4, cpuid % 4);
+		break;
+	case 0xA:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L1 Data dirty RAM cpu %d way is %d\n",
+				cpuid / 4, cpuid % 4);
+		break;
+	case 0x18:
+		edac_printk(KERN_CRIT, EDAC_CPU, "TLB RAM way is %d\n", cpuid);
+		break;
+	default:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"Error in unknown RAM ID: %d\n",
+				(int) A53_CPUMERRSR_RAMID(cpumerrsr));
+		break;
+	}
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "Repeated error count: %d\n",
+					 (int) A53_CPUMERRSR_REPT(cpumerrsr));
+	edac_printk(KERN_CRIT, EDAC_CPU, "Other error count: %d\n",
+					 (int) A53_CPUMERRSR_OTHER(cpumerrsr));
+
+	if (ed->err == SBE)
+		errors[KRYO2XX_SILVER_L1_CE].func(ed->drv->edev_ctl,
+			smp_processor_id(), L1_CACHE,
+			errors[KRYO2XX_SILVER_L1_CE].msg);
+	else if (ed->err == DBE)
+		errors[KRYO2XX_SILVER_L1_UE].func(ed->drv->edev_ctl,
+			smp_processor_id(), L1_CACHE,
+			errors[KRYO2XX_SILVER_L1_UE].msg);
+	write_cpumerrsr_el1(0);
+}
+
+static void kryo2xx_silver_parse_l2merrsr(struct erp_local_data *ed)
+{
+	u64 l2merrsr;
+	u32 l2ectlr;
+	int cpuid;
+
+	l2merrsr = read_l2merrsr_el1;
+	l2ectlr = read_l2ectlr_el1;
+
+	if (!A53_L2MERRSR_VALID(l2merrsr))
+		return;
+
+	if (A53_L2MERRSR_FATAL(l2merrsr))
+		ed->err = DBE;
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "Kyro2xx Silver L2 %s Error detected\n",
+			err_name[ed->err]);
+	kryo2xx_print_error_state_regs();
+	if (ed->err == DBE)
+		edac_printk(KERN_CRIT, EDAC_CPU, "Fatal error\n");
+
+	cpuid = A53_L2MERRSR_CPUID(l2merrsr);
+
+	switch (A53_L2MERRSR_RAMID(l2merrsr)) {
+	case 0x10:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L2 tag RAM way is %d\n", cpuid);
+		break;
+	case 0x11:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L2 data RAM bank is %d\n", cpuid);
+		break;
+	case 0x12:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"SCU snoop filter RAM cpu %d way is %d\n",
+				cpuid / 4, cpuid % 4);
+		break;
+	default:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"Error in unknown RAM ID: %d\n",
+				(int) A53_L2MERRSR_RAMID(l2merrsr));
+		break;
+	}
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "Repeated error count: %d\n",
+					 (int) A53_L2MERRSR_REPT(l2merrsr));
+	edac_printk(KERN_CRIT, EDAC_CPU, "Other error count: %d\n",
+					 (int) A53_L2MERRSR_OTHER(l2merrsr));
+
+	if (ed->err == SBE)
+		errors[KRYO2XX_SILVER_L2_CE].func(ed->drv->edev_ctl,
+			smp_processor_id(), L2_CACHE,
+			errors[KRYO2XX_SILVER_L2_CE].msg);
+	else if (ed->err == DBE)
+		errors[KRYO2XX_SILVER_L2_UE].func(ed->drv->edev_ctl,
+			smp_processor_id(), L2_CACHE,
+			errors[KRYO2XX_SILVER_L2_UE].msg);
+	write_l2merrsr_el1(0);
+}
+
+
+static void ca57_parse_cpumerrsr(struct erp_local_data *ed)
+{
+	u64 cpumerrsr;
+	int bank;
+
+	cpumerrsr = read_cpumerrsr_el1;
+
+	if (!A57_CPUMERRSR_VALID(cpumerrsr))
+		return;
+
+	if (A57_CPUMERRSR_FATAL(cpumerrsr))
+		ed->err = DBE;
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "Cortex A57 CPU%d L1 %s Error detected\n",
+					 smp_processor_id(), err_name[ed->err]);
+	kryo2xx_print_error_state_regs();
+	if (ed->err == DBE)
+		edac_printk(KERN_CRIT, EDAC_CPU, "Fatal error\n");
+
+	bank = A57_CPUMERRSR_BANK(cpumerrsr);
+
+	switch (A57_CPUMERRSR_RAMID(cpumerrsr)) {
+	case 0x0:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L1 Instruction tag RAM bank %d\n", bank);
+		break;
+	case 0x1:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L1 Instruction data RAM bank %d\n", bank);
+		break;
+	case 0x8:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L1 Data tag RAM bank %d\n", bank);
+		break;
+	case 0x9:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L1 Data data RAM bank %d\n", bank);
+		break;
+	case 0x18:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"TLB RAM bank %d\n", bank);
+		break;
+	default:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"Error in unknown RAM ID: %d\n",
+				(int) A57_CPUMERRSR_RAMID(cpumerrsr));
+		break;
+	}
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "Repeated error count: %d\n",
+					 (int) A57_CPUMERRSR_REPT(cpumerrsr));
+	edac_printk(KERN_CRIT, EDAC_CPU, "Other error count: %d\n",
+					 (int) A57_CPUMERRSR_OTHER(cpumerrsr));
+
+	if (ed->err == SBE)
+		errors[A57_L1_CE].func(ed->drv->edev_ctl, smp_processor_id(),
+					L1_CACHE, errors[A57_L1_CE].msg);
+	else if (ed->err == DBE)
+		errors[A57_L1_UE].func(ed->drv->edev_ctl, smp_processor_id(),
+					L1_CACHE, errors[A57_L1_UE].msg);
+	write_cpumerrsr_el1(0);
+}
+
+static void ca57_parse_l2merrsr(struct erp_local_data *ed)
+{
+	u64 l2merrsr;
+	u32 l2ectlr;
+	int cpuid;
+
+	l2merrsr = read_l2merrsr_el1;
+	l2ectlr = read_l2ectlr_el1;
+
+	if (!A57_L2MERRSR_VALID(l2merrsr))
+		return;
+
+	if (A57_L2MERRSR_FATAL(l2merrsr))
+		ed->err = DBE;
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "CortexA57 L2 %s Error detected\n",
+							err_name[ed->err]);
+	kryo2xx_print_error_state_regs();
+	if (ed->err == DBE)
+		edac_printk(KERN_CRIT, EDAC_CPU, "Fatal error\n");
+
+	cpuid = A57_L2MERRSR_CPUID(l2merrsr);
+
+	switch (A57_L2MERRSR_RAMID(l2merrsr)) {
+	case 0x10:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L2 tag RAM cpu %d way is %d\n",
+				cpuid / 2, cpuid % 2);
+		break;
+	case 0x11:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L2 data RAM cpu %d bank is %d\n",
+				cpuid / 2, cpuid % 2);
+		break;
+	case 0x12:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"SCU snoop tag RAM bank is %d\n", cpuid);
+		break;
+	case 0x14:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L2 dirty RAM cpu %d bank is %d\n",
+				cpuid / 2, cpuid % 2);
+		break;
+	case 0x18:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"L2 inclusion PF RAM bank is %d\n", cpuid);
+		break;
+	default:
+		edac_printk(KERN_CRIT, EDAC_CPU,
+				"Error in unknown RAM ID: %d\n",
+				(int) A57_L2MERRSR_RAMID(l2merrsr));
+		break;
+	}
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "Repeated error count: %d\n",
+					 (int) A57_L2MERRSR_REPT(l2merrsr));
+	edac_printk(KERN_CRIT, EDAC_CPU, "Other error count: %d\n",
+					 (int) A57_L2MERRSR_OTHER(l2merrsr));
+
+	if (ed->err == SBE) {
+		errors[A57_L2_CE].func(ed->drv->edev_ctl, smp_processor_id(),
+					L2_CACHE, errors[A57_L2_CE].msg);
+	} else if (ed->err == DBE) {
+		errors[A57_L2_UE].func(ed->drv->edev_ctl, smp_processor_id(),
+					L2_CACHE, errors[A57_L2_UE].msg);
+	}
+	write_l2merrsr_el1(0);
+}
+
+
+static void kryo2xx_gold_parse_l2merrsr(struct erp_local_data *ed)
+{
+	u64 l2merrsr;
+	int ramid, way;
+
+	l2merrsr = read_l2merrsr_el1;
+
+	if (!KRYO2XX_GOLD_L2MERRSR_VALID(l2merrsr))
+		return;
+
+	if (KRYO2XX_GOLD_L2MERRSR_FATAL(l2merrsr))
+		ed->err = DBE;
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "Kryo2xx Gold L2 %s Error detected\n",
+							err_name[ed->err]);
+	kryo2xx_gold_print_error_state_regs();
+	if (ed->err == DBE)
+		edac_printk(KERN_CRIT, EDAC_CPU, "Fatal error\n");
+
+	way = KRYO2XX_GOLD_L2MERRSR_WAY(l2merrsr);
+	ramid = KRYO2XX_GOLD_L2MERRSR_RAMID(l2merrsr);
+
+	edac_printk(KERN_CRIT, EDAC_CPU,
+				"L2 %s RAM error in way 0x%02x, index 0x%04x\n",
+				ramid ? "data" : "tag",
+				(int) KRYO2XX_GOLD_L2MERRSR_WAY(l2merrsr),
+				(int) KRYO2XX_GOLD_L2MERRSR_INDEX(l2merrsr));
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "Repeated error count: %d\n",
+		(int) KRYO2XX_GOLD_L2MERRSR_REPT(l2merrsr));
+	edac_printk(KERN_CRIT, EDAC_CPU, "Other error count: %d\n",
+		(int) KRYO2XX_GOLD_L2MERRSR_OTHER(l2merrsr));
+
+	if (ed->err == SBE) {
+		errors[KRYO2XX_GOLD_L2_CE].func(ed->drv->edev_ctl,
+			smp_processor_id(), L2_CACHE,
+			errors[KRYO2XX_GOLD_L2_CE].msg);
+	} else if (ed->err == DBE) {
+		errors[KRYO2XX_GOLD_L2_UE].func(ed->drv->edev_ctl,
+			smp_processor_id(), L2_CACHE,
+			errors[KRYO2XX_GOLD_L2_UE].msg);
+	}
+	write_l2merrsr_el1(0);
+}
+
+static DEFINE_SPINLOCK(local_handler_lock);
+static DEFINE_SPINLOCK(l2ectlr_lock);
+
+static void arm64_erp_local_handler(void *info)
+{
+	struct erp_local_data *errdata = info;
+	unsigned int cpuid = read_cpuid_id();
+	unsigned int partnum = read_cpuid_part_number();
+	unsigned long flags, flags2;
+	u32 l2ectlr;
+
+	spin_lock_irqsave(&local_handler_lock, flags);
+	edac_printk(KERN_CRIT, EDAC_CPU, "%s error information from CPU %d, MIDR=%#08x:\n",
+		       err_name[errdata->err], raw_smp_processor_id(), cpuid);
+
+	switch (partnum) {
+	case ARM_CPU_PART_CORTEX_A53:
+	case ARM_CPU_PART_KRYO2XX_SILVER:
+		kryo2xx_silver_parse_cpumerrsr(errdata);
+		kryo2xx_silver_parse_l2merrsr(errdata);
+	break;
+
+	case ARM_CPU_PART_CORTEX_A72:
+	case ARM_CPU_PART_CORTEX_A57:
+		ca57_parse_cpumerrsr(errdata);
+		ca57_parse_l2merrsr(errdata);
+	break;
+
+	case ARM_CPU_PART_KRYO2XX_GOLD:
+		kryo2xx_gold_parse_l2merrsr(errdata);
+	break;
+
+	default:
+		edac_printk(KERN_CRIT, EDAC_CPU, "Unknown CPU Part Number in MIDR: %#04x (%#08x)\n",
+						 partnum, cpuid);
+	}
+
+	/* Acknowledge internal error in L2ECTLR */
+	spin_lock_irqsave(&l2ectlr_lock, flags2);
+
+	l2ectlr = read_l2ectlr_el1;
+
+	if (l2ectlr & L2ECTLR_INT_ERR) {
+		l2ectlr &= ~L2ECTLR_INT_ERR;
+		write_l2ectlr_el1(l2ectlr);
+	}
+
+	spin_unlock_irqrestore(&l2ectlr_lock, flags2);
+	spin_unlock_irqrestore(&local_handler_lock, flags);
+}
+
+static irqreturn_t arm64_dbe_handler(int irq, void *drvdata)
+{
+	struct erp_local_data errdata;
+
+	errdata.drv = drvdata;
+	errdata.err = DBE;
+	edac_printk(KERN_CRIT, EDAC_CPU, "ARM64 CPU ERP: Double-bit error interrupt received!\n");
+
+	on_each_cpu(arm64_erp_local_handler, &errdata, 1);
+
+	return IRQ_HANDLED;
+}
+
+static void arm64_ext_local_handler(void *info)
+{
+	struct erp_drvdata *drv = info;
+	unsigned long flags, flags2;
+	u32 l2ectlr;
+
+	spin_lock_irqsave(&local_handler_lock, flags);
+
+	/* TODO: Shared locking for L2ECTLR access */
+	spin_lock_irqsave(&l2ectlr_lock, flags2);
+
+	l2ectlr = read_l2ectlr_el1;
+
+	if (l2ectlr & L2ECTLR_EXT_ERR) {
+		edac_printk(KERN_CRIT, EDAC_CPU,
+		    "L2 external error detected by CPU%d\n",
+		    smp_processor_id());
+
+		errors[L2_EXT_UE].func(drv->edev_ctl, smp_processor_id(),
+				       L2_CACHE, errors[L2_EXT_UE].msg);
+
+		l2ectlr &= ~L2ECTLR_EXT_ERR;
+		write_l2ectlr_el1(l2ectlr);
+	}
+
+	spin_unlock_irqrestore(&l2ectlr_lock, flags2);
+	spin_unlock_irqrestore(&local_handler_lock, flags);
+}
+
+static irqreturn_t arm64_ext_handler(int irq, void *drvdata)
+{
+	edac_printk(KERN_CRIT, EDAC_CPU, "External error interrupt received!\n");
+
+	on_each_cpu(arm64_ext_local_handler, drvdata, 1);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t arm64_cci_handler(int irq, void *drvdata)
+{
+	struct erp_drvdata *drv = drvdata;
+	u32 cci_err_reg;
+
+	edac_printk(KERN_CRIT, EDAC_CPU, "CCI error interrupt received!\n");
+
+	if (drv->cci_base) {
+		cci_err_reg = readl_relaxed(drv->cci_base +
+							CCI_IMPRECISEERROR_REG);
+
+		edac_printk(KERN_CRIT, EDAC_CPU, "CCI imprecise error register: %#08x.\n",
+						 cci_err_reg);
+
+		/* This register has write-clear semantics */
+		writel_relaxed(cci_err_reg, drv->cci_base +
+							CCI_IMPRECISEERROR_REG);
+
+		/* Ensure error bits cleared before exiting ISR */
+		mb();
+	} else {
+		edac_printk(KERN_CRIT, EDAC_CPU, "CCI registers not available.\n");
+	}
+
+	errors[CCI_UE].func(drv->edev_ctl, 0, CCI, errors[CCI_UE].msg);
+
+	return IRQ_HANDLED;
+}
+#ifndef CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY
+static void arm64_sbe_handler(struct perf_event *event,
+			      struct perf_sample_data *data,
+			      struct pt_regs *regs)
+{
+	struct erp_local_data errdata;
+	int cpu = raw_smp_processor_id();
+
+	errdata.drv = event->overflow_handler_context;
+	errdata.err = SBE;
+	edac_printk(KERN_CRIT, EDAC_CPU, "ARM64 CPU ERP: Single-bit error interrupt received on CPU %d!\n",
+					cpu);
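+	/* Perf delivers the overflow on the affected CPU, so parse locally. */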
+	arm64_erp_local_handler(&errdata);
+}
+#endif
+
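+/*
+ * Failures here are tolerated: the caller counts them and only gives
+ * up if no IRQ could be requested at all.
+ */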
+static int request_erp_irq(struct platform_device *pdev, const char *propname,
+			   const char *desc, irq_handler_t handler,
+			   void *ed)
+{
+	int rc;
+	struct resource *r;
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, propname);
+
+	if (!r) {
+		pr_err("ARM64 CPU ERP: Could not find <%s> IRQ property. Proceeding anyway.\n",
+			propname);
+		return -EINVAL;
+	}
+
+	rc = devm_request_threaded_irq(&pdev->dev, r->start, NULL,
+				       handler,
+				       IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+				       desc,
+				       ed);
+
+	if (rc) {
+		pr_err("ARM64 CPU ERP: Failed to request IRQ %d: %d (%s / %s). Proceeding anyway.\n",
+		       (int) r->start, rc, propname, desc);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void check_sbe_event(struct erp_drvdata *drv)
+{
+	unsigned int partnum = read_cpuid_part_number();
+	struct erp_local_data errdata;
+	unsigned long flags;
+
+	errdata.drv = drv;
+	errdata.err = SBE;
+
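+	/* Re-scan this CPU's syndrome registers, e.g. after a low-power exit. */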
+	spin_lock_irqsave(&local_handler_lock, flags);
+	switch (partnum) {
+	case ARM_CPU_PART_CORTEX_A53:
+	case ARM_CPU_PART_KRYO2XX_SILVER:
+		kryo2xx_silver_parse_cpumerrsr(&errdata);
+		kryo2xx_silver_parse_l2merrsr(&errdata);
+		break;
+
+	case ARM_CPU_PART_CORTEX_A72:
+	case ARM_CPU_PART_CORTEX_A57:
+		ca57_parse_cpumerrsr(&errdata);
+		ca57_parse_l2merrsr(&errdata);
+		break;
+
+	case ARM_CPU_PART_KRYO2XX_GOLD:
+		kryo2xx_gold_parse_l2merrsr(&errdata);
+		break;
+	}
+	spin_unlock_irqrestore(&local_handler_lock, flags);
+}
+
+#ifdef CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY
+static void create_sbe_counter(int cpu, void *info)
+{ }
+#else
+static void create_sbe_counter(int cpu, void *info)
+{
+	struct erp_drvdata *drv = info;
+	struct perf_event *event = drv->memerr_counters[cpu];
+	struct perf_event_attr attr = {
+		.pinned = 1,
+		.disabled = 0, /* start the counter as soon as it is created */
+		.sample_period = 1, /* overflow, and fire the handler, on every event */
+		.type = PERF_TYPE_RAW,
+		.config = MEM_ERROR_EVENT,
+		.size = sizeof(struct perf_event_attr),
+	};
+
+	if (event)
+		return;
+
+	/* Fails if cpu is not online */
+	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
+							arm64_sbe_handler,
+							drv);
+	if (IS_ERR(event)) {
+		pr_err("perf event creation failed on CPU %d, error %ld\n",
+							cpu, PTR_ERR(event));
+		return;
+	}
+	drv->memerr_counters[cpu] = event;
+}
+#endif
+
+static int arm64_pmu_cpu_pm_notify(struct notifier_block *self,
+					   unsigned long action, void *v)
+{
+	struct erp_drvdata *drv = container_of(self, struct erp_drvdata, nb_pm);
+
+	switch (action) {
+	case CPU_PM_EXIT:
+		check_sbe_event(drv);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+#ifndef CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY
+void arm64_check_cache_ecc(void *info)
+{
+	if (panic_handler_drvdata)
+		check_sbe_event(panic_handler_drvdata);
+}
+#else
+static inline void arm64_check_cache_ecc(void *info) {}
+#endif
+
+static int arm64_erp_panic_notify(struct notifier_block *this,
+			      unsigned long event, void *ptr)
+{
+	arm64_check_cache_ecc(NULL);
+
+	return NOTIFY_OK;
+}
+
+static void arm64_monitor_cache_errors(struct edac_device_ctl_info *edev)
+{
+	struct cpumask cluster_mask, old_mask;
+	int cpu;
+
+	cpumask_clear(&cluster_mask);
+	cpumask_clear(&old_mask);
+
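+	/* Issue one check per cluster; a cluster's cores share the L2 syndrome. */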
+	for_each_possible_cpu(cpu) {
+		cpumask_copy(&cluster_mask, topology_core_cpumask(cpu));
+		if (cpumask_equal(&cluster_mask, &old_mask))
+			continue;
+		cpumask_copy(&old_mask, &cluster_mask);
+		smp_call_function_any(&cluster_mask,
+				arm64_check_cache_ecc, NULL, 0);
+	}
+}
+
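+/* Hotplug callback: (re)create the SBE counter on the incoming CPU. */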
+static int edac_pmu_cpu_init(unsigned int cpu)
+{
+	create_sbe_counter(cpu, drv);
+	return 0;
+}
+
+static int arm64_cpu_erp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *r;
+	int cpu;
+	u32 poll_msec;
+
+	int rc, fail = 0;
+
+	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+
+	if (!drv)
+		return -ENOMEM;
+
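+	/* One EDAC instance per possible CPU, each with blocks L1, L2 and L3. */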
+	drv->edev_ctl = edac_device_alloc_ctl_info(0, "cpu",
+					num_possible_cpus(), "L", 3, 1, NULL, 0,
+					edac_device_alloc_index());
+
+	if (!drv->edev_ctl)
+		return -ENOMEM;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "poll-delay-ms",
+							&poll_msec);
+	if (!rc && !IS_ENABLED(CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY)) {
+		drv->edev_ctl->edac_check = arm64_monitor_cache_errors;
+		drv->edev_ctl->poll_msec = poll_msec;
+		drv->edev_ctl->defer_work = 1;
+	}
+	drv->edev_ctl->dev = dev;
+	drv->edev_ctl->mod_name = dev_name(dev);
+	drv->edev_ctl->dev_name = dev_name(dev);
+	drv->edev_ctl->ctl_name = "cache";
+	drv->edev_ctl->panic_on_ce = panic_on_ce;
+	drv->edev_ctl->panic_on_ue = ARM64_ERP_PANIC_ON_UE;
+
+	rc = edac_device_add_device(drv->edev_ctl);
+	if (rc)
+		goto out_mem;
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cci");
+	if (r)
+		drv->cci_base = devm_ioremap_resource(dev, r);
+
+	if (request_erp_irq(pdev, "pri-dbe-irq", "ARM64 primary DBE IRQ",
+			    arm64_dbe_handler, drv))
+		fail++;
+
+	if (request_erp_irq(pdev, "sec-dbe-irq", "ARM64 secondary DBE IRQ",
+			    arm64_dbe_handler, drv))
+		fail++;
+
+	if (request_erp_irq(pdev, "pri-ext-irq", "ARM64 primary ext IRQ",
+			    arm64_ext_handler, drv))
+		fail++;
+
+	if (request_erp_irq(pdev, "sec-ext-irq", "ARM64 secondary ext IRQ",
+			    arm64_ext_handler, drv))
+		fail++;
+
+	/*
+	 * We still try to register a handler for CCI errors even if we don't
+	 * have access to cci_base, but error reporting becomes best-effort in
+	 * that case.
+	 */
+	if (request_erp_irq(pdev, "cci-irq", "CCI error IRQ",
+			    arm64_cci_handler, drv))
+		fail++;
+
+	if (IS_ENABLED(CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY)) {
+		pr_err("ARM64 CPU ERP: SBE detection is disabled.\n");
+		goto out_irq;
+	}
+
+	drv->nb_pm.notifier_call = arm64_pmu_cpu_pm_notify;
+	cpu_pm_register_notifier(&(drv->nb_pm));
+	drv->nb_panic.notifier_call = arm64_erp_panic_notify;
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &drv->nb_panic);
+	cpuhp_setup_state_nocalls(CPUHP_AP_EDAC_PMU_STARTING,
+		"AP_EDAC_PMU_STARTING", edac_pmu_cpu_init,
+		NULL);
+
+	get_online_cpus();
+	for_each_online_cpu(cpu)
+		create_sbe_counter(cpu, drv);
+	put_online_cpus();
+
+out_irq:
+	if (fail == of_irq_count(dev->of_node)) {
+		pr_err("ARM64 CPU ERP: Could not request any IRQs. Giving up.\n");
+		rc = -ENODEV;
+		goto out_dev;
+	}
+
+	panic_handler_drvdata = drv;
+
+	return 0;
+
+out_dev:
+	edac_device_del_device(dev);
+out_mem:
+	edac_device_free_ctl_info(drv->edev_ctl);
+	return rc;
+}
+
+static const struct of_device_id arm64_cpu_erp_match_table[] = {
+	{ .compatible = "arm,arm64-cpu-erp" },
+	{ }
+};
+
+static struct platform_driver arm64_cpu_erp_driver = {
+	.probe = arm64_cpu_erp_probe,
+	.driver = {
+		.name = "arm64_cpu_cache_erp",
+		.of_match_table = of_match_ptr(arm64_cpu_erp_match_table),
+	},
+};
+
+static int __init arm64_cpu_erp_init(void)
+{
+	return platform_driver_register(&arm64_cpu_erp_driver);
+}
+device_initcall_sync(arm64_cpu_erp_init);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 7d3edd7..f59511b 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -1246,9 +1246,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
 	if (p > e->location)
 		*(p - 1) = '\0';
 
-	/* Report the error via the trace interface */
-	grain_bits = fls_long(e->grain) + 1;
+	/* Sanity-check driver-supplied grain value. */
+	if (WARN_ON_ONCE(!e->grain))
+		e->grain = 1;
 
+	grain_bits = fls_long(e->grain - 1);
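+	/* i.e. log2 of the grain, rounded up to the next power of two */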
+
+	/* Report the error via the trace interface */
 	if (IS_ENABLED(CONFIG_RAS))
 		trace_mc_event(type, e->msg, e->label, e->error_count,
 			       mci->mc_idx, e->top_layer, e->mid_layer,
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 473aeec..574bce6 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -532,7 +532,11 @@ void ghes_edac_unregister(struct ghes *ghes)
 	if (!ghes_pvt)
 		return;
 
+	if (atomic_dec_return(&ghes_init))
+		return;
+
 	mci = ghes_pvt->mci;
+	ghes_pvt = NULL;
 	edac_mc_del_mc(mci->pdev);
 	edac_mc_free(mci);
 }
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index 903a4f1..0153c73 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -268,11 +268,14 @@ static u64 get_sideband_reg_base_addr(void)
 	}
 }
 
+#define DNV_MCHBAR_SIZE  0x8000
+#define DNV_SB_PORT_SIZE 0x10000
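+/* MCHBAR decodes 32 KiB; each sideband port occupies its own 64 KiB window */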
 static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
 {
 	struct pci_dev *pdev;
 	char *base;
 	u64 addr;
+	unsigned long size;
 
 	if (op == 4) {
 		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
@@ -287,15 +290,17 @@ static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *na
 			addr = get_mem_ctrl_hub_base_addr();
 			if (!addr)
 				return -ENODEV;
+			size = DNV_MCHBAR_SIZE;
 		} else {
 			/* MMIO via sideband register base address */
 			addr = get_sideband_reg_base_addr();
 			if (!addr)
 				return -ENODEV;
 			addr += (port << 16);
+			size = DNV_SB_PORT_SIZE;
 		}
 
-		base = ioremap((resource_size_t)addr, 0x10000);
+		base = ioremap((resource_size_t)addr, size);
 		if (!base)
 			return -ENODEV;
 
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
index 86e47fe..2a0dcc9 100644
--- a/drivers/esoc/esoc-mdm-4x.c
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -1081,27 +1081,12 @@ static int sdx55m_setup_hw(struct mdm_ctrl *mdm,
 		dev_err(mdm->dev, "Failed to parse DT gpios\n");
 		goto err_destroy_wrkq;
 	}
-	if (!of_property_read_bool(node, "qcom,esoc-spmi-soft-reset")) {
-		ret = mdm_pon_dt_init(mdm);
-		if (ret) {
-			esoc_mdm_log("Failed to parse PON DT gpios\n");
-			dev_err(mdm->dev, "Failed to parse PON DT gpio\n");
-			goto err_destroy_wrkq;
-		}
-
-		ret = mdm_pon_setup(mdm);
-		if (ret) {
-			esoc_mdm_log("Failed to setup PON\n");
-			dev_err(mdm->dev, "Failed to setup PON\n");
-			goto err_destroy_wrkq;
-		}
-	}
 
 	ret = mdm_pinctrl_init(mdm);
 	if (ret) {
 		esoc_mdm_log("Failed to init pinctrl\n");
 		dev_err(mdm->dev, "Failed to init pinctrl\n");
-		goto err_release_ipc;
+		goto err_destroy_wrkq;
 	}
 
 	ret = mdm_configure_ipc(mdm, pdev);
diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c
index 1dfff3a..7d72ab1 100644
--- a/drivers/esoc/esoc-mdm-pon.c
+++ b/drivers/esoc/esoc-mdm-pon.c
@@ -74,50 +74,16 @@ static int sdx50m_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
 /* This function can be called from atomic context. */
 static int sdx55m_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
 {
-	struct device_node *node = mdm->dev->of_node;
 	int rc;
-	int soft_reset_direction_assert = 0,
-	    soft_reset_direction_de_assert = 1;
 
-	if (of_property_read_bool(node, "qcom,esoc-spmi-soft-reset")) {
-		esoc_mdm_log("Doing a Warm reset using SPMI\n");
-		rc = qpnp_pon_modem_pwr_off(PON_POWER_OFF_WARM_RESET);
-		if (rc) {
-			dev_err(mdm->dev, "SPMI warm reset failed\n");
-			esoc_mdm_log("SPMI warm reset failed\n");
-			return rc;
-		}
-		esoc_mdm_log("Warm reset done using SPMI\n");
-		return 0;
+	esoc_mdm_log("Doing a Warm reset using SPMI\n");
+	rc = qpnp_pon_modem_pwr_off(PON_POWER_OFF_WARM_RESET);
+	if (rc) {
+		dev_err(mdm->dev, "SPMI warm reset failed\n");
+		esoc_mdm_log("SPMI warm reset failed\n");
+		return rc;
 	}
-
-	if (mdm->soft_reset_inverted) {
-		soft_reset_direction_assert = 1;
-		soft_reset_direction_de_assert = 0;
-	}
-
-	esoc_mdm_log("RESET GPIO value (before doing a reset): %d\n",
-			gpio_get_value(MDM_GPIO(mdm, AP2MDM_SOFT_RESET)));
-	esoc_mdm_log("Setting AP2MDM_SOFT_RESET = %d\n",
-				soft_reset_direction_assert);
-	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
-			soft_reset_direction_assert);
-	/*
-	 * Allow PS hold assert to be detected
-	 */
-	if (!atomic)
-		usleep_range(80000, 180000);
-	else
-		/*
-		 * The flow falls through this path as a part of the
-		 * panic handler, which has to executed atomically.
-		 */
-		mdelay(100);
-
-	esoc_mdm_log("Setting AP2MDM_SOFT_RESET = %d\n",
-				soft_reset_direction_de_assert);
-	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
-			soft_reset_direction_de_assert);
+	esoc_mdm_log("Warm reset done using SPMI\n");
 	return 0;
 }
 
@@ -325,6 +291,4 @@ struct mdm_pon_ops sdx55m_pon_ops = {
 	.pon = mdm4x_do_first_power_on,
 	.soft_reset = sdx55m_toggle_soft_reset,
 	.poff_force = sdx55m_power_down,
-	.dt_init = mdm4x_pon_dt_init,
-	.setup = mdm4x_pon_setup,
 };
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index de15bf5..da9c986 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -43,6 +43,15 @@
 	  Say Y here to enable GPIO based extcon support. Note that GPIO
 	  extcon supports single state per extcon instance.
 
+config EXTCON_STORAGE_CD_GPIO
+	tristate "Storage card detect GPIO extcon support"
+	depends on GPIOLIB || COMPILE_TEST
+	help
+	  Say Y here to enable removable storage card detect GPIO based
+	  extcon support. It helps when different kinds of storage cards
+	  share one detect GPIO. Note that storage card detect GPIO extcon
+	  supports single state per extcon instance.
+
 config EXTCON_INTEL_INT3496
 	tristate "Intel INT3496 ACPI device extcon driver"
 	depends on GPIOLIB && ACPI && (X86 || COMPILE_TEST)
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 0888fde..3ecee74 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -9,6 +9,7 @@
 obj-$(CONFIG_EXTCON_ARIZONA)	+= extcon-arizona.o
 obj-$(CONFIG_EXTCON_AXP288)	+= extcon-axp288.o
 obj-$(CONFIG_EXTCON_GPIO)	+= extcon-gpio.o
+obj-$(CONFIG_EXTCON_STORAGE_CD_GPIO)	+= extcon-storage-cd-gpio.o
 obj-$(CONFIG_EXTCON_INTEL_INT3496) += extcon-intel-int3496.o
 obj-$(CONFIG_EXTCON_INTEL_CHT_WC) += extcon-intel-cht-wc.o
 obj-$(CONFIG_EXTCON_MAX14577)	+= extcon-max14577.o
diff --git a/drivers/extcon/extcon-storage-cd-gpio.c b/drivers/extcon/extcon-storage-cd-gpio.c
new file mode 100644
index 0000000..189956d
--- /dev/null
+++ b/drivers/extcon/extcon-storage-cd-gpio.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/extcon-provider.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of_gpio.h>
+
+struct cd_gpio_extcon_data {
+	struct extcon_dev *edev;
+	int irq;
+	struct gpio_desc *gpiod;
+	unsigned int extcon_id;
+	unsigned long irq_flags;
+	struct pinctrl *pctrl;
+	struct pinctrl_state *pins_default;
+	unsigned int *supported_cable;
+};
+
+static irqreturn_t cd_gpio_threaded_irq_handler(int irq, void *dev_id)
+{
+	int state;
+	struct cd_gpio_extcon_data *data = dev_id;
+
+	state = gpiod_get_value_cansleep(data->gpiod);
+	extcon_set_state_sync(data->edev, data->extcon_id, state);
+
+	return IRQ_HANDLED;
+}
+
+static int extcon_parse_pinctrl_data(struct device *dev,
+				     struct cd_gpio_extcon_data *data)
+{
+	struct pinctrl *pctrl;
+	int ret = 0;
+
+	/* Try to obtain pinctrl handle */
+	pctrl = devm_pinctrl_get(dev);
+	if (IS_ERR(pctrl)) {
+		ret = PTR_ERR(pctrl);
+		goto out;
+	}
+	data->pctrl = pctrl;
+
+	/* Look up the "default" state and keep it for later use */
+	data->pins_default = pinctrl_lookup_state(data->pctrl, "default");
+	if (IS_ERR(data->pins_default)) {
+		ret = PTR_ERR(data->pins_default);
+		dev_err(dev, "Can't get default pinctrl state, ret %d\n", ret);
+	}
+out:
+	return ret;
+}
+
+static int extcon_populate_data(struct device *dev,
+				struct cd_gpio_extcon_data *data)
+{
+	struct device_node *np = dev->of_node;
+	u32 val;
+	int ret = 0;
+
+	ret = of_property_read_u32(np, "extcon-id", &data->extcon_id);
+	if (ret) {
+		dev_err(dev, "failed to read extcon-id property, %d\n", ret);
+		goto out;
+	}
+
+	ret = of_property_read_u32(np, "irq-flags", &val);
+	if (ret) {
+		dev_err(dev, "failed to read irq-flags property, %d\n", ret);
+		goto out;
+	}
+	data->irq_flags = val;
+
+	ret = extcon_parse_pinctrl_data(dev, data);
+	if (ret)
+		dev_err(dev, "failed to parse pinctrl data\n");
+
+out:
+	return ret;
+}
+
+static int cd_gpio_extcon_probe(struct platform_device *pdev)
+{
+	struct cd_gpio_extcon_data *data;
+	struct device *dev = &pdev->dev;
+	int state, ret;
+
+	data = devm_kzalloc(dev, sizeof(struct cd_gpio_extcon_data),
+			    GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
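+	/* irq_flags is zero for the fresh allocation, so the DT parse always runs */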
+	if (!data->irq_flags) {
+		/* try populating cd gpio extcon data from device tree */
+		ret = extcon_populate_data(dev, data);
+		if (ret)
+			return ret;
+	}
+	if (!data->irq_flags || data->extcon_id >= EXTCON_NUM)
+		return -EINVAL;
+
+	ret = pinctrl_select_state(data->pctrl, data->pins_default);
+	if (ret < 0)
+		dev_err(dev, "pinctrl state select failed, ret %d\n", ret);
+
+	data->gpiod = devm_gpiod_get(dev, "extcon", GPIOD_IN);
+	if (IS_ERR(data->gpiod))
+		return PTR_ERR(data->gpiod);
+
+	data->irq = gpiod_to_irq(data->gpiod);
+	if (data->irq <= 0)
+		return data->irq ? data->irq : -EINVAL;
+
+	data->supported_cable = devm_kzalloc(dev,
+					     sizeof(*data->supported_cable) * 2,
+					     GFP_KERNEL);
+	if (!data->supported_cable)
+		return -ENOMEM;
+
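+	/* The cable array handed to extcon must be terminated with EXTCON_NONE */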
+	data->supported_cable[0] = data->extcon_id;
+	data->supported_cable[1] = EXTCON_NONE;
+	/* Allocate memory for the extcon device and register it */
+	data->edev = devm_extcon_dev_allocate(dev, data->supported_cable);
+	if (IS_ERR(data->edev)) {
+		dev_err(dev, "failed to allocate extcon device\n");
+		return PTR_ERR(data->edev);
+	}
+
+	ret = devm_extcon_dev_register(dev, data->edev);
+	if (ret < 0)
+		return ret;
+
+	ret = devm_request_threaded_irq(dev, data->irq, NULL,
+				  cd_gpio_threaded_irq_handler,
+				  data->irq_flags | IRQF_ONESHOT,
+				  pdev->name, data);
+	if (ret < 0)
+		return ret;
+
+	ret = enable_irq_wake(data->irq);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, data);
+
+	/* Update initial state */
+	state = gpiod_get_value_cansleep(data->gpiod);
+	extcon_set_state(data->edev, data->extcon_id, state);
+
+	return 0;
+}
+
+static int cd_gpio_extcon_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cd_gpio_extcon_resume(struct device *dev)
+{
+	struct cd_gpio_extcon_data *data;
+	int state, ret = 0;
+
+	data = dev_get_drvdata(dev);
+	state = gpiod_get_value_cansleep(data->gpiod);
+	ret = extcon_set_state_sync(data->edev, data->extcon_id, state);
+	if (ret)
+		dev_err(dev, "%s: Failed to set extcon gpio state\n",
+				__func__);
+
+	return ret;
+}
+
+static const struct dev_pm_ops cd_gpio_extcon_pm_ops = {
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, cd_gpio_extcon_resume)
+};
+
+#define EXTCON_GPIO_PMOPS (&cd_gpio_extcon_pm_ops)
+
+#else
+#define EXTCON_GPIO_PMOPS NULL
+#endif
+
+static const struct of_device_id extcon_cd_gpio_of_match[] = {
+	{ .compatible = "extcon-storage-cd-gpio" },
+	{},
+};
+
+static struct platform_driver cd_gpio_extcon_driver = {
+	.probe		= cd_gpio_extcon_probe,
+	.remove		= cd_gpio_extcon_remove,
+	.driver		= {
+		.name	= "extcon-storage-cd-gpio",
+		.pm	= EXTCON_GPIO_PMOPS,
+		.of_match_table = of_match_ptr(extcon_cd_gpio_of_match),
+	},
+};
+
+module_platform_driver(cd_gpio_extcon_driver);
+
+MODULE_DESCRIPTION("Storage card detect GPIO based extcon driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 8f952f2..09119e3 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -271,6 +271,14 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
 	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
 	struct scmi_shared_mem __iomem *mem = cinfo->payload;
 
+	/*
+	 * Ideally the channel should be free by now, unless the OS timed
+	 * out the last request while the platform was still processing
+	 * it. Wait until the platform releases the shared memory;
+	 * otherwise we may end up overwriting its response with a new
+	 * message payload, or vice versa.
+	 */
+	spin_until_cond(ioread32(&mem->channel_status) &
+			SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
 	/* Mark channel busy + clear error */
 	iowrite32(0x0, &mem->channel_status);
 	iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 6090d25..4045098 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -402,6 +402,21 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
 		printk(
 	"%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
 	pfx, pcie->bridge.secondary_status, pcie->bridge.control);
+
+	/* Fatal errors call __ghes_panic() before AER handler prints this */
+	if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) &&
+	    (gdata->error_severity & CPER_SEV_FATAL)) {
+		struct aer_capability_regs *aer;
+
+		aer = (struct aer_capability_regs *)pcie->aer_info;
+		printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n",
+		       pfx, aer->uncor_status, aer->uncor_mask);
+		printk("%saer_uncor_severity: 0x%08x\n",
+		       pfx, aer->uncor_severity);
+		printk("%sTLP Header: %08x %08x %08x %08x\n", pfx,
+		       aer->header_log.dw0, aer->header_log.dw1,
+		       aer->header_log.dw2, aer->header_log.dw3);
+	}
 }
 
 static void cper_print_tstamp(const char *pfx,
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 2a29dd9..d54fca9 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -281,6 +281,9 @@ static __init int efivar_ssdt_load(void)
 	void *data;
 	int ret;
 
+	if (!efivar_ssdt[0])
+		return 0;
+
 	ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
 
 	list_for_each_entry_safe(entry, aux, &entries, list) {
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 1aa67bb..ebd3ffc 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -100,8 +100,8 @@ static int vpd_section_check_key_name(const u8 *key, s32 key_len)
 	return VPD_OK;
 }
 
-static int vpd_section_attrib_add(const u8 *key, s32 key_len,
-				  const u8 *value, s32 value_len,
+static int vpd_section_attrib_add(const u8 *key, u32 key_len,
+				  const u8 *value, u32 value_len,
 				  void *arg)
 {
 	int ret;
diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c
index 943acaa..6c7ab2b 100644
--- a/drivers/firmware/google/vpd_decode.c
+++ b/drivers/firmware/google/vpd_decode.c
@@ -19,8 +19,8 @@
 
 #include "vpd_decode.h"
 
-static int vpd_decode_len(const s32 max_len, const u8 *in,
-			  s32 *length, s32 *decoded_len)
+static int vpd_decode_len(const u32 max_len, const u8 *in,
+			  u32 *length, u32 *decoded_len)
 {
 	u8 more;
 	int i = 0;
@@ -40,18 +40,39 @@ static int vpd_decode_len(const s32 max_len, const u8 *in,
 	} while (more);
 
 	*decoded_len = i;
-
 	return VPD_OK;
 }
 
-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
+static int vpd_decode_entry(const u32 max_len, const u8 *input_buf,
+			    u32 *_consumed, const u8 **entry, u32 *entry_len)
+{
+	u32 decoded_len;
+	u32 consumed = *_consumed;
+
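+	/* Decode the variable-length size header, bounded by the remaining buffer */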
+	if (vpd_decode_len(max_len - consumed, &input_buf[consumed],
+			   entry_len, &decoded_len) != VPD_OK)
+		return VPD_FAIL;
+	if (max_len - consumed < decoded_len)
+		return VPD_FAIL;
+
+	consumed += decoded_len;
+	*entry = input_buf + consumed;
+
+	/* entry_len is untrusted data and must be checked again. */
+	if (max_len - consumed < *entry_len)
+		return VPD_FAIL;
+
+	consumed += *entry_len;
+	*_consumed = consumed;
+	return VPD_OK;
+}
+
+int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
 		      vpd_decode_callback callback, void *callback_arg)
 {
 	int type;
-	int res;
-	s32 key_len;
-	s32 value_len;
-	s32 decoded_len;
+	u32 key_len;
+	u32 value_len;
 	const u8 *key;
 	const u8 *value;
 
@@ -66,26 +87,14 @@ int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
 	case VPD_TYPE_STRING:
 		(*consumed)++;
 
-		/* key */
-		res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
-				     &key_len, &decoded_len);
-		if (res != VPD_OK || *consumed + decoded_len >= max_len)
+		if (vpd_decode_entry(max_len, input_buf, consumed, &key,
+				     &key_len) != VPD_OK)
 			return VPD_FAIL;
 
-		*consumed += decoded_len;
-		key = &input_buf[*consumed];
-		*consumed += key_len;
-
-		/* value */
-		res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
-				     &value_len, &decoded_len);
-		if (res != VPD_OK || *consumed + decoded_len > max_len)
+		if (vpd_decode_entry(max_len, input_buf, consumed, &value,
+				     &value_len) != VPD_OK)
 			return VPD_FAIL;
 
-		*consumed += decoded_len;
-		value = &input_buf[*consumed];
-		*consumed += value_len;
-
 		if (type == VPD_TYPE_STRING)
 			return callback(key, key_len, value, value_len,
 					callback_arg);
diff --git a/drivers/firmware/google/vpd_decode.h b/drivers/firmware/google/vpd_decode.h
index be3d62c..e921456 100644
--- a/drivers/firmware/google/vpd_decode.h
+++ b/drivers/firmware/google/vpd_decode.h
@@ -33,8 +33,8 @@ enum {
 };
 
 /* Callback for vpd_decode_string to invoke. */
-typedef int vpd_decode_callback(const u8 *key, s32 key_len,
-				const u8 *value, s32 value_len,
+typedef int vpd_decode_callback(const u8 *key, u32 key_len,
+				const u8 *value, u32 value_len,
 				void *arg);
 
 /*
@@ -52,7 +52,7 @@ typedef int vpd_decode_callback(const u8 *key, s32 key_len,
  * If one entry is successfully decoded, sends it to callback and returns the
  * result.
  */
-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
+int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
 		      vpd_decode_callback callback, void *callback_arg);
 
 #endif  /* __VPD_DECODE_H */
diff --git a/drivers/firmware/qcom/tz_log.c b/drivers/firmware/qcom/tz_log.c
index 855b1ca..9dcd24b 100644
--- a/drivers/firmware/qcom/tz_log.c
+++ b/drivers/firmware/qcom/tz_log.c
@@ -320,7 +320,7 @@ static struct tzdbg tzdbg = {
 static struct tzdbg_log_t *g_qsee_log;
 static dma_addr_t coh_pmem;
 static uint32_t debug_rw_buf_size;
-static struct qtee_shm shm;
+static uint64_t qseelog_shmbridge_handle;
 
 /*
  * Debugfs data structure and functions
@@ -852,34 +852,38 @@ static const struct file_operations tzdbg_fops = {
  */
 static void tzdbg_register_qsee_log_buf(struct platform_device *pdev)
 {
-	size_t len;
+	size_t len = QSEE_LOG_BUF_SIZE;
 	int ret = 0;
 	struct scm_desc desc = {0};
 	void *buf = NULL;
+	uint32_t ns_vmids[] = {VMID_HLOS};
+	uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
+	uint32_t ns_vm_nums = 1;
 
-	len = QSEE_LOG_BUF_SIZE;
-	ret = qtee_shmbridge_allocate_shm(len, &shm);
-	if (ret)
+	buf = dma_alloc_coherent(&pdev->dev, len, &coh_pmem, GFP_KERNEL);
+	if (buf == NULL) {
+		pr_err("Failed to alloc memory for size %zu\n", len);
 		return;
-	buf = shm.vaddr;
-	coh_pmem = shm.paddr;
+	}
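+	/* Register the buffer with the TZ shmbridge so HLOS keeps RW access */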
+	ret = qtee_shmbridge_register(coh_pmem,
+			len, ns_vmids, ns_vm_perms, ns_vm_nums,
+			PERM_READ | PERM_WRITE, &qseelog_shmbridge_handle);
+	if (ret) {
+		pr_err("failed to create bridge for qsee_log buffer\n");
+		dma_free_coherent(&pdev->dev, len, buf, coh_pmem);
+		return;
+	}
 
 	g_qsee_log = (struct tzdbg_log_t *)buf;
 	desc.args[0] = coh_pmem;
 	desc.args[1] = len;
 	desc.arginfo = 0x22;
 	ret = scm_call2(SCM_QSEEOS_FNID(1, 6), &desc);
-
-	if (ret) {
-		pr_err("%s: scm_call to register log buffer failed\n",
-			__func__);
-		goto err;
-	}
-
-	if (desc.ret[0] != QSEOS_RESULT_SUCCESS) {
+	if (ret || desc.ret[0] != QSEOS_RESULT_SUCCESS) {
 		pr_err(
-		"%s: scm_call to register log buf failed, resp result =%lld\n",
-		__func__, desc.ret[0]);
+		"%s: scm_call to register log buf failed, ret = %d, resp result =%lld\n",
+		__func__, ret, desc.ret[0]);
 		goto err;
 	}
 
@@ -887,7 +891,8 @@ static void tzdbg_register_qsee_log_buf(struct platform_device *pdev)
 	return;
 
 err:
-	qtee_shmbridge_free_shm(&shm);
+	qtee_shmbridge_deregister(qseelog_shmbridge_handle);
+	dma_free_coherent(&pdev->dev, len, (void *)g_qsee_log, coh_pmem);
 }
 
 static int  tzdbgfs_init(struct platform_device *pdev)
@@ -930,11 +935,14 @@ static void tzdbgfs_exit(struct platform_device *pdev)
 {
 	struct dentry           *dent_dir;
 
+	if (g_qsee_log) {
+		qtee_shmbridge_deregister(qseelog_shmbridge_handle);
+		dma_free_coherent(&pdev->dev, QSEE_LOG_BUF_SIZE,
+					 (void *)g_qsee_log, coh_pmem);
+	}
 	kzfree(tzdbg.disp_buf);
 	dent_dir = platform_get_drvdata(pdev);
 	debugfs_remove_recursive(dent_dir);
-	if (g_qsee_log)
-		qtee_shmbridge_free_shm(&shm);
 }
 
 static int __update_hypdbg_base(struct platform_device *pdev,
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index e778af7..98c9871 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/cpumask.h>
 #include <linux/export.h>
+#include <linux/dma-direct.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -449,6 +450,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
 	phys_addr_t mem_to_map_phys;
 	phys_addr_t dest_phys;
 	phys_addr_t ptr_phys;
+	dma_addr_t ptr_dma;
 	size_t mem_to_map_sz;
 	size_t dest_sz;
 	size_t src_sz;
@@ -466,9 +468,10 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
 	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
 			ALIGN(dest_sz, SZ_64);
 
-	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
+	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL);
 	if (!ptr)
 		return -ENOMEM;
+	ptr_phys = dma_to_phys(__scm->dev, ptr_dma);
 
 	/* Fill source vmid detail */
 	src = ptr;
@@ -498,7 +501,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
 
 	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
 				    ptr_phys, src_sz, dest_phys, dest_sz);
-	dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys);
+	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma);
 	if (ret) {
 		dev_err(__scm->dev,
 			"Assign memory protection call failed %d.\n", ret);
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 7fa7447..5e35a66 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -463,9 +463,9 @@ static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
 	struct ti_sci_xfer *xfer;
 	int ret;
 
-	/* No need to setup flags since it is expected to respond */
 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
-				   0x0, sizeof(struct ti_sci_msg_hdr),
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(struct ti_sci_msg_hdr),
 				   sizeof(*rev_info));
 	if (IS_ERR(xfer)) {
 		ret = PTR_ERR(xfer);
@@ -593,9 +593,9 @@ static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
 	info = handle_to_ti_sci_info(handle);
 	dev = info->dev;
 
-	/* Response is expected, so need of any flags */
 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
-				   0, sizeof(*req), sizeof(*resp));
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
 	if (IS_ERR(xfer)) {
 		ret = PTR_ERR(xfer);
 		dev_err(dev, "Message alloc failed(%d)\n", ret);
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index 24b25c62..4925cae 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -207,7 +207,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr,
 		return -EIO;
 	}
 
-	if (!IS_ERR(conf->confd)) {
+	if (conf->confd) {
 		if (!gpiod_get_raw_value_cansleep(conf->confd)) {
 			dev_err(&mgr->dev, "CONF_DONE is inactive!\n");
 			return -EIO;
@@ -265,10 +265,13 @@ static int altera_ps_probe(struct spi_device *spi)
 		return PTR_ERR(conf->status);
 	}
 
-	conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN);
+	conf->confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN);
 	if (IS_ERR(conf->confd)) {
-		dev_warn(&spi->dev, "Not using confd gpio: %ld\n",
-			 PTR_ERR(conf->confd));
+		dev_err(&spi->dev, "Failed to get confd gpio: %ld\n",
+			PTR_ERR(conf->confd));
+		return PTR_ERR(conf->confd);
+	} else if (!conf->confd) {
+		dev_warn(&spi->dev, "Not using confd gpio");
 	}
 
 	/* Register manager with unique name */
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
index df94021..fdc0e45 100644
--- a/drivers/fsi/fsi-scom.c
+++ b/drivers/fsi/fsi-scom.c
@@ -47,8 +47,7 @@
 #define SCOM_STATUS_PIB_RESP_MASK	0x00007000
 #define SCOM_STATUS_PIB_RESP_SHIFT	12
 
-#define SCOM_STATUS_ANY_ERR		(SCOM_STATUS_ERR_SUMMARY | \
-					 SCOM_STATUS_PROTECTION | \
+#define SCOM_STATUS_ANY_ERR		(SCOM_STATUS_PROTECTION | \
 					 SCOM_STATUS_PARITY |	  \
 					 SCOM_STATUS_PIB_ABORT | \
 					 SCOM_STATUS_PIB_RESP_MASK)
@@ -260,11 +259,6 @@ static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status)
 	/* Return -EBUSY on PIB abort to force a retry */
 	if (status & SCOM_STATUS_PIB_ABORT)
 		return -EBUSY;
-	if (status & SCOM_STATUS_ERR_SUMMARY) {
-		fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
-				 sizeof(uint32_t));
-		return -EIO;
-	}
 	return 0;
 }
 
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index 6cf2e2c..4935cda 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -529,11 +529,12 @@ static void sprd_eic_handle_one_type(struct gpio_chip *chip)
 		}
 
 		for_each_set_bit(n, &reg, SPRD_EIC_PER_BANK_NR) {
-			girq = irq_find_mapping(chip->irq.domain,
-					bank * SPRD_EIC_PER_BANK_NR + n);
+			u32 offset = bank * SPRD_EIC_PER_BANK_NR + n;
+
+			girq = irq_find_mapping(chip->irq.domain, offset);
 
 			generic_handle_irq(girq);
-			sprd_eic_toggle_trigger(chip, girq, n);
+			sprd_eic_toggle_trigger(chip, girq, offset);
 		}
 	}
 }
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index c5e009f..cf2604e 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -10,6 +10,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/dmi.h>
 #include <linux/errno.h>
 #include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
@@ -23,6 +24,11 @@
 
 #include "gpiolib.h"
 
+static int run_edge_events_on_boot = -1;
+module_param(run_edge_events_on_boot, int, 0444);
+MODULE_PARM_DESC(run_edge_events_on_boot,
+		 "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
+
 /**
  * struct acpi_gpio_event - ACPI GPIO event handler data
  *
@@ -174,10 +180,13 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
 	event->irq_requested = true;
 
 	/* Make sure we trigger the initial state of edge-triggered IRQs */
-	value = gpiod_get_raw_value_cansleep(event->desc);
-	if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
-	    ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
-		event->handler(event->irq, event);
+	if (run_edge_events_on_boot &&
+	    (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) {
+		value = gpiod_get_raw_value_cansleep(event->desc);
+		if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+		    ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+			event->handler(event->irq, event);
+	}
 }
 
 static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
@@ -1253,3 +1262,28 @@ static int acpi_gpio_handle_deferred_request_irqs(void)
 }
 /* We must use _sync so that this runs after the first deferred_probe run */
 late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
+
+static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
+		}
+	},
+	{} /* Terminating entry */
+};
+
+static int acpi_gpio_setup_params(void)
+{
+	if (run_edge_events_on_boot < 0) {
+		if (dmi_check_system(run_edge_events_on_boot_blacklist))
+			run_edge_events_on_boot = 0;
+		else
+			run_edge_events_on_boot = 1;
+	}
+
+	return 0;
+}
+
+/* Directly after dmi_setup() which runs as core_initcall() */
+postcore_initcall(acpi_gpio_setup_params);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 3afb621..bbc58021 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -525,6 +525,14 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
 		return -EINVAL;
 
 	/*
+	 * Do not allow both INPUT & OUTPUT flags to be set as they are
+	 * contradictory.
+	 */
+	if ((lflags & GPIOHANDLE_REQUEST_INPUT) &&
+	    (lflags & GPIOHANDLE_REQUEST_OUTPUT))
+		return -EINVAL;
+
+	/*
 	 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
 	 * the hardware actually supports enabling both at the same time the
 	 * electrical result would be disastrous.
@@ -916,7 +924,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
 	}
 
 	/* This is just wrong: we don't look for events on output lines */
-	if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
+	if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
+	    (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
+	    (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) {
 		ret = -EINVAL;
 		goto out_free_label;
 	}
@@ -930,10 +940,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
 
 	if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
 		set_bit(FLAG_ACTIVE_LOW, &desc->flags);
-	if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN)
-		set_bit(FLAG_OPEN_DRAIN, &desc->flags);
-	if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
-		set_bit(FLAG_OPEN_SOURCE, &desc->flags);
 
 	ret = gpiod_direction_input(desc);
 	if (ret)
@@ -1082,9 +1088,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
 			lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
 		if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
-			lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN;
+			lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
+					   GPIOLINE_FLAG_IS_OUT);
 		if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
-			lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE;
+			lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
+					   GPIOLINE_FLAG_IS_OUT);
 
 		if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
 			return -EFAULT;
@@ -2648,8 +2656,10 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
 		if (!ret)
 			goto set_output_value;
 		/* Emulate open drain by not actively driving the line high */
-		if (value)
-			return gpiod_direction_input(desc);
+		if (value) {
+			ret = gpiod_direction_input(desc);
+			goto set_output_flag;
+		}
 	}
 	else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
 		ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
@@ -2657,8 +2667,10 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
 		if (!ret)
 			goto set_output_value;
 		/* Emulate open source by not actively driving the line low */
-		if (!value)
-			return gpiod_direction_input(desc);
+		if (!value) {
+			ret = gpiod_direction_input(desc);
+			goto set_output_flag;
+		}
 	} else {
 		gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
 					    PIN_CONFIG_DRIVE_PUSH_PULL);
@@ -2666,6 +2678,17 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
 
 set_output_value:
 	return gpiod_direction_output_raw_commit(desc, value);
+
+set_output_flag:
+	/*
+	 * When emulating open-source or open-drain functionalities by not
+	 * actively driving the line (setting mode to input) we still need to
+	 * set the IS_OUT flag or otherwise we won't be able to set the line
+	 * value anymore.
+	 */
+	if (ret == 0)
+		set_bit(FLAG_IS_OUT, &desc->flags);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(gpiod_direction_output);
 
@@ -2979,8 +3002,6 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
 
 	if (value) {
 		err = chip->direction_input(chip, offset);
-		if (!err)
-			clear_bit(FLAG_IS_OUT, &desc->flags);
 	} else {
 		err = chip->direction_output(chip, offset, 0);
 		if (!err)
@@ -3010,8 +3031,6 @@ static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value
 			set_bit(FLAG_IS_OUT, &desc->flags);
 	} else {
 		err = chip->direction_input(chip, offset);
-		if (!err)
-			clear_bit(FLAG_IS_OUT, &desc->flags);
 	}
 	trace_gpio_direction(desc_to_gpio(desc), !value, err);
 	if (err < 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 92b11de..354c8b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -575,6 +575,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
 	{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+	{ 0x1002, 0x699f, 0x1028, 0x0814, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0, 0, 0, 0, 0 },
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index f5fb937..65cecfd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
 	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
 
-	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+	data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b40e9c7..5e29f14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -841,6 +841,41 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 	if (ret == -EPROBE_DEFER)
 		return ret;
 
+#ifdef CONFIG_DRM_AMDGPU_SI
+	if (!amdgpu_si_support) {
+		switch (flags & AMD_ASIC_MASK) {
+		case CHIP_TAHITI:
+		case CHIP_PITCAIRN:
+		case CHIP_VERDE:
+		case CHIP_OLAND:
+		case CHIP_HAINAN:
+			dev_info(&pdev->dev,
+				 "SI support provided by radeon.\n");
+			dev_info(&pdev->dev,
+				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
+				);
+			return -ENODEV;
+		}
+	}
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+	if (!amdgpu_cik_support) {
+		switch (flags & AMD_ASIC_MASK) {
+		case CHIP_KAVERI:
+		case CHIP_BONAIRE:
+		case CHIP_HAWAII:
+		case CHIP_KABINI:
+		case CHIP_MULLINS:
+			dev_info(&pdev->dev,
+				 "CIK support provided by radeon.\n");
+			dev_info(&pdev->dev,
+				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
+				);
+			return -ENODEV;
+		}
+	}
+#endif
+
 	/* Get rid of things like offb */
 	ret = amdgpu_kick_out_firmware_fb(pdev);
 	if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 51b5e977..f4e9d1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -139,7 +139,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	/* ring tests don't use a job */
 	if (job) {
 		vm = job->vm;
-		fence_ctx = job->base.s_fence->scheduled.context;
+		fence_ctx = job->base.s_fence ?
+			job->base.s_fence->scheduled.context : 0;
 	} else {
 		vm = NULL;
 		fence_ctx = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index c0396e8..ba10577 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -87,41 +87,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	struct amdgpu_device *adev;
 	int r, acpi_status;
 
-#ifdef CONFIG_DRM_AMDGPU_SI
-	if (!amdgpu_si_support) {
-		switch (flags & AMD_ASIC_MASK) {
-		case CHIP_TAHITI:
-		case CHIP_PITCAIRN:
-		case CHIP_VERDE:
-		case CHIP_OLAND:
-		case CHIP_HAINAN:
-			dev_info(dev->dev,
-				 "SI support provided by radeon.\n");
-			dev_info(dev->dev,
-				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
-				);
-			return -ENODEV;
-		}
-	}
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
-	if (!amdgpu_cik_support) {
-		switch (flags & AMD_ASIC_MASK) {
-		case CHIP_KAVERI:
-		case CHIP_BONAIRE:
-		case CHIP_HAWAII:
-		case CHIP_KABINI:
-		case CHIP_MULLINS:
-			dev_info(dev->dev,
-				 "CIK support provided by radeon.\n");
-			dev_info(dev->dev,
-				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
-				);
-			return -ENODEV;
-		}
-	}
-#endif
-
 	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
 	if (adev == NULL) {
 		return -ENOMEM;
@@ -562,6 +527,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
 			sh_num = 0xffffffff;
 
+		if (info->read_mmr_reg.count > 128)
+			return -EINVAL;
+
 		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
 		if (!regs)
 			return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 5f3f540..17862b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -1070,7 +1070,7 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	uint32_t rptr = amdgpu_ring_get_rptr(ring);
+	uint32_t rptr;
 	unsigned i;
 	int r, timeout = adev->usec_timeout;
 
@@ -1084,6 +1084,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 			  ring->idx, r);
 		return r;
 	}
+
+	rptr = amdgpu_ring_get_rptr(ring);
+
 	amdgpu_ring_write(ring, VCE_CMD_END);
 	amdgpu_ring_commit(ring);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 400fc74..205e683 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -431,7 +431,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	uint32_t rptr = amdgpu_ring_get_rptr(ring);
+	uint32_t rptr;
 	unsigned i;
 	int r;
 
@@ -441,6 +441,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 			  ring->idx, r);
 		return r;
 	}
+
+	rptr = amdgpu_ring_get_rptr(ring);
+
 	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
 	amdgpu_ring_commit(ring);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 4656849..7824116 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -82,7 +82,8 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
 
 static const struct soc15_reg_golden golden_settings_gc_9_0[] =
 {
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index c364ef9..77c9f4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1813,7 +1813,7 @@ static void si_program_aspm(struct amdgpu_device *adev)
 			if (orig != data)
 				si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
 
-			if ((adev->family != CHIP_OLAND) && (adev->family != CHIP_HAINAN)) {
+			if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
 				orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
 				data &= ~PLL_RAMP_UP_TIME_0_MASK;
 				if (orig != data)
@@ -1862,14 +1862,14 @@ static void si_program_aspm(struct amdgpu_device *adev)
 
 			orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
 			data &= ~LS2_EXIT_TIME_MASK;
-			if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
+			if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
 				data |= LS2_EXIT_TIME(5);
 			if (orig != data)
 				si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);
 
 			orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
 			data &= ~LS2_EXIT_TIME_MASK;
-			if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
+			if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
 				data |= LS2_EXIT_TIME(5);
 			if (orig != data)
 				si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index d4070839..80613a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -170,7 +170,7 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
 static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	uint32_t rptr = amdgpu_ring_get_rptr(ring);
+	uint32_t rptr;
 	unsigned i;
 	int r;
 
@@ -180,6 +180,9 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 			  ring->idx, r);
 		return r;
 	}
+
+	rptr = amdgpu_ring_get_rptr(ring);
+
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
 	amdgpu_ring_commit(ring);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 057151b..ce16b83 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -175,7 +175,7 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
 static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	uint32_t rptr = amdgpu_ring_get_rptr(ring);
+	uint32_t rptr;
 	unsigned i;
 	int r;
 
@@ -188,6 +188,9 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 			  ring->me, ring->idx, r);
 		return r;
 	}
+
+	rptr = amdgpu_ring_get_rptr(ring);
+
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
 	amdgpu_ring_commit(ring);
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 5aba50f..938d005 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -310,6 +310,7 @@ static const struct kfd_deviceid supported_devices[] = {
 	{ 0x67CF, &polaris10_device_info },	/* Polaris10 */
 	{ 0x67D0, &polaris10_vf_device_info },	/* Polaris10 vf*/
 	{ 0x67DF, &polaris10_device_info },	/* Polaris10 */
+	{ 0x6FDF, &polaris10_device_info },	/* Polaris10 */
 	{ 0x67E0, &polaris11_device_info },	/* Polaris11 */
 	{ 0x67E1, &polaris11_device_info },	/* Polaris11 */
 	{ 0x67E3, &polaris11_device_info },	/* Polaris11 */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 221de24..3b07a31 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1462,6 +1462,7 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
 }
 
 static const struct backlight_ops amdgpu_dm_backlight_ops = {
+	.options = BL_CORE_SUSPENDRESUME,
 	.get_brightness = amdgpu_dm_backlight_get_brightness,
 	.update_status	= amdgpu_dm_backlight_update_status,
 };
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 59445c8..c85bea7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -377,9 +377,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	drm_connector_attach_encoder(&aconnector->base,
 				     &aconnector->mst_encoder->base);
 
-	/*
-	 * TODO: understand why this one is needed
-	 */
 	drm_object_attach_property(
 		&connector->base,
 		dev->mode_config.path_property,
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index e4a8b33..16614d7 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -32,10 +32,8 @@
 
 calcs_ccflags := -mhard-float -msse $(cc_stack_align)
 
-# Use -msse2 only with clang:
-#   https://bugs.freedesktop.org/show_bug.cgi?id=109487
 ifdef CONFIG_CC_IS_CLANG
-calcs_cc_flags += -msse2
+calcs_ccflags += -msse2
 endif
 
 CFLAGS_dcn_calcs.o := $(calcs_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index f4b89d1..2b2efe4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1585,6 +1585,14 @@ void dc_set_power_state(
 		dc_resource_state_construct(dc, dc->current_state);
 
 		dc->hwss.init_hw(dc);
+
+#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+		if (dc->hwss.init_sys_ctx != NULL &&
+			dc->vm_pa_config.valid) {
+			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
+		}
+#endif
+
 		break;
 	default:
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index f0d68aa..d440b28 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -229,12 +229,10 @@ bool resource_construct(
 				DC_ERR("DC: failed to create audio!\n");
 				return false;
 			}
-
 			if (!aud->funcs->endpoint_valid(aud)) {
 				aud->funcs->destroy(&aud);
 				break;
 			}
-
 			pool->audios[i] = aud;
 			pool->audio_count++;
 		}
@@ -1703,24 +1701,25 @@ static struct audio *find_first_free_audio(
 		const struct resource_pool *pool,
 		enum engine_id id)
 {
-	int i;
-	for (i = 0; i < pool->audio_count; i++) {
+	int i, available_audio_count;
+
+	available_audio_count = pool->audio_count;
+
+	for (i = 0; i < available_audio_count; i++) {
 		if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) {
 			/*we have enough audio endpoint, find the matching inst*/
 			if (id != i)
 				continue;
-
 			return pool->audios[i];
 		}
 	}
 
-    /* use engine id to find free audio */
-	if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
+	/* use engine id to find free audio */
+	if ((id < available_audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
 		return pool->audios[id];
 	}
-
 	/*not found the matching one, first come first serve*/
-	for (i = 0; i < pool->audio_count; i++) {
+	for (i = 0; i < available_audio_count; i++) {
 		if (res_ctx->is_audio_acquired[i] == false) {
 			return pool->audios[i];
 		}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 7f6d724..abb559ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -611,6 +611,8 @@ void dce_aud_az_configure(
 
 	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
 		value);
+	DC_LOG_HW_AUDIO("\n\tAUDIO:az_configure: index: %u, data: 0x%x, displayName: %s\n",
+		audio->inst, value, audio_info->display_name);
 
 	/*
 	*write the port ID:
@@ -922,7 +924,6 @@ static const struct audio_funcs funcs = {
 	.az_configure = dce_aud_az_configure,
 	.destroy = dce_aud_destroy,
 };
-
 void dce_aud_destroy(struct audio **audio)
 {
 	struct dce_audio *aud = DCE_AUD(*audio);
@@ -953,7 +954,6 @@ struct audio *dce_audio_create(
 	audio->regs = reg;
 	audio->shifts = shifts;
 	audio->masks = masks;
-
 	return &audio->base;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 5d95a99..f8904f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -292,9 +292,10 @@ bool cm_helper_translate_curve_to_hw_format(
 		seg_distr[7] = 4;
 		seg_distr[8] = 4;
 		seg_distr[9] = 4;
+		seg_distr[10] = 1;
 
 		region_start = -10;
-		region_end = 0;
+		region_end = 1;
 	}
 
 	for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 648c80c..934ffe1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -32,8 +32,6 @@
 
 dml_ccflags := -mhard-float -msse $(cc_stack_align)
 
-# Use -msse2 only with clang:
-#   https://bugs.freedesktop.org/show_bug.cgi?id=109487
 ifdef CONFIG_CC_IS_CLANG
 dml_ccflags += -msse2
 endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index b52ccab..c7c5050 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -4052,6 +4052,11 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
 
 	data->frame_time_x2 = frame_time_in_us * 2 / 100;
 
+	if (data->frame_time_x2 < 280) {
+		pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
+		data->frame_time_x2 = 280;
+	}
+
 	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
 
 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index c9a15ba..0adfc53 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -1222,17 +1222,14 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 
 static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
 {
-	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
-		smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
+	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
 		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
-	}
 	return 0;
 }
 
 static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
 {
 	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
-		smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
 		return smum_send_msg_to_smc_with_parameter(
 			hwmgr,
 			PPSMC_MSG_UVDPowerON,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index 2aab1b4..cede78c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -669,20 +669,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
 	for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
 		table->WatermarkRow[1][i].MinClock =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) /
-			1000);
+			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
+			1000));
 		table->WatermarkRow[1][i].MaxClock =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
-			100);
+			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
+			1000));
 		table->WatermarkRow[1][i].MinUclk =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
-			1000);
+			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+			1000));
 		table->WatermarkRow[1][i].MaxUclk =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
-			1000);
+			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+			1000));
 		table->WatermarkRow[1][i].WmSetting = (uint8_t)
 				wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
 	}
@@ -690,20 +690,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
 	for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
 		table->WatermarkRow[0][i].MinClock =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) /
-			1000);
+			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
+			1000));
 		table->WatermarkRow[0][i].MaxClock =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) /
-			1000);
+			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
+			1000));
 		table->WatermarkRow[0][i].MinUclk =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
-			1000);
+			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+			1000));
 		table->WatermarkRow[0][i].MaxUclk =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
-			1000);
+			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+			1000));
 		table->WatermarkRow[0][i].WmSetting = (uint8_t)
 				wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
 	}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 373700c..224fa1e 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -131,8 +131,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 
 
 	/* Enable extended register access */
-	ast_enable_mmio(dev);
 	ast_open_key(ast);
+	ast_enable_mmio(dev);
 
 	/* Find out whether P2A works or whether to use device-tree */
 	ast_detect_config_mode(dev, &scu_rev);
@@ -576,6 +576,9 @@ void ast_driver_unload(struct drm_device *dev)
 {
 	struct ast_private *ast = dev->dev_private;
 
+	/* enable standard VGA decode */
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
+
 	ast_release_firmware(dev);
 	kfree(ast->dp501_fw_addr);
 	ast_mode_fini(dev);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 8bb355d..9d92d2d 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -600,7 +600,7 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc,
 		return -EINVAL;
 	ast_open_key(ast);
 
-	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
 
 	ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
 	ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index f7d4213..c1d1ac5 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -46,7 +46,7 @@ void ast_enable_mmio(struct drm_device *dev)
 {
 	struct ast_private *ast = dev->dev_private;
 
-	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
 }
 
 
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index bf6cad6..7683f45 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -46,6 +46,7 @@
 config DRM_LVDS_ENCODER
 	tristate "Transparent parallel to LVDS encoder support"
 	depends on OF
+	select DRM_KMS_HELPER
 	select DRM_PANEL_BRIDGE
 	help
 	  Support for transparent parallel to LVDS encoders that don't require
@@ -128,6 +129,18 @@
 	---help---
 	  Texas Instruments TFP410 DVI/HDMI Transmitter driver
 
+config DRM_LONTIUM_LT9611UXC
+	bool "Lontium LT9611UXC DSI/HDMI Bridge"
+	depends on OF
+	select DRM_KMS_HELPER
+	select REGMAP_I2C
+	select DRM_MIPI_DSI
+	help
+	  Say Y here if you want support for the Lontium Semiconductor
+	  LT9611UXC DSI/HDMI transmitter. The driver only needs to upload
+	  firmware to the bridge and requires little additional
+	  configuration at runtime.
+
 source "drivers/gpu/drm/bridge/analogix/Kconfig"
 
 source "drivers/gpu/drm/bridge/adv7511/Kconfig"
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 35f88d4..32ab583 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -14,4 +14,5 @@
 obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
 obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
 obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
+obj-$(CONFIG_DRM_LONTIUM_LT9611UXC) += lt9611uxc.o
 obj-y += synopsys/
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index d68986c..84abf5d 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1040,16 +1040,17 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
 	if (ret)
 		return ret;
 
+	/* Check whether panel supports fast training */
+	ret = analogix_dp_fast_link_train_detection(dp);
+	if (ret)
+		dp->psr_enable = false;
+
 	if (dp->psr_enable) {
 		ret = analogix_dp_enable_sink_psr(dp);
 		if (ret)
 			return ret;
 	}
 
-	/* Check whether panel supports fast training */
-	ret =  analogix_dp_fast_link_train_detection(dp);
-	if (ret)
-		dp->psr_enable = false;
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/bridge/lt9611uxc.c b/drivers/gpu/drm/bridge/lt9611uxc.c
new file mode 100644
index 0000000..cbe9572
--- /dev/null
+++ b/drivers/gpu/drm/bridge/lt9611uxc.c
@@ -0,0 +1,1771 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/component.h>
+#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
+#include <linux/of_irq.h>
+#include <linux/regulator/consumer.h>
+#include <linux/firmware.h>
+#include <linux/hdmi.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/string.h>
+
+#define CFG_HPD_INTERRUPTS BIT(0)
+#define CFG_EDID_INTERRUPTS BIT(1)
+#define CFG_CEC_INTERRUPTS BIT(2)
+#define CFG_VID_CHK_INTERRUPTS BIT(3)
+
+#define EDID_SEG_SIZE 256
+#define READ_BUF_MAX_SIZE 64
+#define WRITE_BUF_MAX_SIZE 64
+#define HPD_UEVENT_BUFFER_SIZE 30
+#define VERSION_NUM	0x32
+
+struct lt9611_reg_cfg {
+	u8 reg;
+	u8 val;
+};
+
+enum lt9611_fw_upgrade_status {
+	UPDATE_SUCCESS = 0,
+	UPDATE_RUNNING = 1,
+	UPDATE_FAILED = 2,
+};
+
+struct lt9611_vreg {
+	struct regulator *vreg; /* vreg handle */
+	char vreg_name[32];
+	int min_voltage;
+	int max_voltage;
+	int enable_load;
+	int disable_load;
+	int pre_on_sleep;
+	int post_on_sleep;
+	int pre_off_sleep;
+	int post_off_sleep;
+};
+
+struct lt9611_video_cfg {
+	u32 h_active;
+	u32 h_front_porch;
+	u32 h_pulse_width;
+	u32 h_back_porch;
+	bool h_polarity;
+	u32 v_active;
+	u32 v_front_porch;
+	u32 v_pulse_width;
+	u32 v_back_porch;
+	bool v_polarity;
+	u32 pclk_khz;
+	bool interlaced;
+	u32 vic;
+	enum hdmi_picture_aspect ar;
+	u32 num_of_lanes;
+	u32 num_of_intfs;
+	u8 scaninfo;
+};
+
+struct lt9611 {
+	struct device *dev;
+	struct drm_bridge bridge;
+
+	struct device_node *host_node;
+	struct mipi_dsi_device *dsi;
+	struct drm_connector connector;
+
+	u8 i2c_addr;
+	int irq;
+	bool ac_mode;
+
+	u32 irq_gpio;
+	u32 reset_gpio;
+	u32 hdmi_ps_gpio;
+	u32 hdmi_en_gpio;
+
+	unsigned int num_vreg;
+	struct lt9611_vreg *vreg_config;
+
+	struct i2c_client *i2c_client;
+
+	enum drm_connector_status status;
+	bool power_on;
+
+	/* get display modes from device tree */
+	bool non_pluggable;
+	u32 num_of_modes;
+	struct list_head mode_list;
+
+	struct drm_display_mode curr_mode;
+	struct lt9611_video_cfg video_cfg;
+
+	u8 edid_buf[EDID_SEG_SIZE];
+	u8 i2c_wbuf[WRITE_BUF_MAX_SIZE];
+	u8 i2c_rbuf[READ_BUF_MAX_SIZE];
+	bool hdmi_mode;
+	enum lt9611_fw_upgrade_status fw_status;
+};
+
+struct lt9611_timing_info {
+	u16 xres;
+	u16 yres;
+	u8 bpp;
+	u8 fps;
+	u8 lanes;
+	u8 intfs;
+};
+
+static struct lt9611_timing_info lt9611_supp_timing_cfg[] = {
+	{3840, 2160, 24, 30, 4, 2}, /* 3840x2160 24bit 30Hz 4Lane 2ports */
+	{1920, 1080, 24, 60, 4, 1}, /* 1080P 24bit 60Hz 4lane 1port */
+	{1920, 1080, 24, 30, 3, 1}, /* 1080P 24bit 30Hz 3lane 1port */
+	{1920, 1080, 24, 24, 3, 1},
+	{720, 480, 24, 60, 2, 1},
+	{720, 576, 24, 50, 2, 1},
+	{640, 480, 24, 60, 2, 1},
+	{0xffff, 0xffff, 0xff, 0xff, 0xff, 0xff}, /* sentinel */
+};
+
+static struct lt9611 *bridge_to_lt9611(struct drm_bridge *bridge)
+{
+	return container_of(bridge, struct lt9611, bridge);
+}
+
+static struct lt9611 *connector_to_lt9611(struct drm_connector *connector)
+{
+	return container_of(connector, struct lt9611, connector);
+}
+
+/*
+ * Write multiple values to one register:
+ * reg -> value0, value1, value2.
+ */
+static int lt9611_write(struct lt9611 *pdata, u8 reg,
+		const u8 *buf, int size)
+{
+	struct i2c_client *client = pdata->i2c_client;
+	struct i2c_msg msg = {
+		.addr = client->addr,
+		.flags = 0,
+		.len = size + 1,
+		.buf = pdata->i2c_wbuf,
+	};
+
+	pdata->i2c_wbuf[0] = reg;
+	if (size > (WRITE_BUF_MAX_SIZE - 1)) {
+		pr_err("invalid write buffer size %d\n", size);
+		return -EINVAL;
+	}
+
+	memcpy(pdata->i2c_wbuf + 1, buf, size);
+
+	if (i2c_transfer(client->adapter, &msg, 1) < 1) {
+		pr_err("i2c write failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Write a single value to one register:
+ * reg -> value
+ */
+static int lt9611_write_byte(struct lt9611 *pdata, const u8 reg, u8 value)
+{
+	struct i2c_client *client = pdata->i2c_client;
+	struct i2c_msg msg = {
+		.addr = client->addr,
+		.flags = 0,
+		.len = 2,
+		.buf = pdata->i2c_wbuf,
+	};
+
+	memset(pdata->i2c_wbuf, 0, WRITE_BUF_MAX_SIZE);
+	pdata->i2c_wbuf[0] = reg;
+	pdata->i2c_wbuf[1] = value;
+
+	if (i2c_transfer(client->adapter, &msg, 1) < 1) {
+		pr_err("i2c write failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Write a sequence of register/value pairs:
+ * reg1 -> value1
+ * reg2 -> value2
+ */
+static void lt9611_write_array(struct lt9611 *pdata,
+	struct lt9611_reg_cfg *reg_array, int size)
+{
+	int i = 0;
+
+	for (i = 0; i < size; i++)
+		lt9611_write_byte(pdata, reg_array[i].reg, reg_array[i].val);
+}
+
+static int lt9611_read(struct lt9611 *pdata, u8 reg, char *buf, u32 size)
+{
+	struct i2c_client *client = pdata->i2c_client;
+	struct i2c_msg msg[2] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = 1,
+			.buf = pdata->i2c_wbuf,
+		},
+		{
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = size,
+			.buf = pdata->i2c_rbuf,
+		}
+	};
+
+	memset(pdata->i2c_wbuf, 0x0, WRITE_BUF_MAX_SIZE);
+	memset(pdata->i2c_rbuf, 0x0, READ_BUF_MAX_SIZE);
+	pdata->i2c_wbuf[0] = reg;
+
+	if (i2c_transfer(client->adapter, msg, 2) != 2) {
+		pr_err("i2c read failed\n");
+		return -EIO;
+	}
+
+	memcpy(buf, pdata->i2c_rbuf, size);
+
+	return 0;
+}
+
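+/*
+ * Enter the bridge's configuration/flash state. The 0xFF writes select
+ * a register bank and 0xEE toggles access to the extended registers
+ * (register semantics inferred from the vendor sequences used
+ * throughout this file).
+ */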
+void lt9611_config(struct lt9611 *pdata)
+{
+	struct lt9611_reg_cfg reg_cfg[] = {
+		{0xFF, 0x80},
+		{0xEE, 0x01},
+		{0x5E, 0xDF},
+		{0x58, 0x00},
+		{0x59, 0x50},
+		{0x5A, 0x10},
+		{0x5A, 0x00},
+	};
+
+	lt9611_write_array(pdata, reg_cfg, ARRAY_SIZE(reg_cfg));
+}
+
+u8 lt9611_get_version(struct lt9611 *pdata)
+{
+	u8 revision = 0;
+
+	lt9611_write_byte(pdata, 0xFF, 0x80);
+	lt9611_write_byte(pdata, 0xEE, 0x01);
+	lt9611_write_byte(pdata, 0xFF, 0xB0);
+
+	if (!lt9611_read(pdata, 0x21, &revision, 1))
+		pr_info("LT9611 revision: 0x%x\n", revision);
+	else
+		pr_err("LT9611 get revision failed\n");
+
+	lt9611_write_byte(pdata, 0xFF, 0x80);
+	lt9611_write_byte(pdata, 0xEE, 0x00);
+
+	return revision;
+}
+
+void lt9611_flash_write_en(struct lt9611 *pdata)
+{
+	struct lt9611_reg_cfg reg_cfg0[] = {
+		{0xFF, 0x81},
+		{0x08, 0xBF},
+	};
+
+	struct lt9611_reg_cfg reg_cfg1[] = {
+		{0xFF, 0x80},
+		{0x5A, 0x04},
+		{0x5A, 0x00},
+	};
+
+	lt9611_write_array(pdata, reg_cfg0, ARRAY_SIZE(reg_cfg0));
+	msleep(20);
+	lt9611_write_byte(pdata, 0x08, 0xFF);
+	msleep(20);
+	lt9611_write_array(pdata, reg_cfg1, ARRAY_SIZE(reg_cfg1));
+}
+
+void lt9611_block_erase(struct lt9611 *pdata)
+{
+	struct lt9611_reg_cfg reg_cfg[] = {
+		{0xFF, 0x80},
+		{0xEE, 0x01},
+		{0x5A, 0x04},
+		{0x5A, 0x00},
+		{0x5B, 0x00},
+		{0x5C, 0x00},
+		{0x5D, 0x00},
+		{0x5A, 0x01},
+		{0x5A, 0x00},
+	};
+
+	pr_info("LT9611 block erase\n");
+	lt9611_write_array(pdata, reg_cfg, ARRAY_SIZE(reg_cfg));
+	msleep(3000);
+}
+
+void lt9611_flash_read_addr_set(struct lt9611 *pdata, u32 addr)
+{
+	struct lt9611_reg_cfg reg_cfg[] = {
+		{0x5E, 0x5F},
+		{0x5A, 0xA0},
+		{0x5A, 0x80},
+		{0x5B, (addr & 0xFF0000) >> 16},
+		{0x5C, (addr & 0xFF00) >> 8},
+		{0x5D, addr & 0xFF},
+		{0x5A, 0x90},
+		{0x5A, 0x80},
+		{0x58, 0x21},
+	};
+
+	lt9611_write_array(pdata, reg_cfg, ARRAY_SIZE(reg_cfg));
+}
+
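+/*
+ * Read the flash contents back in 32-byte pages for verification.
+ * Note that a partial final page is still copied in full, so @buff
+ * must be sized to a multiple of 32 bytes (the caller allocates with
+ * ALIGN(size, 32)).
+ */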
+void lt9611_fw_read_back(struct lt9611 *pdata, u8 *buff, int size)
+{
+	u8 page_data[32];
+	int page_number = 0, i = 0, addr = 0;
+
+	struct lt9611_reg_cfg reg_cfg[] = {
+		{0xFF, 0x80},
+		{0xEE, 0x01},
+		{0x5A, 0x84},
+		{0x5A, 0x80},
+	};
+	/*
+	 * Read back 32 bytes at a time.
+	 */
+	page_number = size / 32;
+	if (size % 32)
+		page_number++;
+
+	lt9611_write_array(pdata, reg_cfg, ARRAY_SIZE(reg_cfg));
+
+	for (i = 0; i < page_number; i++) {
+		memset(page_data, 0x0, 32);
+		lt9611_flash_read_addr_set(pdata, addr);
+		lt9611_read(pdata, 0x5F, page_data, 32);
+		memcpy(buff, page_data, 32);
+		buff += 32;
+		addr += 32;
+	}
+}
+
+void lt9611_flash_write_config(struct lt9611 *pdata)
+{
+	struct lt9611_reg_cfg reg_cfg[] = {
+		{0xFF, 0x80},
+		{0x5E, 0xDF},
+		{0x5A, 0x20},
+		{0x5A, 0x00},
+		{0x58, 0x21},
+	};
+
+	lt9611_flash_write_en(pdata);
+	lt9611_write_array(pdata, reg_cfg, ARRAY_SIZE(reg_cfg));
+}
+
+void lt9611_flash_write_addr_set(struct lt9611 *pdata, u32 addr)
+{
+	struct lt9611_reg_cfg reg_cfg[] = {
+		{0x5B, (addr & 0xFF0000) >> 16},
+		{0x5C, (addr & 0xFF00) >> 8},
+		{0x5D, addr & 0xFF},
+		{0x5A, 0x10},
+		{0x5A, 0x00},
+	};
+
+	lt9611_write_array(pdata, reg_cfg, ARRAY_SIZE(reg_cfg));
+}
+
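+/*
+ * Write the firmware image to flash in 32-byte pages; the last
+ * partial page, if any, is padded with 0xFF before being committed.
+ */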
+void lt9611_firmware_write(struct lt9611 *pdata, const u8 *f_data,
+		int size)
+{
+	u8 last_buf[32];
+	int i = 0, page_size = 32;
+	int start_addr = 0, total_page = 0, rest_data = 0;
+
+	total_page = size / page_size;
+	rest_data = size % page_size;
+
+	for (i = 0; i < total_page; i++) {
+		lt9611_flash_write_config(pdata);
+		lt9611_write(pdata, 0x59, f_data, page_size);
+		lt9611_flash_write_addr_set(pdata, start_addr);
+		start_addr += page_size;
+		f_data += page_size;
+		msleep(20);
+	}
+
+	if (rest_data > 0) {
+		memset(last_buf, 0xFF, 32);
+		memcpy(last_buf, f_data, rest_data);
+		lt9611_flash_write_config(pdata);
+		lt9611_write(pdata, 0x59, last_buf, page_size);
+
+		lt9611_flash_write_addr_set(pdata, start_addr);
+		msleep(20);
+	}
+	msleep(20);
+
+	pr_info("LT9611 FW write done, total size: %d, pages: %d, rest: %d\n",
+		size, total_page, rest_data);
+}
+
+void lt9611_firmware_upgrade(struct lt9611 *pdata,
+			const struct firmware *cfg)
+{
+	int i = 0;
+	u8 *fw_read_data = NULL;
+	int data_len = (int)cfg->size;
+
+	pr_info("LT9611 FW total size %d\n", data_len);
+
+	fw_read_data = kzalloc(ALIGN(data_len, 32), GFP_KERNEL);
+	if (!fw_read_data)
+		return;
+
+	pdata->fw_status = UPDATE_RUNNING;
+	lt9611_config(pdata);
+
+	/*
+	 * Erase the block twice: a single erase can occasionally
+	 * fail, so the repeat is a workaround.
+	 */
+	for (i = 0; i < 2; i++)
+		lt9611_block_erase(pdata);
+
+	lt9611_firmware_write(pdata, cfg->data, data_len);
+	msleep(20);
+	lt9611_fw_read_back(pdata, fw_read_data, data_len);
+
+	if (!memcmp(cfg->data, fw_read_data, data_len)) {
+		pdata->fw_status = UPDATE_SUCCESS;
+		pr_debug("LT9611 Firmware upgrade success.\n");
+	} else {
+		pdata->fw_status = UPDATE_FAILED;
+		pr_err("LT9611 Firmware upgrade failed\n");
+	}
+
+	kfree(fw_read_data);
+}
+
+static void lt9611_firmware_cb(const struct firmware *cfg, void *data)
+{
+	struct lt9611 *pdata = (struct lt9611 *)data;
+
+	if (!cfg) {
+		pr_err("LT9611 get firmware failed\n");
+		return;
+	}
+
+	lt9611_firmware_upgrade(pdata, cfg);
+	release_firmware(cfg);
+}
+
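+/*
+ * Build a list of drm_display_mode entries from the optional
+ * "lt,customize-modes" DT node. These modes are used instead of EDID
+ * when the sink is marked non-pluggable.
+ */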
+static int lt9611_parse_dt_modes(struct device_node *np,
+					struct list_head *head,
+					u32 *num_of_modes)
+{
+	int rc = 0;
+	struct drm_display_mode *mode;
+	u32 mode_count = 0;
+	struct device_node *node = NULL;
+	struct device_node *root_node = NULL;
+	u32 h_front_porch, h_pulse_width, h_back_porch;
+	u32 v_front_porch, v_pulse_width, v_back_porch;
+	bool h_active_high, v_active_high;
+	u32 flags = 0;
+
+	root_node = of_get_child_by_name(np, "lt,customize-modes");
+	if (!root_node) {
+		root_node = of_parse_phandle(np, "lt,customize-modes", 0);
+		if (!root_node) {
+			pr_info("No entry present for lt,customize-modes\n");
+			goto end;
+		}
+	}
+
+	for_each_child_of_node(root_node, node) {
+		rc = 0;
+		mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+		if (!mode) {
+			pr_err("Out of memory\n");
+			rc = -ENOMEM;
+			continue;
+		}
+
+		rc = of_property_read_u32(node, "lt,mode-h-active",
+						&mode->hdisplay);
+		if (rc) {
+			pr_err("failed to read h-active, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "lt,mode-h-front-porch",
+						&h_front_porch);
+		if (rc) {
+			pr_err("failed to read h-front-porch, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "lt,mode-h-pulse-width",
+						&h_pulse_width);
+		if (rc) {
+			pr_err("failed to read h-pulse-width, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "lt,mode-h-back-porch",
+						&h_back_porch);
+		if (rc) {
+			pr_err("failed to read h-back-porch, rc=%d\n", rc);
+			goto fail;
+		}
+
+		h_active_high = of_property_read_bool(node,
+						"lt,mode-h-active-high");
+
+		rc = of_property_read_u32(node, "lt,mode-v-active",
+						&mode->vdisplay);
+		if (rc) {
+			pr_err("failed to read v-active, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "lt,mode-v-front-porch",
+						&v_front_porch);
+		if (rc) {
+			pr_err("failed to read v-front-porch, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "lt,mode-v-pulse-width",
+						&v_pulse_width);
+		if (rc) {
+			pr_err("failed to read v-pulse-width, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "lt,mode-v-back-porch",
+						&v_back_porch);
+		if (rc) {
+			pr_err("failed to read v-back-porch, rc=%d\n", rc);
+			goto fail;
+		}
+
+		v_active_high = of_property_read_bool(node,
+						"lt,mode-v-active-high");
+
+		rc = of_property_read_u32(node, "lt,mode-refresh-rate",
+						&mode->vrefresh);
+		if (rc) {
+			pr_err("failed to read refresh-rate, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "lt,mode-clock-in-khz",
+						&mode->clock);
+		if (rc) {
+			pr_err("failed to read clock, rc=%d\n", rc);
+			goto fail;
+		}
+
+		mode->hsync_start = mode->hdisplay + h_front_porch;
+		mode->hsync_end = mode->hsync_start + h_pulse_width;
+		mode->htotal = mode->hsync_end + h_back_porch;
+		mode->vsync_start = mode->vdisplay + v_front_porch;
+		mode->vsync_end = mode->vsync_start + v_pulse_width;
+		mode->vtotal = mode->vsync_end + v_back_porch;
+		if (h_active_high)
+			flags |= DRM_MODE_FLAG_PHSYNC;
+		else
+			flags |= DRM_MODE_FLAG_NHSYNC;
+		if (v_active_high)
+			flags |= DRM_MODE_FLAG_PVSYNC;
+		else
+			flags |= DRM_MODE_FLAG_NVSYNC;
+		mode->flags = flags;
+
+		if (!rc) {
+			mode_count++;
+			list_add_tail(&mode->head, head);
+		}
+
+		drm_mode_set_name(mode);
+
+		pr_debug("mode[%s] h[%d,%d,%d,%d] v[%d,%d,%d,%d] %d %x %dkHZ\n",
+			mode->name, mode->hdisplay, mode->hsync_start,
+			mode->hsync_end, mode->htotal, mode->vdisplay,
+			mode->vsync_start, mode->vsync_end, mode->vtotal,
+			mode->vrefresh, mode->flags, mode->clock);
+fail:
+		if (rc) {
+			kfree(mode);
+			continue;
+		}
+	}
+
+	if (num_of_modes)
+		*num_of_modes = mode_count;
+
+end:
+	return rc;
+}
+
+static int lt9611_parse_dt(struct device *dev,
+	struct lt9611 *pdata)
+{
+	struct device_node *np = dev->of_node;
+	struct device_node *end_node;
+	int ret = 0;
+
+	end_node = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
+	if (!end_node) {
+		pr_err("remote endpoint not found\n");
+		return -ENODEV;
+	}
+
+	pdata->host_node = of_graph_get_remote_port_parent(end_node);
+	of_node_put(end_node);
+	if (!pdata->host_node) {
+		pr_err("remote node not found\n");
+		return -ENODEV;
+	}
+	of_node_put(pdata->host_node);
+
+	pdata->irq_gpio =
+		of_get_named_gpio(np, "lt,irq-gpio", 0);
+	if (!gpio_is_valid(pdata->irq_gpio)) {
+		pr_err("irq gpio not specified\n");
+		ret = -EINVAL;
+	}
+	pr_debug("irq_gpio=%d\n", pdata->irq_gpio);
+
+	pdata->reset_gpio =
+		of_get_named_gpio(np, "lt,reset-gpio", 0);
+	if (!gpio_is_valid(pdata->reset_gpio)) {
+		pr_err("reset gpio not specified\n");
+		ret = -EINVAL;
+	}
+	pr_debug("reset_gpio=%d\n", pdata->reset_gpio);
+
+	pdata->hdmi_ps_gpio =
+		of_get_named_gpio(np, "lt,hdmi-ps-gpio", 0);
+	if (!gpio_is_valid(pdata->hdmi_ps_gpio))
+		pr_debug("hdmi ps gpio not specified\n");
+	else
+		pr_debug("hdmi_ps_gpio=%d\n", pdata->hdmi_ps_gpio);
+
+	pdata->hdmi_en_gpio =
+		of_get_named_gpio(np, "lt,hdmi-en-gpio", 0);
+	if (!gpio_is_valid(pdata->hdmi_en_gpio))
+		pr_debug("hdmi en gpio not specified\n");
+	else
+		pr_debug("hdmi_en_gpio=%d\n", pdata->hdmi_en_gpio);
+
+	pdata->ac_mode = of_property_read_bool(np, "lt,ac-mode");
+	pr_debug("ac_mode=%d\n", pdata->ac_mode);
+
+	pdata->non_pluggable = of_property_read_bool(np, "lt,non-pluggable");
+	pr_debug("non_pluggable = %d\n", pdata->non_pluggable);
+	if (pdata->non_pluggable) {
+		INIT_LIST_HEAD(&pdata->mode_list);
+		ret = lt9611_parse_dt_modes(np,
+			&pdata->mode_list, &pdata->num_of_modes);
+	}
+
+	return ret;
+}
+
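+/*
+ * Request and configure all GPIOs when @on is true, releasing them in
+ * reverse order on any failure; when @on is false, free everything.
+ */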
+static int lt9611_gpio_configure(struct lt9611 *pdata, bool on)
+{
+	int ret = 0;
+
+	if (on) {
+		ret = gpio_request(pdata->reset_gpio,
+			"lt9611-reset-gpio");
+		if (ret) {
+			pr_err("lt9611 reset gpio request failed\n");
+			goto error;
+		}
+
+		ret = gpio_direction_output(pdata->reset_gpio, 0);
+		if (ret) {
+			pr_err("lt9611 reset gpio direction failed\n");
+			goto reset_error;
+		}
+
+		if (gpio_is_valid(pdata->hdmi_en_gpio)) {
+			ret = gpio_request(pdata->hdmi_en_gpio,
+					"lt9611-hdmi-en-gpio");
+			if (ret) {
+				pr_err("lt9611 hdmi en gpio request failed\n");
+				goto reset_error;
+			}
+
+			ret = gpio_direction_output(pdata->hdmi_en_gpio, 1);
+			if (ret) {
+				pr_err("lt9611 hdmi en gpio direction failed\n");
+				goto hdmi_en_error;
+			}
+		}
+
+		if (gpio_is_valid(pdata->hdmi_ps_gpio)) {
+			ret = gpio_request(pdata->hdmi_ps_gpio,
+				"lt9611-hdmi-ps-gpio");
+			if (ret) {
+				pr_err("lt9611 hdmi ps gpio request failed\n");
+				goto hdmi_en_error;
+			}
+
+			ret = gpio_direction_input(pdata->hdmi_ps_gpio);
+			if (ret) {
+				pr_err("lt9611 hdmi ps gpio direction failed\n");
+				goto hdmi_ps_error;
+			}
+		}
+
+		ret = gpio_request(pdata->irq_gpio, "lt9611-irq-gpio");
+		if (ret) {
+			pr_err("lt9611 irq gpio request failed\n");
+			goto hdmi_ps_error;
+		}
+
+		ret = gpio_direction_input(pdata->irq_gpio);
+		if (ret) {
+			pr_err("lt9611 irq gpio direction failed\n");
+			goto irq_error;
+		}
+	} else {
+		gpio_free(pdata->irq_gpio);
+		if (gpio_is_valid(pdata->hdmi_ps_gpio))
+			gpio_free(pdata->hdmi_ps_gpio);
+		if (gpio_is_valid(pdata->hdmi_en_gpio))
+			gpio_free(pdata->hdmi_en_gpio);
+		gpio_free(pdata->reset_gpio);
+	}
+
+	return ret;
+
+irq_error:
+	gpio_free(pdata->irq_gpio);
+hdmi_ps_error:
+	if (gpio_is_valid(pdata->hdmi_ps_gpio))
+		gpio_free(pdata->hdmi_ps_gpio);
+hdmi_en_error:
+	if (gpio_is_valid(pdata->hdmi_en_gpio))
+		gpio_free(pdata->hdmi_en_gpio);
+reset_error:
+	gpio_free(pdata->reset_gpio);
+error:
+	return ret;
+}
+
+static int lt9611_read_device_id(struct lt9611 *pdata)
+{
+	u8 rev0 = 0, rev1 = 0;
+	int ret = 0;
+
+	lt9611_write_byte(pdata, 0xFF, 0x80);
+	lt9611_write_byte(pdata, 0xEE, 0x01);
+	lt9611_write_byte(pdata, 0xFF, 0x81);
+
+	if (!lt9611_read(pdata, 0x00, &rev0, 1) &&
+		!lt9611_read(pdata, 0x01, &rev1, 1)) {
+		pr_info("LT9611 id: 0x%x\n", (rev0 << 8) | rev1);
+	} else {
+		pr_err("LT9611 get id failed\n");
+		ret = -1;
+	}
+
+	lt9611_write_byte(pdata, 0xFF, 0x80);
+	lt9611_write_byte(pdata, 0xEE, 0x00);
+
+	return ret;
+}
+
+static irqreturn_t lt9611_irq_thread_handler(int irq, void *dev_id)
+{
+	pr_debug("irq_thread_handler\n");
+	return IRQ_HANDLED;
+}
+
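+/*
+ * Pulse the reset GPIO high-low-high with 20 ms settle times between
+ * transitions (delays per the vendor bring-up sequence, assumed here).
+ */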
+static void lt9611_reset(struct lt9611 *pdata, bool on_off)
+{
+	pr_debug("reset: %d\n", on_off);
+	if (on_off) {
+		gpio_set_value(pdata->reset_gpio, 1);
+		msleep(20);
+		gpio_set_value(pdata->reset_gpio, 0);
+		msleep(20);
+		gpio_set_value(pdata->reset_gpio, 1);
+		msleep(20);
+	} else {
+		gpio_set_value(pdata->reset_gpio, 0);
+	}
+}
+
+static void lt9611_assert_5v(struct lt9611 *pdata)
+{
+	if (gpio_is_valid(pdata->hdmi_en_gpio)) {
+		gpio_set_value(pdata->hdmi_en_gpio, 1);
+		msleep(20);
+	}
+}
+
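+/*
+ * Acquire (config=true) or release (config=false) the regulators in
+ * @in_vreg. On acquire, each regulator's voltage range is programmed;
+ * on release, the voltage is relaxed to [0, max] before the handle is
+ * dropped.
+ */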
+static int lt9611_config_vreg(struct device *dev,
+	struct lt9611_vreg *in_vreg, int num_vreg, bool config)
+{
+	int i = 0, rc = 0;
+	struct lt9611_vreg *curr_vreg = NULL;
+
+	if (!in_vreg || !num_vreg)
+		return rc;
+
+	if (config) {
+		for (i = 0; i < num_vreg; i++) {
+			curr_vreg = &in_vreg[i];
+			curr_vreg->vreg = regulator_get(dev,
+					curr_vreg->vreg_name);
+			rc = PTR_RET(curr_vreg->vreg);
+			if (rc) {
+				pr_err("%s get failed. rc=%d\n",
+						curr_vreg->vreg_name, rc);
+				curr_vreg->vreg = NULL;
+				goto vreg_get_fail;
+			}
+
+			rc = regulator_set_voltage(
+					curr_vreg->vreg,
+					curr_vreg->min_voltage,
+					curr_vreg->max_voltage);
+			if (rc < 0) {
+				pr_err("%s set vltg fail\n",
+						curr_vreg->vreg_name);
+				goto vreg_set_voltage_fail;
+			}
+		}
+	} else {
+		for (i = num_vreg-1; i >= 0; i--) {
+			curr_vreg = &in_vreg[i];
+			if (curr_vreg->vreg) {
+				regulator_set_voltage(curr_vreg->vreg,
+						0, curr_vreg->max_voltage);
+
+				regulator_put(curr_vreg->vreg);
+				curr_vreg->vreg = NULL;
+			}
+		}
+	}
+	return 0;
+
+vreg_unconfig:
+	regulator_set_load(curr_vreg->vreg, 0);
+
+vreg_set_voltage_fail:
+	regulator_put(curr_vreg->vreg);
+	curr_vreg->vreg = NULL;
+
+vreg_get_fail:
+	for (i--; i >= 0; i--) {
+		curr_vreg = &in_vreg[i];
+		goto vreg_unconfig;
+	}
+	return rc;
+}
+
+static int lt9611_get_dt_supply(struct device *dev,
+		struct lt9611 *pdata)
+{
+	int i = 0, rc = 0;
+	u32 tmp = 0;
+	struct device_node *of_node = NULL, *supply_root_node = NULL;
+	struct device_node *supply_node = NULL;
+
+	if (!dev || !pdata) {
+		pr_err("invalid input param dev:%pK pdata:%pK\n", dev, pdata);
+		return -EINVAL;
+	}
+
+	of_node = dev->of_node;
+
+	pdata->num_vreg = 0;
+	supply_root_node = of_get_child_by_name(of_node,
+			"lt,supply-entries");
+	if (!supply_root_node) {
+		pr_info("no supply entry present\n");
+		return 0;
+	}
+
+	pdata->num_vreg = of_get_available_child_count(supply_root_node);
+	if (pdata->num_vreg == 0) {
+		pr_info("no vreg present\n");
+		return 0;
+	}
+
+	pr_debug("vreg found. count=%d\n", pdata->num_vreg);
+	pdata->vreg_config = devm_kzalloc(dev, sizeof(struct lt9611_vreg) *
+			pdata->num_vreg, GFP_KERNEL);
+	if (!pdata->vreg_config)
+		return -ENOMEM;
+
+	for_each_available_child_of_node(supply_root_node, supply_node) {
+		const char *st = NULL;
+
+		rc = of_property_read_string(supply_node,
+				"lt,supply-name", &st);
+		if (rc) {
+			pr_err("error reading name. rc=%d\n", rc);
+			goto error;
+		}
+
+		strlcpy(pdata->vreg_config[i].vreg_name, st,
+				sizeof(pdata->vreg_config[i].vreg_name));
+
+		rc = of_property_read_u32(supply_node,
+				"lt,supply-min-voltage", &tmp);
+		if (rc) {
+			pr_err("error reading min volt. rc=%d\n", rc);
+			goto error;
+		}
+		pdata->vreg_config[i].min_voltage = tmp;
+
+		rc = of_property_read_u32(supply_node,
+				"lt,supply-max-voltage", &tmp);
+		if (rc) {
+			pr_err("error reading max volt. rc=%d\n", rc);
+			goto error;
+		}
+		pdata->vreg_config[i].max_voltage = tmp;
+
+		rc = of_property_read_u32(supply_node,
+				"lt,supply-enable-load", &tmp);
+		if (rc)
+			pr_debug("no supply enable load value. rc=%d\n", rc);
+
+		pdata->vreg_config[i].enable_load = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+				"lt,supply-disable-load", &tmp);
+		if (rc)
+			pr_debug("no supply disable load value. rc=%d\n", rc);
+
+		pdata->vreg_config[i].disable_load = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+				"lt,supply-pre-on-sleep", &tmp);
+		if (rc)
+			pr_debug("no supply pre on sleep value. rc=%d\n", rc);
+
+		pdata->vreg_config[i].pre_on_sleep = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+				"lt,supply-pre-off-sleep", &tmp);
+		if (rc)
+			pr_debug("no supply pre off sleep value. rc=%d\n", rc);
+
+		pdata->vreg_config[i].pre_off_sleep = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+				"lt,supply-post-on-sleep", &tmp);
+		if (rc)
+			pr_debug("no supply post on sleep value. rc=%d\n", rc);
+
+		pdata->vreg_config[i].post_on_sleep = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+				"lt,supply-post-off-sleep", &tmp);
+		if (rc)
+			pr_debug("no supply post off sleep value. rc=%d\n", rc);
+
+		pdata->vreg_config[i].post_off_sleep = (!rc ? tmp : 0);
+
+		pr_debug("%s min=%d, max=%d, enable=%d, disable=%d\n",
+				pdata->vreg_config[i].vreg_name,
+				pdata->vreg_config[i].min_voltage,
+				pdata->vreg_config[i].max_voltage,
+				pdata->vreg_config[i].enable_load,
+				pdata->vreg_config[i].disable_load);
+		++i;
+
+		rc = 0;
+	}
+
+	rc = lt9611_config_vreg(dev,
+			pdata->vreg_config, pdata->num_vreg, true);
+	if (rc)
+		goto error;
+
+	return rc;
+
+error:
+	if (pdata->vreg_config) {
+		devm_kfree(dev, pdata->vreg_config);
+		pdata->vreg_config = NULL;
+		pdata->num_vreg = 0;
+	}
+
+	return rc;
+}
+
+static void lt9611_put_dt_supply(struct device *dev,
+		struct lt9611 *pdata)
+{
+	if (!dev || !pdata) {
+		pr_err("invalid input param dev:%pK pdata:%pK\n", dev, pdata);
+		return;
+	}
+
+	lt9611_config_vreg(dev,
+			pdata->vreg_config, pdata->num_vreg, false);
+
+	if (pdata->vreg_config) {
+		devm_kfree(dev, pdata->vreg_config);
+		pdata->vreg_config = NULL;
+	}
+	pdata->num_vreg = 0;
+}
+
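+/*
+ * Enable regulators in list order (disable in reverse), honouring the
+ * optional pre/post sleep delays from DT; on failure, every supply
+ * enabled so far is unwound.
+ */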
+static int lt9611_enable_vreg(struct lt9611 *pdata, int enable)
+{
+	int i = 0, rc = 0;
+	bool need_sleep;
+	struct lt9611_vreg *in_vreg = pdata->vreg_config;
+	int num_vreg = pdata->num_vreg;
+
+	if (enable) {
+		for (i = 0; i < num_vreg; i++) {
+			rc = PTR_RET(in_vreg[i].vreg);
+			if (rc) {
+				pr_err("%s regulator error. rc=%d\n",
+						in_vreg[i].vreg_name, rc);
+				goto vreg_set_opt_mode_fail;
+			}
+
+			need_sleep = !regulator_is_enabled(in_vreg[i].vreg);
+			if (in_vreg[i].pre_on_sleep && need_sleep)
+				usleep_range(in_vreg[i].pre_on_sleep * 1000,
+						in_vreg[i].pre_on_sleep * 1000);
+
+			rc = regulator_set_load(in_vreg[i].vreg,
+					in_vreg[i].enable_load);
+			if (rc < 0) {
+				pr_err("%s set opt m fail\n",
+						in_vreg[i].vreg_name);
+				goto vreg_set_opt_mode_fail;
+			}
+
+			rc = regulator_enable(in_vreg[i].vreg);
+			if (in_vreg[i].post_on_sleep && need_sleep)
+				usleep_range(in_vreg[i].post_on_sleep * 1000,
+					in_vreg[i].post_on_sleep * 1000);
+			if (rc < 0) {
+				pr_err("%s enable failed\n",
+						in_vreg[i].vreg_name);
+				goto disable_vreg;
+			}
+		}
+	} else {
+		for (i = num_vreg-1; i >= 0; i--) {
+			if (in_vreg[i].pre_off_sleep)
+				usleep_range(in_vreg[i].pre_off_sleep * 1000,
+					in_vreg[i].pre_off_sleep * 1000);
+
+			regulator_set_load(in_vreg[i].vreg,
+					in_vreg[i].disable_load);
+			regulator_disable(in_vreg[i].vreg);
+
+			if (in_vreg[i].post_off_sleep)
+				usleep_range(in_vreg[i].post_off_sleep * 1000,
+					in_vreg[i].post_off_sleep * 1000);
+		}
+	}
+	return rc;
+
+disable_vreg:
+	regulator_set_load(in_vreg[i].vreg, in_vreg[i].disable_load);
+
+vreg_set_opt_mode_fail:
+	for (i--; i >= 0; i--) {
+		if (in_vreg[i].pre_off_sleep)
+			usleep_range(in_vreg[i].pre_off_sleep * 1000,
+					in_vreg[i].pre_off_sleep * 1000);
+
+		regulator_set_load(in_vreg[i].vreg,
+				in_vreg[i].disable_load);
+		regulator_disable(in_vreg[i].vreg);
+
+		if (in_vreg[i].post_off_sleep)
+			usleep_range(in_vreg[i].post_off_sleep * 1000,
+					in_vreg[i].post_off_sleep * 1000);
+	}
+
+	return rc;
+}
+
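+/*
+ * Look up @mode in the static timing table; the table is terminated by
+ * an entry with xres == 0xffff.
+ */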
+static struct lt9611_timing_info *lt9611_get_supported_timing(
+		struct drm_display_mode *mode)
+{
+	int i = 0;
+
+	while (lt9611_supp_timing_cfg[i].xres != 0xffff) {
+		if (lt9611_supp_timing_cfg[i].xres == mode->hdisplay &&
+			lt9611_supp_timing_cfg[i].yres == mode->vdisplay &&
+			lt9611_supp_timing_cfg[i].fps ==
+					drm_mode_vrefresh(mode)) {
+			return &lt9611_supp_timing_cfg[i];
+		}
+		i++;
+	}
+
+	return NULL;
+}
+
+/* TODO: intf/lane number needs info from both DSI host and client */
+static int lt9611_get_intf_num(struct lt9611 *pdata,
+	struct drm_display_mode *mode)
+{
+	int num_of_intfs = 0;
+	struct lt9611_timing_info *timing =
+			lt9611_get_supported_timing(mode);
+
+	if (timing)
+		num_of_intfs = timing->intfs;
+	else
+		pr_err("interface number not defined by bridge chip\n");
+
+	return num_of_intfs;
+}
+
+static int lt9611_get_lane_num(struct lt9611 *pdata,
+	struct drm_display_mode *mode)
+{
+	int num_of_lanes = 0;
+	struct lt9611_timing_info *timing =
+				lt9611_get_supported_timing(mode);
+
+	if (timing)
+		num_of_lanes = timing->lanes;
+	else
+		pr_err("lane number not defined by bridge chip\n");
+
+	return num_of_lanes;
+}
+
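+/*
+ * Translate a DRM mode into the bridge's timing description: porch and
+ * pulse widths are derived from the sync positions, lane/interface
+ * counts come from the timing table, and the AVI infoframe supplies
+ * scan info, aspect ratio and VIC.
+ */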
+static void lt9611_get_video_cfg(struct lt9611 *pdata,
+	struct drm_display_mode *mode,
+	struct lt9611_video_cfg *video_cfg)
+{
+	int rc = 0;
+	struct hdmi_avi_infoframe avi_frame;
+
+	memset(&avi_frame, 0, sizeof(avi_frame));
+
+	video_cfg->h_active = mode->hdisplay;
+	video_cfg->v_active = mode->vdisplay;
+	video_cfg->h_front_porch = mode->hsync_start - mode->hdisplay;
+	video_cfg->v_front_porch = mode->vsync_start - mode->vdisplay;
+	video_cfg->h_back_porch = mode->htotal - mode->hsync_end;
+	video_cfg->v_back_porch = mode->vtotal - mode->vsync_end;
+	video_cfg->h_pulse_width = mode->hsync_end - mode->hsync_start;
+	video_cfg->v_pulse_width = mode->vsync_end - mode->vsync_start;
+	video_cfg->pclk_khz = mode->clock;
+
+	video_cfg->h_polarity = !!(mode->flags & DRM_MODE_FLAG_PHSYNC);
+	video_cfg->v_polarity = !!(mode->flags & DRM_MODE_FLAG_PVSYNC);
+
+	video_cfg->num_of_lanes = lt9611_get_lane_num(pdata, mode);
+	video_cfg->num_of_intfs = lt9611_get_intf_num(pdata, mode);
+
+	pr_debug("video=h[%d,%d,%d,%d] v[%d,%d,%d,%d] pclk=%d lane=%d intf=%d\n",
+		video_cfg->h_active, video_cfg->h_front_porch,
+		video_cfg->h_pulse_width, video_cfg->h_back_porch,
+		video_cfg->v_active, video_cfg->v_front_porch,
+		video_cfg->v_pulse_width, video_cfg->v_back_porch,
+		video_cfg->pclk_khz, video_cfg->num_of_lanes,
+		video_cfg->num_of_intfs);
+
+	rc = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, mode, false);
+	if (rc) {
+		pr_err("get avi frame failed ret=%d\n", rc);
+	} else {
+		video_cfg->scaninfo = avi_frame.scan_mode;
+		video_cfg->ar = avi_frame.picture_aspect;
+		video_cfg->vic = avi_frame.video_code;
+		pr_debug("scaninfo=%d ar=%d vic=%d\n",
+			video_cfg->scaninfo, video_cfg->ar, video_cfg->vic);
+	}
+}
+
+/* connector funcs */
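+/*
+ * HPD is not wired up in this version, so detect unconditionally
+ * reports the sink as connected.
+ */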
+static enum drm_connector_status
+lt9611_connector_detect(struct drm_connector *connector, bool force)
+{
+	struct lt9611 *pdata = connector_to_lt9611(connector);
+
+	pdata->status = connector_status_connected;
+
+	return pdata->status;
+}
+
+static int lt9611_read_edid(struct lt9611 *pdata)
+{
+	return 0;
+}
+
+/* TODO: add support for more extension blocks */
+static int lt9611_get_edid_block(void *data, u8 *buf, unsigned int block,
+				  size_t len)
+{
+	pr_info("get edid block: block=%d, len=%d\n", block, (int)len);
+
+	return 0;
+}
+
+static void lt9611_set_preferred_mode(struct drm_connector *connector)
+{
+	struct lt9611 *pdata = connector_to_lt9611(connector);
+	struct drm_display_mode *mode;
+	const char *string;
+
+	/* use specified mode as preferred */
+	if (!of_property_read_string(pdata->dev->of_node,
+			"lt,preferred-mode", &string)) {
+		list_for_each_entry(mode, &connector->probed_modes, head) {
+			if (!strcmp(mode->name, string))
+				mode->type |= DRM_MODE_TYPE_PREFERRED;
+		}
+	}
+}
+
+static int lt9611_connector_get_modes(struct drm_connector *connector)
+{
+	struct lt9611 *pdata = connector_to_lt9611(connector);
+	struct drm_display_mode *mode, *m;
+	unsigned int count = 0;
+
+	if (pdata->non_pluggable) {
+		list_for_each_entry(mode, &pdata->mode_list, head) {
+			m = drm_mode_duplicate(connector->dev, mode);
+			if (!m) {
+				pr_err("failed to add hdmi mode %dx%d\n",
+					mode->hdisplay, mode->vdisplay);
+				break;
+			}
+			drm_mode_probed_add(connector, m);
+		}
+		count = pdata->num_of_modes;
+	} else {
+		struct edid *edid;
+
+		edid = drm_do_get_edid(connector, lt9611_get_edid_block, pdata);
+
+		drm_connector_update_edid_property(connector, edid);
+		count = drm_add_edid_modes(connector, edid);
+
+		pdata->hdmi_mode = drm_detect_hdmi_monitor(edid);
+		pr_info("hdmi_mode = %d\n", pdata->hdmi_mode);
+
+		kfree(edid);
+	}
+
+	lt9611_set_preferred_mode(connector);
+
+	return count;
+}
+
+static enum drm_mode_status lt9611_connector_mode_valid(
+	struct drm_connector *connector, struct drm_display_mode *mode)
+{
+	struct lt9611_timing_info *timing =
+			lt9611_get_supported_timing(mode);
+
+	return timing ? MODE_OK : MODE_BAD;
+}
+
+/* bridge funcs */
+static void lt9611_bridge_enable(struct drm_bridge *bridge)
+{
+	pr_debug("bridge enable\n");
+}
+
+static void lt9611_bridge_disable(struct drm_bridge *bridge)
+{
+	pr_debug("bridge disable\n");
+}
+
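+/*
+ * Cache the adjusted mode and, if the required DSI lane count differs
+ * from the current one, detach and re-attach the DSI device with the
+ * new lane configuration.
+ */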
+static void lt9611_bridge_mode_set(struct drm_bridge *bridge,
+				    struct drm_display_mode *mode,
+				    struct drm_display_mode *adj_mode)
+{
+	struct lt9611 *pdata = bridge_to_lt9611(bridge);
+	struct lt9611_video_cfg *video_cfg = &pdata->video_cfg;
+	int ret = 0;
+
+	pr_debug("hdisplay=%d, vdisplay=%d, vrefresh=%d, clock=%d\n",
+		adj_mode->hdisplay, adj_mode->vdisplay,
+		adj_mode->vrefresh, adj_mode->clock);
+
+	drm_mode_copy(&pdata->curr_mode, adj_mode);
+
+	memset(video_cfg, 0, sizeof(struct lt9611_video_cfg));
+	lt9611_get_video_cfg(pdata, adj_mode, video_cfg);
+
+	/* TODO: update intf number of host */
+	if (video_cfg->num_of_lanes != pdata->dsi->lanes) {
+		mipi_dsi_detach(pdata->dsi);
+		pdata->dsi->lanes = video_cfg->num_of_lanes;
+		ret = mipi_dsi_attach(pdata->dsi);
+		if (ret)
+			pr_err("failed to change host lanes\n");
+	}
+}
+
+static const struct drm_connector_helper_funcs
+		lt9611_connector_helper_funcs = {
+	.get_modes = lt9611_connector_get_modes,
+	.mode_valid = lt9611_connector_mode_valid,
+};
+
+static const struct drm_connector_funcs lt9611_connector_funcs = {
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.detect = lt9611_connector_detect,
+	.destroy = drm_connector_cleanup,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int lt9611_bridge_attach(struct drm_bridge *bridge)
+{
+	struct mipi_dsi_host *host;
+	struct mipi_dsi_device *dsi;
+	struct lt9611 *pdata = bridge_to_lt9611(bridge);
+	int ret;
+	const struct mipi_dsi_device_info info = { .type = "lt9611",
+						   .channel = 0,
+						   .node = NULL,
+						 };
+
+	if (!bridge->encoder) {
+		DRM_ERROR("Parent encoder object not found");
+		return -ENODEV;
+	}
+
+	ret = drm_connector_init(bridge->dev, &pdata->connector,
+				 &lt9611_connector_funcs,
+				 DRM_MODE_CONNECTOR_HDMIA);
+	if (ret) {
+		DRM_ERROR("Failed to initialize connector: %d\n", ret);
+		return ret;
+	}
+
+	drm_connector_helper_add(&pdata->connector,
+				 &lt9611_connector_helper_funcs);
+
+	ret = drm_connector_register(&pdata->connector);
+	if (ret) {
+		DRM_ERROR("Failed to register connector: %d\n", ret);
+		return ret;
+	}
+
+	pdata->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
+
+	ret = drm_connector_attach_encoder(&pdata->connector,
+						bridge->encoder);
+	if (ret) {
+		DRM_ERROR("Failed to link up connector to encoder: %d\n", ret);
+		return ret;
+	}
+
+	host = of_find_mipi_dsi_host_by_node(pdata->host_node);
+	if (!host) {
+		pr_err("failed to find dsi host\n");
+		return -EPROBE_DEFER;
+	}
+
+	dsi = mipi_dsi_device_register_full(host, &info);
+	if (IS_ERR(dsi)) {
+		pr_err("failed to create dsi device\n");
+		ret = PTR_ERR(dsi);
+		goto err_dsi_device;
+	}
+
+	dsi->lanes = 4;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+			  MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO_BLLP |
+			  MIPI_DSI_MODE_VIDEO_EOF_BLLP;
+
+	ret = mipi_dsi_attach(dsi);
+	if (ret < 0) {
+		pr_err("failed to attach dsi to host\n");
+		goto err_dsi_attach;
+	}
+
+	pdata->dsi = dsi;
+
+	return 0;
+
+err_dsi_attach:
+	mipi_dsi_device_unregister(dsi);
+err_dsi_device:
+	return ret;
+}
+
+static void lt9611_bridge_pre_enable(struct drm_bridge *bridge)
+{
+	struct lt9611 *pdata = bridge_to_lt9611(bridge);
+
+	pr_debug("bridge pre_enable\n");
+	lt9611_reset(pdata, true);
+}
+
+static bool lt9611_bridge_mode_fixup(struct drm_bridge *bridge,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void lt9611_bridge_post_disable(struct drm_bridge *bridge)
+{
+	pr_debug("bridge post_disable\n");
+}
+
+static const struct drm_bridge_funcs lt9611_bridge_funcs = {
+	.attach = lt9611_bridge_attach,
+	.mode_fixup   = lt9611_bridge_mode_fixup,
+	.pre_enable   = lt9611_bridge_pre_enable,
+	.enable = lt9611_bridge_enable,
+	.disable = lt9611_bridge_disable,
+	.post_disable = lt9611_bridge_post_disable,
+	.mode_set = lt9611_bridge_mode_set,
+};
+
+/* sysfs */
+static int lt9611_dump_debug_info(struct lt9611 *pdata)
+{
+	if (!pdata->power_on) {
+		pr_err("device is not powered on\n");
+		return -EINVAL;
+	}
+
+	lt9611_read_edid(pdata);
+
+	return 0;
+}
+
+static ssize_t dump_info_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t count)
+{
+	struct lt9611 *pdata = dev_get_drvdata(dev);
+
+	if (!pdata) {
+		pr_err("pdata is NULL\n");
+		return -EINVAL;
+	}
+
+	lt9611_dump_debug_info(pdata);
+
+	return count;
+}
+
+static ssize_t firmware_upgrade_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t count)
+{
+	struct lt9611 *pdata = dev_get_drvdata(dev);
+	int ret = 0;
+
+	if (!pdata) {
+		pr_err("pdata is NULL\n");
+		return -EINVAL;
+	}
+
+	ret = request_firmware_nowait(THIS_MODULE, true,
+		"lt9611_fw.bin", &pdata->i2c_client->dev, GFP_KERNEL, pdata,
+		lt9611_firmware_cb);
+	if (ret)
+		dev_err(&pdata->i2c_client->dev,
+			"Failed to invoke firmware loader: %d\n", ret);
+	else
+		pr_info("LT9611 firmware upgrade started, this takes about 40s\n");
+
+	return count;
+}
+
+static ssize_t firmware_upgrade_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct lt9611 *pdata = dev_get_drvdata(dev);
+
+	return snprintf(buf, 4, "%d\n", pdata->fw_status);
+}
+
+static DEVICE_ATTR_WO(dump_info);
+static DEVICE_ATTR_RW(firmware_upgrade);
+
+static struct attribute *lt9611_sysfs_attrs[] = {
+	&dev_attr_dump_info.attr,
+	&dev_attr_firmware_upgrade.attr,
+	NULL,
+};
+
+static struct attribute_group lt9611_sysfs_attr_grp = {
+	.attrs = lt9611_sysfs_attrs,
+};
+
+static int lt9611_sysfs_init(struct device *dev)
+{
+	int rc = 0;
+
+	if (!dev) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = sysfs_create_group(&dev->kobj, &lt9611_sysfs_attr_grp);
+	if (rc)
+		pr_err("%s: sysfs group creation failed %d\n", __func__, rc);
+
+	return rc;
+}
+
+static void lt9611_sysfs_remove(struct device *dev)
+{
+	if (!dev) {
+		pr_err("%s: Invalid params\n", __func__);
+		return;
+	}
+
+	sysfs_remove_group(&dev->kobj, &lt9611_sysfs_attr_grp);
+}
+
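+/*
+ * Probe: parse DT, set up supplies and GPIOs, reset the chip and read
+ * its device ID, then either register the DRM bridge (if the firmware
+ * is already current) or kick off an asynchronous firmware upgrade and
+ * return without registering the bridge.
+ */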
+static int lt9611_probe(struct i2c_client *client,
+	 const struct i2c_device_id *id)
+{
+	struct lt9611 *pdata;
+	int ret = 0;
+
+	if (!client || !client->dev.of_node) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		pr_err("device doesn't support I2C\n");
+		return -ENODEV;
+	}
+
+	pdata = devm_kzalloc(&client->dev,
+		sizeof(struct lt9611), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	ret = lt9611_parse_dt(&client->dev, pdata);
+	if (ret) {
+		pr_err("failed to parse device tree\n");
+		goto err_dt_parse;
+	}
+
+	ret = lt9611_get_dt_supply(&client->dev, pdata);
+	if (ret) {
+		pr_err("failed to get dt supply\n");
+		goto err_dt_parse;
+	}
+
+	pdata->dev = &client->dev;
+	pdata->i2c_client = client;
+
+	ret = lt9611_gpio_configure(pdata, true);
+	if (ret) {
+		pr_err("failed to configure GPIOs\n");
+		goto err_dt_supply;
+	}
+
+	lt9611_assert_5v(pdata);
+
+	ret = lt9611_enable_vreg(pdata, true);
+	if (ret) {
+		pr_err("failed to enable vreg\n");
+		goto err_i2c_prog;
+	}
+
+	lt9611_reset(pdata, true);
+
+	pdata->irq = gpio_to_irq(pdata->irq_gpio);
+	ret = request_threaded_irq(pdata->irq, NULL, lt9611_irq_thread_handler,
+		IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "lt9611", pdata);
+	if (ret) {
+		pr_err("failed to request irq\n");
+		goto err_i2c_prog;
+	}
+
+	ret = lt9611_read_device_id(pdata);
+	if (ret) {
+		pr_err("failed to read chip rev\n");
+		goto err_sysfs_init;
+	}
+
+	msleep(200);
+
+	i2c_set_clientdata(client, pdata);
+	dev_set_drvdata(&client->dev, pdata);
+
+	if (lt9611_get_version(pdata) == VERSION_NUM) {
+		pr_info("LT9611 firmware is current, no upgrade needed\n");
+	} else {
+		ret = request_firmware_nowait(THIS_MODULE, true,
+			"lt9611_fw.bin", &client->dev, GFP_KERNEL, pdata,
+				lt9611_firmware_cb);
+		if (ret) {
+			dev_err(&client->dev,
+				"Failed to invoke firmware loader: %d\n", ret);
+			goto err_sysfs_init;
+		}
+
+		return 0;
+	}
+
+	ret = lt9611_sysfs_init(&client->dev);
+	if (ret) {
+		pr_err("sysfs init failed\n");
+		goto err_sysfs_init;
+	}
+
+#if IS_ENABLED(CONFIG_OF)
+	pdata->bridge.of_node = client->dev.of_node;
+#endif
+
+	pdata->bridge.funcs = &lt9611_bridge_funcs;
+	drm_bridge_add(&pdata->bridge);
+
+	return 0;
+
+err_sysfs_init:
+	disable_irq(pdata->irq);
+	free_irq(pdata->irq, pdata);
+err_i2c_prog:
+	lt9611_gpio_configure(pdata, false);
+err_dt_supply:
+	lt9611_put_dt_supply(&client->dev, pdata);
+err_dt_parse:
+	devm_kfree(&client->dev, pdata);
+	return ret;
+}
+
+static int lt9611_remove(struct i2c_client *client)
+{
+	int ret = -EINVAL;
+	struct lt9611 *pdata = i2c_get_clientdata(client);
+	struct drm_display_mode *mode, *n;
+
+	if (!pdata)
+		goto end;
+
+	mipi_dsi_detach(pdata->dsi);
+	mipi_dsi_device_unregister(pdata->dsi);
+
+	drm_bridge_remove(&pdata->bridge);
+
+	lt9611_sysfs_remove(&client->dev);
+
+	disable_irq(pdata->irq);
+	free_irq(pdata->irq, pdata);
+
+	ret = lt9611_gpio_configure(pdata, false);
+
+	lt9611_put_dt_supply(&client->dev, pdata);
+
+	if (pdata->non_pluggable) {
+		list_for_each_entry_safe(mode, n, &pdata->mode_list, head) {
+			list_del(&mode->head);
+			kfree(mode);
+		}
+	}
+
+	devm_kfree(&client->dev, pdata);
+
+end:
+	return ret;
+}
+
+static struct i2c_device_id lt9611_id[] = {
+	{ "lt,lt9611uxc", 0},
+	{}
+};
+
+static const struct of_device_id lt9611_match_table[] = {
+	{.compatible = "lt,lt9611uxc"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, lt9611_match_table);
+
+static struct i2c_driver lt9611_driver = {
+	.driver = {
+		.name = "lt9611",
+		.of_match_table = lt9611_match_table,
+	},
+	.probe = lt9611_probe,
+	.remove = lt9611_remove,
+	.id_table = lt9611_id,
+};
+
+static int __init lt9611_init(void)
+{
+	return i2c_add_driver(&lt9611_driver);
+}
+
+static void __exit lt9611_exit(void)
+{
+	i2c_del_driver(&lt9611_driver);
+}
+
+module_init(lt9611_init);
+module_exit(lt9611_exit);
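+
+/*
+ * The SPDX header above declares GPL-2.0-only; declare a matching
+ * module license so loading the driver does not taint the kernel.
+ */
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Lontium LT9611UXC DSI/HDMI bridge driver");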
+
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index aaca524..d728b6c 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -302,7 +302,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
 			       struct drm_dp_aux_msg *msg)
 {
 	struct tc_data *tc = aux_to_tc(aux);
-	size_t size = min_t(size_t, 8, msg->size);
+	size_t size = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES - 1, msg->size);
 	u8 request = msg->request & ~DP_AUX_I2C_MOT;
 	u8 *buf = msg->buffer;
 	u32 tmp = 0;
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index c3e3213..9dc109d 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -64,7 +64,12 @@ static int tfp410_get_modes(struct drm_connector *connector)
 
 	drm_connector_update_edid_property(connector, edid);
 
-	return drm_add_edid_modes(connector, edid);
+	ret = drm_add_edid_modes(connector, edid);
+
+	kfree(edid);
+
+	return ret;
+
 fallback:
 	/* No EDID, fallback on the XGA standard modes */
 	ret = drm_add_modes_noedid(connector, 1920, 1200);
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 86026a5..cb3cc5a 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1706,6 +1706,27 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
 	struct drm_connector *connector = conn_state->connector;
 	struct drm_crtc_state *crtc_state;
 
+	/*
+	 * For compatibility with legacy users, we want to make sure that
+	 * we allow DPMS On<->Off modesets on unregistered connectors, since
+	 * legacy modesetting users will not be expecting these to fail. We do
+	 * not however, want to allow legacy users to assign a connector
+	 * that's been unregistered from sysfs to another CRTC, since doing
+	 * this with a now non-existent connector could potentially leave us
+	 * in an invalid state.
+	 *
+	 * Since the connector can be unregistered at any point during an
+	 * atomic check or commit, this is racy. But that's OK: all we care
+	 * about is ensuring that userspace can't use this connector for new
+	 * configurations after it's been notified that the connector is no
+	 * longer present.
+	 */
+	if (!READ_ONCE(connector->registered) && crtc) {
+		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n",
+				 connector->base.id, connector->name);
+		return -EINVAL;
+	}
+
 	if (conn_state->crtc == crtc)
 		return 0;
 
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 751e358..f946ef7 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -56,8 +56,6 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
 					   struct drm_dp_mst_branch *mstb,
 					   struct drm_dp_mst_port *port);
-static int drm_dp_send_clear_payload_table(struct drm_dp_mst_topology_mgr *mgr,
-				  struct drm_dp_mst_branch *mstb);
 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
 				 u8 *guid);
 
@@ -591,8 +589,6 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
 	case DP_POWER_DOWN_PHY:
 	case DP_POWER_UP_PHY:
 		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
-	case DP_CLEAR_PAYLOAD_ID_TABLE:
-		return true;
 	default:
 		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
 		return false;
@@ -735,15 +731,6 @@ static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
 	return 0;
 }
 
-static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
-{
-	struct drm_dp_sideband_msg_req_body req;
-
-	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
-	drm_dp_encode_sideband_req(&req, msg);
-	return 0;
-}
-
 static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
 					struct drm_dp_vcpi *vcpi)
 {
@@ -1176,6 +1163,14 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
 		port->aux.dev = dev->dev;
 		created = true;
 	} else {
+		if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
+				port->pdt == DP_PEER_DEVICE_SST_SINK) {
+			if (!port->cached_edid)
+				port->cached_edid = drm_get_edid(
+						port->connector,
+						&port->aux.ddc);
+		}
+
 		old_pdt = port->pdt;
 		old_ddps = port->ddps;
 	}
@@ -1249,6 +1244,7 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
 	int old_pdt;
 	int old_ddps;
 	bool dowork = false;
+	bool dohotplug = false;
 	port = drm_dp_get_port(mstb, conn_stat->port_number);
 	if (!port)
 		return;
@@ -1273,6 +1269,7 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
 		    port->port_num < DP_MST_LOGICAL_PORT_0) {
 			kfree(port->cached_edid);
 			port->cached_edid = NULL;
+			dohotplug = true;
 		}
 
 		drm_dp_port_teardown_pdt(port, old_pdt);
@@ -1285,6 +1282,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
 	if (dowork)
 		queue_work(system_long_wq, &mstb->mgr->work);
 
+	if (dohotplug)
+		(*mstb->mgr->cbs->hotplug)(mstb->mgr);
 }
 
 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
@@ -1365,6 +1364,40 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
 	return mstb;
 }
 
+static void drm_dp_reset_sink_mstb_link_address_sent(
+		struct drm_dp_mst_topology_mgr *mgr,
+		struct drm_dp_mst_branch *mstb)
+{
+	struct drm_dp_mst_port *port;
+	struct drm_dp_mst_branch *mstb_child;
+	struct drm_dp_mst_branch *mstb_parent;
+
+	list_for_each_entry(port, &mstb->ports, next) {
+		if (port->input)
+			continue;
+
+		if (!port->ddps)
+			continue;
+
+		if (port->mstb) {
+			mstb_child = drm_dp_get_validated_mstb_ref(mgr,
+					port->mstb);
+			if (mstb_child) {
+				drm_dp_reset_sink_mstb_link_address_sent(mgr,
+						mstb_child);
+				drm_dp_put_mst_branch_device(mstb_child);
+			}
+		} else {
+			mstb_parent = drm_dp_get_validated_mstb_ref(mgr,
+					port->parent);
+			if (mstb_parent) {
+				mstb_parent->link_address_sent = false;
+				drm_dp_put_mst_branch_device(mstb_parent);
+			}
+		}
+	}
+}
+
 static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 					       struct drm_dp_mst_branch *mstb)
 {
@@ -1405,6 +1438,7 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
 	}
 	mutex_unlock(&mgr->lock);
 	if (mstb) {
+		drm_dp_reset_sink_mstb_link_address_sent(mgr, mstb);
 		drm_dp_check_and_send_link_address(mgr, mstb);
 		drm_dp_put_mst_branch_device(mstb);
 	}
@@ -1830,32 +1864,6 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
 }
 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
 
-static int drm_dp_send_clear_payload_table(struct drm_dp_mst_topology_mgr *mgr,
-				  struct drm_dp_mst_branch *mstb)
-{
-	struct drm_dp_sideband_msg_tx *txmsg;
-	int len, ret;
-
-	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
-	if (!txmsg)
-		return -ENOMEM;
-
-	txmsg->dst = mstb;
-	len = build_clear_payload_id_table(txmsg);
-	drm_dp_queue_down_tx(mgr, txmsg);
-
-	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
-	if (ret > 0) {
-		if (txmsg->reply.reply_type == 1)
-			ret = -EINVAL;
-		else
-			ret = 0;
-	}
-	kfree(txmsg);
-
-	return ret;
-}
-
 int drm_dp_mst_get_dsc_info(struct drm_dp_mst_topology_mgr *mgr,
 		struct drm_dp_mst_port *port,
 		struct drm_dp_mst_dsc_info *dsc_info)
@@ -2315,8 +2323,6 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
 			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
 		}
 
-		drm_dp_send_clear_payload_table(mgr, mstb);
-
 		queue_work(system_long_wq, &mgr->work);
 
 		ret = 0;
@@ -2568,8 +2574,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 			drm_dp_update_port(mstb, &msg.u.conn_stat);
 
 			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
-			(*mgr->cbs->hotplug)(mgr);
-
 		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
 			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
 			if (!mstb)
@@ -3506,6 +3510,8 @@ static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
  */
 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
 {
+	int rc;
+
 	aux->ddc.algo = &drm_dp_mst_i2c_algo;
 	aux->ddc.algo_data = aux;
 	aux->ddc.retries = 3;
@@ -3518,7 +3524,11 @@ static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
 	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
 		sizeof(aux->ddc.name));
 
-	return i2c_add_adapter(&aux->ddc);
+	mutex_lock(&aux->i2c_mutex);
+	rc = i2c_add_adapter(&aux->ddc);
+	mutex_unlock(&aux->i2c_mutex);
+
+	return rc;
 }
 
 /**
@@ -3527,5 +3537,7 @@ static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
  */
 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
 {
+	mutex_lock(&aux->i2c_mutex);
 	i2c_del_adapter(&aux->ddc);
+	mutex_unlock(&aux->i2c_mutex);
 }
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 650554a..1c1fee0 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -174,6 +174,9 @@ static const struct edid_quirk {
 	/* Medion MD 30217 PG */
 	{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
 
+	/* Lenovo G50 */
+	{ "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
+
 	/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
 	{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
 
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 5f278c6..29a57e1 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -264,6 +264,18 @@ void drm_file_free(struct drm_file *file)
 	kfree(file);
 }
 
+static void drm_close_helper(struct file *filp)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_device *dev = file_priv->minor->dev;
+
+	mutex_lock(&dev->filelist_mutex);
+	list_del(&file_priv->lhead);
+	mutex_unlock(&dev->filelist_mutex);
+
+	drm_file_free(file_priv);
+}
+
 static int drm_setup(struct drm_device * dev)
 {
 	int ret;
@@ -325,8 +337,10 @@ int drm_open(struct inode *inode, struct file *filp)
 		goto err_undo;
 	if (need_setup) {
 		retcode = drm_setup(dev);
-		if (retcode)
+		if (retcode) {
+			drm_close_helper(filp);
 			goto err_undo;
+		}
 	}
 	return 0;
 
@@ -480,11 +494,7 @@ int drm_release(struct inode *inode, struct file *filp)
 
 	DRM_DEBUG("open_count = %d\n", dev->open_count);
 
-	mutex_lock(&dev->filelist_mutex);
-	list_del(&file_priv->lhead);
-	mutex_unlock(&dev->filelist_mutex);
-
-	drm_file_free(file_priv);
+	drm_close_helper(filp);
 
 	if (!--dev->open_count)
 		drm_lastclose(dev);
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 138680b..f867223 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -185,7 +185,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
 	m32.size = map.size;
 	m32.type = map.type;
 	m32.flags = map.flags;
-	m32.handle = ptr_to_compat(map.handle);
+	m32.handle = ptr_to_compat((void __user *)map.handle);
 	m32.mtrr = map.mtrr;
 	if (copy_to_user(argp, &m32, sizeof(m32)))
 		return -EFAULT;
@@ -216,7 +216,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
 
 	m32.offset = map.offset;
 	m32.mtrr = map.mtrr;
-	m32.handle = ptr_to_compat(map.handle);
+	m32.handle = ptr_to_compat((void __user *)map.handle);
 	if (map.handle != compat_ptr(m32.handle))
 		pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n",
 				   map.handle, m32.type, m32.offset);
@@ -529,7 +529,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
 	if (err)
 		return err;
 
-	req32.handle = ptr_to_compat(req.handle);
+	req32.handle = ptr_to_compat((void __user *)req.handle);
 	if (copy_to_user(argp, &req32, sizeof(req32)))
 		return -EFAULT;
 
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index b44bed5..cc354b4 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -82,6 +82,12 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
 	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
 };
 
+static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
+	.width = 720,
+	.height = 1280,
+	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
 static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
 	.width = 800,
 	.height = 1280,
@@ -109,6 +115,12 @@ static const struct dmi_system_id orientation_data[] = {
 		  DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
 		},
 		.driver_data = (void *)&gpd_micropc,
+	}, {	/* GPD MicroPC (later BIOS versions with proper DMI strings) */
+		.matches = {
+		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
+		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MicroPC"),
+		},
+		.driver_data = (void *)&lcd720x1280_rightside_up,
 	}, {	/*
 		 * GPD Pocket, note that the DMI data is less generic than
 		 * it seems, devices with a board-vendor of "AMI Corporation"
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index d18b7e2..c0b2613 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -581,6 +581,9 @@ static void output_poll_execute(struct work_struct *work)
 	enum drm_connector_status old_status;
 	bool repoll = false, changed;
 
+	if (!dev->mode_config.poll_enabled)
+		return;
+
 	/* Pick up any changes detected by the probe functions. */
 	changed = dev->mode_config.delayed_event;
 	dev->mode_config.delayed_event = false;
@@ -735,7 +738,11 @@ EXPORT_SYMBOL(drm_kms_helper_poll_init);
  */
 void drm_kms_helper_poll_fini(struct drm_device *dev)
 {
-	drm_kms_helper_poll_disable(dev);
+	if (!dev->mode_config.poll_enabled)
+		return;
+
+	dev->mode_config.poll_enabled = false;
+	cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_fini);
 
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 450a51b..063a0fc 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -793,8 +793,10 @@ int drm_mode_createblob_ioctl(struct drm_device *dev,
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
 
+	mutex_lock(&dev->mode_config.blob_lock);
 	list_for_each_entry(bt, &file_priv->blobs, head_file)
 		count++;
+	mutex_unlock(&dev->mode_config.blob_lock);
 
 	if (count >= MAX_BLOB_PROP_COUNT)
 		return -EINVAL;
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 28cdcf7..d1859bc 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -105,13 +105,20 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
 	write_sequnlock(&vblank->seqlock);
 }
 
+static u32 drm_max_vblank_count(struct drm_device *dev, unsigned int pipe)
+{
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+	return vblank->max_vblank_count ?: dev->max_vblank_count;
+}
+
 /*
  * "No hw counter" fallback implementation of .get_vblank_counter() hook,
  * if there is no useable hardware frame counter available.
  */
 static u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe)
 {
-	WARN_ON_ONCE(dev->max_vblank_count != 0);
+	WARN_ON_ONCE(drm_max_vblank_count(dev, pipe) != 0);
 	return 0;
 }
 
@@ -198,6 +205,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
 	ktime_t t_vblank;
 	int count = DRM_TIMESTAMP_MAXRETRIES;
 	int framedur_ns = vblank->framedur_ns;
+	u32 max_vblank_count = drm_max_vblank_count(dev, pipe);
 
 	/*
 	 * Interrupts were disabled prior to this call, so deal with counter
@@ -216,9 +224,9 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
 		rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, in_vblank_irq);
 	} while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0);
 
-	if (dev->max_vblank_count != 0) {
+	if (max_vblank_count) {
 		/* trust the hw counter when it's around */
-		diff = (cur_vblank - vblank->last) & dev->max_vblank_count;
+		diff = (cur_vblank - vblank->last) & max_vblank_count;
 	} else if (rc && framedur_ns) {
 		u64 diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time));
 
@@ -1205,6 +1213,37 @@ void drm_crtc_vblank_reset(struct drm_crtc *crtc)
 EXPORT_SYMBOL(drm_crtc_vblank_reset);
 
 /**
+ * drm_crtc_set_max_vblank_count - configure the hw max vblank counter value
+ * @crtc: CRTC in question
+ * @max_vblank_count: max hardware vblank counter value
+ *
+ * Update the maximum hardware vblank counter value for @crtc
+ * at runtime. Useful for hardware where the operation of the
+ * hardware vblank counter depends on the currently active
+ * display configuration.
+ *
+ * For example, if the hardware vblank counter does not work
+ * when a specific connector is active, the maximum can be set
+ * to zero; when that specific connector isn't active, the
+ * maximum can again be set to the appropriate non-zero value.
+ *
+ * If used, must be called before drm_vblank_on().
+ */
+void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
+				   u32 max_vblank_count)
+{
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = drm_crtc_index(crtc);
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+	WARN_ON(dev->max_vblank_count);
+	WARN_ON(!READ_ONCE(vblank->inmodeset));
+
+	vblank->max_vblank_count = max_vblank_count;
+}
+EXPORT_SYMBOL(drm_crtc_set_max_vblank_count);
+
+/**
  * drm_crtc_vblank_on - enable vblank events on a CRTC
  * @crtc: CRTC in question
  *
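
The helper added above gives drivers a per-CRTC override of the hardware counter width. A minimal usage sketch follows, assuming a hypothetical driver hook (the foo_* names are illustrative, not from this patch); note the WARN_ONs require that the device-wide dev->max_vblank_count stays zero and that the call lands while the CRTC is still in modeset, i.e. after drm_crtc_vblank_off()/drm_crtc_vblank_reset() and before drm_crtc_vblank_on():

	/* Hypothetical modeset path; foo_* symbols are assumptions. */
	static void foo_crtc_update_vblank(struct drm_crtc *crtc)
	{
		if (foo_config_breaks_hw_counter(crtc))
			/* Force the "no hw counter" fallback path. */
			drm_crtc_set_max_vblank_count(crtc, 0);
		else
			drm_crtc_set_max_vblank_count(crtc, 0xffffff);
	}
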
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 0ddb6ee..df22843 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -108,12 +108,12 @@ static inline int scaler_reset(struct scaler_context *scaler)
 	scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
 	do {
 		cpu_relax();
-	} while (retry > 1 &&
+	} while (--retry > 1 &&
 		 scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
 	do {
 		cpu_relax();
 		scaler_write(1, SCALER_INT_EN);
-	} while (retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+	} while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
 
 	return retry ? 0 : -EIO;
 }
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 663a7c9..d0e216d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1276,9 +1276,6 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
 		((a)->lrca == (b)->lrca))
 
-#define get_last_workload(q) \
-	(list_empty(q) ? NULL : container_of(q->prev, \
-	struct intel_vgpu_workload, list))
 /**
  * intel_vgpu_create_workload - create a vGPU workload
  * @vgpu: a vGPU
@@ -1297,7 +1294,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct list_head *q = workload_q_head(vgpu, ring_id);
-	struct intel_vgpu_workload *last_workload = get_last_workload(q);
+	struct intel_vgpu_workload *last_workload = NULL;
 	struct intel_vgpu_workload *workload = NULL;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	u64 ring_context_gpa;
@@ -1320,15 +1317,20 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	head &= RB_HEAD_OFF_MASK;
 	tail &= RB_TAIL_OFF_MASK;
 
-	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
-		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
-		gvt_dbg_el("ctx head %x real head %lx\n", head,
-				last_workload->rb_tail);
-		/*
-		 * cannot use guest context head pointer here,
-		 * as it might not be updated at this time
-		 */
-		head = last_workload->rb_tail;
+	list_for_each_entry_reverse(last_workload, q, list) {
+		if (same_context(&last_workload->ctx_desc, desc)) {
+			gvt_dbg_el("ring id %d cur workload == last\n",
+					ring_id);
+			gvt_dbg_el("ctx head %x real head %lx\n", head,
+					last_workload->rb_tail);
+			/*
+			 * cannot use guest context head pointer here,
+			 * as it might not be updated at this time
+			 */
+			head = last_workload->rb_tail;
+			break;
+		}
 	}
 
 	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index f9ce35d..e063e98 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1788,6 +1788,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 	if (!IS_GEN5(dev_priv))
 		return -ENODEV;
 
+	intel_runtime_pm_get(dev_priv);
+
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
@@ -1802,6 +1804,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 	seq_printf(m, "GFX power: %ld\n", gfx);
 	seq_printf(m, "Total power: %ld\n", chipset + gfx);
 
+	intel_runtime_pm_put(dev_priv);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f8cfd16..a4b4ab7 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1120,6 +1120,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
 	pci_set_master(pdev);
 
+	/*
+	 * We don't have a max segment size limit, so set it to the maximum
+	 * so the scatter-gather debug layer doesn't complain.
+	 */
+	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
 	/* overlay on gen2 is broken and can't address above 1G */
 	if (IS_GEN2(dev_priv)) {
 		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
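
For context, dma_set_max_seg_size() simply records the limit in the device's dma_parms so the DMA API (and CONFIG_DMA_API_DEBUG) can check scatterlist segments against it; the in-kernel inline helper is essentially the following sketch:

	/* Simplified form of the dma-mapping.h inline helper. */
	static inline int dma_set_max_seg_size(struct device *dev,
					       unsigned int size)
	{
		if (!dev->dma_parms)
			return -EIO;
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
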
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 03cda19..9372877 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1874,20 +1874,28 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	 * pages from.
 	 */
 	if (!obj->base.filp) {
-		i915_gem_object_put(obj);
-		return -ENXIO;
+		addr = -ENXIO;
+		goto err;
+	}
+
+	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
+		addr = -EINVAL;
+		goto err;
 	}
 
 	addr = vm_mmap(obj->base.filp, 0, args->size,
 		       PROT_READ | PROT_WRITE, MAP_SHARED,
 		       args->offset);
+	if (IS_ERR_VALUE(addr))
+		goto err;
+
 	if (args->flags & I915_MMAP_WC) {
 		struct mm_struct *mm = current->mm;
 		struct vm_area_struct *vma;
 
 		if (down_write_killable(&mm->mmap_sem)) {
-			i915_gem_object_put(obj);
-			return -EINTR;
+			addr = -EINTR;
+			goto err;
 		}
 		vma = find_vma(mm, addr);
 		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
@@ -1896,17 +1904,20 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		else
 			addr = -ENOMEM;
 		up_write(&mm->mmap_sem);
+		if (IS_ERR_VALUE(addr))
+			goto err;
 
 		/* This may race, but that's ok, it only gets set */
 		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
 	}
 	i915_gem_object_put(obj);
-	if (IS_ERR((void *)addr))
-		return addr;
 
 	args->addr_ptr = (uint64_t) addr;
-
 	return 0;
+
+err:
+	i915_gem_object_put(obj);
+	return addr;
 }
 
 static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
@@ -5595,6 +5606,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		i915_gem_cleanup_userptr(dev_priv);
 
 	if (ret == -EIO) {
+		mutex_lock(&dev_priv->drm.struct_mutex);
+
 		/*
 		 * Allow engine initialisation to fail by marking the GPU as
 		 * wedged. But we only want to do this where the GPU is angry,
@@ -5605,7 +5618,14 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 					"Failed to initialize GPU, declaring it wedged!\n");
 			i915_gem_set_wedged(dev_priv);
 		}
-		ret = 0;
+
+		/* Minimal basic recovery for KMS */
+		ret = i915_ggtt_enable_hw(dev_priv);
+		i915_gem_restore_gtt_mappings(dev_priv);
+		i915_gem_restore_fences(dev_priv);
+		intel_init_clock_gating(dev_priv);
+
+		mutex_unlock(&dev_priv->drm.struct_mutex);
 	}
 
 	i915_gem_drain_freed_objects(dev_priv);
@@ -5615,6 +5635,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 void i915_gem_fini(struct drm_i915_private *dev_priv)
 {
 	i915_gem_suspend_late(dev_priv);
+	intel_disable_gt_powersave(dev_priv);
 
 	/* Flush any outstanding unpin_work. */
 	i915_gem_drain_workqueue(dev_priv);
@@ -5626,6 +5647,8 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
 	i915_gem_contexts_fini(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
+	intel_cleanup_gt_powersave(dev_priv);
+
 	intel_uc_fini_misc(dev_priv);
 	i915_gem_cleanup_userptr(dev_priv);
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 16f5d2d..4e070af 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6531,7 +6531,7 @@ enum {
 #define   PLANE_CTL_YUV422_UYVY			(1 << 16)
 #define   PLANE_CTL_YUV422_YVYU			(2 << 16)
 #define   PLANE_CTL_YUV422_VYUY			(3 << 16)
-#define   PLANE_CTL_DECOMPRESSION_ENABLE	(1 << 15)
+#define   PLANE_CTL_RENDER_DECOMPRESSION_ENABLE	(1 << 15)
 #define   PLANE_CTL_TRICKLE_FEED_DISABLE	(1 << 14)
 #define   PLANE_CTL_PLANE_GAMMA_DISABLE		(1 << 13) /* Pre-GLK */
 #define   PLANE_CTL_TILED_MASK			(0x7 << 10)
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 869cf4a..a6cb3e0 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -100,6 +100,9 @@ static struct _balloon_info_ bl_info;
 static void vgt_deballoon_space(struct i915_ggtt *ggtt,
 				struct drm_mm_node *node)
 {
+	if (!drm_mm_node_allocated(node))
+		return;
+
 	DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
 			 node->start,
 			 node->start + node->size,
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 29075c7..7b4906e 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -2209,6 +2209,17 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
 		min_cdclk = max(2 * 96000, min_cdclk);
 
 	/*
+	 * "For DP audio configuration, cdclk frequency shall be set to
+	 *  meet the following requirements:
+	 *  DP Link Frequency(MHz) | Cdclk frequency(MHz)
+	 *  270                    | 320 or higher
+	 *  162                    | 200 or higher"
+	 */
+	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+	    intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
+		min_cdclk = max(crtc_state->port_clock, min_cdclk);
+
+	/*
 	 * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
 	 * than 320000KHz.
 	 */
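
Since port_clock is stored in kHz, using it as the floor reproduces the table above once the cdclk selection rounds up to the next supported frequency; a worked example for a 2.7 GHz (HBR) DP link on VLV:

	/* Worked example, values in kHz (illustrative only). */
	min_cdclk = max(270000 /* port_clock */, min_cdclk);
	/* The cdclk code then picks 320000, the lowest valid VLV
	 * cdclk >= 270000, matching the "270 -> 320 or higher" row. */
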
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 0ef0c64..01fa982 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -474,7 +474,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
 			u8 eu_disabled_mask;
 			u32 n_disabled;
 
-			if (!(sseu->subslice_mask[ss] & BIT(ss)))
+			if (!(sseu->subslice_mask[s] & BIT(ss)))
 				/* skip disabled subslice */
 				continue;
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3bd44d0..6902fd2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2712,6 +2712,17 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 	if (size_aligned * 2 > dev_priv->stolen_usable_size)
 		return false;
 
+	switch (fb->modifier) {
+	case DRM_FORMAT_MOD_LINEAR:
+	case I915_FORMAT_MOD_X_TILED:
+	case I915_FORMAT_MOD_Y_TILED:
+		break;
+	default:
+		DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
+				 fb->modifier);
+		return false;
+	}
+
 	mutex_lock(&dev->struct_mutex);
 	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
 							     base_aligned,
@@ -2721,8 +2732,17 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 	if (!obj)
 		return false;
 
-	if (plane_config->tiling == I915_TILING_X)
-		obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;
+	switch (plane_config->tiling) {
+	case I915_TILING_NONE:
+		break;
+	case I915_TILING_X:
+	case I915_TILING_Y:
+		obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
+		break;
+	default:
+		MISSING_CASE(plane_config->tiling);
+		return false;
+	}
 
 	mode_cmd.pixel_format = fb->format->format;
 	mode_cmd.width = fb->width;
@@ -3561,11 +3581,11 @@ static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
 	case I915_FORMAT_MOD_Y_TILED:
 		return PLANE_CTL_TILED_Y;
 	case I915_FORMAT_MOD_Y_TILED_CCS:
-		return PLANE_CTL_TILED_Y | PLANE_CTL_DECOMPRESSION_ENABLE;
+		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
 	case I915_FORMAT_MOD_Yf_TILED:
 		return PLANE_CTL_TILED_YF;
 	case I915_FORMAT_MOD_Yf_TILED_CCS:
-		return PLANE_CTL_TILED_YF | PLANE_CTL_DECOMPRESSION_ENABLE;
+		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
 	default:
 		MISSING_CASE(fb_modifier);
 	}
@@ -8812,13 +8832,14 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 		fb->modifier = I915_FORMAT_MOD_X_TILED;
 		break;
 	case PLANE_CTL_TILED_Y:
-		if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
+		plane_config->tiling = I915_TILING_Y;
+		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
 			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
 		else
 			fb->modifier = I915_FORMAT_MOD_Y_TILED;
 		break;
 	case PLANE_CTL_TILED_YF:
-		if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
+		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
 			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
 		else
 			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
@@ -15951,8 +15972,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	flush_work(&dev_priv->atomic_helper.free_work);
 	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
 
-	intel_disable_gt_powersave(dev_priv);
-
 	/*
 	 * Interrupts and polling as the first thing to avoid creating havoc.
 	 * Too much stuff here (turning of connectors, ...) would
@@ -15980,8 +15999,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
 	intel_cleanup_overlay(dev_priv);
 
-	intel_cleanup_gt_powersave(dev_priv);
-
 	intel_teardown_gmbus(dev_priv);
 
 	destroy_workqueue(dev_priv->modeset_wq);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f92079e..20cd4c8 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4739,6 +4739,22 @@ intel_dp_long_pulse(struct intel_connector *connector,
 		 */
 		status = connector_status_disconnected;
 		goto out;
+	} else {
+		/*
+		 * If the display is now connected, check the link status;
+		 * there have been known issues of link loss triggering a
+		 * long pulse.
+		 *
+		 * Some sinks (e.g. ASUS PB287Q) seem to perform some
+		 * weird HPD ping-pong during modesets. So we can apparently
+		 * end up with HPD going low during a modeset, and then
+		 * going back up soon after. And once that happens we must
+		 * retrain the link to get a picture. That's in case no
+		 * userspace component reacted to the intermittent HPD dip.
+		 */
+		struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+		intel_dp_retrain_link(encoder, ctx);
 	}
 
 	/*
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 1fec0c7..58ba149 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -408,8 +408,6 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
 	struct intel_dp *intel_dp = intel_connector->mst_port;
 	struct intel_crtc *crtc = to_intel_crtc(state->crtc);
 
-	if (!READ_ONCE(connector->registered))
-		return NULL;
 	return &intel_dp->mst_encoders[crtc->pipe]->base.base;
 }
 
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index fd83046..947bc6d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -220,6 +220,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 	struct mtk_drm_private *private = drm->dev_private;
 	struct platform_device *pdev;
 	struct device_node *np;
+	struct device *dma_dev;
 	int ret;
 
 	if (!iommu_present(&platform_bus_type))
@@ -282,7 +283,29 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 		goto err_component_unbind;
 	}
 
-	private->dma_dev = &pdev->dev;
+	dma_dev = &pdev->dev;
+	private->dma_dev = dma_dev;
+
+	/*
+	 * Configure the DMA segment size to make sure we get contiguous IOVA
+	 * when importing PRIME buffers.
+	 */
+	if (!dma_dev->dma_parms) {
+		private->dma_parms_allocated = true;
+		dma_dev->dma_parms =
+			devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms),
+				     GFP_KERNEL);
+	}
+	if (!dma_dev->dma_parms) {
+		ret = -ENOMEM;
+		goto err_component_unbind;
+	}
+
+	ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(dma_dev, "Failed to set DMA segment size\n");
+		goto err_unset_dma_parms;
+	}
 
 	/*
 	 * We don't use the drm_irq_install() helpers provided by the DRM
@@ -292,13 +315,16 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 	drm->irq_enabled = true;
 	ret = drm_vblank_init(drm, MAX_CRTC);
 	if (ret < 0)
-		goto err_component_unbind;
+		goto err_unset_dma_parms;
 
 	drm_kms_helper_poll_init(drm);
 	drm_mode_config_reset(drm);
 
 	return 0;
 
+err_unset_dma_parms:
+	if (private->dma_parms_allocated)
+		dma_dev->dma_parms = NULL;
 err_component_unbind:
 	component_unbind_all(drm->dev, drm);
 err_config_cleanup:
@@ -309,9 +335,14 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 
 static void mtk_drm_kms_deinit(struct drm_device *drm)
 {
+	struct mtk_drm_private *private = drm->dev_private;
+
 	drm_kms_helper_poll_fini(drm);
 	drm_atomic_helper_shutdown(drm);
 
+	if (private->dma_parms_allocated)
+		private->dma_dev->dma_parms = NULL;
+
 	component_unbind_all(drm->dev, drm);
 	drm_mode_config_cleanup(drm);
 }
@@ -327,6 +358,18 @@ static const struct file_operations mtk_drm_fops = {
 	.compat_ioctl = drm_compat_ioctl,
 };
 
+/*
+ * We need to override this because the device used to import the memory is
+ * not dev->dev, as drm_gem_prime_import() expects.
+ */
+struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
+						struct dma_buf *dma_buf)
+{
+	struct mtk_drm_private *private = dev->dev_private;
+
+	return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev);
+}
+
 static struct drm_driver mtk_drm_driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
 			   DRIVER_ATOMIC,
@@ -338,7 +381,7 @@ static struct drm_driver mtk_drm_driver = {
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export = drm_gem_prime_export,
-	.gem_prime_import = drm_gem_prime_import,
+	.gem_prime_import = mtk_drm_gem_prime_import,
 	.gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
 	.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
 	.gem_prime_mmap = mtk_drm_gem_mmap_buf,
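
The override works because the stock import helper is just the _dev variant bound to the DRM device's own struct device, so swapping in private->dma_dev is the only change needed; roughly:

	/* The default import path, simplified from drm_prime.c. */
	struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
						    struct dma_buf *dma_buf)
	{
		return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
	}
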
@@ -523,12 +566,15 @@ static int mtk_drm_probe(struct platform_device *pdev)
 			comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
 			if (!comp) {
 				ret = -ENOMEM;
+				of_node_put(node);
 				goto err_node;
 			}
 
 			ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
-			if (ret)
+			if (ret) {
+				of_node_put(node);
 				goto err_node;
+			}
 
 			private->ddp_comp[comp_id] = comp;
 		}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index ecc00ca..8fa60d4 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -59,6 +59,8 @@ struct mtk_drm_private {
 	} commit;
 
 	struct drm_atomic_state *suspend_state;
+
+	bool dma_parms_allocated;
 };
 
 extern struct platform_driver mtk_ddp_driver;
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 12c80df..c7daae5 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -120,6 +120,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
 		priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
 					      OSD_COLOR_MATRIX_32_ARGB;
 		break;
+	case DRM_FORMAT_XBGR8888:
+		/* For XBGR, replace the pixel's alpha by 0xFF */
+		writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN,
+				    priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+		priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+					      OSD_COLOR_MATRIX_32_ABGR;
+		break;
 	case DRM_FORMAT_ARGB8888:
 		/* For ARGB, use the pixel's alpha */
 		writel_bits_relaxed(OSD_REPLACE_EN, 0,
@@ -127,6 +134,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
 		priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
 					      OSD_COLOR_MATRIX_32_ARGB;
 		break;
+	case DRM_FORMAT_ABGR8888:
+		/* For ABGR, use the pixel's alpha */
+		writel_bits_relaxed(OSD_REPLACE_EN, 0,
+				    priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+		priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+					      OSD_COLOR_MATRIX_32_ABGR;
+		break;
 	case DRM_FORMAT_RGB888:
 		priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 |
 					      OSD_COLOR_MATRIX_24_RGB;
@@ -196,7 +210,9 @@ static const struct drm_plane_funcs meson_plane_funcs = {
 
 static const uint32_t supported_drm_formats[] = {
 	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
 	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
 	DRM_FORMAT_RGB888,
 	DRM_FORMAT_RGB565,
 };
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 96fb5f6..cc4ea55 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -429,15 +429,15 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
 	}
 
 	msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
-	if (!msm_host->byte_clk_src) {
-		ret = -ENODEV;
+	if (IS_ERR(msm_host->byte_clk_src)) {
+		ret = PTR_ERR(msm_host->byte_clk_src);
 		pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
 		goto exit;
 	}
 
 	msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
-	if (!msm_host->pixel_clk_src) {
-		ret = -ENODEV;
+	if (IS_ERR(msm_host->pixel_clk_src)) {
+		ret = PTR_ERR(msm_host->pixel_clk_src);
 		pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
 		goto exit;
 	}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 89bd242..610139b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1298,7 +1298,8 @@ static int add_gpu_components(struct device *dev,
 	if (!np)
 		return 0;
 
-	drm_of_component_match_add(dev, matchptr, compare_of, np);
+	if (of_device_is_available(np))
+		drm_of_component_match_add(dev, matchptr, compare_of, np);
 
 	of_node_put(np);
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index f889d41..10107e5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -759,7 +759,8 @@ nv50_msto_enable(struct drm_encoder *encoder)
 
 	slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
 	r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, slots);
-	WARN_ON(!r);
+	if (!r)
+		DRM_DEBUG_KMS("Failed to allocate VCPI\n");
 
 	if (!mstm->links++)
 		nv50_outp_acquire(mstm->outp);
@@ -1516,7 +1517,8 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 			nv_encoder->aux = aux;
 		}
 
-		if ((data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
+		if (nv_connector->type != DCB_CONNECTOR_eDP &&
+		    (data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
 		    ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) {
 			ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
 					    nv_connector->base.base.id,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index d81a99b..b041ffb 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -169,14 +169,34 @@ nv50_head_atomic_check_view(struct nv50_head_atom *armh,
 	 */
 	switch (mode) {
 	case DRM_MODE_SCALE_CENTER:
-		asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW);
-		asyh->view.oH = min((u16)umode_vdisplay, asyh->view.oH);
-		/* fall-through */
+		/* NOTE: This will cause scaling when the input is
+		 * larger than the output.
+		 */
+		asyh->view.oW = min(asyh->view.iW, asyh->view.oW);
+		asyh->view.oH = min(asyh->view.iH, asyh->view.oH);
+		break;
 	case DRM_MODE_SCALE_ASPECT:
-		if (asyh->view.oH < asyh->view.oW) {
+		/* Determine whether the scaling should be on width or on
+		 * height. This is done by comparing the aspect ratios of the
+		 * sizes. If the output AR is larger than input AR, that means
+		 * we want to change the width (letterboxed on the
+		 * left/right), otherwise on the height (letterboxed on the
+		 * top/bottom).
+		 *
+		 * E.g. 4:3 (1.333) AR image displayed on a 16:10 (1.6) AR
+		 * screen will have letterboxes on the left/right. However a
+		 * 16:9 (1.777) AR image on that same screen will have
+		 * letterboxes on the top/bottom.
+		 *
+		 * inputAR = iW / iH; outputAR = oW / oH
+		 * outputAR > inputAR is equivalent to oW * iH > iW * oH
+		 */
+		if (asyh->view.oW * asyh->view.iH > asyh->view.iW * asyh->view.oH) {
+			/* Recompute output width, i.e. left/right letterbox */
 			u32 r = (asyh->view.iW << 19) / asyh->view.iH;
 			asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
 		} else {
+			/* Recompute output height, i.e. top/bottom letterbox */
 			u32 r = (asyh->view.iH << 19) / asyh->view.iW;
 			asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
 		}
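
Worked numbers make the branch selection above concrete; take a 4:3 image (iW=1024, iH=768) on a 16:10 screen (oW=1920, oH=1200):

	/* 1920 * 768 = 1474560 > 1024 * 1200 = 1228800, so recompute oW. */
	u32 r = (1024 << 19) / 768;		/* iW/iH in .19 fixed point */
	u32 oW = ((1200 * r) + (r / 2)) >> 19;	/* = 1600 */
	/* 160-pixel letterbox bars remain per side: (1920 - 1600) / 2. */
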
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
index 7143ea4..33a9fb5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
@@ -96,6 +96,8 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
 		info->min     = min(info->base,
 				    info->base + info->step * info->vidmask);
 		info->max     = nvbios_rd32(bios, volt + 0x0e);
+		if (!info->max)
+			info->max = max(info->base, info->base + info->step * info->vidmask);
 		break;
 	case 0x50:
 		info->min     = nvbios_rd32(bios, volt + 0x0a);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index b4e7404..a11637b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 		u8 *ptr = msg->buf;
 
 		while (remaining) {
-			u8 cnt = (remaining > 16) ? 16 : remaining;
-			u8 cmd;
+			u8 cnt, retries, cmd;
 
 			if (msg->flags & I2C_M_RD)
 				cmd = 1;
@@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 			if (mcnt || remaining > 16)
 				cmd |= 4; /* MOT */
 
-			ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt);
-			if (ret < 0) {
-				nvkm_i2c_aux_release(aux);
-				return ret;
+			for (retries = 0, cnt = 0;
+			     retries < 32 && !cnt;
+			     retries++) {
+				cnt = min_t(u8, remaining, 16);
+				ret = aux->func->xfer(aux, true, cmd,
+						      msg->addr, ptr, &cnt);
+				if (ret < 0)
+					goto out;
+			}
+			if (!cnt) {
+				AUX_TRACE(aux, "no data after 32 retries");
+				ret = -EIO;
+				goto out;
 			}
 
 			ptr += cnt;
@@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 		msg++;
 	}
 
+	ret = num;
+out:
 	nvkm_i2c_aux_release(aux);
-	return num;
+	return ret;
 }
 
 static u32
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index cb80dda..7e9e2f0 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1110,7 +1110,7 @@ static const struct dss_features omap34xx_dss_feats = {
 
 static const struct dss_features omap3630_dss_feats = {
 	.model			=	DSS_MODEL_OMAP3,
-	.fck_div_max		=	32,
+	.fck_div_max		=	31,
 	.fck_freq_max		=	173000000,
 	.dss_fck_multiplier	=	1,
 	.parent_clk_name	=	"dpll4_ck",
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 2c9c972..9a2cb8a 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -400,7 +400,13 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
 
 	/* Look up the DSI host.  It needs to probe before we do. */
 	endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+	if (!endpoint)
+		return -ENODEV;
+
 	dsi_host_node = of_graph_get_remote_port_parent(endpoint);
+	if (!dsi_host_node)
+		goto error;
+
 	host = of_find_mipi_dsi_host_by_node(dsi_host_node);
 	of_node_put(dsi_host_node);
 	if (!host) {
@@ -409,6 +415,9 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
 	}
 
 	info.node = of_graph_get_remote_port(endpoint);
+	if (!info.node)
+		goto error;
+
 	of_node_put(endpoint);
 
 	ts->dsi = mipi_dsi_device_register_full(host, &info);
@@ -429,6 +438,10 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
 		return ret;
 
 	return 0;
+
+error:
+	of_node_put(endpoint);
+	return -ENODEV;
 }
 
 static int rpi_touchscreen_remove(struct i2c_client *i2c)
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index b1d41c4..654fea2 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -436,6 +436,32 @@ static const struct panel_desc ampire_am800480r3tmqwa1h = {
 	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
 };
 
+static const struct display_timing santek_st0700i5y_rbslw_f_timing = {
+	.pixelclock = { 26400000, 33300000, 46800000 },
+	.hactive = { 800, 800, 800 },
+	.hfront_porch = { 16, 210, 354 },
+	.hback_porch = { 45, 36, 6 },
+	.hsync_len = { 1, 10, 40 },
+	.vactive = { 480, 480, 480 },
+	.vfront_porch = { 7, 22, 147 },
+	.vback_porch = { 22, 13, 3 },
+	.vsync_len = { 1, 10, 20 },
+	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+		DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE
+};
+
+static const struct panel_desc armadeus_st0700_adapt = {
+	.timings = &santek_st0700i5y_rbslw_f_timing,
+	.num_timings = 1,
+	.bpc = 6,
+	.size = {
+		.width = 154,
+		.height = 86,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
 static const struct drm_display_mode auo_b101aw03_mode = {
 	.clock = 51450,
 	.hdisplay = 1024,
@@ -663,9 +689,9 @@ static const struct panel_desc auo_g133han01 = {
 static const struct display_timing auo_g185han01_timings = {
 	.pixelclock = { 120000000, 144000000, 175000000 },
 	.hactive = { 1920, 1920, 1920 },
-	.hfront_porch = { 18, 60, 74 },
-	.hback_porch = { 12, 44, 54 },
-	.hsync_len = { 10, 24, 32 },
+	.hfront_porch = { 36, 120, 148 },
+	.hback_porch = { 24, 88, 108 },
+	.hsync_len = { 20, 48, 64 },
 	.vactive = { 1080, 1080, 1080 },
 	.vfront_porch = { 6, 10, 40 },
 	.vback_porch = { 2, 5, 20 },
@@ -2331,6 +2357,9 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "ampire,am800480r3tmqwa1h",
 		.data = &ampire_am800480r3tmqwa1h,
 	}, {
+		.compatible = "armadeus,st0700-adapt",
+		.data = &armadeus_st0700_adapt,
+	}, {
 		.compatible = "auo,b101aw03",
 		.data = &auo_b101aw03,
 	}, {
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 414642e..de656f5 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -751,7 +751,7 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
 
 		radeon_encoder->output_csc = val;
 
-		if (connector->encoder->crtc) {
+		if (connector->encoder && connector->encoder->crtc) {
 			struct drm_crtc *crtc  = connector->encoder->crtc;
 			struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 2a7977a..c26f09b 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -340,8 +340,39 @@ static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
 static int radeon_pci_probe(struct pci_dev *pdev,
 			    const struct pci_device_id *ent)
 {
+	unsigned long flags = 0;
 	int ret;
 
+	if (!ent)
+		return -ENODEV; /* Avoid NULL-ptr deref in drm_get_pci_dev */
+
+	flags = ent->driver_data;
+
+	if (!radeon_si_support) {
+		switch (flags & RADEON_FAMILY_MASK) {
+		case CHIP_TAHITI:
+		case CHIP_PITCAIRN:
+		case CHIP_VERDE:
+		case CHIP_OLAND:
+		case CHIP_HAINAN:
+			dev_info(&pdev->dev,
+				 "SI support disabled by module param\n");
+			return -ENODEV;
+		}
+	}
+	if (!radeon_cik_support) {
+		switch (flags & RADEON_FAMILY_MASK) {
+		case CHIP_KAVERI:
+		case CHIP_BONAIRE:
+		case CHIP_HAWAII:
+		case CHIP_KABINI:
+		case CHIP_MULLINS:
+			dev_info(&pdev->dev,
+				 "CIK support disabled by module param\n");
+			return -ENODEV;
+		}
+	}
+
 	if (vga_switcheroo_client_probe_defer(pdev))
 		return -EPROBE_DEFER;
 
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 6a8fb6f..3ff8357 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -95,31 +95,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	struct radeon_device *rdev;
 	int r, acpi_status;
 
-	if (!radeon_si_support) {
-		switch (flags & RADEON_FAMILY_MASK) {
-		case CHIP_TAHITI:
-		case CHIP_PITCAIRN:
-		case CHIP_VERDE:
-		case CHIP_OLAND:
-		case CHIP_HAINAN:
-			dev_info(dev->dev,
-				 "SI support disabled by module param\n");
-			return -ENODEV;
-		}
-	}
-	if (!radeon_cik_support) {
-		switch (flags & RADEON_FAMILY_MASK) {
-		case CHIP_KAVERI:
-		case CHIP_BONAIRE:
-		case CHIP_HAWAII:
-		case CHIP_KABINI:
-		case CHIP_MULLINS:
-			dev_info(dev->dev,
-				 "CIK support disabled by module param\n");
-			return -ENODEV;
-		}
-	}
-
 	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
 	if (rdev == NULL) {
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 080f053..6a4da3a 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -436,7 +436,7 @@ static int rockchip_dp_resume(struct device *dev)
 
 static const struct dev_pm_ops rockchip_dp_pm_ops = {
 #ifdef CONFIG_PM_SLEEP
-	.suspend = rockchip_dp_suspend,
+	.suspend_late = rockchip_dp_suspend,
 	.resume_early = rockchip_dp_resume,
 #endif
 };
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 808d9fb..477d0a2 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -19,6 +19,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_of.h>
 #include <drm/drm_bridge.h>
 #include <drm/drm_plane_helper.h>
@@ -825,6 +826,7 @@ static const struct drm_plane_funcs ltdc_plane_funcs = {
 };
 
 static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = {
+	.prepare_fb = drm_gem_fb_prepare_fb,
 	.atomic_check = ltdc_plane_atomic_check,
 	.atomic_update = ltdc_plane_atomic_update,
 	.atomic_disable = ltdc_plane_atomic_disable,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 0fb300d..e186877 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -184,6 +184,12 @@ static void tilcdc_fini(struct drm_device *dev)
 {
 	struct tilcdc_drm_private *priv = dev->dev_private;
 
+#ifdef CONFIG_CPU_FREQ
+	if (priv->freq_transition.notifier_call)
+		cpufreq_unregister_notifier(&priv->freq_transition,
+					    CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+
 	if (priv->crtc)
 		tilcdc_crtc_shutdown(priv->crtc);
 
@@ -198,12 +204,6 @@ static void tilcdc_fini(struct drm_device *dev)
 	drm_mode_config_cleanup(dev);
 	tilcdc_remove_external_device(dev);
 
-#ifdef CONFIG_CPU_FREQ
-	if (priv->freq_transition.notifier_call)
-		cpufreq_unregister_notifier(&priv->freq_transition,
-					    CPUFREQ_TRANSITION_NOTIFIER);
-#endif
-
 	if (priv->clk)
 		clk_put(priv->clk);
 
@@ -274,17 +274,6 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
 		goto init_failed;
 	}
 
-#ifdef CONFIG_CPU_FREQ
-	priv->freq_transition.notifier_call = cpufreq_transition;
-	ret = cpufreq_register_notifier(&priv->freq_transition,
-			CPUFREQ_TRANSITION_NOTIFIER);
-	if (ret) {
-		dev_err(dev, "failed to register cpufreq notifier\n");
-		priv->freq_transition.notifier_call = NULL;
-		goto init_failed;
-	}
-#endif
-
 	if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth))
 		priv->max_bandwidth = TILCDC_DEFAULT_MAX_BANDWIDTH;
 
@@ -361,6 +350,17 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
 	}
 	modeset_init(ddev);
 
+#ifdef CONFIG_CPU_FREQ
+	priv->freq_transition.notifier_call = cpufreq_transition;
+	ret = cpufreq_register_notifier(&priv->freq_transition,
+			CPUFREQ_TRANSITION_NOTIFIER);
+	if (ret) {
+		dev_err(dev, "failed to register cpufreq notifier\n");
+		priv->freq_transition.notifier_call = NULL;
+		goto init_failed;
+	}
+#endif
+
 	if (priv->is_componentized) {
 		ret = component_bind_all(dev, ddev);
 		if (ret < 0)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 6fe91c1..185655f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -273,15 +273,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 		else
 			ret = vmf_insert_pfn(&cvma, address, pfn);
 
-		/*
-		 * Somebody beat us to this PTE or prefaulting to
-		 * an already populated PTE, or prefaulting error.
-		 */
-
-		if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
-			break;
-		else if (unlikely(ret & VM_FAULT_ERROR))
-			goto out_io_unlock;
+		/* Never error on prefaulted PTEs */
+		if (unlikely((ret & VM_FAULT_ERROR))) {
+			if (i == 0)
+				goto out_io_unlock;
+			else
+				break;
+		}
 
 		address += PAGE_SIZE;
 		if (unlikely(++page_offset >= page_last))
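
Two behaviors change here: an error on the page that actually faulted (i == 0) is still fatal, but errors on speculative prefaults merely end the loop, and VM_FAULT_NOPAGE from an already-populated PTE no longer stops prefaulting early. Schematically (a sketch, not the driver code; insert_one_pfn() is an illustrative name):

	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		ret = insert_one_pfn(...);
		if (ret & VM_FAULT_ERROR) {
			if (i == 0)
				return ret;	/* the faulting page itself */
			break;			/* prefaults are best-effort */
		}
	}
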
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index e4e09d4..0af048d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -353,7 +353,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 				     !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
 		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
 			kfree(reply);
-
+			reply = NULL;
 			if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
 				/* A checkpoint occurred. Retry. */
 				continue;
@@ -377,7 +377,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 
 		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
 			kfree(reply);
-
+			reply = NULL;
 			if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
 				/* A checkpoint occurred. Retry. */
 				continue;
@@ -389,7 +389,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 		break;
 	}
 
-	if (retries == RETRIES)
+	if (!reply)
 		return -EINVAL;
 
 	*msg_len = reply_len;
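
Clearing the pointer after each kfree() both prevents the stale pointer from being reused on the next iteration and turns the pointer itself into the success flag, which is more robust than comparing retries against RETRIES when the loop can also exit via break. The general shape, sketched outside this driver (try_receive()/reply_ok() are illustrative, not vmwgfx functions):

	void *reply = NULL;
	int i;

	for (i = 0; i < RETRIES; i++) {
		reply = try_receive();
		if (!reply_ok(reply)) {
			kfree(reply);
			reply = NULL;	/* flag failure, avoid double free */
			continue;
		}
		break;
	}
	if (!reply)
		return -EINVAL;
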
diff --git a/drivers/gpu/msm/Kconfig b/drivers/gpu/msm/Kconfig
index 2b0d75115..b0e45d5 100644
--- a/drivers/gpu/msm/Kconfig
+++ b/drivers/gpu/msm/Kconfig
@@ -2,11 +2,11 @@
 config QCOM_KGSL
 	tristate "Qualcomm Technologies, Inc. 3D Graphics driver"
 	depends on ARCH_QCOM
+	depends on QCOM_QFPROM
 	select GENERIC_ALLOCATOR
 	select FW_LOADER
 	select PM_DEVFREQ
 	select QCOM_SCM
-	select NVMEM
 	select DEVFREQ_GOV_SIMPLE_ONDEMAND
 	select DEVFREQ_GOV_PERFORMANCE
 	select DEVFREQ_GOV_QCOM_ADRENO_TZ
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 490a3d8..ea56fe5 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -946,6 +946,7 @@
 #define A6XX_GMU_CM3_SYSRESET			0x1F800
 #define A6XX_GMU_CM3_BOOT_CONFIG		0x1F801
 #define A6XX_GMU_CX_GMU_WFI_CONFIG		0x1F802
+#define A6XX_GMU_CX_GMU_WDOG_CTRL		0x1F813
 #define A6XX_GMU_CM3_FW_BUSY			0x1F81A
 #define A6XX_GMU_CM3_FW_INIT_RESULT		0x1F81C
 #define A6XX_GMU_CM3_CFG			0x1F82D
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 93f6ba0..50f9507 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -194,7 +194,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a530v2 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -220,7 +219,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a530v3 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -286,7 +284,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a505 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_128K + SZ_8K),
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 16,
 	},
@@ -306,7 +303,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a506 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_128K + SZ_8K),
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 16,
 	},
@@ -384,7 +380,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a510 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_256K,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 16,
 	},
@@ -510,7 +505,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a540v2 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -593,7 +587,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a512 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_256K + SZ_16K),
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -612,7 +605,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a508 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_128K + SZ_8K),
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -740,6 +732,44 @@ static const struct adreno_reglist a630_vbif_regs[] = {
 	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
 };
 
+
+/* For a615, a616, a618, a630, a640 and a680 */
+static const struct a6xx_protected_regs a630_protected_regs[] = {
+	{ A6XX_CP_PROTECT_REG + 0, 0x00000, 0x004ff, 0 },
+	{ A6XX_CP_PROTECT_REG + 1, 0x00501, 0x00506, 0 },
+	{ A6XX_CP_PROTECT_REG + 2, 0x0050b, 0x007ff, 0 },
+	{ A6XX_CP_PROTECT_REG + 3, 0x0050e, 0x0050e, 1 },
+	{ A6XX_CP_PROTECT_REG + 4, 0x00510, 0x00510, 1 },
+	{ A6XX_CP_PROTECT_REG + 5, 0x00534, 0x00534, 1 },
+	{ A6XX_CP_PROTECT_REG + 6, 0x00800, 0x00882, 1 },
+	{ A6XX_CP_PROTECT_REG + 7, 0x008a0, 0x008a8, 1 },
+	{ A6XX_CP_PROTECT_REG + 8, 0x008ab, 0x008cf, 1 },
+	{ A6XX_CP_PROTECT_REG + 9, 0x008d0, 0x0098c, 0 },
+	{ A6XX_CP_PROTECT_REG + 10, 0x00900, 0x0094d, 1 },
+	{ A6XX_CP_PROTECT_REG + 11, 0x0098d, 0x00bff, 1 },
+	{ A6XX_CP_PROTECT_REG + 12, 0x00e00, 0x00e01, 1 },
+	{ A6XX_CP_PROTECT_REG + 13, 0x00e03, 0x00e0f, 1 },
+	{ A6XX_CP_PROTECT_REG + 14, 0x03c00, 0x03cc3, 1 },
+	{ A6XX_CP_PROTECT_REG + 15, 0x03cc4, 0x05cc3, 0 },
+	{ A6XX_CP_PROTECT_REG + 16, 0x08630, 0x087ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 17, 0x08e00, 0x08e00, 1 },
+	{ A6XX_CP_PROTECT_REG + 18, 0x08e08, 0x08e08, 1 },
+	{ A6XX_CP_PROTECT_REG + 19, 0x08e50, 0x08e6f, 1 },
+	{ A6XX_CP_PROTECT_REG + 20, 0x09624, 0x097ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 21, 0x09e70, 0x09e71, 1 },
+	{ A6XX_CP_PROTECT_REG + 22, 0x09e78, 0x09fff, 1 },
+	{ A6XX_CP_PROTECT_REG + 23, 0x0a630, 0x0a7ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 24, 0x0ae02, 0x0ae02, 1 },
+	{ A6XX_CP_PROTECT_REG + 25, 0x0ae50, 0x0b17f, 1 },
+	{ A6XX_CP_PROTECT_REG + 26, 0x0b604, 0x0b604, 1 },
+	{ A6XX_CP_PROTECT_REG + 27, 0x0be02, 0x0be03, 1 },
+	{ A6XX_CP_PROTECT_REG + 28, 0x0be20, 0x0d5ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 29, 0x0f000, 0x0fbff, 1 },
+	{ A6XX_CP_PROTECT_REG + 30, 0x0fc00, 0x11bff, 0 },
+	{ A6XX_CP_PROTECT_REG + 31, 0x11c00, 0x11c00, 1 },
+	{ 0 },
+};
+
 static const struct adreno_a6xx_core adreno_gpu_core_a630v2 = {
 	.base = {
 		DEFINE_ADRENO_REV(ADRENO_REV_A630, 6, 3, 0, ANY_ID),
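
Each row in the tables above programs one CP_PROTECT slot with an address window; from the way the entries read, the struct appears to be laid out roughly as below (field names inferred from context, and the last-field semantics are an assumption, not confirmed by this patch):

	/* Assumed layout of a6xx_protected_regs entries (sketch). */
	struct a6xx_protected_regs {
		u32 reg;	/* which A6XX_CP_PROTECT_REG[n] to program */
		u32 start;	/* first protected dword address */
		u32 end;	/* last protected dword address, inclusive */
		u32 noaccess;	/* assumed: 1 traps reads too, 0 writes only */
	};
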
@@ -749,7 +779,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a630v2 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -764,7 +793,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a630v2 = {
 	.hwcg_count = ARRAY_SIZE(a630_hwcg_regs),
 	.vbif = a630_vbif_regs,
 	.vbif_count = ARRAY_SIZE(a630_vbif_regs),
-	.hang_detect_cycles = 0x3fffff,
+	.hang_detect_cycles = 0xcfffff,
+	.protected_regs = a630_protected_regs,
 };
 
 /* For a615, a616 and a618 */
@@ -848,7 +878,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a615 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_512K,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -863,7 +892,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a615 = {
 	.hwcg_count = ARRAY_SIZE(a615_hwcg_regs),
 	.vbif = a615_gbif_regs,
 	.vbif_count = ARRAY_SIZE(a615_gbif_regs),
-	.hang_detect_cycles = 0x3fffff,
+	.hang_detect_cycles = 0xcfffff,
+	.protected_regs = a630_protected_regs,
 };
 
 static const struct adreno_a6xx_core adreno_gpu_core_a618 = {
@@ -875,7 +905,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a618 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_512K,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -891,6 +920,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a618 = {
 	.vbif = a615_gbif_regs,
 	.vbif_count = ARRAY_SIZE(a615_gbif_regs),
 	.hang_detect_cycles = 0x3fffff,
+	.protected_regs = a630_protected_regs,
 };
 
 static const struct adreno_reglist a620_hwcg_regs[] = {
@@ -954,16 +984,55 @@ static const struct adreno_reglist a650_gbif_regs[] = {
 	{A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3},
 };
 
+/* These are for a620 and a650 */
+static const struct a6xx_protected_regs a620_protected_regs[] = {
+	{ A6XX_CP_PROTECT_REG + 0, 0x00000, 0x004ff, 0 },
+	{ A6XX_CP_PROTECT_REG + 1, 0x00501, 0x00506, 0 },
+	{ A6XX_CP_PROTECT_REG + 2, 0x0050b, 0x007ff, 0 },
+	{ A6XX_CP_PROTECT_REG + 3, 0x0050e, 0x0050e, 1 },
+	{ A6XX_CP_PROTECT_REG + 4, 0x00510, 0x00510, 1 },
+	{ A6XX_CP_PROTECT_REG + 5, 0x00534, 0x00534, 1 },
+	{ A6XX_CP_PROTECT_REG + 6, 0x00800, 0x00882, 1 },
+	{ A6XX_CP_PROTECT_REG + 7, 0x008a0, 0x008a8, 1 },
+	{ A6XX_CP_PROTECT_REG + 8, 0x008ab, 0x008cf, 1 },
+	{ A6XX_CP_PROTECT_REG + 9, 0x008d0, 0x0098c, 0 },
+	{ A6XX_CP_PROTECT_REG + 10, 0x00900, 0x0094d, 1 },
+	{ A6XX_CP_PROTECT_REG + 11, 0x0098d, 0x00bff, 1 },
+	{ A6XX_CP_PROTECT_REG + 12, 0x00e00, 0x00e01, 1 },
+	{ A6XX_CP_PROTECT_REG + 13, 0x00e03, 0x00e0f, 1 },
+	{ A6XX_CP_PROTECT_REG + 14, 0x03c00, 0x03cc3, 1 },
+	{ A6XX_CP_PROTECT_REG + 15, 0x03cc4, 0x05cc3, 0 },
+	{ A6XX_CP_PROTECT_REG + 16, 0x08630, 0x087ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 17, 0x08e00, 0x08e00, 1 },
+	{ A6XX_CP_PROTECT_REG + 18, 0x08e08, 0x08e08, 1 },
+	{ A6XX_CP_PROTECT_REG + 19, 0x08e50, 0x08e6f, 1 },
+	{ A6XX_CP_PROTECT_REG + 20, 0x08e80, 0x090ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 21, 0x09624, 0x097ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 22, 0x09e60, 0x09e71, 1 },
+	{ A6XX_CP_PROTECT_REG + 23, 0x09e78, 0x09fff, 1 },
+	{ A6XX_CP_PROTECT_REG + 24, 0x0a630, 0x0a7ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 25, 0x0ae02, 0x0ae02, 1 },
+	{ A6XX_CP_PROTECT_REG + 26, 0x0ae50, 0x0b17f, 1 },
+	{ A6XX_CP_PROTECT_REG + 27, 0x0b604, 0x0b604, 1 },
+	{ A6XX_CP_PROTECT_REG + 28, 0x0b608, 0x0b60f, 1 },
+	{ A6XX_CP_PROTECT_REG + 29, 0x0be02, 0x0be03, 1 },
+	{ A6XX_CP_PROTECT_REG + 30, 0x0be20, 0x0d5ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 31, 0x0f000, 0x0fbff, 1 },
+	{ A6XX_CP_PROTECT_REG + 32, 0x0fc00, 0x11bff, 0 },
+	{ A6XX_CP_PROTECT_REG + 47, 0x11c00, 0x11c00, 1 },
+	{ 0 },
+};
+
 static const struct adreno_a6xx_core adreno_gpu_core_a620 = {
 	.base = {
-		DEFINE_ADRENO_REV(ADRENO_REV_A620, 6, 2, 0, 0),
+		DEFINE_ADRENO_REV(ADRENO_REV_A620, 6, 2, 0, ANY_ID),
 		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU |
 			ADRENO_CONTENT_PROTECTION | ADRENO_IOCOHERENT |
-			ADRENO_IFPC,
+			ADRENO_IFPC | ADRENO_PREEMPTION | ADRENO_ACD |
+			ADRENO_APRIV,
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0,
 		.gmem_size = SZ_512K,
-		.num_protected_regs = 0x30,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -980,6 +1049,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a620 = {
 	.vbif_count = ARRAY_SIZE(a650_gbif_regs),
 	.veto_fal10 = true,
 	.hang_detect_cycles = 0x3ffff,
+	.protected_regs = a620_protected_regs,
+	.disable_tseskip = true,
 };
 
 static const struct adreno_reglist a640_hwcg_regs[] = {
@@ -1034,7 +1105,7 @@ static const struct adreno_reglist a640_hwcg_regs[] = {
 	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
 };
 
-/* These apply to a640, a680 and a612 */
+/* These apply to a640, a680, a612 and a610 */
 static const struct adreno_reglist a640_vbif_regs[] = {
 	{A6XX_GBIF_QSB_SIDE0, 0x00071620},
 	{A6XX_GBIF_QSB_SIDE1, 0x00071620},
@@ -1052,7 +1123,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a640 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M, /* verified 1MB */
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -1067,7 +1137,9 @@ static const struct adreno_a6xx_core adreno_gpu_core_a640 = {
 	.hwcg_count = ARRAY_SIZE(a640_hwcg_regs),
 	.vbif = a640_vbif_regs,
 	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
-	.hang_detect_cycles = 0x3fffff,
+	.hang_detect_cycles = 0xcfffff,
+	.protected_regs = a630_protected_regs,
+	.disable_tseskip = true,
 };
 
 static const struct adreno_reglist a650_hwcg_regs[] = {
@@ -1127,11 +1199,10 @@ static const struct adreno_a6xx_core adreno_gpu_core_a650 = {
 		DEFINE_ADRENO_REV(ADRENO_REV_A650, 6, 5, 0, 0),
 		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU |
 			ADRENO_IOCOHERENT | ADRENO_CONTENT_PROTECTION |
-			ADRENO_IFPC,
+			ADRENO_IFPC | ADRENO_APRIV,
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0,
 		.gmem_size = SZ_1M + SZ_128K, /* verified 1152kB */
-		.num_protected_regs = 0x30,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -1146,7 +1217,9 @@ static const struct adreno_a6xx_core adreno_gpu_core_a650 = {
 	.vbif_count = ARRAY_SIZE(a650_gbif_regs),
 	.veto_fal10 = true,
 	.pdc_in_aop = true,
-	.hang_detect_cycles = 0x3fffff,
+	.hang_detect_cycles = 0xcfffff,
+	.protected_regs = a620_protected_regs,
+	.disable_tseskip = true,
 };
 
 static const struct adreno_a6xx_core adreno_gpu_core_a650v2 = {
@@ -1154,11 +1227,11 @@ static const struct adreno_a6xx_core adreno_gpu_core_a650v2 = {
 		DEFINE_ADRENO_REV(ADRENO_REV_A650, 6, 5, 0, ANY_ID),
 		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU |
 			ADRENO_IOCOHERENT | ADRENO_CONTENT_PROTECTION |
-			ADRENO_IFPC | ADRENO_PREEMPTION,
+			ADRENO_IFPC | ADRENO_PREEMPTION | ADRENO_ACD |
+			ADRENO_LM | ADRENO_APRIV,
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0,
 		.gmem_size = SZ_1M + SZ_128K, /* verified 1152kB */
-		.num_protected_regs = 0x30,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -1174,6 +1247,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a650v2 = {
 	.veto_fal10 = true,
 	.pdc_in_aop = true,
 	.hang_detect_cycles = 0x3ffff,
+	.protected_regs = a620_protected_regs,
+	.disable_tseskip = true,
 };
 
 static const struct adreno_a6xx_core adreno_gpu_core_a680 = {
@@ -1183,7 +1258,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a680 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_2M,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -1198,14 +1272,16 @@ static const struct adreno_a6xx_core adreno_gpu_core_a680 = {
 	.hwcg_count = ARRAY_SIZE(a640_hwcg_regs),
 	.vbif = a640_vbif_regs,
 	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
-	.hang_detect_cycles = 0x3fffff,
+	.hang_detect_cycles = 0xcfffff,
+	.protected_regs = a630_protected_regs,
+	.disable_tseskip = true,
 };
 
 static const struct adreno_reglist a612_hwcg_regs[] = {
 	{A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
 	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
-	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x0000F3CF},
-	{A6XX_RBBM_CLOCK_HYST_SP0, 0x00000081},
+	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000081},
+	{A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
 	{A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
 	{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
 	{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
@@ -1259,7 +1335,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a612 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_128K + SZ_4K),
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -1273,6 +1348,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a612 = {
 	.vbif = a640_vbif_regs,
 	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
 	.hang_detect_cycles = 0x3fffff,
+	.protected_regs = a630_protected_regs,
 };
 
 static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
@@ -1284,7 +1360,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_512K,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -1299,7 +1374,30 @@ static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
 	.hwcg_count = ARRAY_SIZE(a615_hwcg_regs),
 	.vbif = a615_gbif_regs,
 	.vbif_count = ARRAY_SIZE(a615_gbif_regs),
-	.hang_detect_cycles = 0x3fffff,
+	.hang_detect_cycles = 0xcfffff,
+	.protected_regs = a630_protected_regs,
+};
+
+static const struct adreno_a6xx_core adreno_gpu_core_a610 = {
+	{
+		DEFINE_ADRENO_REV(ADRENO_REV_A610, 6, 1, 0, ANY_ID),
+		.features = ADRENO_64BIT | ADRENO_CONTENT_PROTECTION |
+			ADRENO_PREEMPTION,
+		.gpudev = &adreno_a6xx_gpudev,
+		.gmem_base = 0x100000,
+		.gmem_size = (SZ_128K + SZ_4K),
+		.busy_mask = 0xfffffffe,
+		.bus_width = 32,
+	},
+	.prim_fifo_threshold = 0x00080000,
+	.sqefw_name = "a630_sqe.fw",
+	.zap_name = "a610_zap",
+	.hwcg = a612_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a612_hwcg_regs),
+	.vbif = a640_vbif_regs,
+	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
+	.hang_detect_cycles = 0x3ffff,
+	.protected_regs = a630_protected_regs,
 };
 
 static const struct adreno_gpu_core *adreno_gpulist[] = {
@@ -1331,4 +1429,5 @@ static const struct adreno_gpu_core *adreno_gpulist[] = {
 	&adreno_gpu_core_a680.base,
 	&adreno_gpu_core_a612.base,
 	&adreno_gpu_core_a616.base,
+	&adreno_gpu_core_a610.base,
 };
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index a53171e..0f9608e 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -34,6 +34,7 @@ static unsigned int counter_delta(struct kgsl_device *device,
 static struct devfreq_msm_adreno_tz_data adreno_tz_data = {
 	.bus = {
 		.max = 350,
+		.floating = true,
 	},
 	.device_id = KGSL_DEVICE_3D0,
 };
@@ -332,6 +333,28 @@ void adreno_fault_detect_stop(struct adreno_device *adreno_dev)
 	adreno_dev->fast_hang_detect = 0;
 }
 
+#define GMU_CM3_CFG_NONMASKINTR_SHIFT	9
+
+/* Send an NMI to the GMU */
+void adreno_gmu_send_nmi(struct adreno_device *adreno_dev)
+{
+	/* Mask so there's no interrupt caused by NMI */
+	adreno_write_gmureg(adreno_dev,
+			ADRENO_REG_GMU_GMU2HOST_INTR_MASK, 0xFFFFFFFF);
+
+	/* Make sure the interrupt is masked before causing it */
+	wmb();
+	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
+		adreno_write_gmureg(adreno_dev,
+				ADRENO_REG_GMU_NMI_CONTROL_STATUS, 0);
+	adreno_write_gmureg(adreno_dev,
+			ADRENO_REG_GMU_CM3_CFG,
+			(1 << GMU_CM3_CFG_NONMASKINTR_SHIFT));
+
+	/* Make sure the NMI is invoked before we proceed */
+	wmb();
+}
+
 /*
  * A workqueue callback responsible for actually turning on the GPU after a
  * touch event. kgsl_pwrctrl_change_state(ACTIVE) is used without any
@@ -604,6 +627,10 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
 					"Status=0x%x Unmasked status=0x%x Mask=0x%x\n",
 					shadow_status & irq_params->mask,
 					shadow_status, irq_params->mask);
+				adreno_set_gpu_fault(adreno_dev,
+						ADRENO_GMU_FAULT);
+				adreno_dispatcher_schedule(KGSL_DEVICE
+						(adreno_dev));
 				goto done;
 			}
 			fence_retries++;
@@ -877,32 +904,6 @@ static void adreno_of_get_ca_aware_properties(struct adreno_device *adreno_dev,
 	}
 }
 
-static int _of_property_read_ddrtype(struct device_node *node, const char *base,
-		u32 *ptr)
-{
-	char str[32];
-	int ddr = of_fdt_get_ddrtype();
-
-	/* of_fdt_get_ddrtype returns error if the DDR type isn't determined */
-	if (ddr >= 0) {
-		int ret;
-
-		/* Construct expanded string for the DDR type  */
-		ret = snprintf(str, sizeof(str), "%s-ddr%d", base, ddr);
-
-		/* WARN_ON() if the array size was too small for the string */
-		if (WARN_ON(ret > sizeof(str)))
-			return -ENOMEM;
-
-		/* Read the expanded string */
-		if (!of_property_read_u32(node, str, ptr))
-			return 0;
-	}
-
-	/* Read the default string */
-	return of_property_read_u32(node, base, ptr);
-}
-
 static int adreno_of_parse_pwrlevels(struct adreno_device *adreno_dev,
 		struct device_node *node)
 {
@@ -947,7 +948,10 @@ static int adreno_of_parse_pwrlevels(struct adreno_device *adreno_dev,
 			&level->gpu_freq))
 			return -EINVAL;
 
-		ret = _of_property_read_ddrtype(child,
+		of_property_read_u32(child, "qcom,acd-level",
+			&level->acd_level);
+
+		ret = kgsl_of_property_read_ddrtype(child,
 			"qcom,bus-freq", &level->bus_freq);
 		if (ret) {
 			dev_err(device->dev,
@@ -957,11 +961,11 @@ static int adreno_of_parse_pwrlevels(struct adreno_device *adreno_dev,
 		}
 
 		level->bus_min = level->bus_freq;
-		_of_property_read_ddrtype(child,
+		kgsl_of_property_read_ddrtype(child,
 			"qcom,bus-min", &level->bus_min);
 
 		level->bus_max = level->bus_freq;
-		_of_property_read_ddrtype(child,
+		kgsl_of_property_read_ddrtype(child,
 			"qcom,bus-max", &level->bus_max);
 	}
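
The kgsl_of_property_read_ddrtype() calls above replace the local _of_property_read_ddrtype() helper removed earlier in this hunk; presumably the relocated helper keeps the same lookup order: try the DDR-specific "<base>-ddr<N>" property first, then fall back to the plain "<base>" property. A standalone sketch of that order, with a stand-in for the device-tree read:

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for of_property_read_u32(); returns 0 on a hit */
    static int read_u32(const char *name, unsigned int *out)
    {
    	if (!strcmp(name, "qcom,bus-freq-ddr4")) { *out = 300; return 0; }
    	if (!strcmp(name, "qcom,bus-freq"))      { *out = 200; return 0; }
    	return -1;
    }

    static int read_ddrtype(const char *base, int ddr, unsigned int *out)
    {
    	char str[32];

    	/* Prefer the expanded property when the DDR type is known */
    	if (ddr >= 0) {
    		snprintf(str, sizeof(str), "%s-ddr%d", base, ddr);
    		if (!read_u32(str, out))
    			return 0;
    	}
    	/* Otherwise fall back to the default property */
    	return read_u32(base, out);
    }

    int main(void)
    {
    	unsigned int freq;

    	if (!read_ddrtype("qcom,bus-freq", 4, &freq))
    		printf("bus-freq = %u\n", freq);	/* prints 300 */
    	return 0;
    }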
 
@@ -1337,12 +1341,30 @@ static int adreno_read_speed_bin(struct platform_device *pdev,
 	return 0;
 }
 
+static int adreno_probe_efuse(struct platform_device *pdev,
+					struct adreno_device *adreno_dev)
+{
+	int ret;
+
+	ret = adreno_read_speed_bin(pdev, adreno_dev);
+	if (ret)
+		return ret;
+
+	ret = nvmem_cell_read_u32(&pdev->dev, "isense_slope",
+					&adreno_dev->lm_slope);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	return 0;
+}
+
 static int adreno_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *of_id;
 	struct adreno_device *adreno_dev;
 	struct kgsl_device *device;
 	int status;
+	unsigned int priv;
 
 	of_id = of_match_device(adreno_match_table, &pdev->dev);
 	if (!of_id)
@@ -1363,7 +1385,7 @@ static int adreno_probe(struct platform_device *pdev)
 
 	adreno_update_soc_hw_revision_quirks(adreno_dev, pdev);
 
-	status = adreno_read_speed_bin(pdev, adreno_dev);
+	status = adreno_probe_efuse(pdev, adreno_dev);
 	if (status)
 		return status;
 
@@ -1427,6 +1449,18 @@ static int adreno_probe(struct platform_device *pdev)
 	if (ADRENO_FEATURE(adreno_dev, ADRENO_IOCOHERENT))
 		device->mmu.features |= KGSL_MMU_IO_COHERENT;
 
+	/* Allocate the memstore for storing timestamps and other useful info */
+	priv = KGSL_MEMDESC_CONTIG;
+
+	if (ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
+		priv |= KGSL_MEMDESC_PRIVILEGED;
+
+	status = kgsl_allocate_global(device, &device->memstore,
+		KGSL_MEMSTORE_SIZE, 0, priv, "memstore");
+
+	if (status)
+		goto out;
+
 	status = adreno_ringbuffer_probe(adreno_dev);
 	if (status)
 		goto out;
@@ -1478,6 +1512,7 @@ static int adreno_probe(struct platform_device *pdev)
 out:
 	if (status) {
 		adreno_ringbuffer_close(adreno_dev);
+		kgsl_free_global(device, &device->memstore);
 		kgsl_device_platform_remove(device);
 		device->pdev = NULL;
 	}
@@ -1558,6 +1593,8 @@ static int adreno_remove(struct platform_device *pdev)
 	if (efuse_base != NULL)
 		iounmap(efuse_base);
 
+	kgsl_free_global(device, &device->memstore);
+
 	kgsl_device_platform_remove(device);
 
 	gmu_core_remove(device);
@@ -1699,6 +1736,10 @@ static int adreno_init(struct kgsl_device *device)
 	if (ret)
 		return ret;
 
+	ret = gmu_core_init(device);
+	if (ret)
+		return ret;
+
 	/* Put the GPU in a responsive state */
 	if (ADRENO_GPUREV(adreno_dev) < 600) {
 		/* No need for newer generation architectures */
@@ -1712,6 +1753,9 @@ static int adreno_init(struct kgsl_device *device)
 	adreno_perfcounter_init(adreno_dev);
 	adreno_fault_detect_init(adreno_dev);
 
+	adreno_dev->cooperative_reset = ADRENO_FEATURE(adreno_dev,
+							ADRENO_COOP_RESET);
+
 	/* Power down the device */
 	if (ADRENO_GPUREV(adreno_dev) < 600)
 		kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
@@ -1727,9 +1771,15 @@ static int adreno_init(struct kgsl_device *device)
 	 */
 
 	if (!adreno_is_a3xx(adreno_dev)) {
-		int r = kgsl_allocate_global(device,
+		unsigned int priv = 0;
+		int r;
+
+		if (ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
+			priv |= KGSL_MEMDESC_PRIVILEGED;
+
+		r = kgsl_allocate_global(device,
 			&adreno_dev->profile_buffer, PAGE_SIZE,
-			0, 0, "alwayson");
+			0, priv, "alwayson");
 
 		adreno_dev->profile_index = 0;
 
@@ -2244,10 +2294,13 @@ static inline bool adreno_try_soft_reset(struct kgsl_device *device, int fault)
 	 * needs a reset too) and also for below gpu
 	 * A304: It can't do SMMU programming of any kind after a soft reset
 	 * A612: IPC protocol between RGMU and CP will not restart after reset
+	 * A610: A cross-chip issue with the reset line on all 11nm chips
+	 * makes soft reset unreliable, so it is not recommended
 	 */
 
 	if ((fault & ADRENO_IOMMU_PAGE_FAULT) || adreno_is_a304(adreno_dev) ||
-			adreno_is_a612(adreno_dev))
+			adreno_is_a612(adreno_dev) ||
+			adreno_is_a610(adreno_dev))
 		return false;
 
 	return true;
@@ -3038,11 +3091,12 @@ static void adreno_retry_rbbm_read(struct kgsl_device *device,
 	}
 }
 
-static bool adreno_is_rbbm_batch_reg(struct kgsl_device *device,
+static bool adreno_is_rbbm_batch_reg(struct adreno_device *adreno_dev,
 	unsigned int offsetwords)
 {
-	if (adreno_is_a650(ADRENO_DEVICE(device)) ||
-		adreno_is_a620v1(ADRENO_DEVICE(device))) {
+	if ((adreno_is_a650(adreno_dev) &&
+		ADRENO_CHIPID_PATCH(adreno_dev->chipid) < 2) ||
+		adreno_is_a620v1(adreno_dev)) {
 		if (((offsetwords >= 0x0) && (offsetwords <= 0x3FF)) ||
 		((offsetwords >= 0x4FA) && (offsetwords <= 0x53F)) ||
 		((offsetwords >= 0x556) && (offsetwords <= 0x5FF)) ||
@@ -3075,7 +3129,7 @@ static void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
 	rmb();
 
 	if ((*value == 0xdeafbead) &&
-		adreno_is_rbbm_batch_reg(device, offsetwords))
+		adreno_is_rbbm_batch_reg(ADRENO_DEVICE(device), offsetwords))
 		adreno_retry_rbbm_read(device, offsetwords, value);
 }
 
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 57a5326..4d0569c 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -107,6 +107,8 @@
 #define ADRENO_COOP_RESET BIT(19)
 /* Indicates that the specific target is no longer supported */
 #define ADRENO_DEPRECATED BIT(20)
+/* The target supports ringbuffer level APRIV */
+#define ADRENO_APRIV BIT(21)
 /*
  * Adreno GPU quirks - control bits for various workarounds
  */
@@ -193,6 +195,7 @@ enum adreno_gpurev {
 	ADRENO_REV_A512 = 512,
 	ADRENO_REV_A530 = 530,
 	ADRENO_REV_A540 = 540,
+	ADRENO_REV_A610 = 610,
 	ADRENO_REV_A612 = 612,
 	ADRENO_REV_A615 = 615,
 	ADRENO_REV_A616 = 616,
@@ -211,6 +214,7 @@ enum adreno_gpurev {
 #define ADRENO_PREEMPT_FAULT BIT(4)
 #define ADRENO_GMU_FAULT BIT(5)
 #define ADRENO_CTX_DETATCH_TIMEOUT_FAULT BIT(6)
+#define ADRENO_GMU_FAULT_SKIP_SNAPSHOT BIT(7)
 
 #define ADRENO_SPTP_PC_CTRL 0
 #define ADRENO_LM_CTRL      1
@@ -350,7 +354,6 @@ struct adreno_reglist {
  * @gpudev: Pointer to the GPU family specific functions for this core
  * @gmem_base: Base address of binning memory (GMEM/OCMEM)
  * @gmem_size: Amount of binning memory (GMEM/OCMEM) to reserve for the core
- * @num_protected_regs: number of protected registers
  * @busy_mask: mask to check if GPU is busy in RBBM_STATUS
  * @bus_width: Bytes transferred in 1 cycle
  */
@@ -361,7 +364,6 @@ struct adreno_gpu_core {
 	struct adreno_gpudev *gpudev;
 	unsigned long gmem_base;
 	size_t gmem_size;
-	unsigned int num_protected_regs;
 	unsigned int busy_mask;
 	u32 bus_width;
 };
@@ -405,6 +407,8 @@ enum gpu_coresight_sources {
  * @ft_policy: Defines the fault tolerance policy
  * @long_ib_detect: Long IB detection availability
  * @ft_pf_policy: Defines the fault policy for page faults
+ * @cooperative_reset: Indicates if the graceful death handshake between
+ * the GMU and GPU is enabled
  * @profile: Container for adreno profiler information
  * @dispatcher: Container for adreno GPU dispatcher
  * @pwron_fixup: Command buffer to run a post-power collapse shader workaround
@@ -439,6 +443,7 @@ enum gpu_coresight_sources {
  * @lm_limit: limiting value for LM
  * @lm_threshold_count: register value for counter for lm threshold breakin
  * @lm_threshold_cross: number of current peaks exceeding threshold
+ * @lm_slope: Slope value in the fused register for LM
  * @ifpc_count: Number of times the GPU went into IFPC
  * @speed_bin: Indicate which power level set to use
  * @highest_bank_bit: Value of the highest bank bit
@@ -482,6 +487,7 @@ struct adreno_device {
 	unsigned long ft_policy;
 	unsigned int long_ib_detect;
 	unsigned long ft_pf_policy;
+	bool cooperative_reset;
 	struct adreno_profile profile;
 	struct adreno_dispatcher dispatcher;
 	struct kgsl_memdesc pwron_fixup;
@@ -513,6 +519,7 @@ struct adreno_device {
 	uint32_t lm_limit;
 	uint32_t lm_threshold_count;
 	uint32_t lm_threshold_cross;
+	u32 lm_slope;
 	uint32_t ifpc_count;
 
 	unsigned int speed_bin;
@@ -604,7 +611,6 @@ enum adreno_regs {
 	ADRENO_REG_CP_ME_RAM_DATA,
 	ADRENO_REG_CP_PFP_UCODE_DATA,
 	ADRENO_REG_CP_PFP_UCODE_ADDR,
-	ADRENO_REG_CP_WFI_PEND_CTR,
 	ADRENO_REG_CP_RB_BASE,
 	ADRENO_REG_CP_RB_BASE_HI,
 	ADRENO_REG_CP_RB_RPTR_ADDR_LO,
@@ -925,7 +931,7 @@ struct adreno_gpudev {
 	void (*gpu_keepalive)(struct adreno_device *adreno_dev,
 			bool state);
 	bool (*hw_isidle)(struct adreno_device *adreno_dev);
-	const char *(*iommu_fault_block)(struct adreno_device *adreno_dev,
+	const char *(*iommu_fault_block)(struct kgsl_device *device,
 				unsigned int fsynr1);
 	int (*reset)(struct kgsl_device *device, int fault);
 	int (*soft_reset)(struct adreno_device *adreno_dev);
@@ -1158,6 +1164,7 @@ static inline int adreno_is_a6xx(struct adreno_device *adreno_dev)
 			ADRENO_GPUREV(adreno_dev) < 700;
 }
 
+ADRENO_TARGET(a610, ADRENO_REV_A610)
 ADRENO_TARGET(a612, ADRENO_REV_A612)
 ADRENO_TARGET(a618, ADRENO_REV_A618)
 ADRENO_TARGET(a620, ADRENO_REV_A620)
@@ -1812,4 +1819,5 @@ int adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
 	enum adreno_regs offset, unsigned int val,
 	unsigned int fence_mask);
 int adreno_clear_pending_transactions(struct kgsl_device *device);
+void adreno_gmu_send_nmi(struct adreno_device *adreno_dev);
 #endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 680afa0..0f8f834 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -1232,7 +1232,6 @@ static unsigned int a3xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_DATA, A3XX_CP_ME_RAM_DATA),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_PFP_UCODE_DATA, A3XX_CP_PFP_UCODE_DATA),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_PFP_UCODE_ADDR, A3XX_CP_PFP_UCODE_ADDR),
-	ADRENO_REG_DEFINE(ADRENO_REG_CP_WFI_PEND_CTR, A3XX_CP_WFI_PEND_CTR),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A3XX_CP_RB_BASE),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, ADRENO_REG_SKIP),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A3XX_CP_RB_RPTR),
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 12f5c21..911ee41 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -2498,7 +2498,6 @@ static unsigned int a5xx_int_bits[ADRENO_INT_BITS_MAX] = {
 
 /* Register offset defines for A5XX, in order of enum adreno_regs */
 static unsigned int a5xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
-	ADRENO_REG_DEFINE(ADRENO_REG_CP_WFI_PEND_CTR, A5XX_CP_WFI_PEND_CTR),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A5XX_CP_RB_BASE),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, A5XX_CP_RB_BASE_HI),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_LO,
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index 4ef6bf8..0b55bfe 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -351,9 +351,9 @@ static const unsigned int a5xx_registers[] = {
 	0x04E0, 0x04F4, 0X04F8, 0x0529, 0x0531, 0x0533, 0x0540, 0x0555,
 	0xF400, 0xF400, 0xF800, 0xF807,
 	/* CP */
-	0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
-	0x0B00, 0x0B12, 0x0B15, 0X0B1C, 0X0B1E, 0x0B28, 0x0B78, 0x0B7F,
-	0x0BB0, 0x0BBD,
+	0x0800, 0x0803, 0x0806, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860,
+	0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0X0B1C, 0X0B1E, 0x0B28,
+	0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
 	/* VSC */
 	0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61,
 	/* GRAS */
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 3417d18..bf5ff88 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -7,6 +7,7 @@
 #include <linux/of.h>
 #include <linux/of_fdt.h>
 #include <soc/qcom/subsystem_restart.h>
+#include <linux/clk/qcom.h>
 
 #include "adreno.h"
 #include "adreno_a6xx.h"
@@ -15,41 +16,6 @@
 #include "adreno_trace.h"
 #include "kgsl_trace.h"
 
-static struct a6xx_protected_regs {
-	unsigned int base;
-	unsigned int count;
-	int read_protect;
-} a6xx_protected_regs_group[] = {
-	{ 0x600, 0x51, 0 },
-	{ 0xAE50, 0x2, 1 },
-	{ 0x9624, 0x13, 1 },
-	{ 0x8630, 0x8, 1 },
-	{ 0x9E70, 0x1, 1 },
-	{ 0x9E78, 0x187, 1 },
-	{ 0xF000, 0x810, 1 },
-	{ 0xFC00, 0x3, 0 },
-	{ 0x50E, 0x0, 1 },
-	{ 0x50F, 0x0, 0 },
-	{ 0x510, 0x0, 1 },
-	{ 0x0, 0x4F9, 0 },
-	{ 0x501, 0xA, 0 },
-	{ 0x511, 0x44, 0 },
-	{ 0xE00, 0x1, 1 },
-	{ 0xE03, 0xB, 1 },
-	{ 0x8E00, 0x0, 1 },
-	{ 0x8E50, 0xF, 1 },
-	{ 0xBE02, 0x0, 1 },
-	{ 0xBE20, 0x11F3, 1 },
-	{ 0x800, 0x82, 1 },
-	{ 0x8A0, 0x8, 1 },
-	{ 0x8AB, 0x19, 1 },
-	{ 0x900, 0x4D, 1 },
-	{ 0x98D, 0x76, 1 },
-	{ 0x8D0, 0x23, 0 },
-	{ 0x980, 0x4, 0 },
-	{ 0xA630, 0x0, 1 },
-};
-
 /* IFPC & Preemption static powerup restore list */
 static u32 a6xx_pwrup_reglist[] = {
 	A6XX_VSC_ADDR_MODE_CNTL,
@@ -123,6 +89,11 @@ static u32 a6xx_ifpc_pwrup_reglist[] = {
 	A6XX_CP_AHB_CNTL,
 };
 
+/* a620 and a650 need to program A6XX_CP_PROTECT_REG_47 for the infinite span */
+static u32 a650_pwrup_reglist[] = {
+	A6XX_CP_PROTECT_REG + 47,
+};
+
 static u32 a615_pwrup_reglist[] = {
 	A6XX_UCHE_GBIF_GX_CONFIG,
 };
@@ -175,58 +146,34 @@ static void a6xx_init(struct adreno_device *adreno_dev)
 		"powerup_register_list");
 }
 
-/**
- * a6xx_protect_init() - Initializes register protection on a6xx
- * @device: Pointer to the device structure
- * Performs register writes to enable protected access to sensitive
- * registers
- */
 static void a6xx_protect_init(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	struct kgsl_protected_registers *mmu_prot =
-		kgsl_mmu_get_prot_regs(&device->mmu);
-	int i, num_sets;
-	int req_sets = ARRAY_SIZE(a6xx_protected_regs_group);
-	int max_sets = adreno_dev->gpucore->num_protected_regs;
-	unsigned int mmu_base = 0, mmu_range = 0, cur_range;
+	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
+	const struct a6xx_protected_regs *regs = a6xx_core->protected_regs;
+	int i;
 
-	/* enable access protection to privileged registers */
-	kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL, 0x00000003);
+	/*
+	 * Enable access protection to privileged registers, fault on an access
+	 * protect violation and select the last span to protect from the start
+	 * address all the way to the end of the register address space
+	 */
+	kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL,
+		(1 << 0) | (1 << 1) | (1 << 3));
 
-	if (mmu_prot) {
-		mmu_base = mmu_prot->base;
-		mmu_range = mmu_prot->range;
-		req_sets += DIV_ROUND_UP(mmu_range, 0x2000);
-	}
+	/* Program each register defined by the core definition */
+	for (i = 0; regs[i].reg; i++) {
+		u32 count;
 
-	WARN(req_sets > max_sets,
-		"Size exceeds the num of protection regs available\n");
+		/*
+		 * This is the offset of the end register as counted from the
+		 * start, i.e. # of registers in the range - 1
+		 */
+		count = regs[i].end - regs[i].start;
 
-	/* Protect GPU registers */
-	num_sets = min_t(unsigned int,
-		ARRAY_SIZE(a6xx_protected_regs_group), max_sets);
-	for (i = 0; i < num_sets; i++) {
-		struct a6xx_protected_regs *regs =
-					&a6xx_protected_regs_group[i];
-
-		kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
-				regs->base | (regs->count << 18) |
-				(regs->read_protect << 31));
-	}
-
-	/* Protect MMU registers */
-	if (mmu_prot) {
-		while ((i < max_sets) && (mmu_range > 0)) {
-			cur_range = min_t(unsigned int, mmu_range,
-						0x2000);
-			kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
-				mmu_base | ((cur_range - 1) << 18) | (1 << 31));
-
-			mmu_base += cur_range;
-			mmu_range -= cur_range;
-			i++;
-		}
+		kgsl_regwrite(device, regs[i].reg,
+			(regs[i].start & 0x3ffff) | ((count & 0x1fff) << 18) |
+			(regs[i].noaccess << 31));
 	}
 }
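
With this encoding, CP_PROTECT_CNTL is set to 0xb (enable protection, fault on a violation, and extend the last span to the end of the register space), and each span packs as start | (count << 18) | (noaccess << 31). For example, the a630 span { A6XX_CP_PROTECT_REG + 19, 0x08e50, 0x08e6f, 1 } has a count of 0x1f and packs to 0x807c8e50; a standalone check of that arithmetic:

    #include <assert.h>
    #include <stdint.h>

    /* Same packing as the kgsl_regwrite() above (standalone model) */
    static uint32_t pack_span(uint32_t start, uint32_t end, uint32_t noaccess)
    {
    	uint32_t count = end - start;	/* registers in the span, minus one */

    	return (start & 0x3ffff) | ((count & 0x1fff) << 18) |
    		(noaccess << 31);
    }

    int main(void)
    {
    	assert(pack_span(0x08e50, 0x08e6f, 1) == 0x807c8e50);
    	return 0;
    }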
 
@@ -253,7 +200,7 @@ __get_rbbm_clock_cntl_on(struct adreno_device *adreno_dev)
 {
 	if (adreno_is_a630(adreno_dev))
 		return 0x8AA8AA02;
-	else if (adreno_is_a612(adreno_dev))
+	else if (adreno_is_a612(adreno_dev) || adreno_is_a610(adreno_dev))
 		return 0xAAA8AA82;
 	else
 		return 0x8AA8AA82;
@@ -332,11 +279,12 @@ static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
 
 	/*
 	 * Disable SP clock before programming HWCG registers.
-	 * A612 GPU is not having the GX power domain. Hence
-	 * skip GMU_GX registers for A12.
+	 * A612 and A610 GPUs do not have the GX power domain, so
+	 * skip the GMU_GX registers for A612 and A610.
 	 */
 
-	if (gmu_core_isenabled(device) && !adreno_is_a612(adreno_dev))
+	if (gmu_core_isenabled(device) && !adreno_is_a612(adreno_dev) &&
+		!adreno_is_a610(adreno_dev))
 		gmu_core_regrmw(device,
 			A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
 
@@ -346,10 +294,11 @@ static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
 
 	/*
 	 * Enable SP clock after programming HWCG registers.
-	 * A612 GPU is not having the GX power domain. Hence
-	 * skip GMU_GX registers for A612.
+	 * A612 and A610 GPUs do not have the GX power domain, so
+	 * skip the GMU_GX registers for A612 and A610.
 	 */
-	if (gmu_core_isenabled(device) && !adreno_is_a612(adreno_dev))
+	if (gmu_core_isenabled(device) && !adreno_is_a612(adreno_dev) &&
+		!adreno_is_a610(adreno_dev))
 		gmu_core_regrmw(device,
 			A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
 
@@ -385,6 +334,8 @@ static void a6xx_patch_pwrup_reglist(struct adreno_device *adreno_dev)
 		reglist[items++] = REGLIST(a612_pwrup_reglist);
 	else if (adreno_is_a615_family(adreno_dev))
 		reglist[items++] = REGLIST(a615_pwrup_reglist);
+	else if (adreno_is_a650(adreno_dev) || adreno_is_a620(adreno_dev))
+		reglist[items++] = REGLIST(a650_pwrup_reglist);
 
 	/*
 	 * For each entry in each of the lists, write the offset and the current
@@ -481,7 +432,7 @@ static void a6xx_start(struct adreno_device *adreno_dev)
 	if (ADRENO_GPUREV(adreno_dev) >= ADRENO_REV_A640) {
 		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
 		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C);
-	} else if (adreno_is_a612(adreno_dev)) {
+	} else if (adreno_is_a612(adreno_dev) || adreno_is_a610(adreno_dev)) {
 		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060);
 		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16);
 	} else {
@@ -489,8 +440,8 @@ static void a6xx_start(struct adreno_device *adreno_dev)
 		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C);
 	}
 
-	if (adreno_is_a612(adreno_dev)) {
-		/* For A612 Mem pool size is reduced to 48 */
+	if (adreno_is_a612(adreno_dev) || adreno_is_a610(adreno_dev)) {
+		/* For A612 and A610, the mem pool size is reduced to 48 */
 		kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 48);
 		kgsl_regwrite(device, A6XX_CP_MEM_POOL_DBG_ADDR, 47);
 	} else {
@@ -592,7 +543,8 @@ static void a6xx_start(struct adreno_device *adreno_dev)
 		kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
 
 	/* Set the bit vccCacheSkipDis=1 to get rid of TSEskip logic */
-	kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 9));
+	if (a6xx_core->disable_tseskip)
+		kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 9));
 
 	/* Enable the GMEM save/restore feature for preemption */
 	if (adreno_is_preemption_enabled(adreno_dev))
@@ -791,8 +743,9 @@ static int _preemption_init(struct adreno_device *adreno_dev,
 {
 	unsigned int *cmds_orig = cmds;
 
-	/* Turn CP protection OFF */
-	cmds += cp_protected_mode(adreno_dev, cmds, 0);
+	/* Turn CP protection OFF on legacy targets */
+	if (!ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
+		cmds += cp_protected_mode(adreno_dev, cmds, 0);
 
 	*cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 6);
 	*cmds++ = 1;
@@ -803,8 +756,9 @@ static int _preemption_init(struct adreno_device *adreno_dev,
 	cmds += cp_gpuaddr(adreno_dev, cmds,
 			rb->secure_preemption_desc.gpuaddr);
 
-	/* Turn CP protection ON */
-	cmds += cp_protected_mode(adreno_dev, cmds, 1);
+	/* Turn CP protection back ON */
+	if (!ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
+		cmds += cp_protected_mode(adreno_dev, cmds, 1);
 
 	*cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
 	cmds += cp_gpuaddr(adreno_dev, cmds, 0x0);
@@ -846,6 +800,20 @@ static int a6xx_post_start(struct adreno_device *adreno_dev)
 }
 
 /*
+ * Some targets support marking certain transactions as always privileged, which
+ * allows us to mark more memory as privileged without having to explicitly set
+ * the APRIV bit.  For those targets, choose the following transactions to be
+ * privileged by default:
+ * CDWRITE     [6:6] - Crashdumper writes
+ * CDREAD      [5:5] - Crashdumper reads
+ * RBRPWB      [3:3] - RPTR shadow writes
+ * RBPRIVLEVEL [2:2] - Memory accesses from PM4 packets in the ringbuffer
+ * RBFETCH     [1:1] - Ringbuffer reads
+ */
+#define A6XX_APRIV_DEFAULT \
+	((1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1))
+
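For reference, the five bits above work out to a mask of 0x6e; a standalone compile-time check of the same expression:

    #define A6XX_APRIV_DEFAULT \
    	((1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1))

    /* CDWRITE | CDREAD | RBRPWB | RBPRIVLEVEL | RBFETCH == 0x6e */
    _Static_assert(A6XX_APRIV_DEFAULT == 0x6e, "unexpected APRIV mask");

    int main(void) { return 0; }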
+/*
  * a6xx_rb_start() - Start the ringbuffer
  * @adreno_dev: Pointer to adreno device
  */
@@ -875,14 +843,8 @@ static int a6xx_rb_start(struct adreno_device *adreno_dev)
 	if (ret)
 		return ret;
 
-	/*
-	 * Set the RBPRIVLEVEL bit in this register to determine
-	 * the privilege level of ucode executing packets in the RB,
-	 * so we can come out of secure mode and CP does not drop
-	 * the packet.
-	 */
-	if (adreno_is_a650_family(adreno_dev))
-		kgsl_regwrite(device, A6XX_CP_APRIV_CNTL, (1 << 2));
+	if (ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
+		kgsl_regwrite(device, A6XX_CP_APRIV_CNTL, A6XX_APRIV_DEFAULT);
 
 	/* Clear the SQE_HALT to start the CP engine */
 	kgsl_regwrite(device, A6XX_CP_SQE_CNTL, 1);
@@ -905,9 +867,6 @@ static int a6xx_rb_start(struct adreno_device *adreno_dev)
  */
 static int a6xx_sptprac_enable(struct adreno_device *adreno_dev)
 {
-	if (adreno_is_a612(adreno_dev))
-		return 0;
-
 	return a6xx_gmu_sptprac_enable(adreno_dev);
 }
 
@@ -917,9 +876,6 @@ static int a6xx_sptprac_enable(struct adreno_device *adreno_dev)
  */
 static void a6xx_sptprac_disable(struct adreno_device *adreno_dev)
 {
-	if (adreno_is_a612(adreno_dev))
-		return;
-
 	a6xx_gmu_sptprac_disable(adreno_dev);
 }
 
@@ -1076,7 +1032,8 @@ static int64_t a6xx_read_throttling_counters(struct adreno_device *adreno_dev)
 	u32 a, b, c;
 	struct adreno_busy_data *busy = &adreno_dev->busy_data;
 
-	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
+			!test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
 		return 0;
 
 	/* The counters are selected in a6xx_gmu_enable_lm() */
@@ -1093,14 +1050,26 @@ static int64_t a6xx_read_throttling_counters(struct adreno_device *adreno_dev)
 	/*
 	 * The adjustment is the number of cycles lost to throttling, which
 	 * is calculated as a weighted average of the cycles throttled
-	 * at 15%, 50%, and 90%. The adjustment is negative because in A6XX,
+	 * at different levels. The adjustment is negative because in A6XX,
 	 * the busy count includes the throttled cycles. Therefore, we want
 	 * to remove them to prevent appearing to be busier than
 	 * we actually are.
 	 */
-	adj *= ((a * 15) + (b * 50) + (c * 90)) / 100;
+	if (adreno_is_a620(adreno_dev) || adreno_is_a650(adreno_dev))
+		/*
+		 * With the newer generations, CRC throttle from SIDs of 0x14
+		 * and above cannot be observed in power counters. Since 90%
+		 * throttle uses SID 0x16, the adjustment calculation needs
+		 * correction. The throttling is in increments of 4.2%, and the
+		 * 91.7% counter does a weighted count by the value of the SID
+		 * used, which is taken into account in the final formula.
+		 */
+		adj *= div_s64((a * 42) + (b * 500) +
+			(div_s64((int64_t)c - a - b * 12, 22) * 917), 1000);
+	else
+		adj *= ((a * 5) + (b * 50) + (c * 90)) / 100;
 
-	trace_kgsl_clock_throttling(0, a, b, c, adj);
+	trace_kgsl_clock_throttling(0, b, c, a, adj);
 
 	return adj;
 }
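
Plugging illustrative counter deltas into the a620/a650 branch shows how the weighting behaves; a, b and c are the per-sample increments of the three throttle counters (the values below are made up, not hardware data):

    #include <stdint.h>
    #include <stdio.h>

    /* Standalone model of the a620/a650 weighting above; div_s64() is
     * plain signed 64-bit division here. */
    static int64_t throttle_weight(int64_t a, int64_t b, int64_t c)
    {
    	return ((a * 42) + (b * 500) +
    		(((c - a - b * 12) / 22) * 917)) / 1000;
    }

    int main(void)
    {
    	/* e.g. a = 100, b = 10, c = 400 gives a weight of 16 */
    	printf("%lld\n", (long long)throttle_weight(100, 10, 400));
    	return 0;
    }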
@@ -1309,47 +1278,66 @@ static void a6xx_llc_enable_overrides(struct adreno_device *adreno_dev)
 			A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_0, 0x3);
 }
 
-static const char *fault_block[8] = {
-	[0] = "CP",
-	[1] = "UCHE",
-	[2] = "VFD",
-	[3] = "UCHE",
-	[4] = "CCU",
-	[5] = "unknown",
-	[6] = "CDP Prefetch",
-	[7] = "GPMU",
+static const char *uche_client[7][3] = {
+	{"SP | VSC | VPC | HLSQ | PC | LRZ", "TP", "VFD"},
+	{"VSC | VPC | HLSQ | PC | LRZ", "TP | VFD", "SP"},
+	{"SP | VPC | HLSQ | PC | LRZ", "TP | VFD", "VSC"},
+	{"SP | VSC | HLSQ | PC | LRZ", "TP | VFD", "VPC"},
+	{"SP | VSC | VPC | PC | LRZ", "TP | VFD", "HLSQ"},
+	{"SP | VSC | VPC | HLSQ | LRZ", "TP | VFD", "PC"},
+	{"SP | VSC | VPC | HLSQ | PC", "TP | VFD", "LRZ"},
 };
 
-static const char *uche_client[8] = {
-	[0] = "VFD",
-	[1] = "SP",
-	[2] = "VSC",
-	[3] = "VPC",
-	[4] = "HLSQ",
-	[5] = "PC",
-	[6] = "LRZ",
-	[7] = "unknown",
-};
+#define SCOOBYDOO 0x5c00bd00
 
-static const char *a6xx_iommu_fault_block(struct adreno_device *adreno_dev,
-						unsigned int fsynr1)
+static const char *a6xx_fault_block_uche(struct kgsl_device *device,
+		unsigned int mid)
 {
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	unsigned int client_id;
-	unsigned int uche_client_id;
-
-	client_id = fsynr1 & 0xff;
-
-	if (client_id >= ARRAY_SIZE(fault_block))
-		return "unknown";
-	else if (client_id != 3)
-		return fault_block[client_id];
+	unsigned int uche_client_id = 0;
+	static char str[40];
 
 	mutex_lock(&device->mutex);
+
+	if (!kgsl_state_is_awake(device)) {
+		mutex_unlock(&device->mutex);
+		return "UCHE: unknown";
+	}
+
 	kgsl_regread(device, A6XX_UCHE_CLIENT_PF, &uche_client_id);
 	mutex_unlock(&device->mutex);
 
-	return uche_client[uche_client_id & A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK];
+	/* Ignore the value if the gpu is in IFPC */
+	if (uche_client_id == SCOOBYDOO)
+		return "UCHE: unknown";
+
+	uche_client_id &= A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK;
+	snprintf(str, sizeof(str), "UCHE: %s",
+			uche_client[uche_client_id][mid - 1]);
+
+	return str;
+}
+
+static const char *a6xx_iommu_fault_block(struct kgsl_device *device,
+		unsigned int fsynr1)
+{
+	unsigned int mid = fsynr1 & 0xff;
+
+	switch (mid) {
+	case 0:
+		return "CP";
+	case 1:
+	case 2:
+	case 3:
+		return a6xx_fault_block_uche(device, mid);
+	case 4:
+		return "CCU";
+	case 6:
+		return "CDP Prefetch";
+	case 7:
+		return "GPMU";
+	}
+
+	return "Unknown";
 }
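
As a worked example of the decode above: an fsynr1 of 0x103 gives MID 3, which routes to the UCHE helper; if A6XX_UCHE_CLIENT_PF then reads back 4, the lookup uche_client[4][3 - 1] reports "UCHE: HLSQ". A standalone model using just the row involved (values illustrative):

    #include <stdio.h>

    /* Row 4 of the uche_client table above; the column index is mid - 1 */
    static const char *row4[3] = {
    	"SP | VSC | VPC | PC | LRZ", "TP | VFD", "HLSQ",
    };

    int main(void)
    {
    	unsigned int fsynr1 = 0x103;		/* illustrative fault syndrome */
    	unsigned int mid = fsynr1 & 0xff;	/* 1, 2 or 3 => UCHE */

    	/* Pretend A6XX_UCHE_CLIENT_PF read back 4, selecting row4 above */
    	if (mid >= 1 && mid <= 3)
    		printf("UCHE: %s\n", row4[mid - 1]);	/* prints UCHE: HLSQ */
    	return 0;
    }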
 
 static void a6xx_cp_callback(struct adreno_device *adreno_dev, int bit)
@@ -2296,14 +2284,6 @@ static void a6xx_platform_setup(struct adreno_device *adreno_dev)
 
 	/* Check efuse bits for various capabilties */
 	a6xx_check_features(adreno_dev);
-
-	/*
-	 * A640 GPUs used a fuse to determine which frequency plan to
-	 * use for the GPU. For A650 GPUs enable using higher frequencies
-	 * based on the LM feature flag.
-	 */
-	if (adreno_is_a650(adreno_dev) && ADRENO_FEATURE(adreno_dev, ADRENO_LM))
-		adreno_dev->speed_bin = 1;
 }
 
 
@@ -2565,6 +2545,29 @@ static int a6xx_perfcounter_update(struct adreno_device *adreno_dev,
 	return 0;
 }
 
+static void a6xx_clk_set_options(struct adreno_device *adreno_dev,
+	const char *name, struct clk *clk, bool on)
+{
+	if (!adreno_is_a610(adreno_dev))
+		return;
+
+	/* Handle clock settings for GFX PSCBCs */
+	if (on) {
+		if (!strcmp(name, "mem_iface_clk")) {
+			clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
+			clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
+		} else if (!strcmp(name, "core_clk")) {
+			clk_set_flags(clk, CLKFLAG_RETAIN_PERIPH);
+			clk_set_flags(clk, CLKFLAG_RETAIN_MEM);
+		}
+	} else {
+		if (!strcmp(name, "core_clk")) {
+			clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
+			clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
+		}
+	}
+}
+
 struct adreno_gpudev adreno_a6xx_gpudev = {
 	.reg_offsets = &a6xx_reg_offsets,
 	.start = a6xx_start,
@@ -2601,4 +2604,5 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
 	.ccu_invalidate = a6xx_ccu_invalidate,
 	.perfcounter_update = a6xx_perfcounter_update,
 	.coresight = {&a6xx_coresight, &a6xx_coresight_cx},
+	.clk_set_options = a6xx_clk_set_options,
 };
diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h
index 120cb8f..cb45de5 100644
--- a/drivers/gpu/msm/adreno_a6xx.h
+++ b/drivers/gpu/msm/adreno_a6xx.h
@@ -11,6 +11,26 @@
 #include "a6xx_reg.h"
 
 /**
+ * struct a6xx_protected_regs - container for a protect register span
+ */
+struct a6xx_protected_regs {
+	/** @reg: Physical protected mode register to write to */
+	u32 reg;
+	/** @start: Dword offset of the starting register in the range */
+	u32 start;
+	/**
+	 * @end: Dword offset of the ending register in the range
+	 * (inclusive)
+	 */
+	u32 end;
+	/**
+	 * @noaccess: 1 if the register should not be accessible from
+	 * userspace, 0 if it can be read (but not written)
+	 */
+	u32 noaccess;
+};
+
+/**
  * struct adreno_a6xx_core - a6xx specific GPU core definitions
  */
 struct adreno_a6xx_core {
@@ -44,6 +64,10 @@ struct adreno_a6xx_core {
 	bool pdc_in_aop;
 	/** @hang_detect_cycles: Hang detect counter timeout value */
 	u32 hang_detect_cycles;
+	/** @protected_regs: Array of protected registers for the target */
+	const struct a6xx_protected_regs *protected_regs;
+	/** @disable_tseskip: True if TSESkip logic is disabled */
+	bool disable_tseskip;
 };
 
 #define CP_CLUSTER_FE		0x0
@@ -127,8 +151,6 @@ struct cpu_gpu_lock {
 #define A6XX_CP_CTXRECORD_MAGIC_REF     0xAE399D6EUL
 /* Size of each CP preemption record */
 #define A6XX_CP_CTXRECORD_SIZE_IN_BYTES     (2112 * 1024)
-/* Size of the preemption counter block (in bytes) */
-#define A6XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE   (16 * 4)
 /* Size of the user context record block (in bytes) */
 #define A6XX_CP_CTXRECORD_USER_RESTORE_SIZE (192 * 1024)
 /* Size of the performance counter save/restore block (in bytes) */
diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c
index 91978af..b8a28fb 100644
--- a/drivers/gpu/msm/adreno_a6xx_gmu.c
+++ b/drivers/gpu/msm/adreno_a6xx_gmu.c
@@ -346,7 +346,7 @@ static int a6xx_gmu_start(struct kgsl_device *device)
 	u32 mask = 0x000001FF;
 
 	/* Check for 0xBABEFACE on legacy targets */
-	if (!ADRENO_FEATURE(ADRENO_DEVICE(device), ADRENO_COOP_RESET)) {
+	if (gmu->ver.core <= 0x20010004) {
 		val = 0xBABEFACE;
 		mask = 0xFFFFFFFF;
 	}
@@ -360,7 +360,16 @@ static int a6xx_gmu_start(struct kgsl_device *device)
 	if (timed_poll_check(device,
 			A6XX_GMU_CM3_FW_INIT_RESULT,
 			val, GMU_START_TIMEOUT, mask)) {
-		dev_err(&gmu->pdev->dev, "GMU doesn't boot\n");
+		u32 val;
+
+		/*
+		 * The breadcrumb is written to a GMU virtual mapping
+		 * which points to DTCM byte offset 0x3fdc.
+		 */
+		gmu_core_regread(device,
+			A6XX_GMU_CM3_DTCM_START + (0x3fdc >> 2), &val);
+		dev_err(&gmu->pdev->dev, "GMU doesn't boot: 0x%x\n", val);
+
 		return -ETIMEDOUT;
 	}
 
@@ -962,6 +971,23 @@ static int a6xx_gmu_wait_for_idle(struct kgsl_device *device)
 /* A6xx GMU FENCE RANGE MASK */
 #define GMU_FENCE_RANGE_MASK	((0x1 << 31) | ((0xA << 2) << 18) | (0x8A0))
 
+static void load_gmu_version_info(struct kgsl_device *device)
+{
+	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
+
+	/* GMU version info is at a fixed offset in the DTCM */
+	gmu_core_regread(device, A6XX_GMU_CM3_DTCM_START + 0xFF8,
+				&gmu->ver.core);
+	gmu_core_regread(device, A6XX_GMU_CM3_DTCM_START + 0xFF9,
+				&gmu->ver.core_dev);
+	gmu_core_regread(device, A6XX_GMU_CM3_DTCM_START + 0xFFA,
+				&gmu->ver.pwr);
+	gmu_core_regread(device, A6XX_GMU_CM3_DTCM_START + 0xFFB,
+				&gmu->ver.pwr_dev);
+	gmu_core_regread(device, A6XX_GMU_CM3_DTCM_START + 0xFFC,
+				&gmu->ver.hfi);
+}
+
 /*
  * a6xx_gmu_fw_start() - set up GMU and start FW
  * @device: Pointer to KGSL device
@@ -1048,6 +1074,10 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device,
 
 	/* Configure power control and bring the GMU out of reset */
 	a6xx_gmu_power_config(device);
+
+	/* Populate the GMU version info before GMU boots */
+	load_gmu_version_info(device);
+
 	ret = a6xx_gmu_start(device);
 	if (ret)
 		return ret;
@@ -1066,10 +1096,6 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device,
 			return ret;
 	}
 
-	/* Read the HFI and Power version from registers */
-	gmu_core_regread(device, A6XX_GMU_HFI_VERSION_INFO, &gmu->ver.hfi);
-	gmu_core_regread(device, A6XX_GMU_GENERAL_0, &gmu->ver.pwr);
-
 	ret = a6xx_gmu_hfi_start(device);
 	if (ret)
 		return ret;
@@ -1125,110 +1151,62 @@ static int a6xx_gmu_load_firmware(struct kgsl_device *device)
 
 		offset += sizeof(*blk);
 
-		switch (blk->type) {
-		case GMU_BLK_TYPE_CORE_VER:
-			gmu->ver.core = blk->value;
-			dev_dbg(&gmu->pdev->dev,
-					"CORE VER: 0x%8.8x\n", blk->value);
-			break;
-		case GMU_BLK_TYPE_CORE_DEV_VER:
-			gmu->ver.core_dev = blk->value;
-			dev_dbg(&gmu->pdev->dev,
-					"CORE DEV VER: 0x%8.8x\n", blk->value);
-			break;
-		case GMU_BLK_TYPE_PWR_VER:
-			gmu->ver.pwr = blk->value;
-			dev_dbg(&gmu->pdev->dev,
-					"PWR VER: 0x%8.8x\n", blk->value);
-			break;
-		case GMU_BLK_TYPE_PWR_DEV_VER:
-			gmu->ver.pwr_dev = blk->value;
-			dev_dbg(&gmu->pdev->dev,
-					"PWR DEV VER: 0x%8.8x\n", blk->value);
-			break;
-		case GMU_BLK_TYPE_HFI_VER:
-			gmu->ver.hfi = blk->value;
-			dev_dbg(&gmu->pdev->dev,
-					"HFI VER: 0x%8.8x\n", blk->value);
-			break;
-		case GMU_BLK_TYPE_PREALLOC_REQ:
-		case GMU_BLK_TYPE_PREALLOC_PERSIST_REQ:
+		if (blk->type == GMU_BLK_TYPE_PREALLOC_REQ ||
+				blk->type == GMU_BLK_TYPE_PREALLOC_PERSIST_REQ)
 			ret = gmu_prealloc_req(device, blk);
-			if (ret)
-				return ret;
-			break;
 
-		default:
-			break;
-		}
+		if (ret)
+			return ret;
 	}
 
 	 /* Request any other cache ranges that might be required */
 	return gmu_cache_finalize(device);
 }
 
-#define A6XX_STATE_OF_CHILD             (BIT(4) | BIT(5))
-#define A6XX_IDLE_FULL_LLM              BIT(0)
-#define A6XX_WAKEUP_ACK                 BIT(1)
-#define A6XX_IDLE_FULL_ACK              BIT(0)
 #define A6XX_VBIF_XIN_HALT_CTRL1_ACKS   (BIT(0) | BIT(1) | BIT(2) | BIT(3))
 
-static int a6xx_llm_glm_handshake(struct kgsl_device *device)
+static void do_gbif_halt(struct kgsl_device *device, u32 reg, u32 ack_reg,
+	u32 mask, const char *client)
 {
-	unsigned int val;
-	const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
+	u32 ack;
+	unsigned long t;
 
-	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
-			!test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
-		return 0;
+	kgsl_regwrite(device, reg, mask);
 
-	gmu_core_regread(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, &val);
-	if (!(val & A6XX_STATE_OF_CHILD)) {
-		gmu_core_regrmw(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, 0, BIT(4));
-		gmu_core_regrmw(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, 0,
-				A6XX_IDLE_FULL_LLM);
-		if (timed_poll_check(device, A6XX_GMU_LLM_GLM_SLEEP_STATUS,
-				A6XX_IDLE_FULL_ACK, GPU_RESET_TIMEOUT,
-				A6XX_IDLE_FULL_ACK)) {
-			dev_err(&gmu->pdev->dev, "LLM-GLM handshake failed\n");
-			return -EINVAL;
-		}
-	}
+	t = jiffies + msecs_to_jiffies(100);
+	do {
+		kgsl_regread(device, ack_reg, &ack);
+		if ((ack & mask) == mask)
+			return;
 
-	return 0;
-}
+		/*
+		 * If we are attempting recovery from a stall-on-fault,
+		 * the halt sequence will not complete as long as the
+		 * SMMU is stalled.
+		 */
+		kgsl_mmu_pagefault_resume(&device->mmu);
 
-static void a6xx_isense_disable(struct kgsl_device *device)
-{
-	unsigned int val;
-	const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+		usleep_range(10, 100);
+	} while (!time_after(jiffies, t));
 
-	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
-		!test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
+	/* Check one last time */
+	kgsl_mmu_pagefault_resume(&device->mmu);
+
+	kgsl_regread(device, ack_reg, &ack);
+	if ((ack & mask) == mask)
 		return;
 
-	gmu_core_regread(device, A6XX_GPU_CS_ENABLE_REG, &val);
-	if (val) {
-		gmu_core_regwrite(device, A6XX_GPU_CS_ENABLE_REG, 0);
-		gmu_core_regwrite(device, A6XX_GMU_ISENSE_CTRL, 0);
-	}
+	dev_err(device->dev, "%s GBIF halt timed out\n", client);
 }
 
 static int a6xx_gmu_suspend(struct kgsl_device *device)
 {
 	int ret = 0;
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
-
-	/* do it only if LM feature is enabled */
-	/* Disable ISENSE if it's on */
-	a6xx_isense_disable(device);
-
-	/* LLM-GLM handshake sequence */
-	a6xx_llm_glm_handshake(device);
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 
 	/* If SPTP_RAC is on, turn off SPTP_RAC HS */
-	a6xx_gmu_sptprac_disable(ADRENO_DEVICE(device));
+	a6xx_gmu_sptprac_disable(adreno_dev);
 
 	/* Disconnect GPU from BUS is not needed if CX GDSC goes off later */
 
@@ -1242,6 +1220,30 @@ static int a6xx_gmu_suspend(struct kgsl_device *device)
 	/* Make sure above writes are committed before we proceed to recovery */
 	wmb();
 
+	gmu_core_regwrite(device, A6XX_GMU_CM3_SYSRESET, 1);
+
+	if (adreno_has_gbif(adreno_dev)) {
+		struct adreno_gpudev *gpudev =
+			ADRENO_GPU_DEVICE(adreno_dev);
+
+		/* Halt GX traffic */
+		if (a6xx_gmu_gx_is_on(device))
+			do_gbif_halt(device, A6XX_RBBM_GBIF_HALT,
+				A6XX_RBBM_GBIF_HALT_ACK,
+				gpudev->gbif_gx_halt_mask,
+				"GX");
+
+		/* Halt CX traffic */
+		do_gbif_halt(device, A6XX_GBIF_HALT, A6XX_GBIF_HALT_ACK,
+			gpudev->gbif_arb_halt_mask, "CX");
+	}
+
+	if (a6xx_gmu_gx_is_on(device))
+		kgsl_regwrite(device, A6XX_RBBM_SW_RESET_CMD, 0x1);
+
+	/* Allow the software reset to complete */
+	udelay(100);
+
 	/*
 	 * This is based on the assumption that GMU is the only one controlling
 	 * the GX HS. This code path is the only client voting for GX through
@@ -1384,14 +1386,19 @@ void a6xx_gmu_enable_lm(struct kgsl_device *device)
 
 	/*
 	 * For throttling, use the following counters for throttled cycles:
-	 * XOCLK1: countable 0x10
-	 * XOCLK2: countable 0x15
-	 * XOCLK3: countable 0x19
+	 * XOCLK1: countable: 0x10
+	 * XOCLK2: countable: 0x16 for newer hardware / 0x15 for others
+	 * XOCLK3: countable: 0xf for newer hardware / 0x19 for others
 	 *
 	 * POWER_CONTROL_SELECT_0 controls counters 0 - 3, each selector
 	 * is 8 bits wide.
 	 */
-	val = (0x10 << 8) | (0x15 << 16) | (0x19 << 24);
+
+	if (adreno_is_a620(adreno_dev) || adreno_is_a650(adreno_dev))
+		val = (0x10 << 8) | (0x16 << 16) | (0x0f << 24);
+	else
+		val = (0x10 << 8) | (0x15 << 16) | (0x19 << 24);
 
 	/* Make sure not to write over XOCLK0 */
 	gmu_core_regrmw(device, A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
@@ -1645,6 +1652,8 @@ static void a6xx_gmu_snapshot(struct kgsl_device *device,
 	if (a6xx_gmu_gx_is_on(device)) {
 		/* Set fence to ALLOW mode so registers can be read */
 		kgsl_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+		/* Make sure the previous write is posted before reading */
+		wmb();
 		kgsl_regread(device, A6XX_GMU_AO_AHB_FENCE_CTRL, &val);
 
 		dev_err(device->dev, "set FENCE to ALLOW mode:%x\n", val);
@@ -1654,6 +1663,38 @@ static void a6xx_gmu_snapshot(struct kgsl_device *device,
 	}
 }
 
+static void a6xx_gmu_cooperative_reset(struct kgsl_device *device)
+{
+	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
+	unsigned int result;
+
+	gmu_core_regwrite(device, A6XX_GMU_CX_GMU_WDOG_CTRL, 0);
+	gmu_core_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, BIT(17));
+
+	/*
+	 * After triggering graceful death, wait for the snapshot-ready
+	 * indication from the GMU.
+	 */
+	if (!timed_poll_check(device, A6XX_GMU_CM3_FW_INIT_RESULT,
+				0x800, 2, 0x800))
+		return;
+
+	gmu_core_regread(device, A6XX_GMU_CM3_FW_INIT_RESULT, &result);
+	dev_err(&gmu->pdev->dev,
+		"GMU cooperative reset timed out 0x%x\n", result);
+	/*
+	 * If we don't get a snapshot-ready indication from the GMU, trigger
+	 * an NMI; if we still time out, continue with the reset.
+	 */
+	adreno_gmu_send_nmi(ADRENO_DEVICE(device));
+	udelay(200);
+	gmu_core_regread(device, A6XX_GMU_CM3_FW_INIT_RESULT, &result);
+	if ((result & 0x800) != 0x800)
+		dev_err(&gmu->pdev->dev,
+			"GMU cooperative reset NMI timed out 0x%x\n", result);
+}
+
 static int a6xx_gmu_wait_for_active_transition(
 	struct kgsl_device *device)
 {
@@ -1696,6 +1737,7 @@ struct gmu_dev_ops adreno_a6xx_gmudev = {
 	.ifpc_store = a6xx_gmu_ifpc_store,
 	.ifpc_show = a6xx_gmu_ifpc_show,
 	.snapshot = a6xx_gmu_snapshot,
+	.cooperative_reset = a6xx_gmu_cooperative_reset,
 	.wait_for_active_transition = a6xx_gmu_wait_for_active_transition,
 	.gmu2host_intr_mask = HFI_IRQ_MASK,
 	.gmu_ao_intr_mask = GMU_AO_INT_MASK,
diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c
index 966c256..46a84ab 100644
--- a/drivers/gpu/msm/adreno_a6xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a6xx_preempt.c
@@ -319,35 +319,11 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev)
 	 * Fenced writes on this path will make sure the GPU is woken up
 	 * in case it was power collapsed by the GMU.
 	 */
-	adreno_gmu_fenced_write(adreno_dev,
+	if (adreno_gmu_fenced_write(adreno_dev,
 		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO,
 		lower_32_bits(next->preemption_desc.gpuaddr),
-		FENCE_STATUS_WRITEDROPPED1_MASK);
-
-	adreno_gmu_fenced_write(adreno_dev,
-		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
-		upper_32_bits(next->preemption_desc.gpuaddr),
-		FENCE_STATUS_WRITEDROPPED1_MASK);
-
-	adreno_gmu_fenced_write(adreno_dev,
-		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO,
-		lower_32_bits(next->secure_preemption_desc.gpuaddr),
-		FENCE_STATUS_WRITEDROPPED1_MASK);
-
-	adreno_gmu_fenced_write(adreno_dev,
-		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI,
-		upper_32_bits(next->secure_preemption_desc.gpuaddr),
-		FENCE_STATUS_WRITEDROPPED1_MASK);
-
-	adreno_gmu_fenced_write(adreno_dev,
-		ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
-		lower_32_bits(gpuaddr),
-		FENCE_STATUS_WRITEDROPPED1_MASK);
-
-	adreno_gmu_fenced_write(adreno_dev,
-		ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
-		upper_32_bits(gpuaddr),
-		FENCE_STATUS_WRITEDROPPED1_MASK);
+		FENCE_STATUS_WRITEDROPPED1_MASK))
+		goto err;
 
 	/*
 	 * Above fence writes will make sure GMU comes out of
@@ -358,13 +334,38 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev)
 	 * preemption. This is required to make sure CP doesn't
 	 * interrupt GMU during wake-up from IFPC.
 	 */
-	if (gmu_core_dev_wait_for_active_transition(device)) {
-		adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+	if (gmu_core_dev_wait_for_active_transition(device))
+		goto err;
 
-		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
-		adreno_dispatcher_schedule(device);
-		return;
-	}
+	if (adreno_gmu_fenced_write(adreno_dev,
+		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
+		upper_32_bits(next->preemption_desc.gpuaddr),
+		FENCE_STATUS_WRITEDROPPED1_MASK))
+		goto err;
+
+	if (adreno_gmu_fenced_write(adreno_dev,
+		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO,
+		lower_32_bits(next->secure_preemption_desc.gpuaddr),
+		FENCE_STATUS_WRITEDROPPED1_MASK))
+		goto err;
+
+	if (adreno_gmu_fenced_write(adreno_dev,
+		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI,
+		upper_32_bits(next->secure_preemption_desc.gpuaddr),
+		FENCE_STATUS_WRITEDROPPED1_MASK))
+		goto err;
+
+	if (adreno_gmu_fenced_write(adreno_dev,
+		ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
+		lower_32_bits(gpuaddr),
+		FENCE_STATUS_WRITEDROPPED1_MASK))
+		goto err;
+
+	if (adreno_gmu_fenced_write(adreno_dev,
+		ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
+		upper_32_bits(gpuaddr),
+		FENCE_STATUS_WRITEDROPPED1_MASK))
+		goto err;
 
 	adreno_dev->next_rb = next;
 
@@ -378,8 +379,23 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev)
 	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_TRIGGERED);
 
 	/* Trigger the preemption */
-	adreno_gmu_fenced_write(adreno_dev, ADRENO_REG_CP_PREEMPT, cntl,
-		FENCE_STATUS_WRITEDROPPED1_MASK);
+	if (adreno_gmu_fenced_write(adreno_dev, ADRENO_REG_CP_PREEMPT, cntl,
+					FENCE_STATUS_WRITEDROPPED1_MASK)) {
+		adreno_dev->next_rb = NULL;
+		del_timer(&adreno_dev->preempt.timer);
+		goto err;
+	}
+
+	return;
+err:
+
+	/* If fenced write fails, set the fault and trigger recovery */
+	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+	adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
+	adreno_dispatcher_schedule(device);
+	/* Clear the keep alive */
+	if (gmu_core_isenabled(device))
+		gmu_core_regrmw(device, A6XX_GMU_AO_SPARE_CNTL, 0x2, 0x0);
 }
 
 void a6xx_preemption_callback(struct adreno_device *adreno_dev, int bit)
@@ -590,7 +606,7 @@ void a6xx_preemption_start(struct adreno_device *adreno_dev)
 }
 
 static int a6xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev,
-	struct adreno_ringbuffer *rb, uint64_t counteraddr)
+	struct adreno_ringbuffer *rb)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	int ret;
@@ -636,7 +652,7 @@ static int a6xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev,
 	kgsl_sharedmem_writeq(device, &rb->preemption_desc,
 		PREEMPT_RECORD(rbase), rb->buffer_desc.gpuaddr);
 	kgsl_sharedmem_writeq(device, &rb->preemption_desc,
-		PREEMPT_RECORD(counter), counteraddr);
+		PREEMPT_RECORD(counter), 0);
 
 	return 0;
 }
@@ -679,7 +695,6 @@ static void _preemption_close(struct adreno_device *adreno_dev)
 	unsigned int i;
 
 	del_timer(&preempt->timer);
-	kgsl_free_global(device, &preempt->counters);
 	a6xx_preemption_iommu_close(adreno_dev);
 
 	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
@@ -706,7 +721,6 @@ int a6xx_preemption_init(struct adreno_device *adreno_dev)
 	struct adreno_ringbuffer *rb;
 	int ret;
 	unsigned int i;
-	uint64_t addr;
 
 	/* We are dependent on IOMMU to make preemption go on the CP side */
 	if (kgsl_mmu_get_mmutype(device) != KGSL_MMU_TYPE_IOMMU)
@@ -716,23 +730,11 @@ int a6xx_preemption_init(struct adreno_device *adreno_dev)
 
 	timer_setup(&preempt->timer, _a6xx_preemption_timer, 0);
 
-	/* Allocate mem for storing preemption counters */
-	ret = kgsl_allocate_global(device, &preempt->counters,
-		adreno_dev->num_ringbuffers *
-		A6XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0,
-		"preemption_counters");
-	if (ret)
-		goto err;
-
-	addr = preempt->counters.gpuaddr;
-
 	/* Allocate mem for storing preemption switch record */
 	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
-		ret = a6xx_preemption_ringbuffer_init(adreno_dev, rb, addr);
+		ret = a6xx_preemption_ringbuffer_init(adreno_dev, rb);
 		if (ret)
 			goto err;
-
-		addr += A6XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE;
 	}
 
 	ret = a6xx_preemption_iommu_init(adreno_dev);
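
Every fenced GMU write in the trigger path above now funnels failures to one err label that flags an ADRENO_GMU_FAULT and clears the keep-alive vote. A minimal standalone sketch of that shape — all names here are placeholders, not the driver's API:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder for adreno_gmu_fenced_write(): returns false if the write
 * was dropped because the GMU fence never cleared. */
static bool fenced_write(unsigned int reg, unsigned int val)
{
	printf("write 0x%04x = 0x%x\n", reg, val);
	return true;
}

static void trigger(void)
{
	if (!fenced_write(0x0800, 1))
		goto err;
	if (!fenced_write(0x0804, 2))
		goto err;
	return;
err:
	/* set the fault, kick the dispatcher, drop the keep-alive vote */
	printf("fenced write dropped: scheduling recovery\n");
}

int main(void)
{
	trigger();
	return 0;
}
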
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index dcd01b2..ca603d4 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -12,6 +12,9 @@
 #define A6XX_NUM_XIN_AXI_BLOCKS 5
 #define A6XX_NUM_XIN_CORE_BLOCKS 4
 
+/* Snapshot section size of each CP preemption record for A6XX */
+#define A6XX_SNAPSHOT_CP_CTXRECORD_SIZE_IN_BYTES (64 * 1024)
+
 static const unsigned int a6xx_gras_cluster[] = {
 	0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
 	0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
@@ -341,13 +344,13 @@ static const unsigned int a6xx_registers[] = {
 	0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511, 0x0533, 0x0533,
 	0x0540, 0x0555,
 	/* CP */
-	0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
-	0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F,
-	0x0880, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD,
-	0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E,
-	0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E,
-	0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8,
-	0x0A00, 0x0A03,
+	0x0800, 0x0803, 0x0806, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821,
+	0x0823, 0x0824, 0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843,
+	0x084F, 0x086F, 0x0880, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4,
+	0x08D0, 0x08DD, 0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911,
+	0x0928, 0x093E, 0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996,
+	0x0998, 0x099E, 0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B1,
+	0x09C2, 0x09C8, 0x0A00, 0x0A03,
 	/* VSC */
 	0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
 	/* UCHE */
@@ -379,6 +382,13 @@ static const unsigned int a6xx_pre_crashdumper_registers[] = {
 	0x825, 0x825,
 };
 
+static const unsigned int a6xx_gmu_wrapper_registers[] = {
+	/* GMU CX */
+	0x1f840, 0x1f840, 0x1f844, 0x1f845, 0x1f887, 0x1f889,
+	/* GMU AO */
+	0x23b0c, 0x23b0e, 0x23b15, 0x23b15,
+};
+
 enum a6xx_debugbus_id {
 	A6XX_DBGBUS_CP           = 0x1,
 	A6XX_DBGBUS_RBBM         = 0x2,
@@ -1599,8 +1609,9 @@ static void _a6xx_do_crashdump(struct kgsl_device *device)
 	if (val & BIT(24))
 		return;
 
-	/* Turn on APRIV so we can access the buffers */
-	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);
+	/* Turn on APRIV for legacy targets so we can access the buffers */
+	if (!ADRENO_FEATURE(ADRENO_DEVICE(device), ADRENO_APRIV))
+		kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);
 
 	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
 			lower_32_bits(a6xx_capturescript.gpuaddr));
@@ -1616,7 +1627,8 @@ static void _a6xx_do_crashdump(struct kgsl_device *device)
 		cpu_relax();
 	}
 
-	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);
+	if (!ADRENO_FEATURE(ADRENO_DEVICE(device), ADRENO_APRIV))
+		kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);
 
 	if (!(reg & 0x2)) {
 		dev_err(device->dev, "Crash dump timed out: 0x%X\n", reg);
@@ -1711,6 +1723,32 @@ size_t a6xx_snapshot_isense_registers(struct kgsl_device *device, u8 *buf,
 	return (count * 8) + sizeof(*header);
 }
 
+/* Snapshot the preemption related buffers */
+static size_t snapshot_preemption_record(struct kgsl_device *device,
+	u8 *buf, size_t remain, void *priv)
+{
+	struct kgsl_memdesc *memdesc = priv;
+	struct kgsl_snapshot_gpu_object_v2 *header =
+		(struct kgsl_snapshot_gpu_object_v2 *)buf;
+	u8 *ptr = buf + sizeof(*header);
+
+	if (remain < (A6XX_SNAPSHOT_CP_CTXRECORD_SIZE_IN_BYTES +
+						sizeof(*header))) {
+		SNAPSHOT_ERR_NOMEM(device, "PREEMPTION RECORD");
+		return 0;
+	}
+
+	header->size = A6XX_SNAPSHOT_CP_CTXRECORD_SIZE_IN_BYTES >> 2;
+	header->gpuaddr = memdesc->gpuaddr;
+	header->ptbase =
+		kgsl_mmu_pagetable_get_ttbr0(device->mmu.defaultpagetable);
+	header->type = SNAPSHOT_GPU_OBJECT_GLOBAL;
+
+	memcpy(ptr, memdesc->hostptr, A6XX_SNAPSHOT_CP_CTXRECORD_SIZE_IN_BYTES);
+
+	return A6XX_SNAPSHOT_CP_CTXRECORD_SIZE_IN_BYTES + sizeof(*header);
+}
+
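
snapshot_preemption_record() above follows the standard snapshot-callback contract: check the remaining space, fill a header, copy the payload, and return the bytes consumed. A compile-ready sketch of that contract with invented types:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct header { uint32_t size; };	/* stand-in for the real snapshot header */

/* Returns bytes consumed, or 0 when the snapshot buffer is too small */
static size_t write_section(uint8_t *buf, size_t remain,
		const void *payload, size_t bytes)
{
	struct header *hdr = (struct header *)buf;

	if (remain < bytes + sizeof(*hdr))
		return 0;	/* caller would log SNAPSHOT_ERR_NOMEM */

	hdr->size = (uint32_t)(bytes >> 2);	/* size is recorded in dwords */
	memcpy(buf + sizeof(*hdr), payload, bytes);

	return bytes + sizeof(*hdr);
}

int main(void)
{
	uint8_t buf[128];
	uint32_t record[4] = { 1, 2, 3, 4 };

	return write_section(buf, sizeof(buf), record, sizeof(record)) ? 0 : 1;
}
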
 /*
  * a6xx_snapshot() - A6XX GPU snapshot function
  * @adreno_dev: Device being snapshotted
@@ -1724,6 +1762,7 @@ void a6xx_snapshot(struct adreno_device *adreno_dev,
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	struct adreno_ringbuffer *rb;
 	bool sptprac_on;
 	unsigned int i, roq_size, ucode_dbg_size;
 
@@ -1763,6 +1802,10 @@ void a6xx_snapshot(struct adreno_device *adreno_dev,
 		adreno_snapshot_registers(device, snapshot,
 			a6xx_rscc_snapshot_registers,
 			ARRAY_SIZE(a6xx_rscc_snapshot_registers) / 2);
+	} else if (adreno_is_a610(adreno_dev)) {
+		adreno_snapshot_registers(device, snapshot,
+			a6xx_gmu_wrapper_registers,
+			ARRAY_SIZE(a6xx_gmu_wrapper_registers) / 2);
 	}
 
 	sptprac_on = gpudev->sptprac_is_on(adreno_dev);
@@ -1838,6 +1881,15 @@ void a6xx_snapshot(struct adreno_device *adreno_dev,
 		a6xx_snapshot_dbgahb_regs(device, snapshot);
 	}
 
+	/* Preemption record */
+	if (adreno_is_preemption_enabled(adreno_dev)) {
+		FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+			kgsl_snapshot_add_section(device,
+				KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2,
+				snapshot, snapshot_preemption_record,
+				&rb->preemption_desc);
+		}
+	}
 }
 
 static int _a6xx_crashdump_init_mvc(struct adreno_device *adreno_dev,
@@ -2049,6 +2101,10 @@ void a6xx_crashdump_init(struct adreno_device *adreno_dev)
 	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
 		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
 
+		/* 16 bytes if cluster sel exists */
+		if (cluster->sel)
+			script_size += 16;
+
 		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
 
 			/* 16 bytes for programming the aperture */
diff --git a/drivers/gpu/msm/adreno_compat.h b/drivers/gpu/msm/adreno_compat.h
index ba4f00f..f7b2031 100644
--- a/drivers/gpu/msm/adreno_compat.h
+++ b/drivers/gpu/msm/adreno_compat.h
@@ -24,7 +24,7 @@ long adreno_compat_ioctl(struct kgsl_device_private *dev_priv,
 #else
 
 static inline int adreno_getproperty_compat(struct kgsl_device *device,
-		struct kgsL_device_getproperty *param);
+		struct kgsl_device_getproperty *param)
 {
 	return -EINVAL;
 }
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index be46b97..50938f2 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -7,6 +7,7 @@
 #include <linux/msm_kgsl.h>
 
 #include "adreno.h"
+extern struct dentry *kgsl_debugfs_dir;
 
 static int _isdb_set(void *data, u64 val)
 {
@@ -110,6 +111,27 @@ static int _active_count_get(void *data, u64 *val)
 
 DEFINE_DEBUGFS_ATTRIBUTE(_active_count_fops, _active_count_get, NULL, "%llu\n");
 
+static int _coop_reset_set(void *data, u64 val)
+{
+	struct kgsl_device *device = data;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	if (ADRENO_FEATURE(adreno_dev, ADRENO_COOP_RESET))
+		adreno_dev->cooperative_reset = val ? true : false;
+	return 0;
+}
+
+static int _coop_reset_get(void *data, u64 *val)
+{
+	struct kgsl_device *device = data;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	*val = (u64) adreno_dev->cooperative_reset;
+	return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(_coop_reset_fops, _coop_reset_get,
+				_coop_reset_set, "%llu\n");
+
 typedef void (*reg_read_init_t)(struct kgsl_device *device);
 typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i,
 	unsigned int *vals, int linec);
@@ -362,6 +384,7 @@ adreno_context_debugfs_init(struct adreno_device *adreno_dev,
 void adreno_debugfs_init(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	struct dentry *snapshot_dir;
 
 	if (IS_ERR_OR_NULL(device->d_debugfs))
 		return;
@@ -370,6 +393,11 @@ void adreno_debugfs_init(struct adreno_device *adreno_dev)
 			    &_active_count_fops);
 	adreno_dev->ctx_d_debugfs = debugfs_create_dir("ctx",
 							device->d_debugfs);
+	snapshot_dir = debugfs_lookup("snapshot", kgsl_debugfs_dir);
+
+	if (!IS_ERR_OR_NULL(snapshot_dir))
+		debugfs_create_file("coop_reset", 0644, snapshot_dir, device,
+					&_coop_reset_fops);
 
 	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM)) {
 		debugfs_create_file("lm_limit", 0644, device->d_debugfs, device,
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 43b39cf..88fc29b 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -7,6 +7,7 @@
 
 #include "adreno.h"
 #include "adreno_trace.h"
+#include "kgsl_gmu_core.h"
 
 #define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
 
@@ -1379,6 +1380,22 @@ int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
 
 	user_ts = *timestamp;
 
+	/*
+	 * If there is only one drawobj in the array and it is of
+	 * type SYNCOBJ_TYPE, skip comparing user_ts as it can be 0
+	 */
+	if (!(count == 1 && drawobj[0]->type == SYNCOBJ_TYPE) &&
+		(drawctxt->base.flags & KGSL_CONTEXT_USER_GENERATED_TS)) {
+		/*
+		 * User specified timestamps need to be greater than the last
+		 * issued timestamp in the context
+		 */
+		if (timestamp_cmp(drawctxt->timestamp, user_ts) >= 0) {
+			spin_unlock(&drawctxt->lock);
+			return -ERANGE;
+		}
+	}
+
 	for (i = 0; i < count; i++) {
 
 		switch (drawobj[i]->type) {
@@ -1657,12 +1674,32 @@ static inline const char *_kgsl_context_comm(struct kgsl_context *context)
 
 
 static void adreno_fault_header(struct kgsl_device *device,
-		struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj)
+		struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj,
+		int fault)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+	struct adreno_context *drawctxt =
+			drawobj ? ADRENO_CONTEXT(drawobj->context) : NULL;
 	unsigned int status, rptr, wptr, ib1sz, ib2sz;
 	uint64_t ib1base, ib2base;
+	bool gx_on = gmu_core_dev_gx_is_on(device);
+	int id = (rb != NULL) ? rb->id : -1;
+	const char *type = fault & ADRENO_GMU_FAULT ? "gmu" : "gpu";
+
+	if (!gx_on) {
+		if (drawobj != NULL)
+			pr_fault(device, drawobj,
+				"%s fault ctx %d ctx_type %s ts %d and GX is OFF\n",
+				type, drawobj->context->id,
+				get_api_type_str(drawctxt->type),
+				drawobj->timestamp);
+		else
+			dev_err(device->dev, "RB[%d] : %s fault and GX is OFF\n",
+				id, type);
+
+		return;
+	}
 
 	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS, &status);
 	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR, &rptr);
@@ -1675,9 +1712,6 @@ static void adreno_fault_header(struct kgsl_device *device,
 	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ, &ib2sz);
 
 	if (drawobj != NULL) {
-		struct adreno_context *drawctxt =
-			ADRENO_CONTEXT(drawobj->context);
-
 		drawctxt->base.total_fault_count++;
 		drawctxt->base.last_faulted_cmd_ts = drawobj->timestamp;
 
@@ -1687,26 +1721,25 @@ static void adreno_fault_header(struct kgsl_device *device,
 			ib2base, ib2sz, drawctxt->rb->id);
 
 		pr_fault(device, drawobj,
-			"gpu fault ctx %d ctx_type %s ts %d status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
-			drawobj->context->id, get_api_type_str(drawctxt->type),
+			"%s fault ctx %d ctx_type %s ts %d status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+			type, drawobj->context->id,
+			get_api_type_str(drawctxt->type),
 			drawobj->timestamp, status,
 			rptr, wptr, ib1base, ib1sz, ib2base, ib2sz);
 
 		if (rb != NULL)
 			pr_fault(device, drawobj,
-				"gpu fault rb %d rb sw r/w %4.4x/%4.4x\n",
-				rb->id, rptr, rb->wptr);
+				"%s fault rb %d rb sw r/w %4.4x/%4.4x\n",
+				type, rb->id, rptr, rb->wptr);
 	} else {
-		int id = (rb != NULL) ? rb->id : -1;
-
 		dev_err(device->dev,
-			"RB[%d]: gpu fault status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
-			id, status, rptr, wptr, ib1base, ib1sz, ib2base,
+			"RB[%d] : %s fault status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+			id, type, status, rptr, wptr, ib1base, ib1sz, ib2base,
 			ib2sz);
 		if (rb != NULL)
 			dev_err(device->dev,
-				"RB[%d] gpu fault rb sw r/w %4.4x/%4.4x\n",
-				rb->id, rptr, rb->wptr);
+				"RB[%d] : %s fault rb sw r/w %4.4x/%4.4x\n",
+				rb->id, type, rptr, rb->wptr);
 	}
 }
 
@@ -2028,7 +2061,7 @@ static void do_header_and_snapshot(struct kgsl_device *device, int fault,
 
 	/* Always dump the snapshot on a non-drawobj failure */
 	if (cmdobj == NULL) {
-		adreno_fault_header(device, rb, NULL);
+		adreno_fault_header(device, rb, NULL, fault);
 		kgsl_device_snapshot(device, NULL, fault & ADRENO_GMU_FAULT);
 		return;
 	}
@@ -2038,7 +2071,7 @@ static void do_header_and_snapshot(struct kgsl_device *device, int fault,
 		return;
 
 	/* Print the fault header */
-	adreno_fault_header(device, rb, cmdobj);
+	adreno_fault_header(device, rb, cmdobj, fault);
 
 	if (!(drawobj->context->flags & KGSL_CONTEXT_NO_SNAPSHOT))
 		kgsl_device_snapshot(device, drawobj->context,
@@ -2065,6 +2098,22 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
 	if (fault == 0)
 		return 0;
 
+	mutex_lock(&device->mutex);
+
+	/*
+	 * In the very unlikely case that the power is off, do nothing - the
+	 * state will be reset on power up and everybody will be happy
+	 */
+	if (!kgsl_state_is_awake(device)) {
+		mutex_unlock(&device->mutex);
+		if (fault & ADRENO_SOFT_FAULT) {
+			/* Clear the existing register values */
+			memset(adreno_ft_regs_val, 0,
+				adreno_ft_regs_num * sizeof(unsigned int));
+		}
+		return 0;
+	}
+
 	/* Mask all GMU interrupts */
 	if (gmu_core_isenabled(device)) {
 		adreno_write_gmureg(adreno_dev,
@@ -2077,17 +2126,6 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
 
 	gx_on = gmu_core_dev_gx_is_on(device);
 
-	/*
-	 * In the very unlikely case that the power is off, do nothing - the
-	 * state will be reset on power up and everybody will be happy
-	 */
-
-	if (!kgsl_state_is_awake(device) && (fault & ADRENO_SOFT_FAULT)) {
-		/* Clear the existing register values */
-		memset(adreno_ft_regs_val, 0,
-				adreno_ft_regs_num * sizeof(unsigned int));
-		return 0;
-	}
 
 	/*
 	 * On A5xx and A6xx, read RBBM_STATUS3:SMMU_STALLED_ON_FAULT (BIT 24)
@@ -2100,11 +2138,13 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
 		gx_on) {
 		unsigned int val;
 
-		mutex_lock(&device->mutex);
 		adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS3, &val);
-		mutex_unlock(&device->mutex);
-		if (val & BIT(24))
-			return 0;
+		if (val & BIT(24)) {
+			mutex_unlock(&device->mutex);
+			dev_err(device->dev,
+				"SMMU is stalled without a pagefault\n");
+			return -EBUSY;
+		}
 	}
 
 	/* Turn off all the timers */
@@ -2117,8 +2157,6 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
 	if (adreno_is_preemption_enabled(adreno_dev))
 		del_timer_sync(&adreno_dev->preempt.timer);
 
-	mutex_lock(&device->mutex);
-
 	if (gx_on)
 		adreno_readreg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
 			ADRENO_REG_CP_RB_BASE_HI, &base);
@@ -2161,7 +2199,12 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
 		adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
 			ADRENO_REG_CP_IB1_BASE_HI, &base);
 
-	do_header_and_snapshot(device, fault, hung_rb, cmdobj);
+	if (!test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
+		&adreno_dev->ft_pf_policy) && adreno_dev->cooperative_reset)
+		gmu_core_dev_cooperative_reset(device);
+
+	if (!(fault & ADRENO_GMU_FAULT_SKIP_SNAPSHOT))
+		do_header_and_snapshot(device, fault, hung_rb, cmdobj);
 
 	/* Turn off the KEEPALIVE vote from the ISR for hard fault */
 	if (gpudev->gpu_keepalive && fault & ADRENO_HARD_FAULT)
@@ -2365,6 +2408,12 @@ static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev,
 		drawobj->context->id, drawobj->timestamp);
 
 	adreno_set_gpu_fault(adreno_dev, ADRENO_TIMEOUT_FAULT);
+
+	/*
+	 * This makes sure the dispatcher doesn't run endlessly in cases
+	 * where we couldn't run recovery
+	 */
+	drawqueue->expires = jiffies + msecs_to_jiffies(adreno_drawobj_timeout);
 }
 
 static int adreno_dispatch_process_drawqueue(struct adreno_device *adreno_dev,
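
The -ERANGE check above only works because timestamp_cmp() is wraparound-safe, so a user timestamp of 1 still ranks newer than a context timestamp near UINT_MAX. A sketch of that comparison style (not the driver's exact helper):

#include <stdint.h>
#include <stdio.h>

/* Positive when a is newer than b, even across a 32-bit wrap */
static int ts_cmp(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b);
}

int main(void)
{
	printf("%d\n", ts_cmp(5, 3) > 0);		/* 1: plainly newer */
	printf("%d\n", ts_cmp(1, 0xFFFFFFF0u) > 0);	/* 1: newer after wrap */
	printf("%d\n", ts_cmp(3, 3) == 0);		/* 1: equal */
	return 0;
}
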
diff --git a/drivers/gpu/msm/adreno_iommu.c b/drivers/gpu/msm/adreno_iommu.c
index 6463881..38c7b12 100644
--- a/drivers/gpu/msm/adreno_iommu.c
+++ b/drivers/gpu/msm/adreno_iommu.c
@@ -11,42 +11,30 @@
 #include "adreno_pm4types.h"
 
 /*
- * _wait_reg() - make CP poll on a register
+ * a3xx_wait_reg() - make CP poll on a register
  * @cmds:	Pointer to memory where commands are to be added
  * @addr:	Register address to poll for
  * @val:	Value to poll for
  * @mask:	The value against which register value is masked
  * @interval:	wait interval
  */
-static unsigned int _wait_reg(struct adreno_device *adreno_dev,
+static unsigned int a3xx_wait_reg(struct adreno_device *adreno_dev,
 			unsigned int *cmds, unsigned int addr,
 			unsigned int val, unsigned int mask,
 			unsigned int interval)
 {
 	unsigned int *start = cmds;
 
-	if (adreno_is_a3xx(adreno_dev)) {
-		*cmds++ = cp_packet(adreno_dev, CP_WAIT_REG_EQ, 4);
-		*cmds++ = addr;
-		*cmds++ = val;
-		*cmds++ = mask;
-		*cmds++ = interval;
-	} else {
-		*cmds++ = cp_mem_packet(adreno_dev, CP_WAIT_REG_MEM, 5, 1);
-		*cmds++ = 0x3; /* Mem Space = Register,  Function = Equals */
-		cmds += cp_gpuaddr(adreno_dev, cmds, addr); /* Poll address */
-		*cmds++ = val; /* ref val */
-		*cmds++ = mask;
-		*cmds++ = interval;
-
-		/* WAIT_REG_MEM turns back on protected mode - push it off */
-		cmds += cp_protected_mode(adreno_dev, cmds, 0);
-	}
+	*cmds++ = cp_packet(adreno_dev, CP_WAIT_REG_EQ, 4);
+	*cmds++ = addr;
+	*cmds++ = val;
+	*cmds++ = mask;
+	*cmds++ = interval;
 
 	return cmds - start;
 }
 
-static unsigned int _vbif_lock(struct adreno_device *adreno_dev,
+static unsigned int a3xx_vbif_lock(struct adreno_device *adreno_dev,
 			unsigned int *cmds)
 {
 	unsigned int *start = cmds;
@@ -54,8 +42,7 @@ static unsigned int _vbif_lock(struct adreno_device *adreno_dev,
 	 * glue commands together until next
 	 * WAIT_FOR_ME
 	 */
-	cmds += _wait_reg(adreno_dev, cmds,
-			adreno_getreg(adreno_dev, ADRENO_REG_CP_WFI_PEND_CTR),
+	cmds += a3xx_wait_reg(adreno_dev, cmds, A3XX_CP_WFI_PEND_CTR,
 			1, 0xFFFFFFFF, 0xF);
 
 	/* MMU-500 VBIF stall */
@@ -67,14 +54,14 @@ static unsigned int _vbif_lock(struct adreno_device *adreno_dev,
 	*cmds++ = 0x1;
 
 	/* Wait for acknowledgment */
-	cmds += _wait_reg(adreno_dev, cmds,
+	cmds += a3xx_wait_reg(adreno_dev, cmds,
 			A3XX_VBIF_DDR_OUTPUT_RECOVERABLE_HALT_CTRL1,
 			1, 0xFFFFFFFF, 0xF);
 
 	return cmds - start;
 }
 
-static unsigned int _vbif_unlock(struct adreno_device *adreno_dev,
+static unsigned int a3xx_vbif_unlock(struct adreno_device *adreno_dev,
 				unsigned int *cmds)
 {
 	unsigned int *start = cmds;
@@ -95,7 +82,7 @@ static unsigned int _vbif_unlock(struct adreno_device *adreno_dev,
 #define A3XX_GPU_OFFSET 0xa000
 
 /* This function is only needed for A3xx targets */
-static unsigned int _cp_smmu_reg(struct adreno_device *adreno_dev,
+static unsigned int a3xx_cp_smmu_reg(struct adreno_device *adreno_dev,
 				unsigned int *cmds,
 				enum kgsl_iommu_reg_map reg,
 				unsigned int num)
@@ -110,20 +97,20 @@ static unsigned int _cp_smmu_reg(struct adreno_device *adreno_dev,
 }
 
 /* This function is only needed for A3xx targets */
-static unsigned int _tlbiall(struct adreno_device *adreno_dev,
+static unsigned int a3xx_tlbiall(struct adreno_device *adreno_dev,
 				unsigned int *cmds)
 {
 	unsigned int *start = cmds;
 	unsigned int tlbstatus = (A3XX_GPU_OFFSET +
 		kgsl_iommu_reg_list[KGSL_IOMMU_CTX_TLBSTATUS]) >> 2;
 
-	cmds += _cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_TLBIALL, 1);
+	cmds += a3xx_cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_TLBIALL, 1);
 	*cmds++ = 1;
 
-	cmds += _cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_TLBSYNC, 1);
+	cmds += a3xx_cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_TLBSYNC, 1);
 	*cmds++ = 0;
 
-	cmds += _wait_reg(adreno_dev, cmds, tlbstatus, 0,
+	cmds += a3xx_wait_reg(adreno_dev, cmds, tlbstatus, 0,
 			KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE, 0xF);
 
 	return cmds - start;
@@ -165,6 +152,10 @@ static unsigned int adreno_iommu_set_apriv(struct adreno_device *adreno_dev,
 	if (adreno_is_a3xx(adreno_dev))
 		return 0;
 
+	/* Targets with apriv control do not need to explicitly set the bit */
+	if (ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
+		return 0;
+
 	cmds += cp_wait_for_idle(adreno_dev, cmds);
 	cmds += cp_wait_for_me(adreno_dev, cmds);
 	*cmds++ = cp_register(adreno_dev, adreno_getreg(adreno_dev,
@@ -205,17 +196,18 @@ static unsigned int _adreno_iommu_set_pt_v2_a3xx(struct kgsl_device *device,
 
 	cmds += _adreno_iommu_add_idle_cmds(adreno_dev, cmds);
 
-	cmds += _vbif_lock(adreno_dev, cmds);
+	cmds += a3xx_vbif_lock(adreno_dev, cmds);
 
-	cmds += _cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_TTBR0, 2);
+	cmds += a3xx_cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_TTBR0, 2);
 	*cmds++ = lower_32_bits(ttbr0);
 	*cmds++ = upper_32_bits(ttbr0);
-	cmds += _cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_CONTEXTIDR, 1);
+	cmds += a3xx_cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_CONTEXTIDR,
+		1);
 	*cmds++ = contextidr;
 
-	cmds += _vbif_unlock(adreno_dev, cmds);
+	cmds += a3xx_vbif_unlock(adreno_dev, cmds);
 
-	cmds += _tlbiall(adreno_dev, cmds);
+	cmds += a3xx_tlbiall(adreno_dev, cmds);
 
 	/* wait for me to finish the TLBI */
 	cmds += cp_wait_for_me(adreno_dev, cmds);
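
These a3xx helpers all build CP command streams the same way: emit dwords into a scratch buffer and return the count so the caller can advance its cursor with cmds +=. A sketch with an invented packet header:

#include <stdio.h>

static unsigned int wait_reg(unsigned int *cmds, unsigned int addr,
		unsigned int val, unsigned int mask, unsigned int interval)
{
	unsigned int *start = cmds;

	*cmds++ = 0x70000000u | 4;	/* fake packet header; opcode is invented */
	*cmds++ = addr;
	*cmds++ = val;
	*cmds++ = mask;
	*cmds++ = interval;

	return (unsigned int)(cmds - start);
}

int main(void)
{
	unsigned int buf[8];

	/* 5 dwords: the header plus the four operands */
	printf("%u dwords\n", wait_reg(buf, 0x45c, 1, 0xFFFFFFFFu, 0xF));
	return 0;
}
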
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 60ddbfc..9cb9ec4 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -271,6 +271,7 @@ static int _adreno_ringbuffer_probe(struct adreno_device *adreno_dev,
 {
 	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffers[id];
 	int ret;
+	unsigned int priv = 0;
 
 	rb->id = id;
 	kgsl_add_event_group(&rb->events, NULL, _rb_readtimestamp, rb,
@@ -289,9 +290,17 @@ static int _adreno_ringbuffer_probe(struct adreno_device *adreno_dev,
 		PAGE_SIZE, 0, KGSL_MEMDESC_PRIVILEGED, "pagetable_desc");
 	if (ret)
 		return ret;
+
+	/* allocate a chunk of memory to create user profiling IB1s */
+	kgsl_allocate_global(KGSL_DEVICE(adreno_dev), &rb->profile_desc,
+		PAGE_SIZE, KGSL_MEMFLAGS_GPUREADONLY, 0, "profile_desc");
+
+	/* For targets that support it, make the ringbuffer privileged */
+	if (ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
+		priv |= KGSL_MEMDESC_PRIVILEGED;
+
 	return kgsl_allocate_global(KGSL_DEVICE(adreno_dev), &rb->buffer_desc,
-			KGSL_RB_SIZE, KGSL_MEMFLAGS_GPUREADONLY,
-			0, "ringbuffer");
+		KGSL_RB_SIZE, KGSL_MEMFLAGS_GPUREADONLY, priv, "ringbuffer");
 }
 
 int adreno_ringbuffer_probe(struct adreno_device *adreno_dev)
@@ -302,8 +311,14 @@ int adreno_ringbuffer_probe(struct adreno_device *adreno_dev)
 	int status = -ENOMEM;
 
 	if (!adreno_is_a3xx(adreno_dev)) {
+		unsigned int priv = KGSL_MEMDESC_RANDOM;
+
+		/* For targets that support it, make the scratch privileged */
+		if (ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
+			priv |= KGSL_MEMDESC_PRIVILEGED;
+
 		status = kgsl_allocate_global(device, &device->scratch,
-				PAGE_SIZE, 0, 0, "scratch");
+				PAGE_SIZE, 0, priv, "scratch");
 		if (status != 0)
 			return status;
 	}
@@ -343,7 +358,7 @@ static void _adreno_ringbuffer_close(struct adreno_device *adreno_dev,
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 
 	kgsl_free_global(device, &rb->pagetable_desc);
-
+	kgsl_free_global(device, &rb->profile_desc);
 	kgsl_free_global(device, &rb->buffer_desc);
 	kgsl_del_event_group(&rb->events);
 	memset(rb, 0, sizeof(struct adreno_ringbuffer));
@@ -473,7 +488,10 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
 	 * reserve space to temporarily turn off protected mode
 	 * error checking if needed
 	 */
-	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
+	if ((flags & KGSL_CMD_FLAGS_PMODE) &&
+		!ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
+		total_sizedwords += 4;
+
 	/* 2 dwords to store the start of command sequence */
 	total_sizedwords += 2;
 	/* internal ib command identifier for the ringbuffer */
@@ -595,15 +613,25 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
 	if (secured_ctxt)
 		ringcmds += cp_secure_mode(adreno_dev, ringcmds, 1);
 
-	/* disable protected mode error checking */
-	if (flags & KGSL_CMD_FLAGS_PMODE)
+	/*
+	 * For kernel commands disable protected mode. For user commands turn on
+	 * protected mode universally to avoid the possibility that somebody
+	 * managed to get this far with protected mode turned off.
+	 *
+	 * If the target supports apriv control then we don't need this step,
+	 * since all the permissions will already be managed for us.
+	 */
+
+	if ((flags & KGSL_CMD_FLAGS_PMODE) &&
+		!ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
 		ringcmds += cp_protected_mode(adreno_dev, ringcmds, 0);
 
 	for (i = 0; i < sizedwords; i++)
 		*ringcmds++ = cmds[i];
 
 	/* re-enable protected mode error checking */
-	if (flags & KGSL_CMD_FLAGS_PMODE)
+	if ((flags & KGSL_CMD_FLAGS_PMODE) &&
+			!ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
 		ringcmds += cp_protected_mode(adreno_dev, ringcmds, 1);
 
 	/*
@@ -814,6 +842,37 @@ static inline int _get_alwayson_counter(struct adreno_device *adreno_dev,
 	return (unsigned int)(p - cmds);
 }
 
+/* This is the maximum possible size of a profiling IB for 64-bit targets */
+#define PROFILE_IB_DWORDS 4
+#define PROFILE_IB_SLOTS (PAGE_SIZE / (PROFILE_IB_DWORDS << 2))
+
+static int set_user_profiling(struct adreno_device *adreno_dev,
+		struct adreno_ringbuffer *rb, u32 *cmds, u64 gpuaddr)
+{
+	int dwords, index = 0;
+	u64 ib_gpuaddr;
+	u32 *ib;
+
+	if (!rb->profile_desc.hostptr)
+		return 0;
+
+	ib = ((u32 *) rb->profile_desc.hostptr) +
+		(rb->profile_index * PROFILE_IB_DWORDS);
+	ib_gpuaddr = rb->profile_desc.gpuaddr +
+		(rb->profile_index * (PROFILE_IB_DWORDS << 2));
+
+	dwords = _get_alwayson_counter(adreno_dev, ib, gpuaddr);
+
+	/* Make an indirect buffer for the request */
+	cmds[index++] = cp_mem_packet(adreno_dev, CP_INDIRECT_BUFFER_PFE, 2, 1);
+	index += cp_gpuaddr(adreno_dev, &cmds[index], ib_gpuaddr);
+	cmds[index++] = dwords;
+
+	rb->profile_index = (rb->profile_index + 1) % PROFILE_IB_SLOTS;
+
+	return index;
+}
+
 /* adreno_ringbuffer_submitcmd - submit userspace IBs to the GPU */
 int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
 		struct kgsl_drawobj_cmd *cmdobj,
@@ -913,14 +972,12 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
 		!adreno_is_a3xx(adreno_dev) &&
 		(cmdobj->profiling_buf_entry != NULL)) {
 		user_profiling = true;
-		dwords += 6;
 
 		/*
-		 * REG_TO_MEM packet on A5xx and above needs another ordinal.
-		 * Add 2 more dwords since we do profiling before and after.
+		 * User side profiling uses two IB1s, one before and one after
+		 * the command, at 4 dwords per INDIRECT_BUFFER_PFE call
 		 */
-		if (!ADRENO_LEGACY_PM4(adreno_dev))
-			dwords += 2;
+		dwords += 8;
 
 		/*
 		 * we want to use an adreno_submit_time struct to get the
@@ -972,11 +1029,11 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
 	}
 
 	/*
-	 * Add cmds to read the GPU ticks at the start of command obj and
+	 * Add IB1 to read the GPU ticks at the start of command obj and
 	 * write it into the appropriate command obj profiling buffer offset
 	 */
 	if (user_profiling) {
-		cmds += _get_alwayson_counter(adreno_dev, cmds,
+		cmds += set_user_profiling(adreno_dev, rb, cmds,
 			cmdobj->profiling_buffer_gpuaddr +
 			offsetof(struct kgsl_drawobj_profiling_buffer,
 			gpu_ticks_submitted));
@@ -999,7 +1056,12 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
 			*cmds++ = cp_mem_packet(adreno_dev,
 					CP_INDIRECT_BUFFER_PFE, 2, 1);
 			cmds += cp_gpuaddr(adreno_dev, cmds, ib->gpuaddr);
-			*cmds++ = (unsigned int) ib->size >> 2;
+			/*
+			 * Never allow bit 20 (IB_PRIV) to be set. All IBs MUST
+			 * run at reduced privilege
+			 */
+			*cmds++ = (unsigned int) ((ib->size >> 2) & 0xfffff);
+
 			/* preamble is required only for the first command */
 			use_preamble = false;
 		}
@@ -1023,11 +1085,11 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
 	}
 
 	/*
-	 * Add cmds to read the GPU ticks at the end of command obj and
+	 * Add IB1 to read the GPU ticks at the end of command obj and
 	 * write it into the appropriate command obj profiling buffer offset
 	 */
 	if (user_profiling) {
-		cmds += _get_alwayson_counter(adreno_dev, cmds,
+		cmds += set_user_profiling(adreno_dev, rb, cmds,
 			cmdobj->profiling_buffer_gpuaddr +
 			offsetof(struct kgsl_drawobj_profiling_buffer,
 			gpu_ticks_retired));
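
set_user_profiling() above carves the page-sized profile_desc into fixed 4-dword slots and round-robins through them. The slot arithmetic, assuming the usual 4 KB page:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define PROFILE_IB_DWORDS 4
#define PROFILE_IB_SLOTS (PAGE_SIZE / (PROFILE_IB_DWORDS << 2))

int main(void)
{
	uint32_t index = 255;	/* last slot in the page */

	/* byte offset of a slot inside profile_desc */
	uint64_t off = (uint64_t)index * (PROFILE_IB_DWORDS << 2);

	printf("slots=%d offset=%llu\n", PROFILE_IB_SLOTS,
			(unsigned long long)off);	/* slots=256 offset=4080 */

	index = (index + 1) % PROFILE_IB_SLOTS;		/* wraps back to 0 */
	printf("next=%u\n", index);
	return 0;
}
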
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index 32186c1..2729e29 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -121,6 +121,18 @@ struct adreno_ringbuffer {
 	int preempted_midway;
 	spinlock_t preempt_lock;
 	bool skip_inline_wptr;
+	/**
+	 * @profile_desc: global memory to construct IB1s to do user side
+	 * profiling
+	 */
+	struct kgsl_memdesc profile_desc;
+	/**
+	 * @profile_index: Index of the next free "slot" in profile_desc for a
+	 * user profiling IB1. This allows for PAGE_SIZE / 16 = 256 simultaneous
+	 * commands per ringbuffer with user profiling enabled, which should be
+	 * plenty.
+	 */
+	u32 profile_index;
 };
 
 /* Returns the current ringbuffer */
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index 8d2a97e..d1b2ad0 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -276,19 +276,15 @@ static void snapshot_rb_ibs(struct kgsl_device *device,
 		struct kgsl_snapshot *snapshot)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	unsigned int rptr, *rbptr;
+	unsigned int *rbptr, rptr = adreno_get_rptr(rb);
 	int index, i;
 	int parse_ibs = 0, ib_parse_start;
 
-	/* Get the current read pointers for the RB */
-	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR, &rptr);
-
 	/*
 	 * Figure out the window of ringbuffer data to dump.  First we need to
 	 * find where the last processed IB was submitted.  Start walking back
 	 * from the rptr
 	 */
-
 	index = rptr;
 	rbptr = rb->buffer_desc.hostptr;
 
@@ -844,23 +840,23 @@ void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot,
 
 	snapshot_frozen_objsize = 0;
 
-	setup_fault_process(device, snapshot,
-			context ? context->proc_priv : NULL);
-
 	/* Add GPU specific sections - registers mainly, but other stuff too */
 	if (gpudev->snapshot)
 		gpudev->snapshot(adreno_dev, snapshot);
 
-	/* Dumping these buffers is useless if the GX is not on */
-	if (!gmu_core_dev_gx_is_on(device))
-		return;
+	setup_fault_process(device, snapshot,
+			context ? context->proc_priv : NULL);
 
-	adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
-			ADRENO_REG_CP_IB1_BASE_HI, &snapshot->ib1base);
-	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BUFSZ, &snapshot->ib1size);
-	adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB2_BASE,
-			ADRENO_REG_CP_IB2_BASE_HI, &snapshot->ib2base);
-	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ, &snapshot->ib2size);
+	if (gmu_core_dev_gx_is_on(device)) {
+		adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
+				ADRENO_REG_CP_IB1_BASE_HI, &snapshot->ib1base);
+		adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BUFSZ,
+				&snapshot->ib1size);
+		adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB2_BASE,
+				ADRENO_REG_CP_IB2_BASE_HI, &snapshot->ib2base);
+		adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ,
+				&snapshot->ib2size);
+	}
 
 	snapshot->ib1dumped = false;
 	snapshot->ib2dumped = false;
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 2257294..57dca4a 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -3624,12 +3624,16 @@ long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,
 	unsigned int cmd, void *data)
 {
 	struct kgsl_process_private *process = dev_priv->process_priv;
+	struct kgsl_device *device = dev_priv->device;
 	struct kgsl_sparse_phys_alloc *param = data;
 	struct kgsl_mem_entry *entry;
 	uint64_t flags;
 	int ret;
 	int id;
 
+	if (!(device->flags & KGSL_FLAG_SPARSE))
+		return -ENOTSUPP;
+
 	ret = _sparse_alloc_param_sanity_check(param->size, param->pagesize);
 	if (ret)
 		return ret;
@@ -3713,9 +3717,13 @@ long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
 	unsigned int cmd, void *data)
 {
 	struct kgsl_process_private *process = dev_priv->process_priv;
+	struct kgsl_device *device = dev_priv->device;
 	struct kgsl_sparse_phys_free *param = data;
 	struct kgsl_mem_entry *entry;
 
+	if (!(device->flags & KGSL_FLAG_SPARSE))
+		return -ENOTSUPP;
+
 	entry = kgsl_sharedmem_find_id_flags(process, param->id,
 			KGSL_MEMFLAGS_SPARSE_PHYS);
 	if (entry == NULL)
@@ -3745,10 +3753,14 @@ long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv,
 	unsigned int cmd, void *data)
 {
 	struct kgsl_process_private *private = dev_priv->process_priv;
+	struct kgsl_device *device = dev_priv->device;
 	struct kgsl_sparse_virt_alloc *param = data;
 	struct kgsl_mem_entry *entry;
 	int ret;
 
+	if (!(device->flags & KGSL_FLAG_SPARSE))
+		return -ENOTSUPP;
+
 	ret = _sparse_alloc_param_sanity_check(param->size, param->pagesize);
 	if (ret)
 		return ret;
@@ -3789,9 +3801,13 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
 	unsigned int cmd, void *data)
 {
 	struct kgsl_process_private *process = dev_priv->process_priv;
+	struct kgsl_device *device = dev_priv->device;
 	struct kgsl_sparse_virt_free *param = data;
 	struct kgsl_mem_entry *entry = NULL;
 
+	if (!(device->flags & KGSL_FLAG_SPARSE))
+		return -ENOTSUPP;
+
 	entry = kgsl_sharedmem_find_id_flags(process, param->id,
 			KGSL_MEMFLAGS_SPARSE_VIRT);
 	if (entry == NULL)
@@ -4138,6 +4154,7 @@ long kgsl_ioctl_sparse_bind(struct kgsl_device_private *dev_priv,
 		unsigned int cmd, void *data)
 {
 	struct kgsl_process_private *private = dev_priv->process_priv;
+	struct kgsl_device *device = dev_priv->device;
 	struct kgsl_sparse_bind *param = data;
 	struct kgsl_sparse_binding_object obj;
 	struct kgsl_mem_entry *virt_entry;
@@ -4146,6 +4163,9 @@ long kgsl_ioctl_sparse_bind(struct kgsl_device_private *dev_priv,
 	int ret = 0;
 	int i = 0;
 
+	if (!(device->flags & KGSL_FLAG_SPARSE))
+		return -ENOTSUPP;
+
 	ptr = (void __user *) (uintptr_t) param->list;
 
 	if (param->size > sizeof(struct kgsl_sparse_binding_object) ||
@@ -4201,6 +4221,9 @@ long kgsl_ioctl_gpu_sparse_command(struct kgsl_device_private *dev_priv,
 	long result;
 	unsigned int i = 0;
 
+	if (!(device->flags & KGSL_FLAG_SPARSE))
+		return -ENOTSUPP;
+
 	/* Make sure sparse and syncpoint count isn't too big */
 	if (param->numsparse > KGSL_MAX_SPARSE ||
 		param->numsyncs > KGSL_MAX_SYNCPOINTS)
@@ -4976,6 +4999,32 @@ int kgsl_request_irq(struct platform_device *pdev, const  char *name,
 	return ret ? ret : num;
 }
 
+int kgsl_of_property_read_ddrtype(struct device_node *node, const char *base,
+		u32 *ptr)
+{
+	char str[32];
+	int ddr = of_fdt_get_ddrtype();
+
+	/* of_fdt_get_ddrtype returns an error if the DDR type isn't determined */
+	if (ddr >= 0) {
+		int ret;
+
+		/* Construct the expanded string for the DDR type */
+		ret = snprintf(str, sizeof(str), "%s-ddr%d", base, ddr);
+
+		/* WARN_ON() if the array size was too small for the string */
+		if (WARN_ON(ret >= sizeof(str)))
+			return -ENOMEM;
+
+		/* Read the expanded string */
+		if (!of_property_read_u32(node, str, ptr))
+			return 0;
+	}
+
+	/* Read the default string */
+	return of_property_read_u32(node, base, ptr);
+}
+
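
kgsl_of_property_read_ddrtype() first tries a DDR-specific name built as "<base>-ddr<type>" and only then falls back to the base property. The name construction, sketched with an assumed property name and DDR type:

#include <stdio.h>

int main(void)
{
	char str[32];
	const char *base = "qcom,bus-table";	/* example property name */
	int ddr = 5;				/* example DDR type */
	int ret = snprintf(str, sizeof(str), "%s-ddr%d", base, ddr);

	if (ret >= (int)sizeof(str))
		return 1;	/* the name would have been truncated */

	printf("%s\n", str);	/* qcom,bus-table-ddr5 */
	return 0;
}
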
 int kgsl_device_platform_probe(struct kgsl_device *device)
 {
 	int status = -EINVAL;
@@ -4985,6 +5034,9 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
 	if (status)
 		return status;
 
+	/* Disable the sparse ioctl invocations as they are not used */
+	device->flags &= ~KGSL_FLAG_SPARSE;
+
 	kgsl_device_debugfs_init(device);
 
 	status = kgsl_pwrctrl_init(device);
@@ -5032,12 +5084,6 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
 	/* Initialize the memory pools */
 	kgsl_init_page_pools(device->pdev);
 
-	status = kgsl_allocate_global(device, &device->memstore,
-		KGSL_MEMSTORE_SIZE, 0, KGSL_MEMDESC_CONTIG, "memstore");
-
-	if (status != 0)
-		goto error_close_mmu;
-
 	/*
 	 * The default request type PM_QOS_REQ_ALL_CORES is
 	 * applicable to all CPU cores that are online and
@@ -5110,8 +5156,6 @@ void kgsl_device_platform_remove(struct kgsl_device *device)
 
 	idr_destroy(&device->context_idr);
 
-	kgsl_free_global(device, &device->memstore);
-
 	kgsl_mmu_close(device);
 
 	kgsl_pwrctrl_close(device);
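
Each sparse ioctl above now opens with the same early-out on KGSL_FLAG_SPARSE, and the probe path clears that flag, so the whole family reports -ENOTSUPP. The gating pattern reduced to a standalone sketch:

#include <stdio.h>

#define ENOTSUPP 524		/* kernel-internal errno, not in userspace headers */
#define FLAG_SPARSE (1u << 1)	/* mirrors KGSL_FLAG_SPARSE */

static long sparse_ioctl(unsigned long flags)
{
	if (!(flags & FLAG_SPARSE))
		return -ENOTSUPP;

	return 0;	/* the real handler body would follow */
}

int main(void)
{
	printf("%ld\n", sparse_ioctl(0));		/* -524 */
	printf("%ld\n", sparse_ioctl(FLAG_SPARSE));	/* 0 */
	return 0;
}
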
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 27a9c7f..0900100 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -9,6 +9,7 @@
 #include <linux/kthread.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
+#include <linux/compat.h>
 #include <uapi/linux/msm_kgsl.h>
 
 #include "kgsl_gmu_core.h"
@@ -186,6 +187,8 @@ struct kgsl_memdesc_ops {
 #define KGSL_MEMDESC_CONTIG BIT(8)
 /* This is an instruction buffer */
 #define KGSL_MEMDESC_UCODE BIT(9)
+/* For global buffers, randomly assign an address from the region */
+#define KGSL_MEMDESC_RANDOM BIT(10)
 
 /**
  * struct kgsl_memdesc - GPU memory object descriptor
@@ -333,16 +336,6 @@ struct kgsl_event_group {
 };
 
 /**
- * struct kgsl_protected_registers - Protected register range
- * @base: Offset of the range to be protected
- * @range: Range (# of registers = 2 ** range)
- */
-struct kgsl_protected_registers {
-	unsigned int base;
-	int range;
-};
-
-/**
  * struct sparse_bind_object - Bind metadata
  * @node: Node for the rb tree
  * @p_memdesc: Physical memdesc bound to
diff --git a/drivers/gpu/msm/kgsl_compat.h b/drivers/gpu/msm/kgsl_compat.h
index 8664e1c..c5f3164 100644
--- a/drivers/gpu/msm/kgsl_compat.h
+++ b/drivers/gpu/msm/kgsl_compat.h
@@ -228,25 +228,10 @@ static inline compat_size_t sizet_to_compat(size_t size)
 	return (compat_size_t)size;
 }
 
-struct kgsl_device;
-struct kgsl_drawobj;
-
-int kgsl_drawobj_create_compat(struct kgsl_device *device, unsigned int flags,
-			struct kgsl_drawobj *drawobj, void __user *cmdlist,
-			unsigned int numcmds, void __user *synclist,
-			unsigned int numsyncs);
-
 long kgsl_compat_ioctl(struct file *filep, unsigned int cmd,
 			unsigned long arg);
 
 #else
-static inline int kgsl_drawobj_create_compat(struct kgsl_device *device,
-			unsigned int flags, struct kgsl_drawobj *drawobj,
-			void __user *cmdlist, unsigned int numcmds,
-			void __user *synclist, unsigned int numsyncs)
-{
-	return -EINVAL;
-}
 
 static inline long kgsl_compat_ioctl(struct file *filep, unsigned int cmd,
 			unsigned long arg)
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 1a86ca5..927418e 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -50,6 +50,7 @@ enum kgsl_event_results {
 };
 
 #define KGSL_FLAG_WAKE_ON_TOUCH BIT(0)
+#define KGSL_FLAG_SPARSE        BIT(1)
 
 /*
  * "list" of event types for ftrace symbolic magic
@@ -291,6 +292,7 @@ struct kgsl_device {
 
 	u32 snapshot_faultcount;	/* Total number of faults since boot */
 	bool force_panic;		/* Force panic after snapshot dump */
+	bool skip_ib_capture;		/* Skip IB capture after snapshot */
 	bool prioritize_unrecoverable;	/* Overwrite with new GMU snapshots */
 	bool set_isdb_breakpoint;	/* Set isdb registers before snapshot */
 
@@ -919,6 +921,22 @@ void kgsl_snapshot_add_section(struct kgsl_device *device, u16 id,
 	void *priv);
 
 /**
+ * kgsl_of_property_read_ddrtype - Get property from devicetree based on
+ * the type of DDR.
+ * @node: Devicetree node
+ * @base: prefix string of the property
+ * @ptr:  Pointer to store the value of the property
+ *
+ * First look up the devicetree property based on the prefix string and DDR
+ * type. If property is not specified per DDR type, then look for the property
+ * based on prefix string only.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+int kgsl_of_property_read_ddrtype(struct device_node *node, const char *base,
+		u32 *ptr);
+
+/**
  * kgsl_query_property_list - Get a list of valid properties
  * @device: A KGSL device handle
  * @list: Pointer to a list of u32s
diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c
index 5eaaae3..3032f98 100644
--- a/drivers/gpu/msm/kgsl_drawobj.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -584,13 +584,29 @@ static void add_profiling_buffer(struct kgsl_device *device,
 		return;
 	}
 
-	cmdobj->profiling_buf_entry = entry;
 
-	if (id != 0)
+	if (!id) {
+		cmdobj->profiling_buffer_gpuaddr = gpuaddr;
+	} else {
+		u64 off = offset + sizeof(struct kgsl_drawobj_profiling_buffer);
+
+		/*
+		 * Make sure there is enough room in the object to store the
+		 * entire profiling buffer object
+		 */
+		if (off < offset || off >= entry->memdesc.size) {
+			dev_err(device->dev,
+				"ignore invalid profile offset ctxt %d id %d offset %lld gpuaddr %llx size %lld\n",
+				drawobj->context->id, id, offset, gpuaddr, size);
+			kgsl_mem_entry_put(entry);
+			return;
+		}
+
 		cmdobj->profiling_buffer_gpuaddr =
 			entry->memdesc.gpuaddr + offset;
-	else
-		cmdobj->profiling_buffer_gpuaddr = gpuaddr;
+	}
+
+	cmdobj->profiling_buf_entry = entry;
 }
 
 /**
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 660dbb2..a818997 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -22,8 +22,6 @@
 #undef MODULE_PARAM_PREFIX
 #define MODULE_PARAM_PREFIX "kgsl."
 
-#define GMU_CM3_CFG_NONMASKINTR_SHIFT    9
-
 struct gmu_iommu_context {
 	const char *name;
 	struct device *dev;
@@ -539,7 +537,22 @@ static int gmu_dcvs_set(struct kgsl_device *device,
 		dev_err_ratelimited(&gmu->pdev->dev,
 			"Failed to set GPU perf idx %d, bw idx %d\n",
 			req.freq, req.bw);
-		gmu_snapshot(device);
+
+		/*
+		 * We can be here in two situations. First, we send a DCVS
+		 * HFI so the GMU knows at what level it must bring up the
+		 * GPU. If that fails, it is already handled as part of GMU
+		 * boot failures. Otherwise, we are here because we tried to
+		 * scale an active GPU; for this we need an inline snapshot
+		 * and dispatcher-based recovery.
+		 */
+		if (test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv)) {
+			gmu_core_snapshot(device);
+			adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT |
+				ADRENO_GMU_FAULT_SKIP_SNAPSHOT);
+			adreno_dispatcher_schedule(device);
+		}
 	}
 
 	/* indicate actual clock change */
@@ -807,38 +820,6 @@ static void build_bwtable_cmd_cache(struct gmu_device *gmu)
 				votes->cnoc_votes.cmd_data[i][j];
 }
 
-static int gmu_acd_probe(struct gmu_device *gmu, struct device_node *node)
-{
-	struct hfi_acd_table_cmd *cmd = &gmu->hfi.acd_tbl_cmd;
-	struct device_node *acd_node;
-
-	acd_node = of_find_node_by_name(node, "qcom,gpu-acd-table");
-	if (!acd_node)
-		return -ENODEV;
-
-	cmd->hdr = 0xFFFFFFFF;
-	cmd->version = HFI_ACD_INIT_VERSION;
-	cmd->enable_by_level = 0;
-	cmd->stride = 0;
-	cmd->num_levels = 0;
-
-	of_property_read_u32(acd_node, "qcom,acd-stride", &cmd->stride);
-	if (!cmd->stride || cmd->stride > MAX_ACD_STRIDE)
-		return -EINVAL;
-
-	of_property_read_u32(acd_node, "qcom,acd-num-levels", &cmd->num_levels);
-	if (!cmd->num_levels || cmd->num_levels > MAX_ACD_NUM_LEVELS)
-		return -EINVAL;
-
-	of_property_read_u32(acd_node, "qcom,acd-enable-by-level",
-			&cmd->enable_by_level);
-	if (hweight32(cmd->enable_by_level) != cmd->num_levels)
-		return -EINVAL;
-
-	return of_property_read_u32_array(acd_node, "qcom,acd-data",
-			cmd->data, cmd->stride * cmd->num_levels);
-}
-
 /*
  * gmu_bus_vote_init - initialized RPMh votes needed for bw scaling by GMU.
  * @gmu: Pointer to GMU device
@@ -915,24 +896,6 @@ static int gmu_rpmh_init(struct kgsl_device *device,
 	return rpmh_arc_votes_init(device, gmu, &cx_arc, &mx_arc, GMU_ARC_VOTE);
 }
 
-static void send_nmi_to_gmu(struct adreno_device *adreno_dev)
-{
-	/* Mask so there's no interrupt caused by NMI */
-	adreno_write_gmureg(adreno_dev,
-			ADRENO_REG_GMU_GMU2HOST_INTR_MASK, 0xFFFFFFFF);
-
-	/* Make sure the interrupt is masked before causing it */
-	wmb();
-	adreno_write_gmureg(adreno_dev,
-		ADRENO_REG_GMU_NMI_CONTROL_STATUS, 0);
-	adreno_write_gmureg(adreno_dev,
-		ADRENO_REG_GMU_CM3_CFG,
-		(1 << GMU_CM3_CFG_NONMASKINTR_SHIFT));
-
-	/* Make sure the NMI is invoked before we proceed*/
-	wmb();
-}
-
 static irqreturn_t gmu_irq_handler(int irq, void *data)
 {
 	struct kgsl_device *device = data;
@@ -954,7 +917,7 @@ static irqreturn_t gmu_irq_handler(int irq, void *data)
 				ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
 				(mask | GMU_INT_WDOG_BITE));
 
-		send_nmi_to_gmu(adreno_dev);
+		adreno_gmu_send_nmi(adreno_dev);
 		/*
 		 * There is sufficient delay for the GMU to have finished
 		 * handling the NMI before snapshot is taken, as the fault
@@ -1281,6 +1244,41 @@ int gmu_cache_finalize(struct kgsl_device *device)
 	return 0;
 }
 
+static void gmu_acd_probe(struct kgsl_device *device, struct gmu_device *gmu,
+		struct device_node *node)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	struct hfi_acd_table_cmd *cmd = &gmu->hfi.acd_tbl_cmd;
+	u32 acd_level, cmd_idx, numlvl = pwr->num_pwrlevels;
+	int ret, i;
+
+	if (!ADRENO_FEATURE(ADRENO_DEVICE(device), ADRENO_ACD))
+		return;
+
+	cmd->hdr = 0xFFFFFFFF;
+	cmd->version = HFI_ACD_INIT_VERSION;
+	cmd->stride = 1;
+	cmd->enable_by_level = 0;
+
+	for (i = 0, cmd_idx = 0; i < numlvl; i++) {
+		acd_level = pwr->pwrlevels[numlvl - i - 1].acd_level;
+		if (acd_level) {
+			cmd->enable_by_level |= (1 << i);
+			cmd->data[cmd_idx++] = acd_level;
+		}
+	}
+
+	if (!cmd->enable_by_level)
+		return;
+
+	cmd->num_levels = cmd_idx;
+
+	ret = gmu_aop_mailbox_init(device, gmu);
+	if (ret)
+		dev_err(&gmu->pdev->dev,
+			"AOP mailbox init failed: %d\n", ret);
+}
+
 /* Do not access any GMU registers in GMU probe function */
 static int gmu_probe(struct kgsl_device *device, struct device_node *node)
 {
@@ -1393,17 +1391,7 @@ static int gmu_probe(struct kgsl_device *device, struct device_node *node)
 	else
 		gmu->idle_level = GPU_HW_ACTIVE;
 
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_ACD)) {
-		if (!gmu_acd_probe(gmu, node)) {
-			/* Init the AOP mailbox if we have a valid ACD table */
-			ret = gmu_aop_mailbox_init(device, gmu);
-			if (ret)
-				dev_err(&gmu->pdev->dev,
-					"AOP mailbox init failed: %d\n", ret);
-		} else
-			dev_err(&gmu->pdev->dev,
-				"ACD probe failed: missing or invalid table\n");
-	}
+	gmu_acd_probe(device, gmu, node);
 
 	set_bit(GMU_ENABLED, &device->gmu_core.flags);
 	device->gmu_core.dev_ops = &adreno_a6xx_gmudev;
@@ -1551,7 +1539,7 @@ static void gmu_snapshot(struct kgsl_device *device)
 	struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device);
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
 
-	send_nmi_to_gmu(adreno_dev);
+	adreno_gmu_send_nmi(adreno_dev);
 	/* Wait for the NMI to be handled */
 	udelay(100);
 	kgsl_device_snapshot(device, NULL, true);
@@ -1596,9 +1584,6 @@ static int gmu_start(struct kgsl_device *device)
 	case KGSL_STATE_INIT:
 		gmu_aop_send_acd_state(device, test_bit(ADRENO_ACD_CTRL,
 					&adreno_dev->pwrctrl_flag));
-		ret = gmu_init(device);
-		if (ret)
-			return ret;
 	case KGSL_STATE_SUSPEND:
 		WARN_ON(test_bit(GMU_CLK_ON, &device->gmu_core.flags));
 
@@ -1623,7 +1608,10 @@ static int gmu_start(struct kgsl_device *device)
 			goto error_gmu;
 
 		/* Request default DCVS level */
-		kgsl_pwrctrl_set_default_gpu_pwrlevel(device);
+		ret = kgsl_pwrctrl_set_default_gpu_pwrlevel(device);
+		if (ret)
+			goto error_gmu;
+
 		msm_bus_scale_client_update_request(gmu->pcl, 0);
 		break;
 
@@ -1643,7 +1631,9 @@ static int gmu_start(struct kgsl_device *device)
 		if (ret)
 			goto error_gmu;
 
-		kgsl_pwrctrl_set_default_gpu_pwrlevel(device);
+		ret = kgsl_pwrctrl_set_default_gpu_pwrlevel(device);
+		if (ret)
+			goto error_gmu;
 		break;
 
 	case KGSL_STATE_RESET:
@@ -1797,6 +1787,7 @@ static bool gmu_regulator_isenabled(struct kgsl_device *device)
 struct gmu_core_ops gmu_ops = {
 	.probe = gmu_probe,
 	.remove = gmu_remove,
+	.init = gmu_init,
 	.start = gmu_start,
 	.stop = gmu_stop,
 	.dcvs_set = gmu_dcvs_set,
diff --git a/drivers/gpu/msm/kgsl_gmu_core.c b/drivers/gpu/msm/kgsl_gmu_core.c
index 8af7840..4e3bbfb 100644
--- a/drivers/gpu/msm/kgsl_gmu_core.c
+++ b/drivers/gpu/msm/kgsl_gmu_core.c
@@ -101,14 +101,23 @@ bool gmu_core_gpmu_isenabled(struct kgsl_device *device)
 
 bool gmu_core_scales_bandwidth(struct kgsl_device *device)
 {
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
 	if (device->gmu_core.type == GMU_CORE_TYPE_PCC)
 		return false;
-	else {
-		struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 
-		return gmu_core_gpmu_isenabled(device) &&
-			   (ADRENO_GPUREV(adreno_dev) >= ADRENO_REV_A640);
-	}
+	return gmu_core_gpmu_isenabled(device) &&
+		   (ADRENO_GPUREV(adreno_dev) >= ADRENO_REV_A640);
+}
+
+int gmu_core_init(struct kgsl_device *device)
+{
+	struct gmu_core_ops *gmu_core_ops = GMU_CORE_OPS(device);
+
+	if (gmu_core_ops && gmu_core_ops->init)
+		return gmu_core_ops->init(device);
+
+	return 0;
 }
 
 int gmu_core_start(struct kgsl_device *device)
@@ -315,6 +324,15 @@ void gmu_core_dev_snapshot(struct kgsl_device *device,
 		ops->snapshot(device, snapshot);
 }
 
+void gmu_core_dev_cooperative_reset(struct kgsl_device *device)
+{
+	struct gmu_dev_ops *ops = GMU_DEVICE_OPS(device);
+
+	if (ops && ops->cooperative_reset)
+		ops->cooperative_reset(device);
+}
+
 bool gmu_core_dev_gx_is_on(struct kgsl_device *device)
 {
 	struct gmu_dev_ops *ops = GMU_DEVICE_OPS(device);
diff --git a/drivers/gpu/msm/kgsl_gmu_core.h b/drivers/gpu/msm/kgsl_gmu_core.h
index 22690aa..0818e71 100644
--- a/drivers/gpu/msm/kgsl_gmu_core.h
+++ b/drivers/gpu/msm/kgsl_gmu_core.h
@@ -115,6 +115,7 @@ struct gmu_core_ops {
 	void (*remove)(struct kgsl_device *device);
 	int (*dcvs_set)(struct kgsl_device *device,
 			unsigned int gpu_pwrlevel, unsigned int bus_level);
+	int (*init)(struct kgsl_device *device);
 	int (*start)(struct kgsl_device *device);
 	void (*stop)(struct kgsl_device *device);
 	void (*snapshot)(struct kgsl_device *device);
@@ -142,6 +143,7 @@ struct gmu_dev_ops {
 	unsigned int (*ifpc_show)(struct kgsl_device *device);
 	void (*snapshot)(struct kgsl_device *device,
 		struct kgsl_snapshot *snapshot);
+	void (*cooperative_reset)(struct kgsl_device *device);
 	void (*halt_execution)(struct kgsl_device *device);
 	int (*wait_for_active_transition)(struct kgsl_device *device);
 	const unsigned int gmu2host_intr_mask;
@@ -177,6 +179,7 @@ extern struct gmu_core_ops rgmu_ops;
 /* GMU core functions */
 int gmu_core_probe(struct kgsl_device *device);
 void gmu_core_remove(struct kgsl_device *device);
+int gmu_core_init(struct kgsl_device *device);
 int gmu_core_start(struct kgsl_device *device);
 void gmu_core_stop(struct kgsl_device *device);
 int gmu_core_suspend(struct kgsl_device *device);
@@ -224,5 +227,6 @@ int gmu_core_dev_ifpc_show(struct kgsl_device *device);
 int gmu_core_dev_ifpc_store(struct kgsl_device *device, unsigned int val);
 void gmu_core_dev_prepare_stop(struct kgsl_device *device);
 int gmu_core_dev_wait_for_active_transition(struct kgsl_device *device);
+void gmu_core_dev_cooperative_reset(struct kgsl_device *device);
 
 #endif /* __KGSL_GMU_CORE_H */
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index 00500ad..864d103 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -320,6 +320,8 @@ static int hfi_send_cmd(struct gmu_device *gmu, uint32_t queue_idx,
 	return rc;
 }
 
+#define HFI_ACK_ERROR 0xffffffff
+
 static int hfi_send_generic_req(struct gmu_device *gmu, uint32_t queue,
 		void *cmd)
 {
@@ -329,16 +331,14 @@ static int hfi_send_generic_req(struct gmu_device *gmu, uint32_t queue,
 	memset(&ret_cmd, 0, sizeof(ret_cmd));
 
 	rc = hfi_send_cmd(gmu, queue, cmd, &ret_cmd);
-	if (rc)
-		return rc;
 
-	if (ret_cmd.results[2])
-		dev_err(&gmu->pdev->dev,
-				"HFI ACK failure: Req 0x%8.8X Error 0x%X\n",
-				ret_cmd.results[1],
-				ret_cmd.results[2]);
+	if (!rc && ret_cmd.results[2] == HFI_ACK_ERROR) {
+		dev_err(&gmu->pdev->dev, "HFI ACK failure: Req 0x%8.8X\n",
+						ret_cmd.results[1]);
+		return -EINVAL;
+	}
 
-	return ret_cmd.results[2] ? -EINVAL : 0;
+	return rc;
 }
 
 static int hfi_send_gmu_init(struct gmu_device *gmu, uint32_t boot_state)
@@ -644,6 +644,29 @@ static int hfi_verify_fw_version(struct kgsl_device *device,
 	return 0;
 }
 
+static int hfi_send_lm_feature_ctrl(struct gmu_device *gmu,
+		struct adreno_device *adreno_dev)
+{
+	struct hfi_set_value_cmd req = {
+		.type = HFI_VALUE_LM_CS0,
+		.subtype = 0,
+		.data = adreno_dev->lm_slope,
+	};
+	struct kgsl_device *device = &adreno_dev->dev;
+	int ret;
+
+	if (!test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
+		return 0;
+
+	ret = hfi_send_feature_ctrl(gmu, HFI_FEATURE_LM, 1,
+			device->pwrctrl.throttle_mask);
+
+	if (!ret)
+		ret = hfi_send_req(gmu, H2F_MSG_SET_VALUE, &req);
+
+	return ret;
+}
+
 static int hfi_send_acd_feature_ctrl(struct gmu_device *gmu,
 		struct adreno_device *adreno_dev)
 {
@@ -723,12 +746,9 @@ int hfi_start(struct kgsl_device *device,
 		if (result)
 			return result;
 
-		if (test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) {
-			result = hfi_send_feature_ctrl(gmu, HFI_FEATURE_LM, 1,
-					device->pwrctrl.throttle_mask);
-			if (result)
-				return result;
-		}
+		result = hfi_send_lm_feature_ctrl(gmu, adreno_dev);
+		if (result)
+			return result;
 
 		result = hfi_send_core_fw_start(gmu);
 		if (result)
diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h
index 8a6175f..a6d054b 100644
--- a/drivers/gpu/msm/kgsl_hfi.h
+++ b/drivers/gpu/msm/kgsl_hfi.h
@@ -96,6 +96,7 @@ struct hfi_queue_table;
 #define HFI_VALUE_LOG_EVENT_ON		112
 #define HFI_VALUE_LOG_EVENT_OFF		113
 #define HFI_VALUE_DCVS_OBJ		114
+#define HFI_VALUE_LM_CS0		115
 
 #define HFI_VALUE_GLOBAL_TOKEN		0xFFFFFFFF
 
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 6a8984a..ac85b3d 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -8,6 +8,7 @@
 #include <linux/of_platform.h>
 #include <linux/seq_file.h>
 #include <linux/delay.h>
+#include <linux/random.h>
 #include <soc/qcom/scm.h>
 #include <soc/qcom/secure_buffer.h>
 
@@ -75,15 +76,8 @@ static struct kmem_cache *addr_entry_cache;
  *
  * Here we define an array and a simple allocator to keep track of the currently
  * active global entries. Each entry is assigned a unique address inside of a
- * MMU implementation specific "global" region. The addresses are assigned
- * sequentially and never re-used to avoid having to go back and reprogram
- * existing pagetables. The entire list of active entries are mapped and
- * unmapped into every new pagetable as it is created and destroyed.
- *
- * Because there are relatively few entries and they are defined at boot time we
- * don't need to go over the top to define a dynamic allocation scheme. It will
- * be less wasteful to pick a static number with a little bit of growth
- * potential.
+ * MMU implementation specific "global" region. We use a simple bitmap based
+ * allocator for the region to allow for both fixed and dynamic addressing.
  */
 
 #define GLOBAL_PT_ENTRIES 32
@@ -93,12 +87,16 @@ struct global_pt_entry {
 	char name[32];
 };
 
+#define GLOBAL_MAP_PAGES (KGSL_IOMMU_GLOBAL_MEM_SIZE >> PAGE_SHIFT)
+
 static struct global_pt_entry global_pt_entries[GLOBAL_PT_ENTRIES];
+static DECLARE_BITMAP(global_map, GLOBAL_MAP_PAGES);
+
 static int secure_global_size;
 static int global_pt_count;
-uint64_t global_pt_alloc;
 static struct kgsl_memdesc gpu_qdss_desc;
 static struct kgsl_memdesc gpu_qtimer_desc;
+
 void kgsl_print_global_pt_entries(struct seq_file *s)
 {
 	int i;
@@ -193,6 +191,12 @@ static void kgsl_iommu_remove_global(struct kgsl_mmu *mmu,
 
 	for (i = 0; i < global_pt_count; i++) {
 		if (global_pt_entries[i].memdesc == memdesc) {
+			u64 offset = memdesc->gpuaddr -
+				KGSL_IOMMU_GLOBAL_MEM_BASE(mmu);
+
+			bitmap_clear(global_map, offset >> PAGE_SHIFT,
+				kgsl_memdesc_footprint(memdesc) >> PAGE_SHIFT);
+
 			memdesc->gpuaddr = 0;
 			memdesc->priv &= ~KGSL_MEMDESC_GLOBAL;
 			global_pt_entries[i].memdesc = NULL;
@@ -204,19 +208,43 @@ static void kgsl_iommu_remove_global(struct kgsl_mmu *mmu,
 static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
 		struct kgsl_memdesc *memdesc, const char *name)
 {
+	int bit, start = 0;
+	u64 size = kgsl_memdesc_footprint(memdesc);
+
 	if (memdesc->gpuaddr != 0)
 		return;
 
-	/*Check that we can fit the global allocations */
-	if (WARN_ON(global_pt_count >= GLOBAL_PT_ENTRIES) ||
-		WARN_ON((global_pt_alloc + memdesc->size) >=
-			KGSL_IOMMU_GLOBAL_MEM_SIZE))
+	if (WARN_ON(global_pt_count >= GLOBAL_PT_ENTRIES))
 		return;
 
-	memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu) + global_pt_alloc;
+	if (WARN_ON(size > KGSL_IOMMU_GLOBAL_MEM_SIZE))
+		return;
+
+	if (memdesc->priv & KGSL_MEMDESC_RANDOM) {
+		u32 range = GLOBAL_MAP_PAGES - (size >> PAGE_SHIFT);
+
+		start = get_random_int() % range;
+	}
+
+	while (start >= 0) {
+		bit = bitmap_find_next_zero_area(global_map, GLOBAL_MAP_PAGES,
+			start, size >> PAGE_SHIFT, 0);
+
+		if (bit < GLOBAL_MAP_PAGES)
+			break;
+
+		start--;
+	}
+
+	if (WARN_ON(start < 0))
+		return;
+
+	memdesc->gpuaddr =
+		KGSL_IOMMU_GLOBAL_MEM_BASE(mmu) + (bit << PAGE_SHIFT);
+
+	bitmap_set(global_map, bit, size >> PAGE_SHIFT);
 
 	memdesc->priv |= KGSL_MEMDESC_GLOBAL;
-	global_pt_alloc += kgsl_memdesc_footprint(memdesc);
 
 	global_pt_entries[global_pt_count].memdesc = memdesc;
 	strlcpy(global_pt_entries[global_pt_count].name, name,
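
The bitmap allocator above replaces the old bump pointer (global_pt_alloc), so global entries can be freed and, for KGSL_MEMDESC_RANDOM buffers, placed at a randomized offset. A compilable user-space sketch of the same search follows; find_zero_area() is a simplified stand-in for the kernel's bitmap_find_next_zero_area(), and note that start must be signed for the downward scan to terminate.

#include <stdio.h>
#include <stdlib.h>

#define MAP_PAGES 32
static unsigned char map[MAP_PAGES];	/* one byte per page, for clarity */

/* First index >= start of a run of nr free pages, or MAP_PAGES if none. */
static int find_zero_area(int start, int nr)
{
	for (int i = start; i + nr <= MAP_PAGES; i++) {
		int ok = 1;

		for (int j = 0; j < nr; j++)
			ok &= !map[i + j];
		if (ok)
			return i;
	}
	return MAP_PAGES;
}

/*
 * Allocate nr pages. A randomized start gives dynamic placement; scanning
 * downward afterwards lets a search that failed above the start succeed
 * below it.
 */
static int alloc_pages(int nr, int randomize)
{
	int start = randomize ? rand() % (MAP_PAGES - nr) : 0;

	for (; start >= 0; start--) {	/* signed, so the loop terminates */
		int bit = find_zero_area(start, nr);

		if (bit < MAP_PAGES) {
			for (int j = 0; j < nr; j++)
				map[bit + j] = 1;
			return bit;
		}
	}
	return -1;
}

int main(void)
{
	printf("fixed:  %d\n", alloc_pages(4, 0));
	printf("random: %d\n", alloc_pages(4, 1));
	return 0;
}
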
@@ -763,8 +791,6 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	if (pt->name == KGSL_MMU_SECURE_PT)
 		ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
 
-	ctx->fault = 1;
-
 	if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
 		&adreno_dev->ft_pf_policy) &&
 		(flags & IOMMU_FAULT_TRANSACTION_STALLED)) {
@@ -806,9 +832,8 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 
 			fsynr1 = KGSL_IOMMU_GET_CTX_REG(ctx, FSYNR1);
 			dev_crit(ctx->kgsldev->dev,
-				      "FAULTING BLOCK: %s\n",
-				      gpudev->iommu_fault_block(adreno_dev,
-								fsynr1));
+				"FAULTING BLOCK: %s\n",
+				gpudev->iommu_fault_block(device, fsynr1));
 		}
 
 		/* Don't print the debug if this is a permissions fault */
@@ -856,6 +881,9 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 		sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_CFIE_SHIFT);
 		KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
 
+		/* This is used by the reset/recovery path */
+		ctx->stalled_on_fault = true;
+
 		adreno_set_gpu_fault(adreno_dev, ADRENO_IOMMU_PAGE_FAULT);
 		/* Go ahead with recovery*/
 		adreno_dispatcher_schedule(device);
@@ -1624,6 +1652,18 @@ static int kgsl_iommu_start(struct kgsl_mmu *mmu)
 	int status;
 	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
 
+	/* Set the following registers only when the MMU type is QSMMU */
+	if (mmu->subtype != KGSL_IOMMU_SMMU_V500) {
+		/* Enable hazard check from GPU_SMMU_HUM_CFG */
+		writel_relaxed(0x02, iommu->regbase + 0x6800);
+
+		/* Write to GPU_SMMU_DORA_ORDERING to disable reordering */
+		writel_relaxed(0x01, iommu->regbase + 0x64a0);
+
+		/* Make sure the register writes are committed */
+		wmb();
+	}
+
 	status = _setup_user_context(mmu);
 	if (status)
 		return status;
@@ -1947,7 +1987,7 @@ static void kgsl_iommu_clear_fsr(struct kgsl_mmu *mmu)
 	struct kgsl_iommu_context  *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
 	unsigned int sctlr_val;
 
-	if (ctx->default_pt != NULL) {
+	if (ctx->default_pt != NULL && ctx->stalled_on_fault) {
 		kgsl_iommu_enable_clk(mmu);
 		KGSL_IOMMU_SET_CTX_REG(ctx, FSR, 0xffffffff);
 		/*
@@ -1964,6 +2004,7 @@ static void kgsl_iommu_clear_fsr(struct kgsl_mmu *mmu)
 		 */
 		wmb();
 		kgsl_iommu_disable_clk(mmu);
+		ctx->stalled_on_fault = false;
 	}
 }
 
@@ -1971,42 +2012,30 @@ static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
 {
 	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
 	struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
-	unsigned int fsr_val;
 
-	if (ctx->default_pt != NULL && ctx->fault) {
-		while (1) {
-			KGSL_IOMMU_SET_CTX_REG(ctx, FSR, 0xffffffff);
-			/*
-			 * Make sure the above register write
-			 * is not reordered across the barrier
-			 * as we use writel_relaxed to write it.
-			 */
-			wmb();
+	if (ctx->default_pt != NULL && ctx->stalled_on_fault) {
+		/*
+		 * This will only clear fault bits in FSR. FSR.SS will still
+		 * be set. Writing to RESUME (below) is the only way to clear
+		 * FSR.SS bit.
+		 */
+		KGSL_IOMMU_SET_CTX_REG(ctx, FSR, 0xffffffff);
+		/*
+		 * Make sure the above register write is not reordered across
+		 * the barrier as we use writel_relaxed to write it.
+		 */
+		wmb();
 
-			/*
-			 * Write 1 to RESUME.TnR to terminate the
-			 * stalled transaction.
-			 */
-			KGSL_IOMMU_SET_CTX_REG(ctx, RESUME, 1);
-			/*
-			 * Make sure the above register writes
-			 * are not reordered across the barrier
-			 * as we use writel_relaxed to write them
-			 */
-			wmb();
-
-			/*
-			 * Wait for small time before checking SS bit
-			 * to allow transactions to go through after
-			 * resume and update SS bit in case more faulty
-			 * transactions are pending.
-			 */
-			udelay(5);
-			fsr_val = KGSL_IOMMU_GET_CTX_REG(ctx, FSR);
-			if (!(fsr_val & (1 << KGSL_IOMMU_FSR_SS_SHIFT)))
-				break;
-		}
-		ctx->fault = 0;
+		/*
+		 * Write 1 to RESUME.TnR to terminate the stalled transaction.
+		 * This will also allow the SMMU to process new transactions.
+		 */
+		KGSL_IOMMU_SET_CTX_REG(ctx, RESUME, 1);
+		/*
+		 * Make sure the above register writes are not reordered across
+		 * the barrier as we use writel_relaxed to write them.
+		 */
+		wmb();
 	}
 }
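
The rewrite above drops the old poll-and-retry loop: clearing FSR leaves FSR.SS set, and a single write to RESUME.TnR both clears it and lets the SMMU accept new transactions. A user-space mockup of the ordering, where FSR and RESUME are simulated registers and barrier() stands in for the kernel's wmb():

#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

static volatile unsigned int FSR, RESUME;

static void pagefault_resume(int stalled_on_fault)
{
	if (!stalled_on_fault)
		return;

	/* Clears the fault bits; FSR.SS stays set until RESUME is written */
	FSR = 0xffffffff;
	barrier();

	/* Terminate the stalled transaction so the SMMU can make progress */
	RESUME = 1;
	barrier();
}

int main(void)
{
	pagefault_resume(1);
	printf("FSR=%#x RESUME=%u\n", FSR, RESUME);
	return 0;
}
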
 
@@ -2152,14 +2181,6 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
 	return 0;
 }
 
-static struct kgsl_protected_registers *
-kgsl_iommu_get_prot_regs(struct kgsl_mmu *mmu)
-{
-	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
-
-	return &iommu->protect;
-}
-
 static struct kgsl_iommu_addr_entry *_find_gpuaddr(
 		struct kgsl_pagetable *pagetable, uint64_t gpuaddr)
 {
@@ -2611,15 +2632,6 @@ static int _kgsl_iommu_probe(struct kgsl_device *device,
 	iommu->regstart = reg_val[0];
 	iommu->regsize = reg_val[1];
 
-	/* Protecting the SMMU registers is mandatory */
-	if (of_property_read_u32_array(node, "qcom,protect", reg_val, 2)) {
-		dev_err(device->dev,
-			"dt: no iommu protection range specified\n");
-		return -EINVAL;
-	}
-	iommu->protect.base = reg_val[0] / sizeof(u32);
-	iommu->protect.range = reg_val[1] / sizeof(u32);
-
 	of_property_for_each_string(node, "clock-names", prop, cname) {
 		struct clk *c = devm_clk_get(&pdev->dev, cname);
 
@@ -2707,7 +2719,6 @@ struct kgsl_mmu_ops kgsl_iommu_ops = {
 	.mmu_pt_equal = kgsl_iommu_pt_equal,
 	.mmu_set_pf_policy = kgsl_iommu_set_pf_policy,
 	.mmu_pagefault_resume = kgsl_iommu_pagefault_resume,
-	.mmu_get_prot_regs = kgsl_iommu_get_prot_regs,
 	.mmu_init_pt = kgsl_iommu_init_pt,
 	.mmu_add_global = kgsl_iommu_add_global,
 	.mmu_remove_global = kgsl_iommu_remove_global,
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index b98f2c2..5e8bb24 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -86,8 +86,8 @@ enum kgsl_iommu_context_id {
  * @cb_num: The hardware context bank number, used for calculating register
  *		offsets.
  * @kgsldev: The kgsl device that uses this context.
- * @fault: Flag when set indicates that this iommu device has caused a page
- * fault
+ * @stalled_on_fault: Flag when set indicates that this iommu device is stalled
+ * on a page fault
  * @default_pt: The default pagetable for this context,
  *		it may be changed by self programming.
  */
@@ -97,7 +97,7 @@ struct kgsl_iommu_context {
 	enum kgsl_iommu_context_id id;
 	unsigned int cb_num;
 	struct kgsl_device *kgsldev;
-	int fault;
+	bool stalled_on_fault;
 	void __iomem *regbase;
 	struct kgsl_pagetable *default_pt;
 };
@@ -112,7 +112,6 @@ struct kgsl_iommu_context {
  * @clk_enable_count: The ref count of clock enable calls
  * @clks: Array of pointers to IOMMU clocks
  * @smmu_info: smmu info used in a5xx preemption
- * @protect: register protection settings for the iommu.
  */
 struct kgsl_iommu {
 	struct kgsl_iommu_context ctx[KGSL_IOMMU_CONTEXT_MAX];
@@ -123,7 +122,6 @@ struct kgsl_iommu {
 	atomic_t clk_enable_count;
 	struct clk *clks[KGSL_IOMMU_MAX_CLKS];
 	struct kgsl_memdesc smmu_info;
-	struct kgsl_protected_registers protect;
 };
 
 /*
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 637e57d..93012aa 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -68,8 +68,6 @@ struct kgsl_mmu_ops {
 	bool (*mmu_pt_equal)(struct kgsl_mmu *mmu,
 			struct kgsl_pagetable *pt, u64 ttbr0);
 	int (*mmu_set_pf_policy)(struct kgsl_mmu *mmu, unsigned long pf_policy);
-	struct kgsl_protected_registers *(*mmu_get_prot_regs)
-			(struct kgsl_mmu *mmu);
 	int (*mmu_init_pt)(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt);
 	void (*mmu_add_global)(struct kgsl_mmu *mmu,
 			struct kgsl_memdesc *memdesc, const char *name);
@@ -328,15 +326,6 @@ static inline void kgsl_mmu_clear_fsr(struct kgsl_mmu *mmu)
 		return mmu->mmu_ops->mmu_clear_fsr(mmu);
 }
 
-static inline struct kgsl_protected_registers *kgsl_mmu_get_prot_regs
-						(struct kgsl_mmu *mmu)
-{
-	if (MMU_OP_VALID(mmu, mmu_get_prot_regs))
-		return mmu->mmu_ops->mmu_get_prot_regs(mmu);
-
-	return NULL;
-}
-
 static inline int kgsl_mmu_is_perprocess(struct kgsl_mmu *mmu)
 {
 	return MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) ? 0 : 1;
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index 14c1c58..7077d5d 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -253,12 +253,20 @@ void kgsl_pool_free_pages(struct page **pages, unsigned int pcount)
 	if (pages == NULL || pcount == 0)
 		return;
 
+	if (WARN(!kern_addr_valid((unsigned long)pages),
+		"Address of pages=%pK is not valid\n", pages))
+		return;
+
 	for (i = 0; i < pcount;) {
 		/*
 		 * Free each page or compound page group individually.
 		 */
 		struct page *p = pages[i];
 
+		if (WARN(!kern_addr_valid((unsigned long)p),
+			"Address of page=%pK is not valid\n", p))
+			return;
+
 		i += 1 << compound_order(p);
 		kgsl_pool_free_page(p);
 	}
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 6786ecb4..ddf61de 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -3136,7 +3136,7 @@ EXPORT_SYMBOL(kgsl_pwr_limits_get_freq);
  * kgsl_pwrctrl_set_default_gpu_pwrlevel() - Set GPU to default power level
  * @device: Pointer to the kgsl_device struct
  */
-void kgsl_pwrctrl_set_default_gpu_pwrlevel(struct kgsl_device *device)
+int kgsl_pwrctrl_set_default_gpu_pwrlevel(struct kgsl_device *device)
 {
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
 	unsigned int new_level = pwr->default_pwrlevel;
@@ -3158,5 +3158,5 @@ void kgsl_pwrctrl_set_default_gpu_pwrlevel(struct kgsl_device *device)
 	pwr->previous_pwrlevel = old_level;
 
 	/* Request adjusted DCVS level */
-	kgsl_clk_set_rate(device, pwr->active_pwrlevel);
+	return kgsl_clk_set_rate(device, pwr->active_pwrlevel);
 }
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index ff3e4fe..0f4dc72 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -105,6 +105,7 @@ struct kgsl_pwrlevel {
 	unsigned int bus_freq;
 	unsigned int bus_min;
 	unsigned int bus_max;
+	unsigned int acd_level;
 };
 
 struct kgsl_regulator {
@@ -266,7 +267,7 @@ void kgsl_pwrctrl_set_constraint(struct kgsl_device *device,
 			struct kgsl_pwr_constraint *pwrc, uint32_t id);
 void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device,
 			unsigned long timeout_us);
-void kgsl_pwrctrl_set_default_gpu_pwrlevel(struct kgsl_device *device);
+int kgsl_pwrctrl_set_default_gpu_pwrlevel(struct kgsl_device *device);
 void kgsl_pwrctrl_disable_unused_opp(struct kgsl_device *device,
 		struct device *dev);
 
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 790b379..4fdb5e2 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -1012,6 +1012,9 @@ int kgsl_pwrscale_init(struct device *dev, const char *governor)
 		data->bus.ib = &pwr->bus_ib[0];
 		data->bus.index = &pwr->bus_index[0];
 		data->bus.width = pwr->bus_width;
+		if (!kgsl_of_property_read_ddrtype(device->pdev->dev.of_node,
+			"qcom,bus-accesses", &data->bus.max))
+			data->bus.floating = false;
 	} else
 		data->bus.num = 0;
 
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index a7c1471..0a22259 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -508,7 +508,11 @@ static int kgsl_lock_sgt(struct sg_table *sgt, u64 size)
 	int ret;
 	int i;
 
-	ret = hyp_assign_table(sgt, &source_vm, 1, &dest_vm, &dest_perms, 1);
+	do {
+		ret = hyp_assign_table(sgt, &source_vm, 1, &dest_vm,
+					&dest_perms, 1);
+	} while (ret == -EAGAIN);
+
 	if (ret) {
 		/*
 		 * If returned error code is EADDRNOTAVAIL, then this
@@ -539,11 +543,17 @@ static int kgsl_unlock_sgt(struct sg_table *sgt)
 	int ret;
 	struct sg_page_iter sg_iter;
 
-	ret = hyp_assign_table(sgt, &source_vm, 1, &dest_vm, &dest_perms, 1);
+	do {
+		ret = hyp_assign_table(sgt, &source_vm, 1, &dest_vm,
+					&dest_perms, 1);
+	} while (ret == -EAGAIN);
+
+	if (ret)
+		return ret;
 
 	for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0)
 		ClearPagePrivate(sg_page_iter_page(&sg_iter));
-	return ret;
+	return 0;
 }
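
hyp_assign_table() can fail transiently while the hypervisor is busy, so both the lock and unlock paths now spin on -EAGAIN; as in the patch, the retry is unbounded and any other error is final. A small sketch of the pattern, with try_assign() as a placeholder for the real call:

#include <errno.h>
#include <stdio.h>

static int attempts;

/* Pretend the hypervisor call transiently fails twice, then succeeds. */
static int try_assign(void)
{
	return (attempts++ < 2) ? -EAGAIN : 0;
}

/* Retry while the failure is transient; any other error is final. */
static int assign_with_retry(void)
{
	int ret;

	do {
		ret = try_assign();
	} while (ret == -EAGAIN);

	return ret;
}

int main(void)
{
	printf("ret=%d after %d attempts\n", assign_with_retry(), attempts);
	return 0;
}
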
 
 static void kgsl_page_alloc_free(struct kgsl_memdesc *memdesc)
@@ -558,8 +568,15 @@ static void kgsl_page_alloc_free(struct kgsl_memdesc *memdesc)
 
 		ret = kgsl_unlock_sgt(memdesc->sgt);
 		if (ret) {
+			/*
+			 * Unlock of the secure buffer failed. This buffer will
+			 * be stuck on the secure side forever and is unrecoverable.
+			 * Give up on the buffer and don't return it to the
+			 * pool.
+			 */
 			pr_err("kgsl: secure buf unlock failed: gpuaddr: %llx size: %llx ret: %d\n",
 					memdesc->gpuaddr, memdesc->size, ret);
+			return;
 		}
 
 		atomic_long_sub(memdesc->size, &kgsl_driver.stats.secure);
@@ -645,7 +662,7 @@ static void kgsl_cma_coherent_free(struct kgsl_memdesc *memdesc)
 				&kgsl_driver.stats.secure);
 
 			kgsl_cma_unlock_secure(memdesc);
-			attrs = (unsigned long)&memdesc->attrs;
+			attrs = memdesc->attrs;
 		} else
 			atomic_long_sub(memdesc->size,
 				&kgsl_driver.stats.coherent);
@@ -1053,7 +1070,9 @@ void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
 		kvfree(memdesc->sgt);
 	}
 
+	memdesc->page_count = 0;
 	kvfree(memdesc->pages);
+	memdesc->pages = NULL;
 }
 EXPORT_SYMBOL(kgsl_sharedmem_free);
 
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index b3a56a8..99bb67b 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -752,6 +752,9 @@ void kgsl_device_snapshot(struct kgsl_device *device,
 	dev_err(device->dev, "%s snapshot created at pa %pa++0x%zx\n",
 			gmu_fault ? "GMU" : "GPU", &pa, snapshot->size);
 
+	if (device->skip_ib_capture)
+		BUG_ON(device->force_panic);
+
 	sysfs_notify(&device->snapshot_kobj, NULL, "timestamp");
 
 	/*
@@ -956,6 +959,22 @@ static ssize_t force_panic_store(struct kgsl_device *device, const char *buf,
 	return count;
 }
 
+/* Show the skip_ib_capture request status */
+static ssize_t skip_ib_capture_show(struct kgsl_device *device, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d\n", device->skip_ib_capture);
+}
+
+/* Store the request to skip IB parsing during snapshot capture */
+static ssize_t skip_ib_capture_store(struct kgsl_device *device,
+						const char *buf, size_t count)
+{
+	int ret;
+
+	ret = kstrtobool(buf, &device->skip_ib_capture);
+	return ret ? ret : count;
+}
+
 /* Show the prioritize_unrecoverable status */
 static ssize_t prioritize_unrecoverable_show(
 		struct kgsl_device *device, char *buf)
@@ -1038,6 +1057,8 @@ static SNAPSHOT_ATTR(snapshot_crashdumper, 0644, snapshot_crashdumper_show,
 	snapshot_crashdumper_store);
 static SNAPSHOT_ATTR(snapshot_legacy, 0644, snapshot_legacy_show,
 	snapshot_legacy_store);
+static SNAPSHOT_ATTR(skip_ib_capture, 0644, skip_ib_capture_show,
+		skip_ib_capture_store);
 
 static ssize_t snapshot_sysfs_show(struct kobject *kobj,
 	struct attribute *attr, char *buf)
@@ -1083,6 +1104,7 @@ static const struct attribute *snapshot_attrs[] = {
 	&attr_prioritize_unrecoverable.attr,
 	&attr_snapshot_crashdumper.attr,
 	&attr_snapshot_legacy.attr,
+	&attr_skip_ib_capture.attr,
 	NULL,
 };
 
@@ -1308,5 +1330,6 @@ static void kgsl_snapshot_save_frozen_objs(struct work_struct *work)
 
 gmu_only:
 	complete_all(&snapshot->dump_gate);
-	BUG_ON(snapshot->device->force_panic);
+	BUG_ON(!snapshot->device->skip_ib_capture &&
+				snapshot->device->force_panic);
 }
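
The new sysfs node parses its input with kstrtobool(), which accepts 1/0, y/n and on/off style strings. A user-space sketch of the store/show contract, where parse_bool() is a simplified stand-in for kstrtobool():

#include <errno.h>
#include <stdio.h>

static int skip_ib_capture;	/* stands in for device->skip_ib_capture */

static int parse_bool(const char *buf, int *res)
{
	switch (buf[0]) {
	case '1': case 'y': case 'Y':
		*res = 1;
		return 0;
	case '0': case 'n': case 'N':
		*res = 0;
		return 0;
	default:
		return -EINVAL;
	}
}

/* Like skip_ib_capture_store(): consumed count on success, error otherwise. */
static long store(const char *buf, long count)
{
	int ret = parse_bool(buf, &skip_ib_capture);

	return ret ? ret : count;
}

int main(void)
{
	printf("store=%ld value=%d\n", store("1\n", 2), skip_ib_capture);
	printf("store=%ld\n", store("x\n", 2));	/* -> -EINVAL */
	return 0;
}
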
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
index 9428ea7..c52bd16 100644
--- a/drivers/hid/hid-a4tech.c
+++ b/drivers/hid/hid-a4tech.c
@@ -26,12 +26,36 @@
 #define A4_2WHEEL_MOUSE_HACK_7	0x01
 #define A4_2WHEEL_MOUSE_HACK_B8	0x02
 
+#define A4_WHEEL_ORIENTATION	(HID_UP_GENDESK | 0x000000b8)
+
 struct a4tech_sc {
 	unsigned long quirks;
 	unsigned int hw_wheel;
 	__s32 delayed_value;
 };
 
+static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+			    struct hid_field *field, struct hid_usage *usage,
+			    unsigned long **bit, int *max)
+{
+	struct a4tech_sc *a4 = hid_get_drvdata(hdev);
+
+	if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 &&
+	    usage->hid == A4_WHEEL_ORIENTATION) {
+		/*
+		 * We do not want to have this usage mapped to anything as it's
+		 * nonstandard and doesn't really behave like an HID report.
+		 * It's only selecting the orientation (vertical/horizontal) of
+		 * the previous mouse wheel report. The input_events will be
+		 * generated once both reports are recorded in a4_event().
+		 */
+		return -1;
+	}
+
+	return 0;
+}
+
 static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
 		struct hid_field *field, struct hid_usage *usage,
 		unsigned long **bit, int *max)
@@ -53,8 +77,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
 	struct a4tech_sc *a4 = hid_get_drvdata(hdev);
 	struct input_dev *input;
 
-	if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
-			!usage->type)
+	if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
 		return 0;
 
 	input = field->hidinput->input;
@@ -65,7 +88,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
 			return 1;
 		}
 
-		if (usage->hid == 0x000100b8) {
+		if (usage->hid == A4_WHEEL_ORIENTATION) {
 			input_event(input, EV_REL, value ? REL_HWHEEL :
 					REL_WHEEL, a4->delayed_value);
 			return 1;
@@ -129,6 +152,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices);
 static struct hid_driver a4_driver = {
 	.name = "a4tech",
 	.id_table = a4_devices,
+	.input_mapping = a4_input_mapping,
 	.input_mapped = a4_input_mapped,
 	.event = a4_event,
 	.probe = a4_probe,
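
The B8 quirk splits one logical scroll event across two reports: the wheel report carries the delta, and the following 0xb8 usage only selects its orientation. A sketch of the pairing that a4_event() implements:

#include <stdio.h>

static int delayed_value;

static void on_wheel_report(int value)
{
	delayed_value = value;		/* remember the delta, emit nothing yet */
}

static void on_orientation_report(int horizontal)
{
	printf("%s wheel: %d\n", horizontal ? "horizontal" : "vertical",
	       delayed_value);
}

int main(void)
{
	on_wheel_report(-1);
	on_orientation_report(1);	/* -> "horizontal wheel: -1" */
	return 0;
}
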
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 1cb4199..d0a81a0 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -57,7 +57,6 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\")
 struct apple_sc {
 	unsigned long quirks;
 	unsigned int fn_on;
-	DECLARE_BITMAP(pressed_fn, KEY_CNT);
 	DECLARE_BITMAP(pressed_numlock, KEY_CNT);
 };
 
@@ -184,6 +183,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
 {
 	struct apple_sc *asc = hid_get_drvdata(hid);
 	const struct apple_key_translation *trans, *table;
+	bool do_translate;
+	u16 code = 0;
 
 	if (usage->code == KEY_FN) {
 		asc->fn_on = !!value;
@@ -192,8 +193,6 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
 	}
 
 	if (fnmode) {
-		int do_translate;
-
 		if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
 				hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
 			table = macbookair_fn_keys;
@@ -205,25 +204,33 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
 		trans = apple_find_translation (table, usage->code);
 
 		if (trans) {
-			if (test_bit(usage->code, asc->pressed_fn))
-				do_translate = 1;
-			else if (trans->flags & APPLE_FLAG_FKEY)
-				do_translate = (fnmode == 2 && asc->fn_on) ||
-					(fnmode == 1 && !asc->fn_on);
-			else
-				do_translate = asc->fn_on;
+			if (test_bit(trans->from, input->key))
+				code = trans->from;
+			else if (test_bit(trans->to, input->key))
+				code = trans->to;
 
-			if (do_translate) {
-				if (value)
-					set_bit(usage->code, asc->pressed_fn);
-				else
-					clear_bit(usage->code, asc->pressed_fn);
+			if (!code) {
+				if (trans->flags & APPLE_FLAG_FKEY) {
+					switch (fnmode) {
+					case 1:
+						do_translate = !asc->fn_on;
+						break;
+					case 2:
+						do_translate = asc->fn_on;
+						break;
+					default:
+						/* should never happen */
+						do_translate = false;
+					}
+				} else {
+					do_translate = asc->fn_on;
+				}
 
-				input_event(input, usage->type, trans->to,
-						value);
-
-				return 1;
+				code = do_translate ? trans->to : trans->from;
 			}
+
+			input_event(input, usage->type, code, value);
+			return 1;
 		}
 
 		if (asc->quirks & APPLE_NUMLOCK_EMULATION &&
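
The reworked translation keys off the input device's key bitmap: if either keycode of a translation pair is still held, that code is reused so the release always matches the press; only for a fresh press do fnmode and the current Fn state pick the code. A compilable sketch of the decision (the keycodes in main() are illustrative, not taken from the driver's tables):

#include <stdio.h>

static int pick_code(int from, int to, int from_held, int to_held,
		     int is_fkey, int fnmode, int fn_on)
{
	int translate;

	/* Reuse a held code so the release always matches the press. */
	if (from_held)
		return from;
	if (to_held)
		return to;

	if (is_fkey)
		/* fnmode 1: media keys by default; fnmode 2: F-keys by default */
		translate = (fnmode == 2) ? fn_on : !fn_on;
	else
		translate = fn_on;

	return translate ? to : from;
}

int main(void)
{
	/* F1 pressed without Fn under fnmode=1 -> the translated code */
	printf("%d\n", pick_code(59, 224, 0, 0, 1, 1, 0));
	return 0;
}
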
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 8613755..4545cd1 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -215,14 +215,13 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
  * Add a usage to the temporary parser table.
  */
 
-static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
+static int hid_add_usage(struct hid_parser *parser, unsigned int usage)
 {
 	if (parser->local.usage_index >= HID_MAX_USAGES) {
 		hid_err(parser->device, "usage index exceeded\n");
 		return -1;
 	}
 	parser->local.usage[parser->local.usage_index] = usage;
-	parser->local.usage_size[parser->local.usage_index] = size;
 	parser->local.collection_index[parser->local.usage_index] =
 		parser->collection_stack_ptr ?
 		parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
@@ -483,7 +482,10 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
 			return 0;
 		}
 
-		return hid_add_usage(parser, data, item->size);
+		if (item->size <= 2)
+			data = (parser->global.usage_page << 16) + data;
+
+		return hid_add_usage(parser, data);
 
 	case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
 
@@ -492,6 +494,9 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
 			return 0;
 		}
 
+		if (item->size <= 2)
+			data = (parser->global.usage_page << 16) + data;
+
 		parser->local.usage_minimum = data;
 		return 0;
 
@@ -502,6 +507,9 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
 			return 0;
 		}
 
+		if (item->size <= 2)
+			data = (parser->global.usage_page << 16) + data;
+
 		count = data - parser->local.usage_minimum;
 		if (count + parser->local.usage_index >= HID_MAX_USAGES) {
 			/*
@@ -521,7 +529,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
 		}
 
 		for (n = parser->local.usage_minimum; n <= data; n++)
-			if (hid_add_usage(parser, n, item->size)) {
+			if (hid_add_usage(parser, n)) {
 				dbg_hid("hid_add_usage failed\n");
 				return -1;
 			}
@@ -536,22 +544,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
 }
 
 /*
- * Concatenate Usage Pages into Usages where relevant:
- * As per specification, 6.2.2.8: "When the parser encounters a main item it
- * concatenates the last declared Usage Page with a Usage to form a complete
- * usage value."
- */
-
-static void hid_concatenate_usage_page(struct hid_parser *parser)
-{
-	int i;
-
-	for (i = 0; i < parser->local.usage_index; i++)
-		if (parser->local.usage_size[i] <= 2)
-			parser->local.usage[i] += parser->global.usage_page << 16;
-}
-
-/*
  * Process a main item.
  */
 
@@ -560,8 +552,6 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
 	__u32 data;
 	int ret;
 
-	hid_concatenate_usage_page(parser);
-
 	data = item_udata(item);
 
 	switch (item->tag) {
@@ -771,8 +761,6 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
 	__u32 data;
 	int i;
 
-	hid_concatenate_usage_page(parser);
-
 	data = item_udata(item);
 
 	switch (item->tag) {
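
Concatenation now happens as each local item is parsed rather than in a separate pass at the main item: usages declared with a 1- or 2-byte item inherit the current Usage Page in their upper 16 bits, while 4-byte extended usages already carry their own page. A small sketch of the composition:

#include <stdio.h>

static unsigned int complete_usage(unsigned int usage_page, unsigned int data,
				   unsigned int item_size)
{
	if (item_size <= 2)
		return (usage_page << 16) + data;
	return data;	/* a 4-byte extended usage already carries its page */
}

int main(void)
{
	/* Generic Desktop (page 0x0001), usage 0xb8 -> 0x000100b8 */
	printf("%#010x\n", complete_usage(0x0001, 0xb8, 1));
	/* Extended usage keeps its embedded page */
	printf("%#010x\n", complete_usage(0x0001, 0x000c00e9, 4));
	return 0;
}
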
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 271f314..6f65f52 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -1160,8 +1160,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
 
 	INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
 
-	cp2112_gpio_direction_input(gc, d->hwirq);
-
 	if (!dev->gpio_poll) {
 		dev->gpio_poll = true;
 		schedule_delayed_work(&dev->gpio_poll_worker, 0);
@@ -1209,6 +1207,12 @@ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
 		return PTR_ERR(dev->desc[pin]);
 	}
 
+	ret = cp2112_gpio_direction_input(&dev->gc, pin);
+	if (ret < 0) {
+		dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n");
+		goto err_desc;
+	}
+
 	ret = gpiochip_lock_as_irq(&dev->gc, pin);
 	if (ret) {
 		dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
index 6e1a4a4..ab9da59 100644
--- a/drivers/hid/hid-holtek-kbd.c
+++ b/drivers/hid/hid-holtek-kbd.c
@@ -126,9 +126,14 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
 
 	/* Locate the boot interface, to receive the LED change events */
 	struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0);
+	struct hid_device *boot_hid;
+	struct hid_input *boot_hid_input;
 
-	struct hid_device *boot_hid = usb_get_intfdata(boot_interface);
-	struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs,
+	if (unlikely(boot_interface == NULL))
+		return -ENODEV;
+
+	boot_hid = usb_get_intfdata(boot_interface);
+	boot_hid_input = list_first_entry(&boot_hid->inputs,
 		struct hid_input, list);
 
 	return boot_hid_input->input->event(boot_hid_input->input, type, code,
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0e1c1e4..f5da7f3 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -559,6 +559,7 @@
 #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A	0x0b4a
 #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE		0x134a
 #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A	0x094a
+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941	0x0941
 #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641	0x0641
 
 #define USB_VENDOR_ID_HUION		0x256c
@@ -971,6 +972,7 @@
 #define USB_DEVICE_ID_SAITEK_RAT7	0x0cd7
 #define USB_DEVICE_ID_SAITEK_RAT9	0x0cfa
 #define USB_DEVICE_ID_SAITEK_MMO7	0x0cd0
+#define USB_DEVICE_ID_SAITEK_X52	0x075c
 
 #define USB_VENDOR_ID_SAMSUNG		0x0419
 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE	0x0001
@@ -1234,7 +1236,9 @@
 
 #define USB_VENDOR_ID_QVR5	0x045e
 #define USB_VENDOR_ID_QVR32A	0x04b4
+#define USB_VENDOR_ID_NREAL	0x05a9
 #define USB_DEVICE_ID_QVR5	0x0659
 #define USB_DEVICE_ID_QVR32A	0x00c3
+#define USB_DEVICE_ID_NREAL	0x0680
 
 #endif
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 596227d..17d6123 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -763,7 +763,7 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
 
 		if (!buf) {
 			ret = -ENOMEM;
-			goto err_free;
+			goto err_stop;
 		}
 
 		ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf),
@@ -795,9 +795,12 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
 		ret = lg4ff_init(hdev);
 
 	if (ret)
-		goto err_free;
+		goto err_stop;
 
 	return 0;
+
+err_stop:
+	hid_hw_stop(hdev);
 err_free:
 	kfree(drv_data);
 	return ret;
@@ -808,8 +811,7 @@ static void lg_remove(struct hid_device *hdev)
 	struct lg_drv_data *drv_data = hid_get_drvdata(hdev);
 	if (drv_data->quirks & LG_FF4)
 		lg4ff_deinit(hdev);
-	else
-		hid_hw_stop(hdev);
+	hid_hw_stop(hdev);
 	kfree(drv_data);
 }
 
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 512d67e1..4b26928 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -1483,7 +1483,6 @@ int lg4ff_deinit(struct hid_device *hid)
 		}
 	}
 #endif
-	hid_hw_stop(hid);
 	drv_data->device_props = NULL;
 
 	kfree(entry);
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index 87eda34..d377325 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -555,10 +555,14 @@ static void pcmidi_setup_extra_keys(
 
 static int pcmidi_set_operational(struct pcmidi_snd *pm)
 {
+	int rc;
+
 	if (pm->ifnum != 1)
 		return 0; /* only set up ONCE for interface 1 */
 
-	pcmidi_get_output_report(pm);
+	rc = pcmidi_get_output_report(pm);
+	if (rc < 0)
+		return rc;
 	pcmidi_submit_output_report(pm, 0xc1);
 	return 0;
 }
@@ -687,7 +691,11 @@ static int pcmidi_snd_initialise(struct pcmidi_snd *pm)
 	spin_lock_init(&pm->rawmidi_in_lock);
 
 	init_sustain_timers(pm);
-	pcmidi_set_operational(pm);
+	err = pcmidi_set_operational(pm);
+	if (err < 0) {
+		pk_error("failed to find output report\n");
+		goto fail_register;
+	}
 
 	/* register it */
 	err = snd_card_register(card);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index bbb5733..40b1cb1 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -94,6 +94,7 @@ static const struct hid_device_id hid_quirks[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
@@ -143,6 +144,7 @@ static const struct hid_device_id hid_quirks[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
@@ -176,6 +178,7 @@ static const struct hid_device_id hid_quirks[] = {
 	HID_QUIRK_HIDINPUT_FORCE | HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_QVR32A, USB_DEVICE_ID_QVR32A),
 	HID_QUIRK_HIDINPUT_FORCE | HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_NREAL, USB_DEVICE_ID_NREAL),
+	HID_QUIRK_HIDINPUT_FORCE },
 
 	{ 0 }
 };
diff --git a/drivers/hid/hid-qvr.c b/drivers/hid/hid-qvr.c
index 8211d30..5741e4d 100644
--- a/drivers/hid/hid-qvr.c
+++ b/drivers/hid/hid-qvr.c
@@ -46,6 +46,8 @@
 #include "hid-qvr.h"
 #include "hid-trace.h"
 
+#define WAIT_EVENT_INT_TOUT 20
+
 #define QVR_START_IMU		_IO('q', 1)
 #define QVR_STOP_IMU		_IO('q', 2)
 #define QVR_READ_CALIB_DATA_LEN	_IOR('q', 3, int32_t)
@@ -101,10 +103,6 @@ struct qvr_external_sensor {
 	int fd;
 };
 
-const static int msg_size = 368;
-const static int hid_request_report_id = 2;
-const static int hid_request_report_size = 64;
-
 static DECLARE_WAIT_QUEUE_HEAD(wq);
 static struct qvr_external_sensor qvr_external_sensor;
 
@@ -118,17 +116,18 @@ static int read_calibration_len(void)
 	if (hid_buf == NULL)
 		return -ENOMEM;
 
-	hid_buf[0] = 2;
-	hid_buf[1] = 20;
+	hid_buf[0] = QVR_HID_REPORT_ID_CAL;
+	hid_buf[1] = QVR_CMD_ID_CALIBRATION_DATA_SIZE;
 
 	ret = hid_hw_raw_request(sensor->hdev, hid_buf[0],
 		hid_buf,
-		hid_request_report_size,
+		QVR_HID_REQUEST_REPORT_SIZE,
 		HID_FEATURE_REPORT,
 		HID_REQ_SET_REPORT);
 
 	ret = wait_event_interruptible_timeout(wq,
-		sensor->calib_data_len != -1, msecs_to_jiffies(1000));
+		sensor->calib_data_len != -1,
+		msecs_to_jiffies(WAIT_EVENT_INT_TOUT));
 	if (ret == 0) {
 		kfree(hid_buf);
 		return -ETIME;
@@ -155,8 +154,8 @@ static uint8_t *read_calibration_data(void)
 	if (hid_buf == NULL)
 		return NULL;
 
-	hid_buf[0] = 2;
-	hid_buf[1] = 21;
+	hid_buf[0] = QVR_HID_REPORT_ID_CAL;
+	hid_buf[1] = QVR_CMD_ID_CALIBRATION_BLOCK_DATA;
 
 	complete_data = kzalloc(sensor->calib_data_len, GFP_KERNEL);
 	if (complete_data == NULL) {
@@ -168,11 +167,12 @@ static uint8_t *read_calibration_data(void)
 		sensor->calib_data_recv = 0;
 		ret = hid_hw_raw_request(sensor->hdev, hid_buf[0],
 			hid_buf,
-			hid_request_report_size,
+			QVR_HID_REQUEST_REPORT_SIZE,
 			HID_FEATURE_REPORT,
 			HID_REQ_SET_REPORT);
 		ret = wait_event_interruptible_timeout(wq,
-			sensor->calib_data_recv == 1, msecs_to_jiffies(1000));
+			sensor->calib_data_recv == 1,
+			msecs_to_jiffies(WAIT_EVENT_INT_TOUT));
 		if (ret == 0) {
 			pr_err("%s:get calibration data timeout\n", __func__);
 			kfree(hid_buf);
@@ -210,25 +210,25 @@ static int control_imu_stream(bool status)
 	if (hid_buf == NULL)
 		return -ENOMEM;
 
-	hid_buf[0] = 2;
-	hid_buf[1] = 25;
+	hid_buf[0] = QVR_HID_REPORT_ID_CAL;
+	hid_buf[1] = QVR_CMD_ID_IMU_CONTROL;
 	hid_buf[2] = status;
 
 	ret = hid_hw_raw_request(sensor->hdev, hid_buf[0],
 		hid_buf,
-		hid_request_report_size,
+		QVR_HID_REQUEST_REPORT_SIZE,
 		HID_FEATURE_REPORT,
 		HID_REQ_SET_REPORT);
 	ret = wait_event_interruptible_timeout(wq, sensor->ext_ack == 1,
-		msecs_to_jiffies(1000));
+		msecs_to_jiffies(WAIT_EVENT_INT_TOUT));
 	if (!ret && status) {
 		pr_debug("qvr: falling back - start IMU stream failed\n");
-		hid_buf[0] = hid_request_report_id;
-		hid_buf[1] = 7;
+		hid_buf[0] = QVR_HID_REPORT_ID_CAL;
+		hid_buf[1] = QVR_CMD_ID_IMU_CONTROL_FALLBACK;
 		ret = hid_hw_raw_request(sensor->hdev, hid_buf[0], hid_buf,
-				hid_request_report_size,
-				HID_FEATURE_REPORT,
-				HID_REQ_SET_REPORT);
+			QVR_HID_REQUEST_REPORT_SIZE,
+			HID_FEATURE_REPORT,
+			HID_REQ_SET_REPORT);
 	}
 	kfree(hid_buf);
 	if (ret > 0)
@@ -247,30 +247,30 @@ static int qvr_send_package_wrap(u8 *message, int msize, struct hid_device *hid)
 	struct external_imu_format imuData = { 0 };
 	struct qvr_buf_index *index_buf;
 
-	/*
-	 * Actual message size is 369 bytes
-	 * to make it 8 byte aligned we created a structure of size 368 bytes.
-	 * Ignoring the first byte 'report id' (which is always 1)
-	 *
-	 */
-	memcpy((void *)&imuData, (void *)message + 1, msg_size);
+	if (msize != sizeof(struct external_imu_format)) {
+		pr_err("%s: data size mismatch %d\n", __func__, msize);
+		return -EPROTO;
+	}
+
+	memcpy((void *)&imuData, (void *)message,
+		sizeof(struct external_imu_format));
 
 	if (!sensor->ts_base)
 		sensor->ts_base = ktime_to_ns(ktime_get_boottime());
 	if (!sensor->ts_offset)
 		sensor->ts_offset = imuData.gts0;
 	index_buf = (struct qvr_buf_index *)((uintptr_t)sensor->vaddr +
-			(sensor->vsize / 2) + (8 * sizeof(*sensor_buf)));
+		(sensor->vsize / 2) + (8 * sizeof(*sensor_buf)));
 	sensor_buf = (struct qvr_sensor_t *)((uintptr_t)sensor->vaddr +
-			(sensor->vsize / 2));
+		(sensor->vsize / 2));
 
 	data = (struct qvr_sensor_t *)&(sensor_buf[buf_index]);
 	if (sensor->ts_offset > imuData.gts0)
 		data->ats = sensor->ts_base +
-				((sensor->ts_offset - imuData.gts0) * 100);
+			sensor->ts_offset - imuData.gts0;
 	else
 		data->ats = sensor->ts_base +
-				((imuData.gts0 - sensor->ts_offset) * 100);
+			imuData.gts0 - sensor->ts_offset;
 	if (imuData.mts0 == 0)
 		data->mts = 0;
 	else
@@ -282,8 +282,8 @@ static int qvr_send_package_wrap(u8 *message, int msize, struct hid_device *hid)
 	data->gx = -imuData.gx0;
 	data->gy = imuData.gy0;
 	data->gz = -imuData.gz0;
-	data->mx = -imuData.mx0;
-	data->my = imuData.my0;
+	data->mx = -imuData.my0;
+	data->my = -imuData.mx0;
 	data->mz = -imuData.mz0;
 
 	trace_qvr_recv_sensor("gyro", data->gts, data->gx, data->gy, data->gz);
@@ -415,7 +415,7 @@ static ssize_t ts_base_store(struct kobject *kobj,
 static ssize_t ts_offset_show(struct kobject *kobj,
 	struct kobj_attribute *attr, char *buf)
 {
-	return snprintf(buf, 16, "%lld\n", qvr_external_sensor.ts_offset * 100);
+	return snprintf(buf, 16, "%lld\n", qvr_external_sensor.ts_offset);
 }
 
 static ssize_t ts_offset_store(struct kobject *kobj,
@@ -454,7 +454,6 @@ static int qvr_external_sensor_probe(struct hid_device *hdev,
 	struct qvr_external_sensor *sensor = &qvr_external_sensor;
 	int ret;
 	char *node_name = "qcom,smp2p-interrupt-qvrexternal-5-out";
-	__u8 *hid_buf;
 	sensor->hdev = hdev;
 
 	ret = register_smp2p(&hdev->dev, node_name, &sensor->gpio_info_out);
@@ -472,17 +471,6 @@ static int qvr_external_sensor_probe(struct hid_device *hdev,
 		pr_err("%s: hid_hw_start failed\n", __func__);
 		goto err_free;
 	}
-	hid_buf = kzalloc(255, GFP_ATOMIC);
-	if (hid_buf == NULL)
-		return -ENOMEM;
-	hid_buf[0] = hid_request_report_id;
-	hid_buf[1] = 7;
-	ret = hid_hw_raw_request(hdev, hid_buf[0], hid_buf,
-		hid_request_report_size,
-		HID_FEATURE_REPORT,
-		HID_REQ_SET_REPORT);
-	kfree(hid_buf);
-
 	sensor->device = &hdev->dev;
 
 	return 0;
@@ -530,7 +518,7 @@ static long qvr_external_sensor_ioctl(struct file *file, unsigned int cmd,
 		if (ret < 0)
 			return ret;
 		if (copy_to_user(argp, &sensor->calib_data_len,
-					sizeof(sensor->calib_data_len)))
+				sizeof(sensor->calib_data_len)))
 			return -EFAULT;
 		return 0;
 	case QVR_READ_CALIB_DATA:
@@ -593,6 +581,7 @@ static void qvr_external_sensor_device_remove(struct hid_device *hdev)
 static struct hid_device_id qvr_external_sensor_table[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_QVR5, USB_DEVICE_ID_QVR5) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_QVR32A, USB_DEVICE_ID_QVR32A) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_NREAL, USB_DEVICE_ID_NREAL) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, qvr_external_sensor_table);
@@ -651,8 +640,8 @@ static int __init qvr_external_sensor_init(void)
 		return -ret;
 	}
 	sensor->dev = device_create(sensor->class, NULL,
-				MKDEV(MAJOR(sensor->dev_no), 0), NULL,
-				"qvr_external_sensor_ioctl");
+		MKDEV(MAJOR(sensor->dev_no), 0), NULL,
+		"qvr_external_sensor_ioctl");
 	if (sensor->dev == NULL) {
 		class_destroy(sensor->class);
 		cdev_del(&sensor->cdev);
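
The timestamp rework latches the first IMU timestamp as ts_offset against the host boottime ts_base, then applies the delta directly, dropping the old *100 scaling. A sketch of the mapping, mirroring the driver's unsigned-safe branches (the values in main() are illustrative):

#include <stdio.h>
#include <stdint.h>

static uint64_t ts_base, ts_offset;

static uint64_t to_host_ns(uint64_t boottime_ns, uint64_t gts)
{
	if (!ts_base)
		ts_base = boottime_ns;	/* latched on the first sample */
	if (!ts_offset)
		ts_offset = gts;

	/* Apply the delta in whichever direction avoids unsigned underflow. */
	return (ts_offset > gts) ? ts_base + (ts_offset - gts)
				 : ts_base + (gts - ts_offset);
}

int main(void)
{
	printf("%llu\n", (unsigned long long)to_host_ns(1000000, 500));
	printf("%llu\n", (unsigned long long)to_host_ns(1000000, 900));
	return 0;
}
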
diff --git a/drivers/hid/hid-qvr.h b/drivers/hid/hid-qvr.h
index 08d54ba..58ffc88 100644
--- a/drivers/hid/hid-qvr.h
+++ b/drivers/hid/hid-qvr.h
@@ -18,144 +18,52 @@
 
 #define QVR_EXTERNAL_SENSOR_REPORT_ID 0x1
 
+//CMD IDs
+#define QVR_CMD_ID_CALIBRATION_DATA_SIZE      20
+#define QVR_CMD_ID_CALIBRATION_BLOCK_DATA     21
+#define QVR_CMD_ID_START_CALIBRATION_UPDATE   22
+#define QVR_CMD_ID_UPDATE_CALIBRATION_BLOCK   23
+#define QVR_CMD_ID_FINISH_CALIBRATION_UPDATE  24
+#define QVR_CMD_ID_IMU_CONTROL                25
+#define QVR_CMD_ID_IMU_CONTROL_FALLBACK       7
+
+#define QVR_HID_REPORT_ID_CAL                 2
+#define QVR_HID_REQUEST_REPORT_SIZE           64
+
 struct external_imu_format {
-	s16 temp0;
-	s16 temp1;
-	s16 temp2;
-	s16 temp3;
+	u8 reportID;
+	u8 padding;
+	u16 version;
+	u16 numIMUs;
+	u16 numSamplesPerImuPacket;
+	u16 totalPayloadSize;
+	u8 reservedPadding[28];
+
+	s16 imuID;
+	s16 sampleID;
+	s16 temperature;
+
 	u64 gts0;
-	u64 gts1;
-	u64 gts2;
-	u64 gts3;
-	s16 gx0;
-	s16 gx1;
-	s16 gx2;
-	s16 gx3;
-	s16 gx4;
-	s16 gx5;
-	s16 gx6;
-	s16 gx7;
-	s16 gx8;
-	s16 gx9;
-	s16 gx10;
-	s16 gx11;
-	s16 gx12;
-	s16 gx13;
-	s16 gx14;
-	s16 gx15;
-	s16 gx16;
-	s16 gx17;
-	s16 gx18;
-	s16 gx19;
-	s16 gx20;
-	s16 gx21;
-	s16 gx22;
-	s16 gx23;
-	s16 gx24;
-	s16 gx25;
-	s16 gx26;
-	s16 gx27;
-	s16 gx28;
-	s16 gx29;
-	s16 gx30;
-	s16 gx31;
-	s16 gy0;
-	s16 gy1;
-	s16 gy2;
-	s16 gy3;
-	s16 gy4;
-	s16 gy5;
-	s16 gy6;
-	s16 gy7;
-	s16 gy8;
-	s16 gy9;
-	s16 gy10;
-	s16 gy11;
-	s16 gy12;
-	s16 gy13;
-	s16 gy14;
-	s16 gy15;
-	s16 gy16;
-	s16 gy17;
-	s16 gy18;
-	s16 gy19;
-	s16 gy20;
-	s16 gy21;
-	s16 gy22;
-	s16 gy23;
-	s16 gy24;
-	s16 gy25;
-	s16 gy26;
-	s16 gy27;
-	s16 gy28;
-	s16 gy29;
-	s16 gy30;
-	s16 gy31;
-	s16 gz0;
-	s16 gz1;
-	s16 gz2;
-	s16 gz3;
-	s16 gz4;
-	s16 gz5;
-	s16 gz6;
-	s16 gz7;
-	s16 gz8;
-	s16 gz9;
-	s16 gz10;
-	s16 gz11;
-	s16 gz12;
-	s16 gz13;
-	s16 gz14;
-	s16 gz15;
-	s16 gz16;
-	s16 gz17;
-	s16 gz18;
-	s16 gz19;
-	s16 gz20;
-	s16 gz21;
-	s16 gz22;
-	s16 gz23;
-	s16 gz24;
-	s16 gz25;
-	s16 gz26;
-	s16 gz27;
-	s16 gz28;
-	s16 gz29;
-	s16 gz30;
-	s16 gz31;
+	u32 gNumerator;
+	u32 gDenominator;
+	s32 gx0;
+	s32 gy0;
+	s32 gz0;
+
 	u64 ats0;
-	u64 ats1;
-	u64 ats2;
-	u64 ats3;
+	u32 aNumerator;
+	u32 aDenominator;
 	s32 ax0;
-	s32 ax1;
-	s32 ax2;
-	s32 ax3;
 	s32 ay0;
-	s32 ay1;
-	s32 ay2;
-	s32 ay3;
 	s32 az0;
-	s32 az1;
-	s32 az2;
-	s32 az3;
+
 	u64 mts0;
-	u64 mts1;
-	u64 mts2;
-	u64 mts3;
-	s16 mx0;
-	s16 mx1;
-	s16 mx2;
-	s16 mx3;
-	s16 my0;
-	s16 my1;
-	s16 my2;
-	s16 my3;
-	s16 mz0;
-	s16 mz1;
-	s16 mz2;
-	s16 mz3; //368 bytes
-};
+	u32 mNumerator;
+	u32 mDenominator;
+	s32 mx0;
+	s32 my0;
+	s32 mz0;
+} __packed;
 
 void qvr_clear_def_parmeter(void);
 void qvr_init(struct hid_device *hdev);
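
The repacked structure is a wire format, so __packed matters: without it the compiler would pad the layout around the 64-bit timestamps and the msize check in qvr_send_package_wrap() would never match. A sketch of the guard pattern, using a truncated, illustrative subset of the fields rather than the driver's full layout:

#include <assert.h>
#include <stdint.h>

struct wire_sample {
	uint8_t  report_id;
	uint8_t  padding;
	uint16_t version;
	uint64_t gts;
	int32_t  gx, gy, gz;
} __attribute__((packed));

/*
 * With packing, the struct is exactly the sum of its fields (24 bytes);
 * unpacked, alignment of the u64 would pad it out and break size checks.
 */
static_assert(sizeof(struct wire_sample) == 1 + 1 + 2 + 8 + 3 * 4,
	      "wire_sample layout drifted");

int main(void)
{
	return 0;
}
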
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 31f1023..09f2c61 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -2806,7 +2806,6 @@ static int sony_input_configured(struct hid_device *hdev,
 	sony_cancel_work_sync(sc);
 	sony_remove_dev_list(sc);
 	sony_release_device_id(sc);
-	hid_hw_stop(hdev);
 	return ret;
 }
 
@@ -2868,6 +2867,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
 	 */
 	if (!(hdev->claimed & HID_CLAIMED_INPUT)) {
 		hid_err(hdev, "failed to claim input\n");
+		hid_hw_stop(hdev);
 		return -ENODEV;
 	}
 
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index bea8def..30b8c32 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -34,6 +34,8 @@
 
 #include "hid-ids.h"
 
+#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT	0xb320
+
 static const signed short ff_rumble[] = {
 	FF_RUMBLE,
 	-1
@@ -88,6 +90,7 @@ static int tmff_play(struct input_dev *dev, void *data,
 	struct hid_field *ff_field = tmff->ff_field;
 	int x, y;
 	int left, right;	/* Rumbling */
+	int motor_swap;
 
 	switch (effect->type) {
 	case FF_CONSTANT:
@@ -112,6 +115,13 @@ static int tmff_play(struct input_dev *dev, void *data,
 					ff_field->logical_minimum,
 					ff_field->logical_maximum);
 
+		/* 2-in-1 strong motor is left */
+		if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
+			motor_swap = left;
+			left = right;
+			right = motor_swap;
+		}
+
 		dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
 		ff_field->value[0] = left;
 		ff_field->value[1] = right;
@@ -238,6 +248,8 @@ static const struct hid_device_id tm_devices[] = {
 		.driver_data = (unsigned long)ff_rumble },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304),   /* FireStorm Dual Power 2 (and 3) */
 		.driver_data = (unsigned long)ff_rumble },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT),   /* Dual Trigger 2-in-1 */
+		.driver_data = (unsigned long)ff_rumble },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323),   /* Dual Trigger 3-in-1 (PC Mode) */
 		.driver_data = (unsigned long)ff_rumble },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324),   /* Dual Trigger 3-in-1 (PS3 Mode) */
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 4a44e48e..c7cff92 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -378,7 +378,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
 
 	mutex_lock(&minors_lock);
 	dev = hidraw_table[minor];
-	if (!dev) {
+	if (!dev || !dev->exist) {
 		ret = -ENODEV;
 		goto out;
 	}
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index a746017..5a949ca 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -297,6 +297,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
 	spin_unlock_irq(&list->hiddev->list_lock);
 
 	mutex_lock(&hiddev->existancelock);
+	/*
+	 * Recheck exist with existancelock held to
+	 * avoid opening a disconnected device.
+	 */
+	if (!list->hiddev->exist) {
+		res = -ENODEV;
+		goto bail_unlock;
+	}
 	if (!list->hiddev->open++)
 		if (list->hiddev->exist) {
 			struct hid_device *hid = hiddev->hid;
@@ -313,6 +321,10 @@ static int hiddev_open(struct inode *inode, struct file *file)
 	hid_hw_power(hid, PM_HINT_NORMAL);
 bail_unlock:
 	mutex_unlock(&hiddev->existancelock);
+
+	spin_lock_irq(&list->hiddev->list_lock);
+	list_del(&list->node);
+	spin_unlock_irq(&list->hiddev->list_lock);
 bail:
 	file->private_data = NULL;
 	vfree(list);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 9cd4705..3038c97 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -91,7 +91,7 @@ static void wacom_wac_queue_flush(struct hid_device *hdev,
 }
 
 static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
-		struct hid_report *report, u8 *raw_data, int size)
+		struct hid_report *report, u8 *raw_data, int report_size)
 {
 	struct wacom *wacom = hid_get_drvdata(hdev);
 	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
@@ -152,7 +152,8 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
 	if (flush)
 		wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
 	else if (insert)
-		wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo, raw_data, size);
+		wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
+				       raw_data, report_size);
 
 	return insert && !flush;
 }
@@ -282,14 +283,16 @@ static void wacom_feature_mapping(struct hid_device *hdev,
 		/* leave touch_max as is if predefined */
 		if (!features->touch_max) {
 			/* read manually */
-			data = kzalloc(2, GFP_KERNEL);
+			n = hid_report_len(field->report);
+			data = hid_alloc_report_buf(field->report, GFP_KERNEL);
 			if (!data)
 				break;
 			data[0] = field->report->id;
 			ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
-						data, 2, WAC_CMD_RETRIES);
-			if (ret == 2) {
-				features->touch_max = data[1];
+					       data, n, WAC_CMD_RETRIES);
+			if (ret == n) {
+				ret = hid_report_raw_event(hdev,
+					HID_FEATURE_REPORT, data, n, 0);
 			} else {
 				features->touch_max = 16;
 				hid_warn(hdev, "wacom_feature_mapping: "
@@ -2145,7 +2148,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
 {
 	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
 	struct wacom_features *features = &wacom_wac->features;
-	char name[WACOM_NAME_MAX];
+	char name[WACOM_NAME_MAX - 20]; /* Leave some room for suffixes */
 
 	/* Generic devices name unspecified */
 	if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
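
Instead of assuming a two-byte feature report, the touch_max probe above now sizes its buffer from the report descriptor and feeds the result back through hid_report_raw_event() so HID core parses every field. A sketch of the buffer sizing, with report_len() as a simplified stand-in for the kernel's hid_report_len():

#include <stdio.h>
#include <stdlib.h>

struct report {
	int id;
	int size_bits;		/* payload size taken from the descriptor */
};

static int report_len(const struct report *r)
{
	return r->size_bits / 8 + 1;	/* payload bytes plus the report ID */
}

int main(void)
{
	struct report touch_max_report = { .id = 5, .size_bits = 24 };
	int n = report_len(&touch_max_report);
	unsigned char *buf = calloc(n, 1);

	if (!buf)
		return 1;
	buf[0] = touch_max_report.id;	/* the first byte selects the report */
	printf("fetching %d bytes for report %d\n", n, buf[0]);
	free(buf);
	return 0;
}
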
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index e56dc97..1df037e 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -255,7 +255,7 @@ static int wacom_dtu_irq(struct wacom_wac *wacom)
 
 static int wacom_dtus_irq(struct wacom_wac *wacom)
 {
-	char *data = wacom->data;
+	unsigned char *data = wacom->data;
 	struct input_dev *input = wacom->pen_input;
 	unsigned short prox, pressure = 0;
 
@@ -576,7 +576,7 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
 		strip2 = ((data[3] & 0x1f) << 8) | data[4];
 	}
 
-	prox = (buttons & ~(~0 << nbuttons)) | (keys & ~(~0 << nkeys)) |
+	prox = (buttons & ~(~0U << nbuttons)) | (keys & ~(~0U << nkeys)) |
 	       (ring1 & 0x80) | (ring2 & 0x80) | strip1 | strip2;
 
 	wacom_report_numbered_buttons(input, nbuttons, buttons);
@@ -848,6 +848,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
 		y >>= 1;
 		distance >>= 1;
 	}
+	if (features->type == INTUOSHT2)
+		distance = features->distance_max - distance;
 	input_report_abs(input, ABS_X, x);
 	input_report_abs(input, ABS_Y, y);
 	input_report_abs(input, ABS_DISTANCE, distance);
@@ -1061,7 +1063,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
 	input_report_key(input, BTN_BASE2, (data[11] & 0x02));
 
 	if (data[12] & 0x80)
-		input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
+		input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
 	else
 		input_report_abs(input, ABS_WHEEL, 0);
 
@@ -2531,6 +2533,7 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
 	struct wacom *wacom = hid_get_drvdata(hdev);
 	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
 	unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+	struct wacom_features *features = &wacom->wacom_wac.features;
 
 	switch (equivalent_usage) {
 	case HID_GD_X:
@@ -2551,6 +2554,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
 	case HID_DG_TIPSWITCH:
 		wacom_wac->hid_data.tipswitch = value;
 		break;
+	case HID_DG_CONTACTMAX:
+		features->touch_max = value;
+		return;
 	}
 
 
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 2f164bd..fdb0f83 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -38,7 +38,7 @@
 
 static unsigned long virt_to_hvpfn(void *addr)
 {
-	unsigned long paddr;
+	phys_addr_t paddr;
 
 	if (is_vmalloc_addr(addr))
 		paddr = page_to_phys(vmalloc_to_page(addr)) +
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 5eed1e7..d6106e1 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -353,7 +353,9 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
 
 		out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled;
 
-	default:
+		/* fallthrough */
+
+	case KVP_OP_GET_IP_INFO:
 		utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id,
 				MAX_ADAPTER_ID_SIZE,
 				UTF16_LITTLE_ENDIAN,
@@ -406,6 +408,10 @@ kvp_send_key(struct work_struct *dummy)
 		process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO);
 		break;
 	case KVP_OP_GET_IP_INFO:
+		/*
+		 * We only need to pass the operation, adapter_id and
+		 * addr_family on to the userland kvp daemon.
+		 */
 		process_ib_ipinfo(in_msg, message, KVP_OP_GET_IP_INFO);
 		break;
 	case KVP_OP_SET:
@@ -421,7 +427,7 @@ kvp_send_key(struct work_struct *dummy)
 				UTF16_LITTLE_ENDIAN,
 				message->body.kvp_set.data.value,
 				HV_KVP_EXCHANGE_MAX_VALUE_SIZE - 1) + 1;
-				break;
+			break;
 
 		case REG_U32:
 			/*
@@ -446,7 +452,10 @@ kvp_send_key(struct work_struct *dummy)
 			break;
 
 		}
-	case KVP_OP_GET:
+
+		/*
+		 * The key is always a string, in UTF-16 encoding.
+		 */
 		message->body.kvp_set.data.key_size =
 			utf16s_to_utf8s(
 			(wchar_t *)in_msg->body.kvp_set.data.key,
@@ -454,7 +463,18 @@ kvp_send_key(struct work_struct *dummy)
 			UTF16_LITTLE_ENDIAN,
 			message->body.kvp_set.data.key,
 			HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
-			break;
+
+		break;
+
+	case KVP_OP_GET:
+		message->body.kvp_get.data.key_size =
+			utf16s_to_utf8s(
+			(wchar_t *)in_msg->body.kvp_get.data.key,
+			in_msg->body.kvp_get.data.key_size,
+			UTF16_LITTLE_ENDIAN,
+			message->body.kvp_get.data.key,
+			HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
+		break;
 
 	case KVP_OP_DELETE:
 		message->body.kvp_delete.key_size =
@@ -464,12 +484,12 @@ kvp_send_key(struct work_struct *dummy)
 			UTF16_LITTLE_ENDIAN,
 			message->body.kvp_delete.key,
 			HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
-			break;
+		break;
 
 	case KVP_OP_ENUMERATE:
 		message->body.kvp_enum_data.index =
 			in_msg->body.kvp_enum_data.index;
-			break;
+		break;
 	}
 
 	kvp_transaction.state = HVUTIL_USERSPACE_REQ;
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 34e45b9..2f2fb19 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -694,8 +694,8 @@ static int setup_attrs(struct acpi_power_meter_resource *resource)
 
 	if (resource->caps.flags & POWER_METER_CAN_CAP) {
 		if (!can_cap_in_hardware()) {
-			dev_err(&resource->acpi_dev->dev,
-				"Ignoring unsafe software power cap!\n");
+			dev_warn(&resource->acpi_dev->dev,
+				 "Ignoring unsafe software power cap!\n");
 			goto skip_unsafe_cap;
 		}
 
diff --git a/drivers/hwtracing/coresight/coresight-byte-cntr.c b/drivers/hwtracing/coresight/coresight-byte-cntr.c
index 6216417..e0454e7 100644
--- a/drivers/hwtracing/coresight/coresight-byte-cntr.c
+++ b/drivers/hwtracing/coresight/coresight-byte-cntr.c
@@ -11,6 +11,7 @@
 #include <linux/delay.h>
 #include <linux/uaccess.h>
 #include <linux/usb/usb_qdss.h>
+#include <linux/time.h>
 
 #include "coresight-byte-cntr.h"
 #include "coresight-priv.h"
@@ -19,6 +20,7 @@
 #define USB_BLK_SIZE 65536
 #define USB_SG_NUM (USB_BLK_SIZE / PAGE_SIZE)
 #define USB_BUF_NUM 255
+#define USB_TIME_OUT (5 * HZ)
 
 static struct tmc_drvdata *tmcdrvdata;
 
@@ -239,7 +241,7 @@ static int tmc_etr_byte_cntr_open(struct inode *in, struct file *fp)
 
 	mutex_lock(&byte_cntr_data->byte_cntr_lock);
 
-	if (!byte_cntr_data->enable || !byte_cntr_data->block_size) {
+	if (!tmcdrvdata->enable || !byte_cntr_data->block_size) {
 		mutex_unlock(&byte_cntr_data->byte_cntr_lock);
 		return -EINVAL;
 	}
@@ -252,6 +254,7 @@ static int tmc_etr_byte_cntr_open(struct inode *in, struct file *fp)
 
 	fp->private_data = byte_cntr_data;
 	nonseekable_open(in, fp);
+	byte_cntr_data->enable = true;
 	byte_cntr_data->read_active = true;
 	mutex_unlock(&byte_cntr_data->byte_cntr_lock);
 	return 0;
@@ -314,91 +317,180 @@ static int byte_cntr_register_chardev(struct byte_cntr *byte_cntr_data)
 	return ret;
 }
 
-static void usb_read_work_fn(struct work_struct *work)
+static int usb_transfer_small_packet(struct qdss_request *usb_req,
+			struct byte_cntr *drvdata, size_t *small_size)
 {
-	int ret, i, seq = 0;
-	struct qdss_request *usb_req = NULL;
+	int ret = 0;
 	struct etr_buf *etr_buf = tmcdrvdata->etr_buf;
-	size_t actual, req_size;
-	char *buf;
-	struct byte_cntr *drvdata =
-		container_of(work, struct byte_cntr, read_work);
+	size_t req_size, actual;
+	long w_offset;
 
-	while (tmcdrvdata->enable
-		&& tmcdrvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
-		if (!atomic_read(&drvdata->irq_cnt)) {
-			ret = wait_event_interruptible(drvdata->usb_wait_wq,
-				atomic_read(&drvdata->irq_cnt) > 0
-				|| !tmcdrvdata->enable || tmcdrvdata->out_mode
-				!= TMC_ETR_OUT_MODE_USB
-				|| !drvdata->read_active);
-			if (ret == -ERESTARTSYS || !tmcdrvdata->enable
-			|| tmcdrvdata->out_mode != TMC_ETR_OUT_MODE_USB
-			|| !drvdata->read_active)
-				break;
-		}
+	w_offset = tmc_sg_get_rwp_offset(tmcdrvdata);
+	req_size = ((w_offset < drvdata->offset) ? etr_buf->size : 0) +
+				w_offset - drvdata->offset;
+	req_size = (req_size < USB_BLK_SIZE) ? req_size : USB_BLK_SIZE;
 
-		req_size = USB_BLK_SIZE;
-		seq++;
+	while (req_size > 0) {
 		usb_req = devm_kzalloc(tmcdrvdata->dev, sizeof(*usb_req),
-					GFP_KERNEL);
-		if (!usb_req)
-			return;
-		usb_req->sg = devm_kzalloc(tmcdrvdata->dev,
-			sizeof(*(usb_req->sg)) * USB_SG_NUM, GFP_KERNEL);
-		if (!usb_req->sg) {
-			devm_kfree(tmcdrvdata->dev, usb_req->sg);
-			return;
+			GFP_KERNEL);
+		if (!usb_req) {
+			ret = -ENOMEM;
+			goto out;
 		}
-		usb_req->length = USB_BLK_SIZE;
+
+		actual = tmc_etr_buf_get_data(etr_buf, drvdata->offset,
+					req_size, &usb_req->buf);
+		usb_req->length = actual;
 		drvdata->usb_req = usb_req;
-		for (i = 0; i < USB_SG_NUM; i++) {
-			actual = tmc_etr_buf_get_data(etr_buf, drvdata->offset,
-					PAGE_SIZE, &buf);
-			if (actual <= 0) {
-				devm_kfree(tmcdrvdata->dev, usb_req->sg);
-				devm_kfree(tmcdrvdata->dev, usb_req);
-				usb_req = NULL;
-				dev_err(tmcdrvdata->dev, "No data in ETR\n");
-				return;
-			}
-			sg_set_buf(&usb_req->sg[i], buf, actual);
-			if (i == 0)
-				usb_req->buf = buf;
-			req_size -= actual;
-			if ((drvdata->offset + actual) >= tmcdrvdata->size)
-				drvdata->offset = 0;
-			else
-				drvdata->offset += actual;
-			if (i == USB_SG_NUM - 1)
-				sg_mark_end(&usb_req->sg[i]);
-		}
-		usb_req->num_sgs = i;
+		req_size -= actual;
+
+		if ((drvdata->offset + actual) >= tmcdrvdata->size)
+			drvdata->offset = 0;
+		else
+			drvdata->offset += actual;
+
+		*small_size += actual;
+
 		if (atomic_read(&drvdata->usb_free_buf) > 0) {
-			ret = usb_qdss_write(tmcdrvdata->usbch,
-					drvdata->usb_req);
+			ret = usb_qdss_write(tmcdrvdata->usbch, usb_req);
+
 			if (ret) {
-				devm_kfree(tmcdrvdata->dev, usb_req->sg);
 				devm_kfree(tmcdrvdata->dev, usb_req);
 				usb_req = NULL;
 				drvdata->usb_req = NULL;
 				dev_err(tmcdrvdata->dev,
 					"Write data failed:%d\n", ret);
-				if (ret == -EAGAIN)
-					continue;
-				return;
+				goto out;
 			}
-			atomic_dec(&drvdata->usb_free_buf);
 
+			atomic_dec(&drvdata->usb_free_buf);
 		} else {
 			dev_dbg(tmcdrvdata->dev,
-			"Drop data, offset = %d, seq = %d, irq = %d\n",
-				drvdata->offset, seq,
-				atomic_read(&drvdata->irq_cnt));
-			devm_kfree(tmcdrvdata->dev, usb_req->sg);
+			"Drop data, offset = %d, len = %zu\n",
+				drvdata->offset, req_size);
 			devm_kfree(tmcdrvdata->dev, usb_req);
 			drvdata->usb_req = NULL;
 		}
+	}
+
+out:
+	return ret;
+}
+
+static void usb_read_work_fn(struct work_struct *work)
+{
+	int ret, i, seq = 0;
+	struct qdss_request *usb_req = NULL;
+	struct etr_buf *etr_buf = tmcdrvdata->etr_buf;
+	size_t actual, req_size, req_sg_num, small_size = 0;
+	char *buf;
+	struct byte_cntr *drvdata =
+		container_of(work, struct byte_cntr, read_work);
+
+	while (tmcdrvdata->enable
+		&& tmcdrvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
+		if (!atomic_read(&drvdata->irq_cnt)) {
+			ret = wait_event_interruptible_timeout(
+				drvdata->usb_wait_wq,
+				atomic_read(&drvdata->irq_cnt) > 0
+				|| !tmcdrvdata->enable || tmcdrvdata->out_mode
+				!= TMC_ETR_OUT_MODE_USB
+				|| !drvdata->read_active, USB_TIME_OUT);
+			if (ret == -ERESTARTSYS || !tmcdrvdata->enable
+			|| tmcdrvdata->out_mode != TMC_ETR_OUT_MODE_USB
+			|| !drvdata->read_active)
+				break;
+
+			if (ret == 0) {
+				ret = usb_transfer_small_packet(usb_req,
+						drvdata, &small_size);
+				if (ret && ret != -EAGAIN)
+					return;
+				continue;
+			}
+		}
+
+		req_size = USB_BLK_SIZE - small_size;
+		small_size = 0;
+
+		if (req_size > 0) {
+			seq++;
+			req_sg_num = (req_size - 1) / PAGE_SIZE + 1;
+			usb_req = devm_kzalloc(tmcdrvdata->dev,
+						sizeof(*usb_req), GFP_KERNEL);
+			if (!usb_req)
+				return;
+			usb_req->sg = devm_kzalloc(tmcdrvdata->dev,
+					sizeof(*(usb_req->sg)) * req_sg_num,
+					GFP_KERNEL);
+			if (!usb_req->sg) {
+				devm_kfree(tmcdrvdata->dev, usb_req);
+				usb_req = NULL;
+				return;
+			}
+
+			for (i = 0; i < req_sg_num; i++) {
+				actual = tmc_etr_buf_get_data(etr_buf,
+							drvdata->offset,
+							PAGE_SIZE, &buf);
+
+				if (actual <= 0) {
+					devm_kfree(tmcdrvdata->dev,
+							usb_req->sg);
+					devm_kfree(tmcdrvdata->dev, usb_req);
+					usb_req = NULL;
+					dev_err(tmcdrvdata->dev, "No data in ETR\n");
+					return;
+				}
+
+				sg_set_buf(&usb_req->sg[i], buf, actual);
+
+				if (i == 0)
+					usb_req->buf = buf;
+				if (i == req_sg_num - 1)
+					sg_mark_end(&usb_req->sg[i]);
+
+				if ((drvdata->offset + actual) >=
+					tmcdrvdata->size)
+					drvdata->offset = 0;
+				else
+					drvdata->offset += actual;
+			}
+
+			usb_req->length = req_size;
+			drvdata->usb_req = usb_req;
+			usb_req->num_sgs = i;
+
+			if (atomic_read(&drvdata->usb_free_buf) > 0) {
+				ret = usb_qdss_write(tmcdrvdata->usbch,
+						drvdata->usb_req);
+				if (ret) {
+					devm_kfree(tmcdrvdata->dev,
+							usb_req->sg);
+					devm_kfree(tmcdrvdata->dev, usb_req);
+					usb_req = NULL;
+					drvdata->usb_req = NULL;
+					dev_err(tmcdrvdata->dev,
+						"Write data failed:%d\n", ret);
+					if (ret == -EAGAIN)
+						continue;
+					return;
+				}
+				atomic_dec(&drvdata->usb_free_buf);
+
+			} else {
+				dev_dbg(tmcdrvdata->dev,
+				"Drop data, offset = %d, seq = %d, irq = %d\n",
+					drvdata->offset, seq,
+					atomic_read(&drvdata->irq_cnt));
+				devm_kfree(tmcdrvdata->dev, usb_req->sg);
+				devm_kfree(tmcdrvdata->dev, usb_req);
+				drvdata->usb_req = NULL;
+			}
+		}
+
 		if (atomic_read(&drvdata->irq_cnt) > 0)
 			atomic_dec(&drvdata->irq_cnt);
 	}
@@ -411,7 +503,8 @@ static void usb_write_done(struct byte_cntr *drvdata,
 	atomic_inc(&drvdata->usb_free_buf);
 	if (d_req->status)
 		pr_err_ratelimited("USB write failed err:%d\n", d_req->status);
-	devm_kfree(tmcdrvdata->dev, d_req->sg);
+	if (d_req->sg)
+		devm_kfree(tmcdrvdata->dev, d_req->sg);
 	devm_kfree(tmcdrvdata->dev, d_req);
 }
 
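
/*
 * A minimal sketch (not part of the patch) of the wait pattern the reworked
 * usb_read_work_fn() above relies on: wait_event_interruptible_timeout()
 * returns a negative value on a signal, 0 on timeout, and a positive value
 * once the condition became true; the timeout case is what now triggers the
 * small-packet flush. The two helpers named below are hypothetical.
 */
static int wait_for_block_or_flush(struct byte_cntr *drvdata)
{
	long ret;

	ret = wait_event_interruptible_timeout(drvdata->usb_wait_wq,
			atomic_read(&drvdata->irq_cnt) > 0,
			USB_TIME_OUT);
	if (ret < 0)			/* interrupted by a signal */
		return -ERESTARTSYS;
	if (ret == 0)			/* timed out: only partial data */
		return flush_partial_block(drvdata);	/* assumed helper */
	return drain_full_block(drvdata);		/* assumed helper */
}
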
diff --git a/drivers/hwtracing/coresight/coresight-common.h b/drivers/hwtracing/coresight/coresight-common.h
index b49a588..b6db835 100644
--- a/drivers/hwtracing/coresight/coresight-common.h
+++ b/drivers/hwtracing/coresight/coresight-common.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CORESIGHT_COMMON_H
@@ -16,6 +16,7 @@ struct coresight_csr {
 
 #ifdef CONFIG_CORESIGHT_CSR
 extern void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr);
+extern void msm_qdss_csr_enable_flush(struct coresight_csr *csr);
 extern void msm_qdss_csr_disable_bam_to_usb(struct coresight_csr *csr);
 extern void msm_qdss_csr_disable_flush(struct coresight_csr *csr);
 extern int coresight_csr_hwctrl_set(struct coresight_csr *csr, uint64_t addr,
diff --git a/drivers/hwtracing/coresight/coresight-csr.c b/drivers/hwtracing/coresight/coresight-csr.c
index 309e5a0..e0d023719 100644
--- a/drivers/hwtracing/coresight/coresight-csr.c
+++ b/drivers/hwtracing/coresight/coresight-csr.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2013, 2015-2-17 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, 2015-2017, 2019 The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -66,12 +66,15 @@ do {									\
 #define BLKSIZE_1024		2
 #define BLKSIZE_2048		3
 
+#define FLUSHPERIOD_2048	0x800
+
 struct csr_drvdata {
 	void __iomem		*base;
 	phys_addr_t		pbase;
 	struct device		*dev;
 	struct coresight_device	*csdev;
 	uint32_t		blksize;
+	uint32_t		flushperiod;
 	struct coresight_csr		csr;
 	struct clk		*clk;
 	spinlock_t		spin_lock;
@@ -79,6 +82,7 @@ struct csr_drvdata {
 	bool			hwctrl_set_support;
 	bool			set_byte_cntr_support;
 	bool			timestamp_support;
+	bool			enable_flush;
 };
 
 static LIST_HEAD(csr_list);
@@ -86,10 +90,23 @@ static DEFINE_MUTEX(csr_lock);
 
 #define to_csr_drvdata(c) container_of(c, struct csr_drvdata, csr)
 
+static void msm_qdss_csr_config_flush_period(struct csr_drvdata *drvdata)
+{
+	uint32_t usbflshctrl;
+
+	CSR_UNLOCK(drvdata);
+
+	usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
+	usbflshctrl = (usbflshctrl & ~0x3FFFC) | (drvdata->flushperiod << 2);
+	csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
+
+	CSR_LOCK(drvdata);
+}
+
 void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr)
 {
 	struct csr_drvdata *drvdata;
-	uint32_t usbbamctrl, usbflshctrl;
+	uint32_t usbbamctrl;
 	unsigned long flags;
 
 	if (csr == NULL)
@@ -106,12 +123,6 @@ void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr)
 	usbbamctrl = (usbbamctrl & ~0x3) | drvdata->blksize;
 	csr_writel(drvdata, usbbamctrl, CSR_USBBAMCTRL);
 
-	usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
-	usbflshctrl = (usbflshctrl & ~0x3FFFC) | (0xFFFF << 2);
-	csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
-	usbflshctrl |= 0x2;
-	csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
-
 	usbbamctrl |= 0x4;
 	csr_writel(drvdata, usbbamctrl, CSR_USBBAMCTRL);
 
@@ -120,6 +131,36 @@ void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr)
 }
 EXPORT_SYMBOL(msm_qdss_csr_enable_bam_to_usb);
 
+void msm_qdss_csr_enable_flush(struct coresight_csr *csr)
+{
+	struct csr_drvdata *drvdata;
+	uint32_t usbflshctrl;
+	unsigned long flags;
+
+	if (csr == NULL)
+		return;
+
+	drvdata = to_csr_drvdata(csr);
+	if (IS_ERR_OR_NULL(drvdata) || !drvdata->usb_bam_support)
+		return;
+
+	spin_lock_irqsave(&drvdata->spin_lock, flags);
+
+	msm_qdss_csr_config_flush_period(drvdata);
+
+	CSR_UNLOCK(drvdata);
+
+	usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
+	usbflshctrl |= 0x2;
+	csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
+
+	CSR_LOCK(drvdata);
+	drvdata->enable_flush = true;
+	spin_unlock_irqrestore(&drvdata->spin_lock, flags);
+}
+EXPORT_SYMBOL(msm_qdss_csr_enable_flush);
+
 void msm_qdss_csr_disable_bam_to_usb(struct coresight_csr *csr)
 {
 	struct csr_drvdata *drvdata;
@@ -166,6 +207,7 @@ void msm_qdss_csr_disable_flush(struct coresight_csr *csr)
 	csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
 
 	CSR_LOCK(drvdata);
+	drvdata->enable_flush = false;
 	spin_unlock_irqrestore(&drvdata->spin_lock, flags);
 }
 EXPORT_SYMBOL(msm_qdss_csr_disable_flush);
@@ -295,14 +337,66 @@ static ssize_t timestamp_show(struct device *dev,
 
 static DEVICE_ATTR_RO(timestamp);
 
+static ssize_t flushperiod_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct csr_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (IS_ERR_OR_NULL(drvdata) || !drvdata->usb_bam_support) {
+		dev_err(dev, "Invalid param\n");
+		return -EINVAL;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%#x\n", drvdata->flushperiod);
+}
+
+static ssize_t flushperiod_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf,
+				size_t size)
+{
+	unsigned long flags;
+	unsigned long val;
+	struct csr_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (IS_ERR_OR_NULL(drvdata) || !drvdata->usb_bam_support) {
+		dev_err(dev, "Invalid param\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&drvdata->spin_lock, flags);
+
+	if (kstrtoul(buf, 0, &val) || val > 0xffff) {
+		spin_unlock_irqrestore(&drvdata->spin_lock, flags);
+		return -EINVAL;
+	}
+
+	if (drvdata->flushperiod == val)
+		goto out;
+
+	drvdata->flushperiod = val;
+
+	if (drvdata->enable_flush)
+		msm_qdss_csr_config_flush_period(drvdata);
+
+out:
+	spin_unlock_irqrestore(&drvdata->spin_lock, flags);
+	return size;
+}
+
+static DEVICE_ATTR_RW(flushperiod);
+
 static struct attribute *csr_attrs[] = {
 	&dev_attr_timestamp.attr,
+	&dev_attr_flushperiod.attr,
 	NULL,
 };
 
 static struct attribute_group csr_attr_grp = {
 	.attrs = csr_attrs,
 };
+
 static const struct attribute_group *csr_attr_grps[] = {
 	&csr_attr_grp,
 	NULL,
@@ -374,14 +468,16 @@ static int csr_probe(struct platform_device *pdev)
 	else
 		dev_dbg(dev, "timestamp_support operation supported\n");
 
+	if (drvdata->usb_bam_support)
+		drvdata->flushperiod = FLUSHPERIOD_2048;
+
 	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
 	if (!desc)
 		return -ENOMEM;
 	desc->type = CORESIGHT_DEV_TYPE_NONE;
 	desc->pdata = pdev->dev.platform_data;
 	desc->dev = &pdev->dev;
-	if (drvdata->timestamp_support)
-		desc->groups = csr_attr_grps;
+	desc->groups = csr_attr_grps;
 
 	drvdata->csdev = coresight_register(desc);
 	if (IS_ERR(drvdata->csdev))
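
/*
 * A sketch (not from the patch) of the CSR_USBFLSHCTRL bit layout the new
 * flushperiod handling above assumes: the period occupies bits [17:2]
 * (mask 0x3FFFC), so a 16-bit value such as the FLUSHPERIOD_2048 default is
 * shifted left by two before being merged into the register image.
 */
static u32 set_flush_period(u32 reg, u32 period)	/* hypothetical helper */
{
	/* period <= 0xffff, as enforced by flushperiod_store() above */
	return (reg & ~0x3FFFC) | (period << 2);	/* bits [17:2] */
}
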
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 0ba4013..9e894c9 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -176,6 +176,12 @@ static void etm4_enable_hw(void *info)
 	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
 		dev_err(drvdata->dev,
 			"timeout while waiting for Idle Trace Status\n");
+	/*
+	 * As recommended by section 4.3.7 ("Synchronization when using the
+	 * memory-mapped interface") of ARM IHI 0064D
+	 */
+	dsb(sy);
+	isb();
 
 	CS_LOCK(drvdata->base);
 
@@ -328,8 +334,12 @@ static void etm4_disable_hw(void *info)
 	/* EN, bit[0] Trace unit enable bit */
 	control &= ~0x1;
 
-	/* make sure everything completes before disabling */
-	mb();
+	/*
+	 * Make sure everything completes before disabling, as recommended
+	 * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
+	 * SSTATUS") of ARM IHI 0064D
+	 */
+	dsb(sy);
 	isb();
 	writel_relaxed(control, drvdata->base + TRCPRGCTLR);
 
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index f3ffa86..e835cf6 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -557,7 +557,7 @@ static ssize_t traceid_show(struct device *dev,
 	struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
 
 	val = drvdata->traceid;
-	return sprintf(buf, "%#lx\n", val);
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 }
 
 static ssize_t traceid_store(struct device *dev,
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 2e6858f..81cecd5 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -215,6 +215,20 @@ void tmc_free_sg_table(struct tmc_sg_table *sg_table)
 	tmc_free_data_pages(sg_table);
 }
 
+long tmc_sg_get_rwp_offset(struct tmc_drvdata *drvdata)
+{
+	struct etr_buf *etr_buf = drvdata->etr_buf;
+	struct etr_sg_table *etr_table = etr_buf->private;
+	struct tmc_sg_table *table = etr_table->sg_table;
+	u64 rwp;
+	long w_offset;
+
+	rwp = tmc_read_rwp(drvdata);
+	w_offset = tmc_sg_get_data_page_offset(table, rwp);
+
+	return w_offset;
+}
+
 /*
  * Alloc pages for the table. Since this will be used by the device,
  * allocate the pages closer to the device (i.e, dev_to_node(dev)
@@ -1115,6 +1129,7 @@ static void __tmc_etr_enable_to_bam(struct tmc_drvdata *drvdata)
 
 	CS_LOCK(drvdata->base);
 
+	msm_qdss_csr_enable_flush(drvdata->csr);
 	drvdata->enable_to_bam = true;
 }
 
@@ -1216,7 +1231,7 @@ void __tmc_etr_disable_to_bam(struct tmc_drvdata *drvdata)
 	tmc_wait_for_flush(drvdata);
 	tmc_disable_hw(drvdata);
 
-	CS_LOCK(drvdata);
+	CS_LOCK(drvdata->base);
 
 	/* Disable CSR configuration */
 	msm_qdss_csr_disable_bam_to_usb(drvdata->csr);
@@ -1322,7 +1337,6 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
 	 * buffer, provided the size matches. Any allocation has to be done
 	 * with the lock released.
 	 */
-	mutex_lock(&drvdata->mem_lock);
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 	if (!drvdata->etr_buf || (drvdata->etr_buf->size != drvdata->size)) {
 		spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -1339,7 +1353,6 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
 			/* Allocate memory with the locks released */
 			free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
 			if (IS_ERR(new_buf)) {
-				mutex_unlock(&drvdata->mem_lock);
 				return -ENOMEM;
 			}
 			coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
@@ -1349,7 +1362,6 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
 				free_buf = new_buf =
 				tmc_etr_setup_sysfs_buf(drvdata);
 				if (IS_ERR(new_buf)) {
-					mutex_unlock(&drvdata->mem_lock);
 					return -ENOMEM;
 				}
 			}
@@ -1361,7 +1373,6 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
 						usb_bypass_notifier);
 			if (IS_ERR_OR_NULL(drvdata->usbch)) {
 				dev_err(drvdata->dev, "usb_qdss_open failed\n");
-				mutex_unlock(&drvdata->mem_lock);
 				return -ENODEV;
 			}
 		} else {
@@ -1369,7 +1380,6 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
 							usb_notifier);
 			if (IS_ERR_OR_NULL(drvdata->usbch)) {
 				dev_err(drvdata->dev, "usb_qdss_open failed\n");
-				mutex_unlock(&drvdata->mem_lock);
 				return -ENODEV;
 			}
 		}
@@ -1417,7 +1427,6 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
 	if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
 		tmc_etr_byte_cntr_start(drvdata->byte_cntr);
 
-	mutex_unlock(&drvdata->mem_lock);
 	if (!ret)
 		dev_info(drvdata->dev, "TMC-ETR enabled\n");
 
@@ -1432,13 +1441,19 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
 
 static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
 {
+	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+	int ret = 0;
+
 	switch (mode) {
 	case CS_MODE_SYSFS:
-		return tmc_enable_etr_sink_sysfs(csdev);
+		mutex_lock(&drvdata->mem_lock);
+		ret = tmc_enable_etr_sink_sysfs(csdev);
+		mutex_unlock(&drvdata->mem_lock);
+		return ret;
+
 	case CS_MODE_PERF:
 		return tmc_enable_etr_sink_perf(csdev);
 	}
-
 	/* We shouldn't be here */
 	return -EINVAL;
 }
@@ -1448,11 +1463,9 @@ static void _tmc_disable_etr_sink(struct coresight_device *csdev, bool flush)
 	unsigned long flags;
 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	mutex_lock(&drvdata->mem_lock);
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 	if (drvdata->reading) {
 		spin_unlock_irqrestore(&drvdata->spinlock, flags);
-		mutex_unlock(&drvdata->mem_lock);
 		return;
 	}
 
@@ -1503,31 +1516,40 @@ static void _tmc_disable_etr_sink(struct coresight_device *csdev, bool flush)
 		}
 	}
 out:
-	mutex_unlock(&drvdata->mem_lock);
 	dev_info(drvdata->dev, "TMC-ETR disabled\n");
 }
 
 static void tmc_disable_etr_sink(struct coresight_device *csdev)
 {
+	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	mutex_lock(&drvdata->mem_lock);
 	_tmc_disable_etr_sink(csdev, true);
+	mutex_unlock(&drvdata->mem_lock);
 }
 
 int tmc_etr_switch_mode(struct tmc_drvdata *drvdata, const char *out_mode)
 {
 	enum tmc_etr_out_mode new_mode, old_mode;
 
+	mutex_lock(&drvdata->mem_lock);
 	if (!strcmp(out_mode, str_tmc_etr_out_mode[TMC_ETR_OUT_MODE_MEM]))
 		new_mode = TMC_ETR_OUT_MODE_MEM;
 	else if (!strcmp(out_mode, str_tmc_etr_out_mode[TMC_ETR_OUT_MODE_USB]))
 		new_mode = TMC_ETR_OUT_MODE_USB;
-	else
+	else {
+		mutex_unlock(&drvdata->mem_lock);
 		return -EINVAL;
+	}
 
-	if (new_mode == drvdata->out_mode)
+	if (new_mode == drvdata->out_mode) {
+		mutex_unlock(&drvdata->mem_lock);
 		return 0;
+	}
 
 	if (drvdata->mode == CS_MODE_DISABLED) {
 		drvdata->out_mode = new_mode;
+		mutex_unlock(&drvdata->mem_lock);
 		return 0;
 	}
 
@@ -1540,8 +1562,11 @@ int tmc_etr_switch_mode(struct tmc_drvdata *drvdata, const char *out_mode)
 		dev_err(drvdata->dev, "Switch to %s failed. Fall back to %s.\n",
 			str_tmc_etr_out_mode[new_mode],
 			str_tmc_etr_out_mode[old_mode]);
+		mutex_unlock(&drvdata->mem_lock);
 		return -EINVAL;
 	}
+
+	mutex_unlock(&drvdata->mem_lock);
 	return 0;
 }
 
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index a30e360..22b9fbc 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -325,7 +325,7 @@ ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
 ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
 				u64 offset, size_t len, char **bufpp);
 int tmc_etr_switch_mode(struct tmc_drvdata *drvdata, const char *out_mode);
-
+long tmc_sg_get_rwp_offset(struct tmc_drvdata *drvdata);
 
 #define TMC_REG_PAIR(name, lo_off, hi_off)				\
 static inline u64							\
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
index 5ee653f..e1f77f6 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.c
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -39,6 +39,7 @@ do {									\
 #define TPDA_FLUSH_CR		(0x090)
 #define TPDA_FLUSH_SR		(0x094)
 #define TPDA_FLUSH_ERR		(0x098)
+#define TPDA_SPARE		(0xefc)
 
 #define TPDA_MAX_INPORTS	32
 
@@ -225,6 +226,59 @@ static const struct coresight_ops tpda_cs_ops = {
 	.link_ops	= &tpda_link_ops,
 };
 
+static ssize_t legacy_ts_mode_enable_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	mutex_lock(&drvdata->lock);
+
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDA_UNLOCK(drvdata);
+	val = tpda_readl(drvdata, TPDA_SPARE);
+	TPDA_LOCK(drvdata);
+
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+static ssize_t legacy_ts_mode_enable_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDA_UNLOCK(drvdata);
+		val = tpda_readl(drvdata, TPDA_SPARE);
+		val = val | BIT(0);
+		tpda_writel(drvdata, val, TPDA_SPARE);
+		TPDA_LOCK(drvdata);
+	}
+
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+static DEVICE_ATTR_RW(legacy_ts_mode_enable);
+
 static ssize_t trig_async_enable_show(struct device *dev,
 					   struct device_attribute *attr,
 					   char *buf)
@@ -553,6 +607,7 @@ static struct attribute *tpda_attrs[] = {
 	&dev_attr_global_flush_req.attr,
 	&dev_attr_port_flush_req.attr,
 	&dev_attr_cmbchan_mode.attr,
+	&dev_attr_legacy_ts_mode_enable.attr,
 	NULL,
 };
 
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index 4da336f..8c928f2 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -4377,9 +4377,10 @@ static int tpdm_probe(struct amba_device *adev, const struct amba_id *id)
 		return PTR_ERR(drvdata->csdev);
 
 	ret = coresight_enable_reg_clk(drvdata->csdev);
-	if (ret)
+	if (ret) {
+		coresight_unregister(drvdata->csdev);
 		return ret;
-
+	}
 	version = tpdm_readl(drvdata, CORESIGHT_PERIPHIDR2);
 	drvdata->version = BMVAL(version, 4, 7);
 
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index f9d8373..174ef74 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -268,7 +268,10 @@ static int coresight_enable_sink(struct coresight_device *csdev, u32 mode)
 
 	if (!csdev->enable) {
 		if (sink_ops(csdev)->enable) {
-			coresight_enable_reg_clk(csdev);
+			ret = coresight_enable_reg_clk(csdev);
+			if (ret)
+				return ret;
+
 			ret = sink_ops(csdev)->enable(csdev, mode);
 			if (ret) {
 				coresight_disable_reg_clk(csdev);
@@ -325,7 +328,10 @@ static int coresight_enable_link(struct coresight_device *csdev,
 
 	if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
 		if (link_ops(csdev)->enable) {
-			coresight_enable_reg_clk(csdev);
+			ret = coresight_enable_reg_clk(csdev);
+			if (ret)
+				return ret;
+
 			ret = link_ops(csdev)->enable(csdev, inport, outport);
 			if (ret) {
 				coresight_disable_reg_clk(csdev);
@@ -395,7 +401,10 @@ static int coresight_enable_source(struct coresight_device *csdev, u32 mode)
 
 	if (!csdev->enable) {
 		if (source_ops(csdev)->enable) {
-			coresight_enable_reg_clk(csdev);
+			ret = coresight_enable_reg_clk(csdev);
+			if (ret)
+				return ret;
+
 			ret = source_ops(csdev)->enable(csdev, NULL, mode);
 			if (ret) {
 				coresight_disable_reg_clk(csdev);
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index e759ac0..968319f 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -141,6 +141,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
 		.driver_data = (kernel_ulong_t)0,
 	},
 	{
+		/* Lewisburg PCH */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa226),
+		.driver_data = (kernel_ulong_t)0,
+	},
+	{
 		/* Gemini Lake */
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
 		.driver_data = (kernel_ulong_t)&intel_th_2x,
@@ -175,6 +180,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
 		.driver_data = (kernel_ulong_t)&intel_th_2x,
 	},
+	{
+		/* Tiger Lake PCH */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6),
+		.driver_data = (kernel_ulong_t)&intel_th_2x,
+	},
 	{ 0 },
 };
 
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 5d40275..16eea6e 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -1101,7 +1101,6 @@ int stm_source_register_device(struct device *parent,
 
 err:
 	put_device(&src->dev);
-	kfree(src);
 
 	return err;
 }
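
/*
 * A minimal sketch of the rule behind the hunk above: once a device has been
 * initialized, its release callback owns the memory, so an error path must
 * use put_device() alone - the dropped kfree() was a double free. All names
 * below are hypothetical.
 */
struct thing {
	struct device dev;
};

static void thing_release(struct device *dev)
{
	kfree(container_of(dev, struct thing, dev));	/* single point of free */
}

static int thing_register(struct thing *t)
{
	int err;

	device_initialize(&t->dev);
	t->dev.release = thing_release;

	err = device_add(&t->dev);
	if (err) {
		put_device(&t->dev);	/* release() frees t; no kfree() here */
		return err;
	}
	return 0;
}
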
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 3f3e8b3..d51bf53 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -270,9 +270,11 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
 	writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);
 
 	/* send stop when last byte has been written */
-	if (--dev->buf_len == 0)
+	if (--dev->buf_len == 0) {
 		if (!dev->use_alt_cmd)
 			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
+		at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
+	}
 
 	dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);
 
@@ -690,9 +692,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
 		} else {
 			at91_twi_write_next_byte(dev);
 			at91_twi_write(dev, AT91_TWI_IER,
-				       AT91_TWI_TXCOMP |
-				       AT91_TWI_NACK |
-				       AT91_TWI_TXRDY);
+				       AT91_TWI_TXCOMP | AT91_TWI_NACK |
+				       (dev->buf_len ? AT91_TWI_TXRDY : 0));
 		}
 	}
 
@@ -913,7 +914,7 @@ static struct at91_twi_pdata sama5d4_config = {
 
 static struct at91_twi_pdata sama5d2_config = {
 	.clk_max_div = 7,
-	.clk_offset = 4,
+	.clk_offset = 3,
 	.has_unre_flag = true,
 	.has_alt_cmd = true,
 	.has_hold_field = true,
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index c4d176f..f890af6 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -187,6 +187,51 @@ static const struct i2c_algorithm cht_wc_i2c_adap_algo = {
 	.smbus_xfer = cht_wc_i2c_adap_smbus_xfer,
 };
 
+/*
+ * We are an i2c-adapter which itself is part of an i2c-client. This means that
+ * transfers done through us take adapter->bus_lock twice, once for our parent
+ * i2c-adapter and once to take our own bus_lock. Lockdep does not like this
+ * nested locking, to make lockdep happy in the case of busses with muxes, the
+ * i2c-core's i2c_adapter_lock_bus function calls:
+ * rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
+ *
+ * But i2c_adapter_depth only works when the direct parent of the adapter is
+ * another adapter, as it is only meant for muxes. In our case there is an
+ * i2c-client and MFD instantiated platform_device in the parent->child chain
+ * between the 2 devices.
+ *
+ * So we override the default i2c_lock_operations and pass a hardcoded
+ * depth of 1 to rt_mutex_lock_nested, to make lockdep happy.
+ *
+ * Note that if there were to be a mux attached to our adapter, this would
+ * break things again since the i2c-mux code expects the root-adapter to have
+ * a locking depth of 0. But we always have only 1 client directly attached
+ * in the form of the Charger IC paired with the CHT Whiskey Cove PMIC.
+ */
+static void cht_wc_i2c_adap_lock_bus(struct i2c_adapter *adapter,
+				 unsigned int flags)
+{
+	rt_mutex_lock_nested(&adapter->bus_lock, 1);
+}
+
+static int cht_wc_i2c_adap_trylock_bus(struct i2c_adapter *adapter,
+				   unsigned int flags)
+{
+	return rt_mutex_trylock(&adapter->bus_lock);
+}
+
+static void cht_wc_i2c_adap_unlock_bus(struct i2c_adapter *adapter,
+				   unsigned int flags)
+{
+	rt_mutex_unlock(&adapter->bus_lock);
+}
+
+static const struct i2c_lock_operations cht_wc_i2c_adap_lock_ops = {
+	.lock_bus =    cht_wc_i2c_adap_lock_bus,
+	.trylock_bus = cht_wc_i2c_adap_trylock_bus,
+	.unlock_bus =  cht_wc_i2c_adap_unlock_bus,
+};
+
 /**** irqchip for the client connected to the extchgr i2c adapter ****/
 static void cht_wc_i2c_irq_lock(struct irq_data *data)
 {
@@ -295,6 +340,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
 	adap->adapter.owner = THIS_MODULE;
 	adap->adapter.class = I2C_CLASS_HWMON;
 	adap->adapter.algo = &cht_wc_i2c_adap_algo;
+	adap->adapter.lock_ops = &cht_wc_i2c_adap_lock_ops;
 	strlcpy(adap->adapter.name, "PMIC I2C Adapter",
 		sizeof(adap->adapter.name));
 	adap->adapter.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index e7f9305..f5f0017 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -94,6 +94,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
 
 	dev->disable_int(dev);
 	dev->disable(dev);
+	synchronize_irq(dev->irq);
 	dev->slave = NULL;
 	pm_runtime_put(dev->dev);
 
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index 35b302d..959d491 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -69,6 +69,7 @@ struct em_i2c_device {
 	struct completion msg_done;
 	struct clk *sclk;
 	struct i2c_client *slave;
+	int irq;
 };
 
 static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg)
@@ -339,6 +340,12 @@ static int em_i2c_unreg_slave(struct i2c_client *slave)
 
 	writeb(0, priv->base + I2C_OFS_SVA0);
 
+	/*
+	 * Wait for interrupt to finish. New slave irqs cannot happen because we
+	 * cleared the slave address and, thus, only extension codes will be
+	 * detected which do not use the slave ptr.
+	 */
+	synchronize_irq(priv->irq);
 	priv->slave = NULL;
 
 	return 0;
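
/*
 * The same teardown pattern recurs in the em-i2c, rcar and designware hunks
 * in this series: quiesce the interrupt source, then synchronize_irq() so no
 * handler can still be running before the shared pointer is cleared. A
 * sketch with hypothetical names:
 */
struct example_priv {			/* hypothetical driver state */
	struct i2c_client *slave;
	int irq;
};

static void example_mask_irqs(struct example_priv *priv);	/* assumed */

static int example_unreg_slave(struct example_priv *priv)
{
	example_mask_irqs(priv);	/* silence irqs at the device first */
	synchronize_irq(priv->irq);	/* wait out any in-flight handler */
	priv->slave = NULL;		/* safe: no handler can still see it */
	return 0;
}
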
@@ -355,7 +362,7 @@ static int em_i2c_probe(struct platform_device *pdev)
 {
 	struct em_i2c_device *priv;
 	struct resource *r;
-	int irq, ret;
+	int ret;
 
 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
@@ -390,8 +397,8 @@ static int em_i2c_probe(struct platform_device *pdev)
 
 	em_i2c_reset(&priv->adap);
 
-	irq = platform_get_irq(pdev, 0);
-	ret = devm_request_irq(&pdev->dev, irq, em_i2c_irq_handler, 0,
+	priv->irq = platform_get_irq(pdev, 0);
+	ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
 				"em_i2c", priv);
 	if (ret)
 		goto err_clk;
@@ -401,7 +408,8 @@ static int em_i2c_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_clk;
 
-	dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, irq);
+	dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr,
+		 priv->irq);
 
 	return 0;
 
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 90946a8..9ff3371 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -98,7 +98,7 @@
 #define SB800_PIIX4_PORT_IDX_MASK	0x06
 #define SB800_PIIX4_PORT_IDX_SHIFT	1
 
-/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
+/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
 #define SB800_PIIX4_PORT_IDX_KERNCZ		0x02
 #define SB800_PIIX4_PORT_IDX_MASK_KERNCZ	0x18
 #define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ	3
@@ -362,18 +362,16 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
 
 	/* Find which register is used for port selection */
 	if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
-		switch (PIIX4_dev->device) {
-		case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
+		if (PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS ||
+		    (PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
+		     PIIX4_dev->revision >= 0x1F)) {
 			piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
 			piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
 			piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
-			break;
-		case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
-		default:
+		} else {
 			piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
 			piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
 			piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
-			break;
 		}
 	} else {
 		if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2,
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index d449e60..3cd43f5 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -115,6 +115,7 @@ struct geni_i2c_dev {
 	struct msm_gpi_dma_async_tx_cb_param rx_cb;
 	enum i2c_se_mode se_mode;
 	bool cmd_done;
+	bool is_shared;
 };
 
 struct geni_i2c_err_log {
@@ -420,15 +421,6 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 	struct geni_i2c_dev *gi2c = i2c_get_adapdata(adap);
 	int i, ret = 0, timeout = 0;
 
-	ret = pinctrl_select_state(gi2c->i2c_rsc.geni_pinctrl,
-				gi2c->i2c_rsc.geni_gpio_active);
-	if (ret) {
-		GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
-			"%s: Error %d pinctrl_select_state active\n",
-			__func__, ret);
-		return ret;
-	}
-
 	if (!gi2c->tx_c) {
 		gi2c->tx_c = dma_request_slave_channel(gi2c->dev, "tx");
 		if (!gi2c->tx_c) {
@@ -626,9 +618,15 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 			GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
 				    "GSI Txn timed out: %u len: %d\n",
 					gi2c->xfer_timeout, gi2c->cur->len);
+			geni_se_dump_dbg_regs(&gi2c->i2c_rsc, gi2c->base,
+						gi2c->ipcl);
 			gi2c->err = -ETIMEDOUT;
 		}
 geni_i2c_err_prep_sg:
+		if (gi2c->err) {
+			dmaengine_terminate_all(gi2c->tx_c);
+			gi2c->cfg_sent = 0;
+		}
 		if (msgs[i].flags & I2C_M_RD)
 			geni_se_iommu_unmap_buf(rx_dev, &gi2c->rx_ph,
 				msgs[i].len, DMA_FROM_DEVICE);
@@ -636,19 +634,13 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 			geni_se_iommu_unmap_buf(tx_dev, &gi2c->tx_ph,
 				msgs[i].len, DMA_TO_DEVICE);
 		i2c_put_dma_safe_msg_buf(dma_buf, &msgs[i], !gi2c->err);
-
-		if (gi2c->err) {
-			dmaengine_terminate_all(gi2c->tx_c);
-			gi2c->cfg_sent = 0;
+		if (gi2c->err)
 			goto geni_i2c_gsi_xfer_out;
-		}
 	}
 
 geni_i2c_gsi_xfer_out:
 	if (!ret && gi2c->err)
 		ret = gi2c->err;
-	pinctrl_select_state(gi2c->i2c_rsc.geni_pinctrl,
-				gi2c->i2c_rsc.geni_gpio_sleep);
 	return ret;
 }
 
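
/*
 * A sketch of the ordering rule the hunk above restores: terminate the DMA
 * channel before unmapping its buffers, so the engine can no longer touch
 * memory that is going away. Everything except the dmaengine/DMA-API calls
 * is hypothetical.
 */
static void example_abort_xfer(struct dma_chan *chan, struct device *dev,
			       dma_addr_t phys, size_t len)
{
	dmaengine_terminate_all(chan);	/* stop in-flight descriptors first */
	dma_unmap_single(dev, phys, len, DMA_TO_DEVICE);	/* then unmap */
}
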
@@ -907,6 +899,11 @@ static int geni_i2c_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,shared")) {
+		gi2c->is_shared = true;
+		dev_info(&pdev->dev, "Multi-EE usecase\n");
+	}
+
 	if (of_property_read_u32(pdev->dev.of_node, "qcom,clk-freq-out",
 				&gi2c->i2c_rsc.clk_freq_out)) {
 		dev_info(&pdev->dev,
@@ -984,12 +981,14 @@ static int geni_i2c_runtime_suspend(struct device *dev)
 {
 	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
 
-	if (gi2c->se_mode == FIFO_SE_DMA) {
+	if (gi2c->se_mode == FIFO_SE_DMA)
 		disable_irq(gi2c->irq);
-		se_geni_resources_off(&gi2c->i2c_rsc);
-	} else {
-		/* GPIO is set to sleep state already. So just clocks off */
+
+	if (gi2c->is_shared) {
+		/* Do not unconfigure GPIOs if shared se */
 		se_geni_clks_off(&gi2c->i2c_rsc);
+	} else {
+		se_geni_resources_off(&gi2c->i2c_rsc);
 	}
 	return 0;
 }
@@ -1006,10 +1005,7 @@ static int geni_i2c_runtime_resume(struct device *dev)
 		gi2c->ipcl = ipc_log_context_create(2, ipc_name, 0);
 	}
 
-	if (gi2c->se_mode != GSI_ONLY)
-		ret = se_geni_resources_on(&gi2c->i2c_rsc);
-	else
-		ret = se_geni_clks_on(&gi2c->i2c_rsc);
+	ret = se_geni_resources_on(&gi2c->i2c_rsc);
 
 	if (ret)
 		return ret;
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 254e621..2c29f90 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -139,6 +139,7 @@ struct rcar_i2c_priv {
 	enum dma_data_direction dma_direction;
 
 	struct reset_control *rstc;
+	int irq;
 };
 
 #define rcar_i2c_priv_to_dev(p)		((p)->adap.dev.parent)
@@ -859,9 +860,11 @@ static int rcar_unreg_slave(struct i2c_client *slave)
 
 	WARN_ON(!priv->slave);
 
+	/* disable irqs and ensure none is running before clearing ptr */
 	rcar_i2c_write(priv, ICSIER, 0);
 	rcar_i2c_write(priv, ICSCR, 0);
 
+	synchronize_irq(priv->irq);
 	priv->slave = NULL;
 
 	pm_runtime_put(rcar_i2c_priv_to_dev(priv));
@@ -916,7 +919,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
 	struct i2c_adapter *adap;
 	struct device *dev = &pdev->dev;
 	struct i2c_timings i2c_t;
-	int irq, ret;
+	int ret;
 
 	priv = devm_kzalloc(dev, sizeof(struct rcar_i2c_priv), GFP_KERNEL);
 	if (!priv)
@@ -979,10 +982,10 @@ static int rcar_i2c_probe(struct platform_device *pdev)
 		pm_runtime_put(dev);
 
 
-	irq = platform_get_irq(pdev, 0);
-	ret = devm_request_irq(dev, irq, rcar_i2c_irq, 0, dev_name(dev), priv);
+	priv->irq = platform_get_irq(pdev, 0);
+	ret = devm_request_irq(dev, priv->irq, rcar_i2c_irq, 0, dev_name(dev), priv);
 	if (ret < 0) {
-		dev_err(dev, "cannot get irq %d\n", irq);
+		dev_err(dev, "cannot get irq %d\n", priv->irq);
 		goto out_pm_disable;
 	}
 
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index b75ff14..e6f351c 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -203,6 +203,7 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
 	if (readb(riic->base + RIIC_ICSR2) & ICSR2_NACKF) {
 		/* We got a NACKIE */
 		readb(riic->base + RIIC_ICDRR);	/* dummy read */
+		riic_clear_set_bit(riic, ICSR2_NACKF, 0, RIIC_ICSR2);
 		riic->err = -ENXIO;
 	} else if (riic->bytes_left) {
 		return IRQ_NONE;
diff --git a/drivers/i3c/master/i3c-master-qcom-geni.c b/drivers/i3c/master/i3c-master-qcom-geni.c
index b88425f..7d3e7c5 100644
--- a/drivers/i3c/master/i3c-master-qcom-geni.c
+++ b/drivers/i3c/master/i3c-master-qcom-geni.c
@@ -199,12 +199,18 @@ enum geni_i3c_err_code {
 #define TLMM_I3C_MODE	0x24
 #define IBI_SW_RESET_MIN_SLEEP 1000
 #define IBI_SW_RESET_MAX_SLEEP 2000
+#define I3C_OD_CLK_RATE 370000
 
 enum i3c_trans_dir {
 	WRITE_TRANSACTION = 0,
 	READ_TRANSACTION = 1
 };
 
+enum i3c_bus_phase {
+	OPEN_DRAIN_MODE  = 0,
+	PUSH_PULL_MODE   = 1
+};
+
 struct geni_se {
 	void __iomem *base;
 	void __iomem *ibi_base;
@@ -262,6 +268,7 @@ struct geni_i3c_dev {
 	int cur_idx;
 	unsigned long newaddrslots[(I3C_ADDR_MASK + 1) / BITS_PER_LONG];
 	const struct geni_i3c_clk_fld *clk_fld;
+	const struct geni_i3c_clk_fld *clk_od_fld;
 	struct geni_ibi ibi;
 };
 
@@ -302,9 +309,9 @@ struct geni_i3c_clk_fld {
 	u8  clk_div;
 	u8  i2c_t_high_cnt;
 	u8  i2c_t_low_cnt;
-	u8  i2c_t_cycle_cnt;
 	u8  i3c_t_high_cnt;
 	u8  i3c_t_cycle_cnt;
+	u32 i2c_t_cycle_cnt;
 };
 
 static struct geni_i3c_dev*
@@ -332,11 +339,12 @@ to_geni_i3c_master(struct i3c_master_controller *master)
  * clk_freq_out = t / t_cycle
  */
 static const struct geni_i3c_clk_fld geni_i3c_clk_map[] = {
-	{ KHZ(100),    19200, 7, 10, 11, 26, 0, 0 },
-	{ KHZ(400),    19200, 2,  5, 12, 24, 0, 0 },
-	{ KHZ(1000),   19200, 1,  3,  9, 18, 7, 0 },
-	{ KHZ(1920),   19200, 1,  4,  9, 19, 7, 8 },
-	{ KHZ(12500), 100000, 1, 60, 140, 250, 8, 16 },
+	{ KHZ(100),    19200,  7, 10, 11,  0,  0,  26},
+	{ KHZ(400),    19200,  2,  5, 12,  0,  0,  24},
+	{ KHZ(1000),   19200,  1,  3,  9,  7,  0,  18},
+	{ KHZ(1920),   19200,  1,  4,  9,  7,  8,  19},
+	{ KHZ(370),   100000, 20,  4,  7,  8, 14,  14},
+	{ KHZ(12500), 100000,  1, 72, 168, 6,  7, 300},
 };
 
 static int geni_i3c_clk_map_idx(struct geni_i3c_dev *gi3c)
@@ -351,11 +359,19 @@ static int geni_i3c_clk_map_idx(struct geni_i3c_dev *gi3c)
 			 itr->clk_freq_out == bus->scl_rate.i3c) &&
 			 KHZ(itr->clk_src_freq) == gi3c->clk_src_freq) {
 			gi3c->clk_fld = itr;
-			return 0;
 		}
+
+		if (itr->clk_freq_out == I3C_OD_CLK_RATE)
+			gi3c->clk_od_fld = itr;
 	}
 
-	return -EINVAL;
+	if (!gi3c->clk_fld || !gi3c->clk_od_fld) {
+		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+			"%s: clk mapping failed\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
 static void set_new_addr_slot(unsigned long *addrslot, u8 addr)
@@ -391,13 +407,17 @@ static bool is_new_addr_slot_set(unsigned long *addrslot, u8 addr)
 	return ((*ptr & (1 << (addr % BITS_PER_LONG))) != 0);
 }
 
-static void qcom_geni_i3c_conf(struct geni_i3c_dev *gi3c)
+static void qcom_geni_i3c_conf(struct geni_i3c_dev *gi3c,
+	enum i3c_bus_phase bus_phase)
 {
 	const struct geni_i3c_clk_fld *itr = gi3c->clk_fld;
 	u32 val;
 	unsigned long freq;
 	int ret = 0;
 
+	if (bus_phase == OPEN_DRAIN_MODE)
+		itr = gi3c->clk_od_fld;
+
 	if (gi3c->dfs_idx > DFS_INDEX_MAX)
 		ret = geni_se_clk_freq_match(&gi3c->se.i3c_rsc,
 				KHZ(itr->clk_src_freq),
@@ -657,6 +677,10 @@ static int i3c_geni_runtime_get_mutex_lock(struct geni_i3c_dev *gi3c)
 	mutex_lock(&gi3c->lock);
 
 	reinit_completion(&gi3c->done);
+	if (!pm_runtime_enabled(gi3c->se.dev))
+		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
+				"PM runtime disabled\n");
+
 	ret = pm_runtime_get_sync(gi3c->se.dev);
 	if (ret < 0) {
 		GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
@@ -669,8 +693,6 @@ static int i3c_geni_runtime_get_mutex_lock(struct geni_i3c_dev *gi3c)
 		return ret;
 	}
 
-	qcom_geni_i3c_conf(gi3c);
-
 	return 0; /* return 0 to indicate SUCCESS */
 }
 
@@ -919,6 +941,8 @@ static int geni_i3c_master_send_ccc_cmd
 	if (ret)
 		return ret;
 
+	qcom_geni_i3c_conf(gi3c, OPEN_DRAIN_MODE);
+
 	for (i = 0; i < cmd->ndests; i++) {
 		int stall = (i < (cmd->ndests - 1)) ||
 			(cmd->id == I3C_CCC_ENTDAA);
@@ -996,6 +1020,8 @@ static int geni_i3c_master_priv_xfers
 	if (ret)
 		return ret;
 
+	qcom_geni_i3c_conf(gi3c, PUSH_PULL_MODE);
+
 	for (i = 0; i < nxfers; i++) {
 		bool stall = (i < (nxfers - 1));
 		struct i3c_xfer_params xfer = { FIFO_MODE };
@@ -1047,6 +1073,8 @@ static int geni_i3c_master_i2c_xfers
 	if (ret)
 		return ret;
 
+	qcom_geni_i3c_conf(gi3c, PUSH_PULL_MODE);
+
 	GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
 		"i2c xfer:num:%d, msgs:len:%d,flg:%d\n",
 		num, msgs[0].len, msgs[0].flags);
@@ -1189,7 +1217,7 @@ static int geni_i3c_master_bus_init(struct i3c_master_controller *m)
 		goto err_cleanup;
 	}
 
-	qcom_geni_i3c_conf(gi3c);
+	qcom_geni_i3c_conf(gi3c, OPEN_DRAIN_MODE);
 
 	/* Get an address for the master. */
 	ret = i3c_master_get_free_addr(m, 0);
@@ -1204,8 +1232,16 @@ static int geni_i3c_master_bus_init(struct i3c_master_controller *m)
 	ret = i3c_master_set_info(&gi3c->ctrlr, &info);
 
 err_cleanup:
-	pm_runtime_mark_last_busy(gi3c->se.dev);
-	pm_runtime_put_autosuspend(gi3c->se.dev);
+	/*
+	 * As the framework calls multiple exposed APIs after this one, we
+	 * cannot use the mutex-protected internal put/get sync APIs here.
+	 * Hence, forcefully disable the clocks and decrement the usage count.
+	 */
+	disable_irq(gi3c->irq);
+	se_geni_resources_off(&gi3c->se.i3c_rsc);
+	pm_runtime_disable(gi3c->se.dev);
+	pm_runtime_put_noidle(gi3c->se.dev);
+	pm_runtime_set_suspended(gi3c->se.dev);
+	pm_runtime_enable(gi3c->se.dev);
 
 	return ret;
 }
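
/*
 * The five-call sequence above (and in geni_i3c_suspend_noirq() later in
 * this file) is a common recipe for forcing runtime-PM bookkeeping to match
 * hardware that was powered down by hand. A sketch of just that sequence:
 */
static void force_runtime_suspended(struct device *dev)
{
	pm_runtime_disable(dev);	/* freeze runtime-PM transitions */
	pm_runtime_put_noidle(dev);	/* drop usage count, no callbacks */
	pm_runtime_set_suspended(dev);	/* record: hardware is already off */
	pm_runtime_enable(dev);		/* resume normal accounting */
}
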
@@ -1476,19 +1512,14 @@ static void qcom_geni_i3c_ibi_unconf(struct i3c_dev_desc *dev)
 	/* check if any IBI is enabled, if not then reset HW */
 	val = geni_read_reg(gi3c->se.ibi_base, IBI_GPII_IBI_EN);
 	if (!val) {
-		u32 wait = 100;
-
 		gi3c->ibi.err = 0;
 		reinit_completion(&gi3c->ibi.done);
 
 		val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_CONFIG);
-		val |= ~IBI_C_ENABLE;
+		val &= ~IBI_C_ENABLE;
 		geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_CONFIG);
 
-		/* enable ENABLE_CHANGE */
-		val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
-		val |= ENABLE_CHANGE_IRQ_EN;
-		geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
-
 		/* wait for ENABLE change */
 		timeout = wait_for_completion_timeout(&gi3c->ibi.done,
@@ -1506,54 +1537,9 @@ static void qcom_geni_i3c_ibi_unconf(struct i3c_dev_desc *dev)
 			return;
 		}
 
-		/* IBI_C reset */
-		geni_write_reg(1, gi3c->se.ibi_base, IBI_SW_RESET);
-		/*
-		 * wait for SW_RESET to be taken care by HW. Post reset it
-		 * will get cleared by HW
-		 */
-		while (wait--) {
-			if (geni_read_reg(gi3c->se.ibi_base, IBI_SW_RESET) != 0)
-				break;
-			usleep_range(IBI_SW_RESET_MIN_SLEEP,
-				IBI_SW_RESET_MAX_SLEEP);
-		}
-
-		if (!wait)
-			GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
-				"IBI controller reset failed\n");
-
-		gi3c->ibi.err = 0;
-		reinit_completion(&gi3c->ibi.done);
-
-		/* enable ENABLE_CHANGE */
-		val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
-		val |= SW_RESET_DONE_EN;
-		geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
-
-		/* wait for SW_RESET_DONE */
-		timeout = wait_for_completion_timeout(&gi3c->ibi.done,
-				XFER_TIMEOUT);
-		if (!timeout) {
-			GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
-				"timeout while resetting  IBI controller\n");
-			return;
-		}
-
-		if (gi3c->ibi.err) {
-			GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev,
-				"error while resetting IBI controller 0x%x\n",
-				gi3c->ibi.err);
-			return;
-		}
-
-		/* disable IBI interrupts */
-		geni_write_reg(0, gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
 	}
 
 	gi3c->ibi.is_init = false;
-	disable_irq(gi3c->ibi.mngr_irq);
-	disable_irq(gi3c->ibi.gpii_irq[0]);
 }
 
 static void geni_i3c_master_free_ibi(struct i3c_dev_desc *dev)
@@ -1787,6 +1773,15 @@ static int i3c_ibi_rsrcs_init(struct geni_i3c_dev *gi3c,
 		return ret;
 	}
 
+	/* set mngr irq as wake-up irq */
+	ret = irq_set_irq_wake(gi3c->ibi.mngr_irq, 1);
+	if (ret) {
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"Failed to set mngr IRQ(%d) wake: err:%d\n",
+			gi3c->ibi.mngr_irq, ret);
+		return ret;
+	}
+
 	/* Register GPII interrupt */
 	gi3c->ibi.gpii_irq[0] = platform_get_irq(pdev, 2);
 	if (gi3c->ibi.gpii_irq[0] < 0) {
@@ -1805,6 +1800,15 @@ static int i3c_ibi_rsrcs_init(struct geni_i3c_dev *gi3c,
 		return ret;
 	}
 
+	/* set gpii irq as wake-up irq */
+	ret = irq_set_irq_wake(gi3c->ibi.gpii_irq[0], 1);
+	if (ret) {
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"Failed to set gpii IRQ(%d) wake: err:%d\n",
+			gi3c->ibi.gpii_irq[0], ret);
+		return ret;
+	}
+
 	qcom_geni_i3c_ibi_conf(gi3c);
 
 	return 0;
@@ -1919,6 +1923,11 @@ static int geni_i3c_remove(struct platform_device *pdev)
 	return ret;
 }
 
+static int geni_i3c_resume_noirq(struct device *dev)
+{
+	return 0;
+}
+
 #ifdef CONFIG_PM
 static int geni_i3c_runtime_suspend(struct device *dev)
 {
@@ -1943,6 +1952,18 @@ static int geni_i3c_runtime_resume(struct device *dev)
 	/* Enable TLMM I3C MODE registers */
 	return 0;
 }
+
+static int geni_i3c_suspend_noirq(struct device *dev)
+{
+	if (!pm_runtime_status_suspended(dev)) {
+		geni_i3c_runtime_suspend(dev);
+		pm_runtime_disable(dev);
+		pm_runtime_put_noidle(dev);
+		pm_runtime_set_suspended(dev);
+		pm_runtime_enable(dev);
+	}
+	return 0;
+}
 #else
 static int geni_i3c_runtime_suspend(struct device *dev)
 {
@@ -1953,9 +1974,16 @@ static int geni_i3c_runtime_resume(struct device *dev)
 {
 	return 0;
 }
+
+static int geni_i3c_suspend_noirq(struct device *dev)
+{
+	return 0;
+}
 #endif
 
 static const struct dev_pm_ops geni_i3c_pm_ops = {
+	.suspend_noirq = geni_i3c_suspend_noirq,
+	.resume_noirq = geni_i3c_resume_noirq,
 	.runtime_suspend = geni_i3c_runtime_suspend,
 	.runtime_resume  = geni_i3c_runtime_resume,
 };
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index e1da67d..9e61720 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -814,10 +814,10 @@ static int ad799x_probe(struct i2c_client *client,
 
 	ret = ad799x_write_config(st, st->chip_config->default_config);
 	if (ret < 0)
-		goto error_disable_reg;
+		goto error_disable_vref;
 	ret = ad799x_read_config(st);
 	if (ret < 0)
-		goto error_disable_reg;
+		goto error_disable_vref;
 	st->config = ret;
 
 	ret = iio_triggered_buffer_setup(indio_dev, NULL,
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 4e339cf..e6ce25b 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -16,6 +16,7 @@
  *
  */
 
+#include <linux/dmi.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
@@ -34,6 +35,11 @@
 #define AXP288_ADC_EN_MASK				0xF0
 #define AXP288_ADC_TS_ENABLE				0x01
 
+#define AXP288_ADC_TS_BIAS_MASK				GENMASK(5, 4)
+#define AXP288_ADC_TS_BIAS_20UA				(0 << 4)
+#define AXP288_ADC_TS_BIAS_40UA				(1 << 4)
+#define AXP288_ADC_TS_BIAS_60UA				(2 << 4)
+#define AXP288_ADC_TS_BIAS_80UA				(3 << 4)
 #define AXP288_ADC_TS_CURRENT_ON_OFF_MASK		GENMASK(1, 0)
 #define AXP288_ADC_TS_CURRENT_OFF			(0 << 0)
 #define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING		(1 << 0)
@@ -186,10 +192,36 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
 	return ret;
 }
 
+/*
+ * We rely on the machine's firmware to correctly setup the TS pin bias current
+ * at boot. This lists systems with broken fw where we need to set it ourselves.
+ */
+static const struct dmi_system_id axp288_adc_ts_bias_override[] = {
+	{
+		/* Lenovo Ideapad 100S (11 inch) */
+		.matches = {
+		  DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		  DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 100S-11IBY"),
+		},
+		.driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
+	},
+	{}
+};
+
 static int axp288_adc_initialize(struct axp288_adc_info *info)
 {
+	const struct dmi_system_id *bias_override;
 	int ret, adc_enable_val;
 
+	bias_override = dmi_first_match(axp288_adc_ts_bias_override);
+	if (bias_override) {
+		ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
+					 AXP288_ADC_TS_BIAS_MASK,
+					 (uintptr_t)bias_override->driver_data);
+		if (ret)
+			return ret;
+	}
+
 	/*
 	 * Determine if the TS pin is enabled and set the TS current-source
 	 * accordingly.
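
/*
 * A sketch of the dmi_first_match() idiom used above: the first entry whose
 * .matches strings all hit is returned, and .driver_data carries the
 * per-machine quirk. The vendor/product strings and quirk value below are
 * hypothetical.
 */
static const struct dmi_system_id example_bias_quirks[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ExampleBoard"),
		},
		.driver_data = (void *)(uintptr_t)0x30,	/* quirk value */
	},
	{}	/* table must be zero-terminated */
};

static uintptr_t example_get_quirk(void)
{
	const struct dmi_system_id *id = dmi_first_match(example_bias_quirks);

	return id ? (uintptr_t)id->driver_data : 0;
}
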
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 4be29ed..1ca2c4d 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -115,6 +115,8 @@
 #define MAX_ADC_V2_CHANNELS		10
 #define MAX_ADC_V1_CHANNELS		8
 #define MAX_EXYNOS3250_ADC_CHANNELS	2
+#define MAX_EXYNOS4212_ADC_CHANNELS	4
+#define MAX_S5PV210_ADC_CHANNELS	10
 
 /* Bit definitions common for ADC_V1 and ADC_V2 */
 #define ADC_CON_EN_START	(1u << 0)
@@ -270,6 +272,19 @@ static void exynos_adc_v1_start_conv(struct exynos_adc *info,
 	writel(con1 | ADC_CON_EN_START, ADC_V1_CON(info->regs));
 }
 
+/* Exynos4212 and 4412 is like ADCv1 but with four channels only */
+static const struct exynos_adc_data exynos4212_adc_data = {
+	.num_channels	= MAX_EXYNOS4212_ADC_CHANNELS,
+	.mask		= ADC_DATX_MASK,	/* 12 bit ADC resolution */
+	.needs_adc_phy	= true,
+	.phy_offset	= EXYNOS_ADCV1_PHY_OFFSET,
+
+	.init_hw	= exynos_adc_v1_init_hw,
+	.exit_hw	= exynos_adc_v1_exit_hw,
+	.clear_irq	= exynos_adc_v1_clear_irq,
+	.start_conv	= exynos_adc_v1_start_conv,
+};
+
 static const struct exynos_adc_data exynos_adc_v1_data = {
 	.num_channels	= MAX_ADC_V1_CHANNELS,
 	.mask		= ADC_DATX_MASK,	/* 12 bit ADC resolution */
@@ -282,6 +297,16 @@ static const struct exynos_adc_data exynos_adc_v1_data = {
 	.start_conv	= exynos_adc_v1_start_conv,
 };
 
+static const struct exynos_adc_data exynos_adc_s5pv210_data = {
+	.num_channels	= MAX_S5PV210_ADC_CHANNELS,
+	.mask		= ADC_DATX_MASK,	/* 12 bit ADC resolution */
+
+	.init_hw	= exynos_adc_v1_init_hw,
+	.exit_hw	= exynos_adc_v1_exit_hw,
+	.clear_irq	= exynos_adc_v1_clear_irq,
+	.start_conv	= exynos_adc_v1_start_conv,
+};
+
 static void exynos_adc_s3c2416_start_conv(struct exynos_adc *info,
 					  unsigned long addr)
 {
@@ -479,6 +504,12 @@ static const struct of_device_id exynos_adc_match[] = {
 		.compatible = "samsung,s3c6410-adc",
 		.data = &exynos_adc_s3c64xx_data,
 	}, {
+		.compatible = "samsung,s5pv210-adc",
+		.data = &exynos_adc_s5pv210_data,
+	}, {
+		.compatible = "samsung,exynos4212-adc",
+		.data = &exynos4212_adc_data,
+	}, {
 		.compatible = "samsung,exynos-adc-v1",
 		.data = &exynos_adc_v1_data,
 	}, {
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index 36b59d8..6c5d81a 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -109,14 +109,14 @@ struct hx711_data {
 
 static int hx711_cycle(struct hx711_data *hx711_data)
 {
-	int val;
+	unsigned long flags;
 
 	/*
 	 * if preempted for more than 60 us while PD_SCK is high:
 	 * the hx711 goes into reset
 	 * ==> measuring is false
 	 */
-	preempt_disable();
+	local_irq_save(flags);
 	gpiod_set_value(hx711_data->gpiod_pd_sck, 1);
 
 	/*
@@ -126,7 +126,6 @@ static int hx711_cycle(struct hx711_data *hx711_data)
 	 */
 	ndelay(hx711_data->data_ready_delay_ns);
 
-	val = gpiod_get_value(hx711_data->gpiod_dout);
 	/*
 	 * here we are not waiting for 0.2 us as suggested by the datasheet,
 	 * because the oscilloscope showed in a test scenario
@@ -134,7 +133,7 @@ static int hx711_cycle(struct hx711_data *hx711_data)
 	 * and 0.56 us for PD_SCK low on TI Sitara with 800 MHz
 	 */
 	gpiod_set_value(hx711_data->gpiod_pd_sck, 0);
-	preempt_enable();
+	local_irq_restore(flags);
 
 	/*
 	 * make it a square wave for addressing cases with capacitance on
@@ -142,7 +141,8 @@ static int hx711_cycle(struct hx711_data *hx711_data)
 	 */
 	ndelay(hx711_data->data_ready_delay_ns);
 
-	return val;
+	/* sample as late as possible */
+	return gpiod_get_value(hx711_data->gpiod_dout);
 }
 
 static int hx711_read(struct hx711_data *hx711_data)
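
/*
 * A sketch of the timing fix above: preempt_disable() only keeps the
 * scheduler away, while an interrupt can still stretch the PD_SCK-high
 * phase past the ~60 us reset threshold; local_irq_save() closes that
 * window on the local CPU. Parameter names are hypothetical.
 */
static void pulse_sck(struct gpio_desc *sck, u32 high_ns)
{
	unsigned long flags;

	local_irq_save(flags);		/* no irqs or preemption on this CPU */
	gpiod_set_value(sck, 1);	/* timing-critical high phase begins */
	ndelay(high_ns);		/* bounded busy-wait, well under 60 us */
	gpiod_set_value(sck, 0);
	local_irq_restore(flags);
}
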
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index ce9af43..49c1956 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -483,7 +483,7 @@ static int max9611_init(struct max9611_dev *max9611)
 	if (ret)
 		return ret;
 
-	regval = ret & MAX9611_TEMP_MASK;
+	regval &= MAX9611_TEMP_MASK;
 
 	if ((regval > MAX9611_TEMP_MAX_POS &&
 	     regval < MAX9611_TEMP_MIN_NEG) ||
diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
index 42f41f8..283b968 100644
--- a/drivers/iio/adc/qcom-spmi-adc5.c
+++ b/drivers/iio/adc/qcom-spmi-adc5.c
@@ -849,6 +849,11 @@ static int adc_get_dt_channel_data(struct device *dev,
 		prop->avg_samples = VADC_DEF_AVG_SAMPLES;
 	}
 
+	prop->scale_fn_type = -EINVAL;
+	ret = of_property_read_u32(node, "qcom,scale-fn-type", &value);
+	if (!ret && value < SCALE_HW_CALIB_MAX)
+		prop->scale_fn_type = value;
+
 	prop->lut_index = VADC_DEF_LUT_INDEX;
 
 	ret = of_property_read_u32(node, "qcom,lut-index", &value);
@@ -942,8 +947,10 @@ static int adc_get_dt_data(struct adc_chip *adc, struct device_node *node)
 			return ret;
 		}
 
-		prop.scale_fn_type =
-			data->adc_chans[prop.channel].scale_fn_type;
+		if (prop.scale_fn_type == -EINVAL)
+			prop.scale_fn_type =
+				data->adc_chans[prop.channel].scale_fn_type;
+
 		adc->chan_props[index] = prop;
 
 		adc_chan = &data->adc_chans[prop.channel];
diff --git a/drivers/iio/adc/qcom-vadc-common.c b/drivers/iio/adc/qcom-vadc-common.c
index ebcfbde..114dab7 100644
--- a/drivers/iio/adc/qcom-vadc-common.c
+++ b/drivers/iio/adc/qcom-vadc-common.c
@@ -925,6 +925,35 @@ static int qcom_vadc_scale_hw_smb_temp(
 	return 0;
 }
 
+static int qcom_vadc_scale_hw_smb1398_temp(
+				const struct vadc_prescale_ratio *prescale,
+				const struct adc_data *data,
+				u16 adc_code, int *result_mdec)
+{
+	s64 voltage = 0, adc_vdd_ref_mv = 1875;
+	u64 temp;
+
+	if (adc_code > VADC5_MAX_CODE)
+		adc_code = 0;
+
+	/* (ADC code * vref_vadc (1.875V)) / full_scale_code */
+	voltage = (s64) adc_code * adc_vdd_ref_mv * 1000;
+	voltage = div64_s64(voltage, data->full_scale_code_volt);
+	if (voltage > 0) {
+		temp = voltage * prescale->den;
+		temp *= 100;
+		do_div(temp, prescale->num * PMIC5_SMB1398_TEMP_SCALE_FACTOR);
+		voltage = temp;
+	} else {
+		voltage = 0;
+	}
+
+	voltage = voltage - PMIC5_SMB1398_TEMP_CONSTANT;
+	*result_mdec = voltage;
+
+	return 0;
+}
+
 static int qcom_vadc_scale_hw_chg5_temp(
 				const struct vadc_prescale_ratio *prescale,
 				const struct adc_data *data,
@@ -1043,6 +1072,9 @@ int qcom_vadc_hw_scale(enum vadc_scale_fn_type scaletype,
 	case SCALE_HW_CALIB_PM5_SMB_TEMP:
 		return qcom_vadc_scale_hw_smb_temp(prescale, data,
 						adc_code, result);
+	case SCALE_HW_CALIB_PM5_SMB1398_TEMP:
+		return qcom_vadc_scale_hw_smb1398_temp(prescale, data,
+						adc_code, result);
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/iio/adc/qcom-vadc-common.h b/drivers/iio/adc/qcom-vadc-common.h
index ccd3751..e77c92f 100644
--- a/drivers/iio/adc/qcom-vadc-common.h
+++ b/drivers/iio/adc/qcom-vadc-common.h
@@ -44,6 +44,8 @@
 #define PMIC5_CHG_TEMP_SCALE_FACTOR		377500
 #define PMIC5_SMB_TEMP_CONSTANT			419400
 #define PMIC5_SMB_TEMP_SCALE_FACTOR		356
+#define PMIC5_SMB1398_TEMP_SCALE_FACTOR	340
+#define PMIC5_SMB1398_TEMP_CONSTANT		268235
 
 #define PMI_CHG_SCALE_1				-138890
 #define PMI_CHG_SCALE_2				391750000000LL
@@ -175,6 +177,8 @@ enum vadc_scale_fn_type {
 	SCALE_HW_CALIB_BATT_THERM_100K,
 	SCALE_HW_CALIB_BATT_THERM_30K,
 	SCALE_HW_CALIB_BATT_THERM_400K,
+	SCALE_HW_CALIB_PM5_SMB1398_TEMP,
+	SCALE_HW_CALIB_MAX,
 };
 
 struct adc_data {
diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
index dcb5017..f3a966a 100644
--- a/drivers/iio/adc/rcar-gyroadc.c
+++ b/drivers/iio/adc/rcar-gyroadc.c
@@ -391,7 +391,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
 				dev_err(dev,
 					"Only %i channels supported with %s, but reg = <%i>.\n",
 					num_channels, child->name, reg);
-				return ret;
+				return -EINVAL;
 			}
 		}
 
@@ -400,7 +400,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
 			dev_err(dev,
 				"Channel %i uses different ADC mode than the rest.\n",
 				reg);
-			return ret;
+			return -EINVAL;
 		}
 
 		/* Channel is valid, grab the regulator. */
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index ca432e7..38eb966 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -21,45 +21,22 @@
 
 #include "stm32-adc-core.h"
 
-/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
-#define STM32F4_ADC_CSR			(STM32_ADCX_COMN_OFFSET + 0x00)
-#define STM32F4_ADC_CCR			(STM32_ADCX_COMN_OFFSET + 0x04)
-
-/* STM32F4_ADC_CSR - bit fields */
-#define STM32F4_EOC3			BIT(17)
-#define STM32F4_EOC2			BIT(9)
-#define STM32F4_EOC1			BIT(1)
-
-/* STM32F4_ADC_CCR - bit fields */
-#define STM32F4_ADC_ADCPRE_SHIFT	16
-#define STM32F4_ADC_ADCPRE_MASK		GENMASK(17, 16)
-
-/* STM32H7 - common registers for all ADC instances */
-#define STM32H7_ADC_CSR			(STM32_ADCX_COMN_OFFSET + 0x00)
-#define STM32H7_ADC_CCR			(STM32_ADCX_COMN_OFFSET + 0x08)
-
-/* STM32H7_ADC_CSR - bit fields */
-#define STM32H7_EOC_SLV			BIT(18)
-#define STM32H7_EOC_MST			BIT(2)
-
-/* STM32H7_ADC_CCR - bit fields */
-#define STM32H7_PRESC_SHIFT		18
-#define STM32H7_PRESC_MASK		GENMASK(21, 18)
-#define STM32H7_CKMODE_SHIFT		16
-#define STM32H7_CKMODE_MASK		GENMASK(17, 16)
-
 /**
  * stm32_adc_common_regs - stm32 common registers, compatible dependent data
  * @csr:	common status register offset
  * @eoc1:	adc1 end of conversion flag in @csr
  * @eoc2:	adc2 end of conversion flag in @csr
  * @eoc3:	adc3 end of conversion flag in @csr
+ * @ier:	interrupt enable register offset for each adc
+ * @eocie_msk:	end of conversion interrupt enable mask in @ier
  */
 struct stm32_adc_common_regs {
 	u32 csr;
 	u32 eoc1_msk;
 	u32 eoc2_msk;
 	u32 eoc3_msk;
+	u32 ier;
+	u32 eocie_msk;
 };
 
 struct stm32_adc_priv;
@@ -268,6 +245,8 @@ static const struct stm32_adc_common_regs stm32f4_adc_common_regs = {
 	.eoc1_msk = STM32F4_EOC1,
 	.eoc2_msk = STM32F4_EOC2,
 	.eoc3_msk = STM32F4_EOC3,
+	.ier = STM32F4_ADC_CR1,
+	.eocie_msk = STM32F4_EOCIE,
 };
 
 /* STM32H7 common registers definitions */
@@ -275,8 +254,24 @@ static const struct stm32_adc_common_regs stm32h7_adc_common_regs = {
 	.csr = STM32H7_ADC_CSR,
 	.eoc1_msk = STM32H7_EOC_MST,
 	.eoc2_msk = STM32H7_EOC_SLV,
+	.ier = STM32H7_ADC_IER,
+	.eocie_msk = STM32H7_EOCIE,
 };
 
+static const unsigned int stm32_adc_offset[STM32_ADC_MAX_ADCS] = {
+	0, STM32_ADC_OFFSET, STM32_ADC_OFFSET * 2,
+};
+
+static unsigned int stm32_adc_eoc_enabled(struct stm32_adc_priv *priv,
+					  unsigned int adc)
+{
+	u32 ier, offset = stm32_adc_offset[adc];
+
+	ier = readl_relaxed(priv->common.base + offset + priv->cfg->regs->ier);
+
+	return ier & priv->cfg->regs->eocie_msk;
+}
+
 /* ADC common interrupt for all instances */
 static void stm32_adc_irq_handler(struct irq_desc *desc)
 {
@@ -287,13 +282,28 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
 	chained_irq_enter(chip, desc);
 	status = readl_relaxed(priv->common.base + priv->cfg->regs->csr);
 
-	if (status & priv->cfg->regs->eoc1_msk)
+	/*
+	 * End of conversion may be handled by using IRQ or DMA. There may be a
+	 * race here when two conversions complete at the same time on several
+	 * ADCs. EOC may be read 'set' for several ADCs, with:
+	 * - an ADC configured to use DMA (EOC triggers the DMA request, and
+	 *   is then automatically cleared by DR read in hardware)
+	 * - an ADC configured to use IRQs (EOCIE bit is set. The handler must
+	 *   be called in this case)
+	 * So both EOC status bit in CSR and EOCIE control bit must be checked
+	 * before invoking the interrupt handler (i.e. call the ISR only for
+	 * IRQ-enabled ADCs).
+	 */
+	if (status & priv->cfg->regs->eoc1_msk &&
+	    stm32_adc_eoc_enabled(priv, 0))
 		generic_handle_irq(irq_find_mapping(priv->domain, 0));
 
-	if (status & priv->cfg->regs->eoc2_msk)
+	if (status & priv->cfg->regs->eoc2_msk &&
+	    stm32_adc_eoc_enabled(priv, 1))
 		generic_handle_irq(irq_find_mapping(priv->domain, 1));
 
-	if (status & priv->cfg->regs->eoc3_msk)
+	if (status & priv->cfg->regs->eoc3_msk &&
+	    stm32_adc_eoc_enabled(priv, 2))
 		generic_handle_irq(irq_find_mapping(priv->domain, 2));
 
 	chained_irq_exit(chip, desc);
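
Note: the handler above encodes a reusable demux rule for shared-status
interrupt lines: dispatch an instance only when its status flag in the
common CSR and its enable bit in the per-instance IER are both set, so
instances serviced by DMA (status set, interrupt disabled) are skipped.
A minimal sketch of the rule; all DEMO_*/demo_* names are illustrative,
not part of the stm32 driver:

	static void demo_shared_irq_demux(void __iomem *base)
	{
		u32 status = readl_relaxed(base + DEMO_CSR); /* shared status */
		unsigned int i;

		for (i = 0; i < DEMO_NUM_INST; i++) {
			u32 ier = readl_relaxed(base + i * DEMO_INST_SZ +
						DEMO_IER);

			/* skip DMA-serviced instances: enable bit clear */
			if ((status & BIT(i)) && (ier & DEMO_EOCIE))
				demo_handle_instance(i); /* hypothetical */
		}
	}
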
diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h
index 8af507b..2579d51 100644
--- a/drivers/iio/adc/stm32-adc-core.h
+++ b/drivers/iio/adc/stm32-adc-core.h
@@ -25,8 +25,145 @@
  * --------------------------------------------------------
  */
 #define STM32_ADC_MAX_ADCS		3
+#define STM32_ADC_OFFSET		0x100
 #define STM32_ADCX_COMN_OFFSET		0x300
 
+/* STM32F4 - Registers for each ADC instance */
+#define STM32F4_ADC_SR			0x00
+#define STM32F4_ADC_CR1			0x04
+#define STM32F4_ADC_CR2			0x08
+#define STM32F4_ADC_SMPR1		0x0C
+#define STM32F4_ADC_SMPR2		0x10
+#define STM32F4_ADC_HTR			0x24
+#define STM32F4_ADC_LTR			0x28
+#define STM32F4_ADC_SQR1		0x2C
+#define STM32F4_ADC_SQR2		0x30
+#define STM32F4_ADC_SQR3		0x34
+#define STM32F4_ADC_JSQR		0x38
+#define STM32F4_ADC_JDR1		0x3C
+#define STM32F4_ADC_JDR2		0x40
+#define STM32F4_ADC_JDR3		0x44
+#define STM32F4_ADC_JDR4		0x48
+#define STM32F4_ADC_DR			0x4C
+
+/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
+#define STM32F4_ADC_CSR			(STM32_ADCX_COMN_OFFSET + 0x00)
+#define STM32F4_ADC_CCR			(STM32_ADCX_COMN_OFFSET + 0x04)
+
+/* STM32F4_ADC_SR - bit fields */
+#define STM32F4_STRT			BIT(4)
+#define STM32F4_EOC			BIT(1)
+
+/* STM32F4_ADC_CR1 - bit fields */
+#define STM32F4_RES_SHIFT		24
+#define STM32F4_RES_MASK		GENMASK(25, 24)
+#define STM32F4_SCAN			BIT(8)
+#define STM32F4_EOCIE			BIT(5)
+
+/* STM32F4_ADC_CR2 - bit fields */
+#define STM32F4_SWSTART			BIT(30)
+#define STM32F4_EXTEN_SHIFT		28
+#define STM32F4_EXTEN_MASK		GENMASK(29, 28)
+#define STM32F4_EXTSEL_SHIFT		24
+#define STM32F4_EXTSEL_MASK		GENMASK(27, 24)
+#define STM32F4_EOCS			BIT(10)
+#define STM32F4_DDS			BIT(9)
+#define STM32F4_DMA			BIT(8)
+#define STM32F4_ADON			BIT(0)
+
+/* STM32F4_ADC_CSR - bit fields */
+#define STM32F4_EOC3			BIT(17)
+#define STM32F4_EOC2			BIT(9)
+#define STM32F4_EOC1			BIT(1)
+
+/* STM32F4_ADC_CCR - bit fields */
+#define STM32F4_ADC_ADCPRE_SHIFT	16
+#define STM32F4_ADC_ADCPRE_MASK		GENMASK(17, 16)
+
+/* STM32H7 - Registers for each ADC instance */
+#define STM32H7_ADC_ISR			0x00
+#define STM32H7_ADC_IER			0x04
+#define STM32H7_ADC_CR			0x08
+#define STM32H7_ADC_CFGR		0x0C
+#define STM32H7_ADC_SMPR1		0x14
+#define STM32H7_ADC_SMPR2		0x18
+#define STM32H7_ADC_PCSEL		0x1C
+#define STM32H7_ADC_SQR1		0x30
+#define STM32H7_ADC_SQR2		0x34
+#define STM32H7_ADC_SQR3		0x38
+#define STM32H7_ADC_SQR4		0x3C
+#define STM32H7_ADC_DR			0x40
+#define STM32H7_ADC_DIFSEL		0xC0
+#define STM32H7_ADC_CALFACT		0xC4
+#define STM32H7_ADC_CALFACT2		0xC8
+
+/* STM32H7 - common registers for all ADC instances */
+#define STM32H7_ADC_CSR			(STM32_ADCX_COMN_OFFSET + 0x00)
+#define STM32H7_ADC_CCR			(STM32_ADCX_COMN_OFFSET + 0x08)
+
+/* STM32H7_ADC_ISR - bit fields */
+#define STM32MP1_VREGREADY		BIT(12)
+#define STM32H7_EOC			BIT(2)
+#define STM32H7_ADRDY			BIT(0)
+
+/* STM32H7_ADC_IER - bit fields */
+#define STM32H7_EOCIE			STM32H7_EOC
+
+/* STM32H7_ADC_CR - bit fields */
+#define STM32H7_ADCAL			BIT(31)
+#define STM32H7_ADCALDIF		BIT(30)
+#define STM32H7_DEEPPWD			BIT(29)
+#define STM32H7_ADVREGEN		BIT(28)
+#define STM32H7_LINCALRDYW6		BIT(27)
+#define STM32H7_LINCALRDYW5		BIT(26)
+#define STM32H7_LINCALRDYW4		BIT(25)
+#define STM32H7_LINCALRDYW3		BIT(24)
+#define STM32H7_LINCALRDYW2		BIT(23)
+#define STM32H7_LINCALRDYW1		BIT(22)
+#define STM32H7_ADCALLIN		BIT(16)
+#define STM32H7_BOOST			BIT(8)
+#define STM32H7_ADSTP			BIT(4)
+#define STM32H7_ADSTART			BIT(2)
+#define STM32H7_ADDIS			BIT(1)
+#define STM32H7_ADEN			BIT(0)
+
+/* STM32H7_ADC_CFGR bit fields */
+#define STM32H7_EXTEN_SHIFT		10
+#define STM32H7_EXTEN_MASK		GENMASK(11, 10)
+#define STM32H7_EXTSEL_SHIFT		5
+#define STM32H7_EXTSEL_MASK		GENMASK(9, 5)
+#define STM32H7_RES_SHIFT		2
+#define STM32H7_RES_MASK		GENMASK(4, 2)
+#define STM32H7_DMNGT_SHIFT		0
+#define STM32H7_DMNGT_MASK		GENMASK(1, 0)
+
+enum stm32h7_adc_dmngt {
+	STM32H7_DMNGT_DR_ONLY,		/* Regular data in DR only */
+	STM32H7_DMNGT_DMA_ONESHOT,	/* DMA one shot mode */
+	STM32H7_DMNGT_DFSDM,		/* DFSDM mode */
+	STM32H7_DMNGT_DMA_CIRC,		/* DMA circular mode */
+};
+
+/* STM32H7_ADC_CALFACT - bit fields */
+#define STM32H7_CALFACT_D_SHIFT		16
+#define STM32H7_CALFACT_D_MASK		GENMASK(26, 16)
+#define STM32H7_CALFACT_S_SHIFT		0
+#define STM32H7_CALFACT_S_MASK		GENMASK(10, 0)
+
+/* STM32H7_ADC_CALFACT2 - bit fields */
+#define STM32H7_LINCALFACT_SHIFT	0
+#define STM32H7_LINCALFACT_MASK		GENMASK(29, 0)
+
+/* STM32H7_ADC_CSR - bit fields */
+#define STM32H7_EOC_SLV			BIT(18)
+#define STM32H7_EOC_MST			BIT(2)
+
+/* STM32H7_ADC_CCR - bit fields */
+#define STM32H7_PRESC_SHIFT		18
+#define STM32H7_PRESC_MASK		GENMASK(21, 18)
+#define STM32H7_CKMODE_SHIFT		16
+#define STM32H7_CKMODE_MASK		GENMASK(17, 16)
+
 /**
  * struct stm32_adc_common - stm32 ADC driver common data (for all instances)
  * @base:		control registers base cpu addr
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 3784118..c52d20f 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -27,115 +27,6 @@
 
 #include "stm32-adc-core.h"
 
-/* STM32F4 - Registers for each ADC instance */
-#define STM32F4_ADC_SR			0x00
-#define STM32F4_ADC_CR1			0x04
-#define STM32F4_ADC_CR2			0x08
-#define STM32F4_ADC_SMPR1		0x0C
-#define STM32F4_ADC_SMPR2		0x10
-#define STM32F4_ADC_HTR			0x24
-#define STM32F4_ADC_LTR			0x28
-#define STM32F4_ADC_SQR1		0x2C
-#define STM32F4_ADC_SQR2		0x30
-#define STM32F4_ADC_SQR3		0x34
-#define STM32F4_ADC_JSQR		0x38
-#define STM32F4_ADC_JDR1		0x3C
-#define STM32F4_ADC_JDR2		0x40
-#define STM32F4_ADC_JDR3		0x44
-#define STM32F4_ADC_JDR4		0x48
-#define STM32F4_ADC_DR			0x4C
-
-/* STM32F4_ADC_SR - bit fields */
-#define STM32F4_STRT			BIT(4)
-#define STM32F4_EOC			BIT(1)
-
-/* STM32F4_ADC_CR1 - bit fields */
-#define STM32F4_RES_SHIFT		24
-#define STM32F4_RES_MASK		GENMASK(25, 24)
-#define STM32F4_SCAN			BIT(8)
-#define STM32F4_EOCIE			BIT(5)
-
-/* STM32F4_ADC_CR2 - bit fields */
-#define STM32F4_SWSTART			BIT(30)
-#define STM32F4_EXTEN_SHIFT		28
-#define STM32F4_EXTEN_MASK		GENMASK(29, 28)
-#define STM32F4_EXTSEL_SHIFT		24
-#define STM32F4_EXTSEL_MASK		GENMASK(27, 24)
-#define STM32F4_EOCS			BIT(10)
-#define STM32F4_DDS			BIT(9)
-#define STM32F4_DMA			BIT(8)
-#define STM32F4_ADON			BIT(0)
-
-/* STM32H7 - Registers for each ADC instance */
-#define STM32H7_ADC_ISR			0x00
-#define STM32H7_ADC_IER			0x04
-#define STM32H7_ADC_CR			0x08
-#define STM32H7_ADC_CFGR		0x0C
-#define STM32H7_ADC_SMPR1		0x14
-#define STM32H7_ADC_SMPR2		0x18
-#define STM32H7_ADC_PCSEL		0x1C
-#define STM32H7_ADC_SQR1		0x30
-#define STM32H7_ADC_SQR2		0x34
-#define STM32H7_ADC_SQR3		0x38
-#define STM32H7_ADC_SQR4		0x3C
-#define STM32H7_ADC_DR			0x40
-#define STM32H7_ADC_DIFSEL		0xC0
-#define STM32H7_ADC_CALFACT		0xC4
-#define STM32H7_ADC_CALFACT2		0xC8
-
-/* STM32H7_ADC_ISR - bit fields */
-#define STM32MP1_VREGREADY		BIT(12)
-#define STM32H7_EOC			BIT(2)
-#define STM32H7_ADRDY			BIT(0)
-
-/* STM32H7_ADC_IER - bit fields */
-#define STM32H7_EOCIE			STM32H7_EOC
-
-/* STM32H7_ADC_CR - bit fields */
-#define STM32H7_ADCAL			BIT(31)
-#define STM32H7_ADCALDIF		BIT(30)
-#define STM32H7_DEEPPWD			BIT(29)
-#define STM32H7_ADVREGEN		BIT(28)
-#define STM32H7_LINCALRDYW6		BIT(27)
-#define STM32H7_LINCALRDYW5		BIT(26)
-#define STM32H7_LINCALRDYW4		BIT(25)
-#define STM32H7_LINCALRDYW3		BIT(24)
-#define STM32H7_LINCALRDYW2		BIT(23)
-#define STM32H7_LINCALRDYW1		BIT(22)
-#define STM32H7_ADCALLIN		BIT(16)
-#define STM32H7_BOOST			BIT(8)
-#define STM32H7_ADSTP			BIT(4)
-#define STM32H7_ADSTART			BIT(2)
-#define STM32H7_ADDIS			BIT(1)
-#define STM32H7_ADEN			BIT(0)
-
-/* STM32H7_ADC_CFGR bit fields */
-#define STM32H7_EXTEN_SHIFT		10
-#define STM32H7_EXTEN_MASK		GENMASK(11, 10)
-#define STM32H7_EXTSEL_SHIFT		5
-#define STM32H7_EXTSEL_MASK		GENMASK(9, 5)
-#define STM32H7_RES_SHIFT		2
-#define STM32H7_RES_MASK		GENMASK(4, 2)
-#define STM32H7_DMNGT_SHIFT		0
-#define STM32H7_DMNGT_MASK		GENMASK(1, 0)
-
-enum stm32h7_adc_dmngt {
-	STM32H7_DMNGT_DR_ONLY,		/* Regular data in DR only */
-	STM32H7_DMNGT_DMA_ONESHOT,	/* DMA one shot mode */
-	STM32H7_DMNGT_DFSDM,		/* DFSDM mode */
-	STM32H7_DMNGT_DMA_CIRC,		/* DMA circular mode */
-};
-
-/* STM32H7_ADC_CALFACT - bit fields */
-#define STM32H7_CALFACT_D_SHIFT		16
-#define STM32H7_CALFACT_D_MASK		GENMASK(26, 16)
-#define STM32H7_CALFACT_S_SHIFT		0
-#define STM32H7_CALFACT_S_MASK		GENMASK(10, 0)
-
-/* STM32H7_ADC_CALFACT2 - bit fields */
-#define STM32H7_LINCALFACT_SHIFT	0
-#define STM32H7_LINCALFACT_MASK		GENMASK(29, 0)
-
 /* Number of linear calibration shadow registers / LINCALRDYW control bits */
 #define STM32H7_LINCALFACT_NUM		6
 
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index 15a1152..f5586dd 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -981,11 +981,11 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
 	ch->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
 
 	if (adc->dev_data->type == DFSDM_AUDIO) {
-		ch->scan_type.sign = 's';
 		ch->ext_info = dfsdm_adc_audio_ext_info;
 	} else {
-		ch->scan_type.sign = 'u';
+		ch->scan_type.shift = 8;
 	}
+	ch->scan_type.sign = 's';
 	ch->scan_type.realbits = 24;
 	ch->scan_type.storagebits = 32;
 
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index 54d88b6..f9d13e4 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -694,6 +694,7 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
 	struct iio_dev *iio = _iio;
 	struct opt3001 *opt = iio_priv(iio);
 	int ret;
+	bool wake_result_ready_queue = false;
 
 	if (!opt->ok_to_ignore_lock)
 		mutex_lock(&opt->lock);
@@ -728,13 +729,16 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
 		}
 		opt->result = ret;
 		opt->result_ready = true;
-		wake_up(&opt->result_ready_queue);
+		wake_result_ready_queue = true;
 	}
 
 out:
 	if (!opt->ok_to_ignore_lock)
 		mutex_unlock(&opt->lock);
 
+	if (wake_result_ready_queue)
+		wake_up(&opt->result_ready_queue);
+
 	return IRQ_HANDLED;
 }
 
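
Note: the opt3001 fix is the deferred-wakeup idiom: while the threaded
IRQ handler holds the mutex it only records that a wakeup is due, and
wake_up() runs after the unlock, so the woken waiter cannot immediately
block on a mutex the handler still owns. A sketch under hypothetical
demo_* names:

	static irqreturn_t demo_threaded_irq(int irq, void *priv)
	{
		struct demo_dev *d = priv;
		bool wake = false;

		mutex_lock(&d->lock);
		if (demo_read_result(d) == 0) {	/* hypothetical helper */
			d->result_ready = true;
			wake = true;		/* don't wake under the lock */
		}
		mutex_unlock(&d->lock);

		if (wake)
			wake_up(&d->result_queue);

		return IRQ_HANDLED;
	}
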
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 39dc7be..6257be2 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1723,8 +1723,8 @@ void rdma_destroy_id(struct rdma_cm_id *id)
 	mutex_lock(&id_priv->handler_mutex);
 	mutex_unlock(&id_priv->handler_mutex);
 
+	rdma_restrack_del(&id_priv->res);
 	if (id_priv->cma_dev) {
-		rdma_restrack_del(&id_priv->res);
 		if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
 			if (id_priv->cm_id.ib)
 				ib_destroy_cm_id(id_priv->cm_id.ib);
@@ -3463,10 +3463,9 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 
 	return 0;
 err2:
-	if (id_priv->cma_dev) {
-		rdma_restrack_del(&id_priv->res);
+	rdma_restrack_del(&id_priv->res);
+	if (id_priv->cma_dev)
 		cma_release_dev(id_priv);
-	}
 err1:
 	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
 	return ret;
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index af5ad6a..9271f72 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -112,12 +112,12 @@ static void ib_cq_poll_work(struct work_struct *work)
 				    IB_POLL_BATCH);
 	if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
 	    ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
-		queue_work(ib_comp_wq, &cq->work);
+		queue_work(cq->comp_wq, &cq->work);
 }
 
 static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
 {
-	queue_work(ib_comp_wq, &cq->work);
+	queue_work(cq->comp_wq, &cq->work);
 }
 
 /**
@@ -175,9 +175,12 @@ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
 		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 		break;
 	case IB_POLL_WORKQUEUE:
+	case IB_POLL_UNBOUND_WORKQUEUE:
 		cq->comp_handler = ib_cq_completion_workqueue;
 		INIT_WORK(&cq->work, ib_cq_poll_work);
 		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+		cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ?
+				ib_comp_wq : ib_comp_unbound_wq;
 		break;
 	default:
 		ret = -EINVAL;
@@ -213,6 +216,7 @@ void ib_free_cq(struct ib_cq *cq)
 		irq_poll_disable(&cq->iop);
 		break;
 	case IB_POLL_WORKQUEUE:
+	case IB_POLL_UNBOUND_WORKQUEUE:
 		cancel_work_sync(&cq->work);
 		break;
 	default:
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index db3b627..6d8ac51 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -61,6 +61,7 @@ struct ib_client_data {
 };
 
 struct workqueue_struct *ib_comp_wq;
+struct workqueue_struct *ib_comp_unbound_wq;
 struct workqueue_struct *ib_wq;
 EXPORT_SYMBOL_GPL(ib_wq);
 
@@ -1166,10 +1167,19 @@ static int __init ib_core_init(void)
 		goto err;
 	}
 
+	ib_comp_unbound_wq =
+		alloc_workqueue("ib-comp-unb-wq",
+				WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
+				WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
+	if (!ib_comp_unbound_wq) {
+		ret = -ENOMEM;
+		goto err_comp;
+	}
+
 	ret = class_register(&ib_class);
 	if (ret) {
 		pr_warn("Couldn't create InfiniBand device class\n");
-		goto err_comp;
+		goto err_comp_unbound;
 	}
 
 	ret = rdma_nl_init();
@@ -1218,6 +1228,8 @@ static int __init ib_core_init(void)
 	rdma_nl_exit();
 err_sysfs:
 	class_unregister(&ib_class);
+err_comp_unbound:
+	destroy_workqueue(ib_comp_unbound_wq);
 err_comp:
 	destroy_workqueue(ib_comp_wq);
 err:
@@ -1236,6 +1248,7 @@ static void __exit ib_core_cleanup(void)
 	addr_cleanup();
 	rdma_nl_exit();
 	class_unregister(&ib_class);
+	destroy_workqueue(ib_comp_unbound_wq);
 	destroy_workqueue(ib_comp_wq);
 	/* Make sure that any pending umem accounting work is done. */
 	destroy_workqueue(ib_wq);
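
Note: ib_comp_unbound_wq gives CQ consumers a completion context that is
not pinned to the interrupted CPU: WQ_UNBOUND lets the scheduler place
the work, WQ_MEM_RECLAIM keeps it usable on the I/O path, and
WQ_UNBOUND_MAX_ACTIVE bounds concurrency. A consumer opts in through the
new poll context, as the MAD code below does; usage sketch:

	cq = ib_alloc_cq(device, priv, cq_size, 0, IB_POLL_UNBOUND_WORKQUEUE);
	if (IS_ERR(cq))
		return PTR_ERR(cq);
	/* ... completions now run on ib_comp_unbound_wq ... */
	ib_free_cq(cq);
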
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index ef459f2..74aa3e6 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -3182,18 +3182,18 @@ static int ib_mad_port_open(struct ib_device *device,
 	if (has_smi)
 		cq_size *= 2;
 
-	port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
-			IB_POLL_WORKQUEUE);
-	if (IS_ERR(port_priv->cq)) {
-		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
-		ret = PTR_ERR(port_priv->cq);
-		goto error3;
-	}
-
 	port_priv->pd = ib_alloc_pd(device, 0);
 	if (IS_ERR(port_priv->pd)) {
 		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
 		ret = PTR_ERR(port_priv->pd);
+		goto error3;
+	}
+
+	port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
+			IB_POLL_UNBOUND_WORKQUEUE);
+	if (IS_ERR(port_priv->cq)) {
+		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
+		ret = PTR_ERR(port_priv->cq);
 		goto error4;
 	}
 
@@ -3236,11 +3236,11 @@ static int ib_mad_port_open(struct ib_device *device,
 error7:
 	destroy_mad_qp(&port_priv->qp_info[0]);
 error6:
-	ib_dealloc_pd(port_priv->pd);
-error4:
 	ib_free_cq(port_priv->cq);
 	cleanup_recv_queue(&port_priv->qp_info[1]);
 	cleanup_recv_queue(&port_priv->qp_info[0]);
+error4:
+	ib_dealloc_pd(port_priv->pd);
 error3:
 	kfree(port_priv);
 
@@ -3270,8 +3270,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
 	destroy_workqueue(port_priv->wq);
 	destroy_mad_qp(&port_priv->qp_info[1]);
 	destroy_mad_qp(&port_priv->qp_info[0]);
-	ib_dealloc_pd(port_priv->pd);
 	ib_free_cq(port_priv->cq);
+	ib_dealloc_pd(port_priv->pd);
 	cleanup_recv_queue(&port_priv->qp_info[1]);
 	cleanup_recv_queue(&port_priv->qp_info[0]);
 	/* XXX: Handle deallocation of MAD registration tables */
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index 3b7fa0c..279f0ae 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -209,7 +209,7 @@ void rdma_restrack_del(struct rdma_restrack_entry *res)
 	struct ib_device *dev;
 
 	if (!res->valid)
-		return;
+		goto out;
 
 	dev = res_to_dev(res);
 	if (!dev)
@@ -222,8 +222,12 @@ void rdma_restrack_del(struct rdma_restrack_entry *res)
 	down_write(&dev->res.rwsem);
 	hash_del(&res->node);
 	res->valid = false;
-	if (res->task)
-		put_task_struct(res->task);
 	up_write(&dev->res.rwsem);
+
+out:
+	if (res->task) {
+		put_task_struct(res->task);
+		res->task = NULL;
+	}
 }
 EXPORT_SYMBOL(rdma_restrack_del);
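
Note: moving the put_task_struct() after an unconditional label and
clearing the pointer makes rdma_restrack_del() idempotent: it is now
safe both for entries that were never added (!res->valid) and for a
repeated call on the same entry. The release-once idiom in isolation:

	if (res->task) {
		put_task_struct(res->task);
		res->task = NULL;	/* repeated calls become no-ops */
	}
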
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index c34a685..a18f3f8 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -49,6 +49,7 @@
 #include <linux/sched.h>
 #include <linux/semaphore.h>
 #include <linux/slab.h>
+#include <linux/nospec.h>
 
 #include <linux/uaccess.h>
 
@@ -868,11 +869,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
 
 	if (get_user(id, arg))
 		return -EFAULT;
+	if (id >= IB_UMAD_MAX_AGENTS)
+		return -EINVAL;
 
 	mutex_lock(&file->port->file_mutex);
 	mutex_lock(&file->mutex);
 
-	if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
+	id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
+	if (!__get_agent(file, id)) {
 		ret = -EINVAL;
 		goto out;
 	}
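
Note: the user_mad change is the standard Spectre-v1 hardening sequence:
bounds-check the untrusted index first, then clamp it with
array_index_nospec() so the CPU cannot use the raw value while
speculating past the check. The sequence in isolation; demo_* names are
illustrative:

	#include <linux/nospec.h>

	static struct demo_agent *demo_lookup(struct demo_file *f, u32 id)
	{
		if (id >= DEMO_MAX_AGENTS)	/* 1. architectural check */
			return NULL;

		id = array_index_nospec(id, DEMO_MAX_AGENTS);	/* 2. clamp */
		return f->agents[id];		/* 3. safe to index */
	}
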
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 50152c1..357de3b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -265,6 +265,9 @@ void ib_uverbs_release_file(struct kref *ref)
 	if (atomic_dec_and_test(&file->device->refcount))
 		ib_uverbs_comp_dev(file->device);
 
+	if (file->async_file)
+		kref_put(&file->async_file->ref,
+			 ib_uverbs_release_async_event_file);
 	kobject_put(&file->device->kobj);
 	kfree(file);
 }
@@ -915,10 +918,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
 	}
 	mutex_unlock(&file->device->lists_mutex);
 
-	if (file->async_file)
-		kref_put(&file->async_file->ref,
-			 ib_uverbs_release_async_event_file);
-
 	kref_put(&file->ref, ib_uverbs_release_file);
 
 	return 0;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 7b76e6f..f2fb731 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -274,13 +274,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 			   struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
 {
 	int err;
-	struct fw_ri_tpte tpt;
+	struct fw_ri_tpte *tpt;
 	u32 stag_idx;
 	static atomic_t key;
 
 	if (c4iw_fatal_error(rdev))
 		return -EIO;
 
+	tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);
+	if (!tpt)
+		return -ENOMEM;
+
 	stag_state = stag_state > 0;
 	stag_idx = (*stag) >> 8;
 
@@ -290,6 +294,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 			mutex_lock(&rdev->stats.lock);
 			rdev->stats.stag.fail++;
 			mutex_unlock(&rdev->stats.lock);
+			kfree(tpt);
 			return -ENOMEM;
 		}
 		mutex_lock(&rdev->stats.lock);
@@ -304,28 +309,28 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 
 	/* write TPT entry */
 	if (reset_tpt_entry)
-		memset(&tpt, 0, sizeof(tpt));
+		memset(tpt, 0, sizeof(*tpt));
 	else {
-		tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
+		tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
 			FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
 			FW_RI_TPTE_STAGSTATE_V(stag_state) |
 			FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
-		tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
+		tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
 			(bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
 			FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
 						      FW_RI_VA_BASED_TO))|
 			FW_RI_TPTE_PS_V(page_size));
-		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
+		tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
 			FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
-		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
-		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
-		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
-		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
-		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
+		tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
+		tpt->va_hi = cpu_to_be32((u32)(to >> 32));
+		tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
+		tpt->dca_mwbcnt_pstag = cpu_to_be32(0);
+		tpt->len_hi = cpu_to_be32((u32)(len >> 32));
 	}
 	err = write_adapter_mem(rdev, stag_idx +
 				(rdev->lldi.vr->stag.start >> 5),
-				sizeof(tpt), &tpt, skb, wr_waitp);
+				sizeof(*tpt), tpt, skb, wr_waitp);
 
 	if (reset_tpt_entry) {
 		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
@@ -333,6 +338,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 		rdev->stats.stag.cur -= 32;
 		mutex_unlock(&rdev->stats.lock);
 	}
+	kfree(tpt);
 	return err;
 }
 
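
Note: write_tpt_entry() previously built a struct fw_ri_tpte on the
stack; moving it to kmalloc() is the usual cure when a descriptor is
large for kernel stacks and the function may sleep anyway. The shape of
the transformation, sketched with a hypothetical descriptor type:

	static int demo_write_entry(struct demo_dev *dev)
	{
		struct demo_desc *desc;	/* was a large on-stack struct */
		int err;

		desc = kmalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			return -ENOMEM;

		memset(desc, 0, sizeof(*desc));
		/* ... fill *desc and hand it to the hardware ... */
		err = demo_submit(dev, desc, sizeof(*desc)); /* hypothetical */

		kfree(desc);	/* every exit path must free it now */
		return err;
	}
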
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
index 7eaff4d..5bc811b 100644
--- a/drivers/infiniband/hw/hfi1/fault.c
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -141,12 +141,14 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
 	if (!data)
 		return -ENOMEM;
 	copy = min(len, datalen - 1);
-	if (copy_from_user(data, buf, copy))
-		return -EFAULT;
+	if (copy_from_user(data, buf, copy)) {
+		ret = -EFAULT;
+		goto free_data;
+	}
 
 	ret = debugfs_file_get(file->f_path.dentry);
 	if (unlikely(ret))
-		return ret;
+		goto free_data;
 	ptr = data;
 	token = ptr;
 	for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
@@ -195,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
 	ret = len;
 
 	debugfs_file_put(file->f_path.dentry);
+free_data:
 	kfree(data);
 	return ret;
 }
@@ -214,7 +217,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
 		return -ENOMEM;
 	ret = debugfs_file_get(file->f_path.dentry);
 	if (unlikely(ret))
-		return ret;
+		goto free_data;
 	bit = find_first_bit(fault->opcodes, bitsize);
 	while (bit < bitsize) {
 		zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
@@ -232,6 +235,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
 	data[size - 1] = '\n';
 	data[size] = '\0';
 	ret = simple_read_from_buffer(buf, len, pos, data, size);
+free_data:
 	kfree(data);
 	return ret;
 }
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 0307405..f208a25 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -2326,7 +2326,7 @@ struct opa_port_status_req {
 	__be32 vl_select_mask;
 };
 
-#define VL_MASK_ALL		0x000080ff
+#define VL_MASK_ALL		0x00000000000080ffUL
 
 struct opa_port_status_rsp {
 	__u8 port_num;
@@ -2625,15 +2625,14 @@ static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
 }
 
 static void a0_portstatus(struct hfi1_pportdata *ppd,
-			  struct opa_port_status_rsp *rsp, u32 vl_select_mask)
+			  struct opa_port_status_rsp *rsp)
 {
 	if (!is_bx(ppd->dd)) {
 		unsigned long vl;
 		u64 sum_vl_xmit_wait = 0;
-		u32 vl_all_mask = VL_MASK_ALL;
+		unsigned long vl_all_mask = VL_MASK_ALL;
 
-		for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
-				 8 * sizeof(vl_all_mask)) {
+		for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
 			u64 tmp = sum_vl_xmit_wait +
 				  read_port_cntr(ppd, C_TX_WAIT_VL,
 						 idx_from_vl(vl));
@@ -2730,12 +2729,12 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
 		(struct opa_port_status_req *)pmp->data;
 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
 	struct opa_port_status_rsp *rsp;
-	u32 vl_select_mask = be32_to_cpu(req->vl_select_mask);
+	unsigned long vl_select_mask = be32_to_cpu(req->vl_select_mask);
 	unsigned long vl;
 	size_t response_data_size;
 	u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
 	u8 port_num = req->port_num;
-	u8 num_vls = hweight32(vl_select_mask);
+	u8 num_vls = hweight64(vl_select_mask);
 	struct _vls_pctrs *vlinfo;
 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
@@ -2771,7 +2770,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
 
 	hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
 
-	rsp->vl_select_mask = cpu_to_be32(vl_select_mask);
+	rsp->vl_select_mask = cpu_to_be32((u32)vl_select_mask);
 	rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
 					  CNTR_INVALID_VL));
 	rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
@@ -2842,8 +2841,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
 	 * So in the for_each_set_bit() loop below, we don't need
 	 * any additional checks for vl.
 	 */
-	for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
-			 8 * sizeof(vl_select_mask)) {
+	for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
 		memset(vlinfo, 0, sizeof(*vlinfo));
 
 		tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
@@ -2884,7 +2882,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
 		vfi++;
 	}
 
-	a0_portstatus(ppd, rsp, vl_select_mask);
+	a0_portstatus(ppd, rsp);
 
 	if (resp_len)
 		*resp_len += response_data_size;
@@ -2931,16 +2929,14 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
 	return error_counter_summary;
 }
 
-static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp,
-			    u32 vl_select_mask)
+static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp)
 {
 	if (!is_bx(ppd->dd)) {
 		unsigned long vl;
 		u64 sum_vl_xmit_wait = 0;
-		u32 vl_all_mask = VL_MASK_ALL;
+		unsigned long vl_all_mask = VL_MASK_ALL;
 
-		for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
-				 8 * sizeof(vl_all_mask)) {
+		for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
 			u64 tmp = sum_vl_xmit_wait +
 				  read_port_cntr(ppd, C_TX_WAIT_VL,
 						 idx_from_vl(vl));
@@ -2995,7 +2991,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
 	u64 port_mask;
 	u8 port_num;
 	unsigned long vl;
-	u32 vl_select_mask;
+	unsigned long vl_select_mask;
 	int vfi;
 	u16 link_width;
 	u16 link_speed;
@@ -3073,8 +3069,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
 	 * So in the for_each_set_bit() loop below, we don't need
 	 * any additional checks for vl.
 	 */
-	for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
-			 8 * sizeof(req->vl_select_mask)) {
+	for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
 		memset(vlinfo, 0, sizeof(*vlinfo));
 
 		rsp->vls[vfi].port_vl_xmit_data =
@@ -3122,7 +3117,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
 		vfi++;
 	}
 
-	a0_datacounters(ppd, rsp, vl_select_mask);
+	a0_datacounters(ppd, rsp);
 
 	if (resp_len)
 		*resp_len += response_data_size;
@@ -3217,7 +3212,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
 	struct _vls_ectrs *vlinfo;
 	unsigned long vl;
 	u64 port_mask, tmp;
-	u32 vl_select_mask;
+	unsigned long vl_select_mask;
 	int vfi;
 
 	req = (struct opa_port_error_counters64_msg *)pmp->data;
@@ -3276,8 +3271,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
 	vlinfo = &rsp->vls[0];
 	vfi = 0;
 	vl_select_mask = be32_to_cpu(req->vl_select_mask);
-	for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
-			 8 * sizeof(req->vl_select_mask)) {
+	for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
 		memset(vlinfo, 0, sizeof(*vlinfo));
 		rsp->vls[vfi].port_vl_xmit_discards =
 			cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
@@ -3488,7 +3482,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
 	u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
 	u64 portn = be64_to_cpu(req->port_select_mask[3]);
 	u32 counter_select = be32_to_cpu(req->counter_select_mask);
-	u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
+	unsigned long vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
 	unsigned long vl;
 
 	if ((nports != 1) || (portn != 1 << port)) {
@@ -3582,8 +3576,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
 	if (counter_select & CS_UNCORRECTABLE_ERRORS)
 		write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
 
-	for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
-			 8 * sizeof(vl_select_mask)) {
+	for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
 		if (counter_select & CS_PORT_XMIT_DATA)
 			write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
 
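
Note: the hfi1 hunks fix a classic for_each_set_bit() misuse. Casting a
u32's address to unsigned long * makes the iterator read 8 bytes on
64-bit targets (past the variable) and pick the wrong bytes on
big-endian machines. The iterator wants a genuine unsigned long bitmap,
as in this fragment (demo_read_vl_cntr() is hypothetical):

	unsigned long mask = mask32;	/* widen the u32 first */
	unsigned long vl;
	u64 sum = 0;

	for_each_set_bit(vl, &mask, BITS_PER_LONG)
		sum += demo_read_vl_cntr(vl);
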
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 88e326d..d648a41 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -410,10 +410,7 @@ static void sdma_flush(struct sdma_engine *sde)
 	sdma_flush_descq(sde);
 	spin_lock_irqsave(&sde->flushlist_lock, flags);
 	/* copy flush list */
-	list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
-		list_del_init(&txp->list);
-		list_add_tail(&txp->list, &flushlist);
-	}
+	list_splice_init(&sde->flushlist, &flushlist);
 	spin_unlock_irqrestore(&sde->flushlist_lock, flags);
 	/* flush from flush list */
 	list_for_each_entry_safe(txp, txp_next, &flushlist, list)
@@ -2426,7 +2423,7 @@ int sdma_send_txreq(struct sdma_engine *sde,
 		wait->tx_count++;
 		wait->count += tx->num_desc;
 	}
-	schedule_work(&sde->flush_worker);
+	queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
 	ret = -ECOMM;
 	goto unlock;
 nodesc:
@@ -2526,7 +2523,7 @@ int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
 		}
 	}
 	spin_unlock(&sde->flushlist_lock);
-	schedule_work(&sde->flush_worker);
+	queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
 	ret = -ECOMM;
 	goto update_tail;
 nodesc:
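
Note: two idioms in the sdma hunks. list_splice_init() grabs the whole
flush list in O(1) while the spinlock is held, replacing the per-entry
del/add loop, and the entries are then processed with the lock dropped.
queue_work_on(sde->cpu, system_highpri_wq, ...) keeps the flush worker
on the engine's own CPU at high priority instead of schedule_work()'s
default placement. The splice-then-process pattern in isolation
(demo_complete() is hypothetical):

	LIST_HEAD(flushlist);

	spin_lock_irqsave(&sde->flushlist_lock, flags);
	list_splice_init(&sde->flushlist, &flushlist);	/* O(1) grab */
	spin_unlock_irqrestore(&sde->flushlist_lock, flags);

	/* process outside the lock */
	list_for_each_entry_safe(txp, txp_next, &flushlist, list)
		demo_complete(txp);
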
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index e5466d7..5aaa2a6 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1668,8 +1668,6 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
 				    tx_buf_size, DMA_TO_DEVICE);
 		kfree(tun_qp->tx_ring[i].buf.addr);
 	}
-	kfree(tun_qp->tx_ring);
-	tun_qp->tx_ring = NULL;
 	i = MLX4_NUM_TUNNEL_BUFS;
 err:
 	while (i > 0) {
@@ -1678,6 +1676,8 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
 				    rx_buf_size, DMA_FROM_DEVICE);
 		kfree(tun_qp->ring[i].addr);
 	}
+	kfree(tun_qp->tx_ring);
+	tun_qp->tx_ring = NULL;
 	kfree(tun_qp->ring);
 	tun_qp->ring = NULL;
 	return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 53eccc0d..c05eae9 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -6370,6 +6370,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
 			mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
 		list_del(&mpi->list);
 		mutex_unlock(&mlx5_ib_multiport_mutex);
+		kfree(mpi);
 		return;
 	}
 
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 9bab4fb..bd1fdad 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
-{
-	return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
-}
 
 static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
 {
 	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
 }
 
-static bool use_umr(struct mlx5_ib_dev *dev, int order)
-{
-	return order <= mr_cache_max_order(dev) &&
-		umr_can_modify_entity_size(dev);
-}
-
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
 	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
@@ -1305,7 +1295,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct mlx5_ib_mr *mr = NULL;
-	bool populate_mtts = false;
+	bool use_umr;
 	struct ib_umem *umem;
 	int page_shift;
 	int npages;
@@ -1338,29 +1328,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (err < 0)
 		return ERR_PTR(err);
 
-	if (use_umr(dev, order)) {
+	use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
+		  (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
+		   !MLX5_CAP_GEN(dev->mdev, atomic));
+
+	if (order <= mr_cache_max_order(dev) && use_umr) {
 		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
 					 page_shift, order, access_flags);
 		if (PTR_ERR(mr) == -EAGAIN) {
 			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
 			mr = NULL;
 		}
-		populate_mtts = false;
 	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
 		if (access_flags & IB_ACCESS_ON_DEMAND) {
 			err = -EINVAL;
 			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
 			goto error;
 		}
-		populate_mtts = true;
+		use_umr = false;
 	}
 
 	if (!mr) {
-		if (!umr_can_modify_entity_size(dev))
-			populate_mtts = true;
 		mutex_lock(&dev->slow_path_mutex);
 		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
-				page_shift, access_flags, populate_mtts);
+				page_shift, access_flags, !use_umr);
 		mutex_unlock(&dev->slow_path_mutex);
 	}
 
@@ -1378,7 +1369,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	update_odp_mr(mr);
 #endif
 
-	if (!populate_mtts) {
+	if (use_umr) {
 		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
 
 		if (access_flags & IB_ACCESS_ON_DEMAND)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 9e1cac8..453e5c4 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -497,7 +497,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 			u64 io_virt, size_t bcnt, u32 *bytes_mapped)
 {
-	u64 access_mask = ODP_READ_ALLOWED_BIT;
+	u64 access_mask;
 	int npages = 0, page_shift, np;
 	u64 start_idx, page_mask;
 	struct ib_umem_odp *odp;
@@ -522,6 +522,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 	page_shift = mr->umem->page_shift;
 	page_mask = ~(BIT(page_shift) - 1);
 	start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
+	access_mask = ODP_READ_ALLOWED_BIT;
 
 	if (mr->umem->writable)
 		access_mask |= ODP_WRITE_ALLOWED_BIT;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 2c1114e..bc6a44a 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -3401,13 +3401,17 @@ static const match_table_t srp_opt_tokens = {
 
 /**
  * srp_parse_in - parse an IP address and port number combination
+ * @net:	   [in]  Network namespace.
+ * @sa:		   [out] Address family, IP address and port number.
+ * @addr_port_str: [in]  IP address and port number.
+ * @has_port:	   [out] Whether or not @addr_port_str includes a port number.
  *
  * Parse the following address formats:
  * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
  * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
  */
 static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
-			const char *addr_port_str)
+			const char *addr_port_str, bool *has_port)
 {
 	char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
 	char *port_str;
@@ -3416,9 +3420,12 @@ static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
 	if (!addr)
 		return -ENOMEM;
 	port_str = strrchr(addr, ':');
-	if (!port_str)
-		return -EINVAL;
-	*port_str++ = '\0';
+	if (port_str && strchr(port_str, ']'))
+		port_str = NULL;
+	if (port_str)
+		*port_str++ = '\0';
+	if (has_port)
+		*has_port = port_str != NULL;
 	ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
 	if (ret && addr[0]) {
 		addr_end = addr + strlen(addr) - 1;
@@ -3440,6 +3447,7 @@ static int srp_parse_options(struct net *net, const char *buf,
 	char *p;
 	substring_t args[MAX_OPT_ARGS];
 	unsigned long long ull;
+	bool has_port;
 	int opt_mask = 0;
 	int token;
 	int ret = -EINVAL;
@@ -3538,7 +3546,8 @@ static int srp_parse_options(struct net *net, const char *buf,
 				ret = -ENOMEM;
 				goto out;
 			}
-			ret = srp_parse_in(net, &target->rdma_cm.src.ss, p);
+			ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
+					   NULL);
 			if (ret < 0) {
 				pr_warn("bad source parameter '%s'\n", p);
 				kfree(p);
@@ -3554,7 +3563,10 @@ static int srp_parse_options(struct net *net, const char *buf,
 				ret = -ENOMEM;
 				goto out;
 			}
-			ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p);
+			ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
+					   &has_port);
+			if (!has_port)
+				ret = -EINVAL;
 			if (ret < 0) {
 				pr_warn("bad dest parameter '%s'\n", p);
 				kfree(p);
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index 7807325..c431df7 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -141,7 +141,12 @@ static int iforce_usb_probe(struct usb_interface *intf,
 		return -ENODEV;
 
 	epirq = &interface->endpoint[0].desc;
+	if (!usb_endpoint_is_int_in(epirq))
+		return -ENODEV;
+
 	epout = &interface->endpoint[1].desc;
+	if (!usb_endpoint_is_int_out(epout))
+		return -ENODEV;
 
 	if (!(iforce = kzalloc(sizeof(struct iforce) + 32, GFP_KERNEL)))
 		goto fail;
diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c
index 3e9c353..a01b25f 100644
--- a/drivers/input/misc/da9063_onkey.c
+++ b/drivers/input/misc/da9063_onkey.c
@@ -248,10 +248,7 @@ static int da9063_onkey_probe(struct platform_device *pdev)
 	onkey->input->phys = onkey->phys;
 	onkey->input->dev.parent = &pdev->dev;
 
-	if (onkey->key_power)
-		input_set_capability(onkey->input, EV_KEY, KEY_POWER);
-
-	input_set_capability(onkey->input, EV_KEY, KEY_SLEEP);
+	input_set_capability(onkey->input, EV_KEY, KEY_POWER);
 
 	INIT_DELAYED_WORK(&onkey->work, da9063_poll_on);
 
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index f9525d6..ae01263 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1358,7 +1358,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
 	{ "ELAN0618", 0 },
 	{ "ELAN0619", 0 },
 	{ "ELAN061A", 0 },
-	{ "ELAN061B", 0 },
+/*	{ "ELAN061B", 0 }, not working on the Lenovo Legion Y7000 */
 	{ "ELAN061C", 0 },
 	{ "ELAN061D", 0 },
 	{ "ELAN061E", 0 },
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index eb9b9de..530142b 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1810,30 +1810,6 @@ static int elantech_create_smbus(struct psmouse *psmouse,
 				  leave_breadcrumbs);
 }
 
-static bool elantech_use_host_notify(struct psmouse *psmouse,
-				     struct elantech_device_info *info)
-{
-	if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
-		return true;
-
-	switch (info->bus) {
-	case ETP_BUS_PS2_ONLY:
-		/* expected case */
-		break;
-	case ETP_BUS_SMB_HST_NTFY_ONLY:
-	case ETP_BUS_PS2_SMB_HST_NTFY:
-		/* SMbus implementation is stable since 2018 */
-		if (dmi_get_bios_year() >= 2018)
-			return true;
-	default:
-		psmouse_dbg(psmouse,
-			    "Ignoring SMBus bus provider %d\n", info->bus);
-		break;
-	}
-
-	return false;
-}
-
 /**
  * elantech_setup_smbus - called once the PS/2 devices are enumerated
  * and decides to instantiate a SMBus InterTouch device.
@@ -1853,7 +1829,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
 		 * i2c_blacklist_pnp_ids.
 		 * Old ICs are up to the user to decide.
 		 */
-		if (!elantech_use_host_notify(psmouse, info) ||
+		if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
 		    psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
 			return -ENXIO;
 	}
@@ -1873,6 +1849,34 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
 	return 0;
 }
 
+static bool elantech_use_host_notify(struct psmouse *psmouse,
+				     struct elantech_device_info *info)
+{
+	if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
+		return true;
+
+	switch (info->bus) {
+	case ETP_BUS_PS2_ONLY:
+		/* expected case */
+		break;
+	case ETP_BUS_SMB_ALERT_ONLY:
+		/* fall through */
+	case ETP_BUS_PS2_SMB_ALERT:
+		psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
+		break;
+	case ETP_BUS_SMB_HST_NTFY_ONLY:
+		/* fall through */
+	case ETP_BUS_PS2_SMB_HST_NTFY:
+		return true;
+	default:
+		psmouse_dbg(psmouse,
+			    "Ignoring SMBus bus provider %d.\n",
+			    info->bus);
+	}
+
+	return false;
+}
+
 int elantech_init_smbus(struct psmouse *psmouse)
 {
 	struct elantech_device_info info;
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 10a0391..538986e 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -161,7 +161,8 @@ struct trackpoint_data {
 #ifdef CONFIG_MOUSE_PS2_TRACKPOINT
 int trackpoint_detect(struct psmouse *psmouse, bool set_properties);
 #else
-inline int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
+static inline int trackpoint_detect(struct psmouse *psmouse,
+				    bool set_properties)
 {
 	return -ENOSYS;
 }
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 7fb358f..162526a 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -149,7 +149,7 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
 	}
 
 	mutex_lock(&data->irq_mutex);
-	bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
+	bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
 	       data->irq_count);
 	/*
 	 * At this point, irq_status has all bits that are set in the
@@ -388,6 +388,8 @@ static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
 	bitmap_copy(data->current_irq_mask, data->new_irq_mask,
 		    data->num_of_irq_regs);
 
+	bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count);
+
 error_unlock:
 	mutex_unlock(&data->irq_mutex);
 	return error;
@@ -401,6 +403,8 @@ static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
 	struct device *dev = &rmi_dev->dev;
 
 	mutex_lock(&data->irq_mutex);
+	bitmap_andnot(data->fn_irq_bits,
+		      data->fn_irq_bits, mask, data->irq_count);
 	bitmap_andnot(data->new_irq_mask,
 		  data->current_irq_mask, mask, data->irq_count);
 
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index a8b9be3..7d0a5cc 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -245,40 +245,17 @@ static void hv_kbd_handle_received_packet(struct hv_device *hv_dev,
 
 static void hv_kbd_on_channel_callback(void *context)
 {
+	struct vmpacket_descriptor *desc;
 	struct hv_device *hv_dev = context;
-	void *buffer;
-	int bufferlen = 0x100; /* Start with sensible size */
 	u32 bytes_recvd;
 	u64 req_id;
-	int error;
 
-	buffer = kmalloc(bufferlen, GFP_ATOMIC);
-	if (!buffer)
-		return;
+	foreach_vmbus_pkt(desc, hv_dev->channel) {
+		bytes_recvd = desc->len8 * 8;
+		req_id = desc->trans_id;
 
-	while (1) {
-		error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen,
-					     &bytes_recvd, &req_id);
-		switch (error) {
-		case 0:
-			if (bytes_recvd == 0) {
-				kfree(buffer);
-				return;
-			}
-
-			hv_kbd_handle_received_packet(hv_dev, buffer,
-						      bytes_recvd, req_id);
-			break;
-
-		case -ENOBUFS:
-			kfree(buffer);
-			/* Handle large packet */
-			bufferlen = bytes_recvd;
-			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
-			if (!buffer)
-				return;
-			break;
-		}
+		hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd,
+					      req_id);
 	}
 }
 
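
Note: the keyboard callback drops the grow-on-ENOBUFS
vmbus_recvpacket_raw() loop in favor of foreach_vmbus_pkt(), which walks
the packet descriptors in place on the VMBus ring buffer: no bounce
buffer and no allocation in the hot path. The resulting handler shape,
condensed from the hunk above:

	foreach_vmbus_pkt(desc, hv_dev->channel) {
		u32 bytes_recvd = desc->len8 * 8; /* len8 is in 8-byte units */
		u64 req_id = desc->trans_id;

		hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd,
					      req_id);
	}
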
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 75b5006..b1cf0c9 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -116,6 +116,10 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
 	if (intf->cur_altsetting->desc.bNumEndpoints < 1)
 		return -ENODEV;
 
+	endpoint = &intf->cur_altsetting->endpoint[0].desc;
+	if (!usb_endpoint_is_int_in(endpoint))
+		return -ENODEV;
+
 	kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
 	input_dev = input_allocate_device();
 	if (!kbtab || !input_dev)
@@ -154,8 +158,6 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
 	input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0);
 	input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0);
 
-	endpoint = &intf->cur_altsetting->endpoint[0].desc;
-
 	usb_fill_int_urb(kbtab->irq, dev,
 			 usb_rcvintpipe(dev, endpoint->bEndpointAddress),
 			 kbtab->data, 8,
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 5299eea..11f7366 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -856,6 +856,17 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called zylonite-wm97xx.
 
+config SECURE_TOUCH_SYNAPTICS_DSX
+	bool "Secure Touch"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_I2C
+	help
+	  Say Y here to enable Secure Touch support in the Synaptics
+	  DSX I2C touchscreen driver. While a secure touch session is
+	  active, ownership of the touch controller's I2C bus is
+	  handed to the TrustZone secure environment.
+
+	  If unsure, say N.
+
 config TOUCHSCREEN_USB_COMPOSITE
 	tristate "USB Touchscreen Driver"
 	depends on USB_ARCH_HAS_HCD
diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c
index be3f1ae..ea87d1f 100644
--- a/drivers/input/touchscreen/st/fts.c
+++ b/drivers/input/touchscreen/st/fts.c
@@ -59,6 +59,11 @@
 
 #define LINK_KOBJ_NAME "tp"
 
+#define FTS_DVDD_VOL_MIN 1800000
+#define FTS_DVDD_VOL_MAX 1800000
+#define FTS_AVDD_VOL_MIN 3000000
+#define FTS_AVDD_VOL_MAX 3300000
+
 /*
  * Uncomment to use polling mode instead of interrupt mode.
  *
@@ -205,7 +210,7 @@ static ssize_t fts_fwupdate_store(struct device *dev,
 	}
 
 	fwD.data = NULL;
-	ret = getFWdata(PATH_FILE_FW, &orig_data, &orig_size, 0);
+	ret = getFWdata_nocheck(PATH_FILE_FW, &orig_data, &orig_size, 0);
 	if (ret < OK) {
 		logError(1, "%s %s: impossible retrieve FW... ERROR %08X\n",
 			tag, __func__, ERROR_MEMH_READ);
@@ -4223,7 +4228,7 @@ static int fts_fb_state_chg_callback(struct notifier_block *nb,
 
 		switch (blank) {
 		case DRM_PANEL_BLANK_POWERDOWN:
-			if (info->sensor_sleep)
+			if (info->sensor_sleep && info->aoi_notify_enabled)
 				break;
 
 			if (info->aoi_notify_enabled)
@@ -4231,13 +4236,18 @@ static int fts_fb_state_chg_callback(struct notifier_block *nb,
 			else
 				info->aoi_wake_on_suspend = false;
 
-			if (info->aoi_wake_on_suspend)
+			if (info->aoi_wake_on_suspend) {
 				info->sensor_sleep = true;
-			else
+				__pm_stay_awake(&info->wakeup_source);
+			} else {
 				queue_work(info->event_wq, &info->suspend_work);
+			}
 			break;
 
 		case DRM_PANEL_BLANK_UNBLANK:
+			if (info->aoi_wake_on_suspend)
+				__pm_relax(&info->wakeup_source);
+
 			if (!info->sensor_sleep)
 				break;
 
@@ -4326,6 +4336,21 @@ static int fts_get_reg(struct fts_ts_info *info, bool get)
 			retval = PTR_ERR(info->pwr_reg);
 			goto regulator_put;
 		}
+
+		retval = regulator_set_load(info->pwr_reg, 62000);
+		if (retval < 0) {
+			logError(1, "%s %s: Failed to set power load\n",
+				tag, __func__);
+			goto regulator_put;
+		}
+
+		retval = regulator_set_voltage(info->pwr_reg,
+				FTS_DVDD_VOL_MIN, FTS_DVDD_VOL_MAX);
+		if (retval < 0) {
+			logError(1, "%s %s: Failed to set power voltage\n",
+				tag, __func__);
+			goto regulator_put;
+		}
 	}
 
 	if ((bdata->bus_reg_name != NULL) && (*bdata->bus_reg_name != 0)) {
@@ -4338,6 +4363,22 @@ static int fts_get_reg(struct fts_ts_info *info, bool get)
 			retval = PTR_ERR(info->bus_reg);
 			goto regulator_put;
 		}
+
+		retval = regulator_set_load(info->bus_reg, 20000);
+		if (retval < 0) {
+			logError(1, "%s %s: Failed to set power load\n",
+				tag, __func__);
+			goto regulator_put;
+		}
+
+		retval = regulator_set_voltage(info->bus_reg,
+				FTS_AVDD_VOL_MIN, FTS_AVDD_VOL_MAX);
+
+		if (retval < 0) {
+			logError(1, "%s %s: Failed to set power voltage\n",
+				tag, __func__);
+			goto regulator_put;
+		}
 	}
 
 	return 0;
@@ -4968,6 +5009,7 @@ static int fts_probe(struct i2c_client *client, const struct i2c_device_id *idp)
 		return error;
 	}
 
+	device_init_wakeup(&client->dev, true);
 	return fts_probe_internal(client, idp);
 }
 
@@ -5036,6 +5078,7 @@ static int fts_remove(struct i2c_client *client)
 	kfree(info->i2c_data);
 	kfree(info);
 
+	device_init_wakeup(&client->dev, false);
 	return OK;
 }
 
diff --git a/drivers/input/touchscreen/st/fts_aoi_event.c b/drivers/input/touchscreen/st/fts_aoi_event.c
index df5faaa..36dd2bcb 100644
--- a/drivers/input/touchscreen/st/fts_aoi_event.c
+++ b/drivers/input/touchscreen/st/fts_aoi_event.c
@@ -53,17 +53,15 @@ ssize_t aoi_set_store(struct device *dev, struct device_attribute *attr,
 	if (bottom > Y_AXIS_MAX)
 		bottom = Y_AXIS_MAX;
 
-	if (left < 0 || left > X_AXIS_MAX || right < 0) {
-		info->aoi_notify_enabled = false;
+	if (left < 0 || left > X_AXIS_MAX || right < 0 ||
+		top < 0 || top > Y_AXIS_MAX || bottom < 0)
 		return -EINVAL;
-	}
-
-	if (top < 0 || top > Y_AXIS_MAX || bottom < 0) {
-		info->aoi_notify_enabled = false;
-		return -EINVAL;
-	}
 
 	if (left >= right || top >= bottom) {
+		info->aoi_left = 0;
+		info->aoi_top = 0;
+		info->aoi_right = 0;
+		info->aoi_bottom = 0;
 		info->aoi_notify_enabled = false;
 		return count;
 	}
diff --git a/drivers/input/touchscreen/st/fts_lib/ftsFlash.c b/drivers/input/touchscreen/st/fts_lib/ftsFlash.c
index 3472532..9fc3926 100644
--- a/drivers/input/touchscreen/st/fts_lib/ftsFlash.c
+++ b/drivers/input/touchscreen/st/fts_lib/ftsFlash.c
@@ -119,6 +119,38 @@ int getFirmwareVersion(u16 *fw_vers, u16 *config_id)
 	return OK;
 }
 
+int getFWdata_nocheck(const char *pathToFile, u8 **data, int *size, int from)
+{
+	const struct firmware *fw = NULL;
+	struct device *dev = getDev();
+	int res;
+
+	if (dev == NULL)
+		return ERROR_OP_NOT_ALLOW;
+
+	logError(0, "%s Read FW from BIN file!\n", tag);
+
+	res = firmware_request_nowarn(&fw, pathToFile, dev);
+	if (res) {
+		logError(1, "%s %s: No file found! ERROR %08X\n",
+			tag, __func__, ERROR_FILE_NOT_FOUND);
+		return ERROR_FILE_NOT_FOUND;
+	}
+
+	*size = fw->size;
+	*data = (u8 *)kmalloc_array((*size), sizeof(u8), GFP_KERNEL);
+	if (*data == NULL) {
+		logError(1, "%s %s: Impossible to allocate! ERROR %08X\n", tag, __func__, ERROR_ALLOC);
+		release_firmware(fw);
+		return ERROR_ALLOC;
+	}
+	memcpy(*data, (u8 *)fw->data, (*size));
+	release_firmware(fw);
+
+	logError(0, "%s %s: Finished!\n", tag, __func__);
+	return OK;
+}
+
 int getFWdata(const char *pathToFile, u8 **data, int *size, int from)
 {
 	const struct firmware *fw = NULL;
@@ -145,7 +177,7 @@ int getFWdata(const char *pathToFile, u8 **data, int *size, int from)
 	default:
 		logError(0, "%s Read FW from BIN file!\n", tag);
 
-		if (ftsInfo.u16_fwVer == FTS_LATEST_VERSION)
+		if (ftsInfo.u16_fwVer >= FTS_LATEST_VERSION)
 			return ERROR_FW_NO_UPDATE;
 
 		dev = getDev();
diff --git a/drivers/input/touchscreen/st/fts_lib/ftsFlash.h b/drivers/input/touchscreen/st/fts_lib/ftsFlash.h
index e712fe5..844c5da 100644
--- a/drivers/input/touchscreen/st/fts_lib/ftsFlash.h
+++ b/drivers/input/touchscreen/st/fts_lib/ftsFlash.h
@@ -100,6 +100,7 @@ int flash_unlock(void);
 int fillMemory(u32 address, u8 *data, int size);
 int getFirmwareVersion(u16 *fw_vers, u16 *config_id);
 int getFWdata(const char *pathToFile, u8 **data, int *size, int from);
+int getFWdata_nocheck(const char *pathToFile, u8 **data, int *size, int from);
 int parseBinFile(u8 *fw_data, int fw_size, struct Firmware *fw, int keep_cx);
 int readFwFile(const char *path, struct Firmware *fw, int keep_cx);
 int flash_burn(struct Firmware *fw, int force_burn, int keep_cx);
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
old mode 100755
new mode 100644
index e4ff2ea..2206dc0
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
@@ -45,6 +45,16 @@
 #ifdef KERNEL_ABOVE_2_6_38
 #include <linux/input/mt.h>
 #endif
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX)
+#include <linux/errno.h>
+#include <soc/qcom/scm.h>
+enum subsystem {
+	TZ = 1,
+	APSS = 3
+};
+
+#define TZ_BLSP_MODIFY_OWNERSHIP_ID 3
+#endif
 
 #include <linux/completion.h>
 
@@ -182,17 +192,18 @@ static ssize_t synaptics_rmi4_synad_pid_store(struct device *dev,
 static ssize_t synaptics_rmi4_virtual_key_map_show(struct kobject *kobj,
 		struct kobj_attribute *attr, char *buf);
 
-struct synaptics_rmi4_f01_device_status {
-	union {
-		struct {
-			unsigned char status_code:4;
-			unsigned char reserved:2;
-			unsigned char flash_prog:1;
-			unsigned char unconfigured:1;
-		} __packed;
-		unsigned char data[1];
-	};
-};
+static irqreturn_t synaptics_rmi4_irq(int irq, void *data);
+
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX)
+static ssize_t synaptics_rmi4_secure_touch_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_secure_touch_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_secure_touch_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+#endif
 
 struct synaptics_rmi4_f11_query_0_5 {
 	union {
@@ -705,6 +716,14 @@ static struct device_attribute attrs[] = {
 	__ATTR(wake_gesture, 0664,
 			synaptics_rmi4_wake_gesture_show,
 			synaptics_rmi4_wake_gesture_store),
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX)
+	__ATTR(secure_touch_enable, 0664,
+			synaptics_rmi4_secure_touch_enable_show,
+			synaptics_rmi4_secure_touch_enable_store),
+	__ATTR(secure_touch, 0444,
+			synaptics_rmi4_secure_touch_show,
+			NULL),
+#endif
 #ifdef USE_DATA_SERVER
 	__ATTR(synad_pid, 0220,
 			synaptics_rmi4_show_error,
@@ -720,6 +739,224 @@ static struct kobj_attribute virtual_key_map_attr = {
 	.show = synaptics_rmi4_virtual_key_map_show,
 };
 
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX)
+static int synaptics_rmi4_i2c_change_pipe_owner(
+		struct synaptics_rmi4_data *rmi4_data, enum subsystem subsystem)
+{
+	struct scm_desc desc;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	int ret = 0;
+
+	desc.arginfo = SCM_ARGS(2);
+	desc.args[0] = i2c->adapter->nr;
+	desc.args[1] = subsystem;
+
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_TZ, TZ_BLSP_MODIFY_OWNERSHIP_ID),
+			&desc);
+	if (ret) {
+		dev_err(rmi4_data->pdev->dev.parent, "%s: failed\n",
+				__func__);
+		return ret;
+	}
+
+	return desc.ret[0];
+}
+
+static void synaptics_rmi4_secure_touch_init(struct synaptics_rmi4_data *data)
+{
+	data->st_initialized = false;
+	init_completion(&data->st_powerdown);
+	init_completion(&data->st_irq_processed);
+
+	/* Get clocks */
+	data->core_clk = devm_clk_get(data->pdev->dev.parent, "core_clk");
+	if (IS_ERR(data->core_clk)) {
+		dev_dbg(data->pdev->dev.parent,
+				"%s: error on clk_get(core_clk): %ld\n",
+				__func__, PTR_ERR(data->core_clk));
+		data->core_clk = NULL;
+	}
+
+	data->iface_clk = devm_clk_get(data->pdev->dev.parent, "iface_clk");
+	if (IS_ERR(data->iface_clk)) {
+		dev_dbg(data->pdev->dev.parent,
+				"%s: error on clk_get(iface_clk): %ld\n",
+				__func__, PTR_ERR(data->iface_clk));
+		data->iface_clk = NULL;
+	}
+	data->st_initialized = true;
+}
+
+static void synaptics_rmi4_secure_touch_notify(
+		struct synaptics_rmi4_data *rmi4_data)
+{
+	sysfs_notify(&rmi4_data->input_dev->dev.kobj, NULL, "secure_touch");
+}
+
+static irqreturn_t synaptics_rmi4_filter_interrupt(
+		struct synaptics_rmi4_data *rmi4_data)
+{
+	if (atomic_read(&rmi4_data->st_enabled)) {
+		if (atomic_cmpxchg(&rmi4_data->st_pending_irqs, 0, 1) == 0) {
+			reinit_completion(&rmi4_data->st_irq_processed);
+			synaptics_rmi4_secure_touch_notify(rmi4_data);
+			wait_for_completion_interruptible(
+					&rmi4_data->st_irq_processed);
+		}
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
+}
+
+/*
+ * The 'blocking' parameter is set to 'true' when the driver must be kept
+ * from accessing the xPU/SMMU-protected HW resources while the session is
+ * active.
+ */
+static void synaptics_rmi4_secure_touch_stop(
+		struct synaptics_rmi4_data *rmi4_data, bool blocking)
+{
+	if (atomic_read(&rmi4_data->st_enabled)) {
+		atomic_set(&rmi4_data->st_pending_irqs, -1);
+		synaptics_rmi4_secure_touch_notify(rmi4_data);
+		if (blocking)
+			wait_for_completion_interruptible(
+					&rmi4_data->st_powerdown);
+	}
+}
+
+#else
+static void synaptics_rmi4_secure_touch_init(
+		struct synaptics_rmi4_data *rmi4_data)
+{
+}
+
+static irqreturn_t synaptics_rmi4_filter_interrupt(
+		struct synaptics_rmi4_data *rmi4_data)
+{
+	return IRQ_NONE;
+}
+
+static void synaptics_rmi4_secure_touch_stop(
+		struct synaptics_rmi4_data *rmi4_data, bool blocking)
+{
+}
+#endif
+
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX)
+static ssize_t synaptics_rmi4_secure_touch_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d",
+			atomic_read(&rmi4_data->st_enabled));
+}
+
+/*
+ * Accept only the values "0" and "1".
+ * "0" resets the st_enabled flag, then wakes up the reading process and
+ * the interrupt handler.
+ * The bus driver is notified via pm_runtime that it is no longer required
+ * to stay awake.
+ * It also makes sure the controller's queue of events is emptied, in case
+ * a touch happened between secure touch being disabled and the local ISR
+ * being ungated.
+ * "1" sets the st_enabled flag and clears the st_pending_irqs flag.
+ * The bus driver is requested via pm_runtime to stay awake.
+ */
+static ssize_t synaptics_rmi4_secure_touch_enable_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+	unsigned long value;
+	int err = 0;
+
+	if (count > 2)
+		return -EINVAL;
+
+	if (!rmi4_data->st_initialized)
+		return -EIO;
+
+	err = kstrtoul(buf, 10, &value);
+	if (err)
+		return err;
+
+	err = count;
+
+	switch (value) {
+	case 0:
+		if (atomic_read(&rmi4_data->st_enabled) == 0)
+			break;
+		synaptics_rmi4_i2c_change_pipe_owner(rmi4_data, APSS);
+		synaptics_rmi4_bus_put(rmi4_data);
+		atomic_set(&rmi4_data->st_enabled, 0);
+		synaptics_rmi4_secure_touch_notify(rmi4_data);
+		complete(&rmi4_data->st_irq_processed);
+		synaptics_rmi4_irq(rmi4_data->irq, rmi4_data);
+		complete(&rmi4_data->st_powerdown);
+
+		break;
+	case 1:
+		if (atomic_read(&rmi4_data->st_enabled)) {
+			err = -EBUSY;
+			break;
+		}
+		synchronize_irq(rmi4_data->irq);
+
+		if (synaptics_rmi4_bus_get(rmi4_data) < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"synaptics_rmi4_bus_get failed\n");
+			err = -EIO;
+			break;
+		}
+		synaptics_rmi4_i2c_change_pipe_owner(rmi4_data, TZ);
+		reinit_completion(&rmi4_data->st_powerdown);
+		reinit_completion(&rmi4_data->st_irq_processed);
+		atomic_set(&rmi4_data->st_enabled, 1);
+		atomic_set(&rmi4_data->st_pending_irqs, 0);
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"unsupported value: %lu\n", value);
+		err = -EINVAL;
+		break;
+	}
+	return err;
+}
+
+/*
+ * This function returns whether there are pending interrupts, or
+ * other error conditions that need to be signaled to the userspace library,
+ * according to the following logic:
+ * - st_enabled is 0 if secure touch is not enabled, returning -EBADF
+ * - st_pending_irqs is -1 to signal that secure touch is being stopped,
+ *   returning -EINVAL
+ * - st_pending_irqs is 1 to signal that there is a pending irq, returning
+ *   the value "1" to the sysfs read operation
+ * - st_pending_irqs is 0 (the only remaining case) if the pending interrupt
+ *   has been processed, so the interrupt handler can be allowed to continue.
+ */
+static ssize_t synaptics_rmi4_secure_touch_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+	int val = 0;
+
+	if (atomic_read(&rmi4_data->st_enabled) == 0)
+		return -EBADF;
+	if (atomic_cmpxchg(&rmi4_data->st_pending_irqs, -1, 0) == -1)
+		return -EINVAL;
+	if (atomic_cmpxchg(&rmi4_data->st_pending_irqs, 1, 0) == 1)
+		val = 1;
+	else
+		complete(&rmi4_data->st_irq_processed);
+
+	return scnprintf(buf, PAGE_SIZE, "%u", val);
+}
+#endif
+
 static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
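
The two comments above define the whole userspace contract: enable the
session via secure_touch_enable, then block on secure_touch for filtered
events. A minimal userspace sketch of that handshake follows; the helper
names and the sysfs directory argument are illustrative assumptions, not
part of this patch:

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	static int st_write(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, val, 1) != 1) {
			close(fd);
			return -1;
		}
		return close(fd);
	}

	int run_secure_session(const char *sysfs_dir)
	{
		char en[256], st[256], buf[8];
		struct pollfd pfd;
		int fd;

		snprintf(en, sizeof(en), "%s/secure_touch_enable", sysfs_dir);
		snprintf(st, sizeof(st), "%s/secure_touch", sysfs_dir);

		if (st_write(en, "1"))		/* start: st_enabled = 1 */
			return -1;

		fd = open(st, O_RDONLY);
		if (fd < 0)
			goto out;
		pfd.fd = fd;
		pfd.events = POLLPRI | POLLERR;
		for (;;) {
			/* sysfs_notify() in the IRQ filter wakes this poll */
			if (poll(&pfd, 1, -1) < 0)
				break;
			lseek(fd, 0, SEEK_SET);
			if (read(fd, buf, sizeof(buf)) < 0)
				break;	/* -EBADF/-EINVAL: session over */
			/* buf[0] == '1': hand the event to the TZ listener */
		}
		close(fd);
	out:
		st_write(en, "0");	/* stop: wakes the blocked ISR */
		return 0;
	}

Writing "0" on teardown is what releases an ISR parked in
wait_for_completion_interruptible() and completes st_powerdown for a
blocking stop.
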
@@ -1052,7 +1289,7 @@ static int synaptics_rmi4_f11_abs_report(struct synaptics_rmi4_data *rmi4_data,
 	unsigned char num_of_finger_status_regs;
 	unsigned char finger_shift;
 	unsigned char finger_status;
-	unsigned char finger_status_reg[3];
+	unsigned char *finger_status_reg = NULL;
 	unsigned char detected_gestures;
 	unsigned short data_addr;
 	unsigned short data_offset;
@@ -1061,7 +1298,7 @@ static int synaptics_rmi4_f11_abs_report(struct synaptics_rmi4_data *rmi4_data,
 	int wx;
 	int wy;
 	int temp;
-	struct synaptics_rmi4_f11_data_1_5 data;
+	struct synaptics_rmi4_f11_data_1_5 *data = NULL;
 	struct synaptics_rmi4_f11_extra_data *extra_data;
 
 	/*
@@ -1094,13 +1331,24 @@ static int synaptics_rmi4_f11_abs_report(struct synaptics_rmi4_data *rmi4_data,
 /*		synaptics_rmi4_wakeup_gesture(rmi4_data, false); */
 		return 0;
 	}
+	finger_status_reg = kcalloc(3, sizeof(char), GFP_KERNEL);
+	if (!finger_status_reg) {
+		retval = -ENOMEM;
+		goto exit_free;
+	}
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		retval = -ENOMEM;
+		goto exit_free;
+	}
 
 	retval = synaptics_rmi4_reg_read(rmi4_data,
 			data_addr,
 			finger_status_reg,
 			num_of_finger_status_regs);
 	if (retval < 0)
-		return 0;
+		goto exit_free;
 
 	mutex_lock(&(rmi4_data->rmi4_report_mutex));
 
@@ -1126,20 +1374,20 @@ static int synaptics_rmi4_f11_abs_report(struct synaptics_rmi4_data *rmi4_data,
 		if (finger_status) {
 			data_offset = data_addr +
 					num_of_finger_status_regs +
-					(finger * sizeof(data.data));
+					(finger * sizeof(data->data));
 			retval = synaptics_rmi4_reg_read(rmi4_data,
 					data_offset,
-					data.data,
-					sizeof(data.data));
+					data->data,
+					sizeof(data->data));
 			if (retval < 0) {
 				touch_count = 0;
 				goto exit;
 			}
 
-			x = (data.x_position_11_4 << 4) | data.x_position_3_0;
-			y = (data.y_position_11_4 << 4) | data.y_position_3_0;
-			wx = data.wx;
-			wy = data.wy;
+			x = (data->x_position_11_4 << 4) | data->x_position_3_0;
+			y = (data->y_position_11_4 << 4) | data->y_position_3_0;
+			wx = data->wx;
+			wy = data->wy;
 
 			if (rmi4_data->hw_if->board_data->swap_axes) {
 				temp = x;
@@ -1196,6 +1444,9 @@ static int synaptics_rmi4_f11_abs_report(struct synaptics_rmi4_data *rmi4_data,
 	input_sync(rmi4_data->input_dev);
 
 exit:
 	mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+exit_free:
+	kfree(finger_status_reg);
+	kfree(data);
 
 	return touch_count;
@@ -1673,10 +1923,9 @@ static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data,
 		bool report)
 {
 	int retval;
-	unsigned char data[MAX_INTR_REGISTERS + 1];
-	unsigned char *intr = &data[1];
+	unsigned char *data = NULL;
+	unsigned char *intr;
 	bool was_in_bl_mode;
-	struct synaptics_rmi4_f01_device_status status;
 	struct synaptics_rmi4_fn *fhandler;
 	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
 	struct synaptics_rmi4_device_info *rmi;
@@ -1687,6 +1936,14 @@ static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data,
 	 * Get interrupt status information from F01 Data1 register to
 	 * determine the source(s) that are flagging the interrupt.
 	 */
+	data = kcalloc((MAX_INTR_REGISTERS + 1), sizeof(char), GFP_KERNEL);
+	if (!data) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	intr = &data[1];
+
 	retval = synaptics_rmi4_reg_read(rmi4_data,
 			rmi4_data->f01_data_base_addr,
 			data,
@@ -1695,31 +1952,31 @@ static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data,
 		dev_err(rmi4_data->pdev->dev.parent,
 				"%s: Failed to read interrupt status\n",
 				__func__);
-		return retval;
+		goto exit;
 	}
 
-	status.data[0] = data[0];
-	if (status.status_code == STATUS_CRC_IN_PROGRESS) {
+	rmi4_data->status.data[0] = data[0];
+	if (rmi4_data->status.status_code == STATUS_CRC_IN_PROGRESS) {
 		retval = synaptics_rmi4_check_status(rmi4_data,
 				&was_in_bl_mode);
 		if (retval < 0) {
 			dev_err(rmi4_data->pdev->dev.parent,
 					"%s: Failed to check status\n",
 					__func__);
-			return retval;
+			goto exit;
 		}
 		retval = synaptics_rmi4_reg_read(rmi4_data,
 				rmi4_data->f01_data_base_addr,
-				status.data,
-				sizeof(status.data));
+				rmi4_data->status.data,
+				sizeof(rmi4_data->status.data));
 		if (retval < 0) {
 			dev_err(rmi4_data->pdev->dev.parent,
 					"%s: Failed to read device status\n",
 					__func__);
-			return retval;
+			goto exit;
 		}
 	}
-	if (status.unconfigured && !status.flash_prog) {
+	if (rmi4_data->status.unconfigured && !rmi4_data->status.flash_prog) {
 		pr_notice("%s: spontaneous reset detected\n", __func__);
 		retval = synaptics_rmi4_reinit_device(rmi4_data);
 		if (retval < 0) {
@@ -1730,7 +1987,7 @@ static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data,
 	}
 
 	if (!report)
-		return retval;
+		goto exit;
 
 	/*
 	 * Traverse the function handler list and service the source(s)
@@ -1758,7 +2015,8 @@ static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data,
 		}
 	}
 	mutex_unlock(&exp_data.mutex);
-
+exit:
+	kfree(data);
 	return retval;
 }
 
@@ -1768,6 +2026,9 @@ static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
 	const struct synaptics_dsx_board_data *bdata =
 			rmi4_data->hw_if->board_data;
 
+	if (synaptics_rmi4_filter_interrupt(rmi4_data) == IRQ_HANDLED)
+		return IRQ_HANDLED;
+
 	if (gpio_get_value(bdata->irq_gpio) != bdata->irq_on_state)
 		goto exit;
 
@@ -1816,12 +2077,18 @@ static int synaptics_rmi4_irq_enable(struct synaptics_rmi4_data *rmi4_data,
 		bool enable, bool attn_only)
 {
 	int retval = 0;
-	unsigned char data[MAX_INTR_REGISTERS];
+	unsigned char *data = NULL;
 	const struct synaptics_dsx_board_data *bdata =
 			rmi4_data->hw_if->board_data;
 
 	mutex_lock(&(rmi4_data->rmi4_irq_enable_mutex));
 
+	data = kcalloc(MAX_INTR_REGISTERS, sizeof(char), GFP_KERNEL);
+	if (!data) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
 	if (attn_only) {
 		retval = synaptics_rmi4_int_enable(rmi4_data, enable);
 		goto exit;
@@ -1875,8 +2142,8 @@ static int synaptics_rmi4_irq_enable(struct synaptics_rmi4_data *rmi4_data,
 	}
 
 exit:
+	kfree(data);
 	mutex_unlock(&(rmi4_data->rmi4_irq_enable_mutex));
-
 	return retval;
 }
 
@@ -1930,12 +2197,12 @@ static int synaptics_rmi4_f11_init(struct synaptics_rmi4_data *rmi4_data,
 	unsigned char offset;
 	unsigned char fingers_supported;
 	struct synaptics_rmi4_f11_extra_data *extra_data;
-	struct synaptics_rmi4_f11_query_0_5 query_0_5;
+	struct synaptics_rmi4_f11_query_0_5 *query_0_5 = NULL;
 	struct synaptics_rmi4_f11_query_7_8 query_7_8;
 	struct synaptics_rmi4_f11_query_9 query_9;
-	struct synaptics_rmi4_f11_query_12 query_12;
-	struct synaptics_rmi4_f11_query_27 query_27;
-	struct synaptics_rmi4_f11_ctrl_6_9 control_6_9;
+	struct synaptics_rmi4_f11_query_12 *query_12 = NULL;
+	struct synaptics_rmi4_f11_query_27 *query_27 = NULL;
+	struct synaptics_rmi4_f11_ctrl_6_9 *control_6_9 = NULL;
 	const struct synaptics_dsx_board_data *bdata =
 				rmi4_data->hw_if->board_data;
 
@@ -1950,33 +2217,57 @@ static int synaptics_rmi4_f11_init(struct synaptics_rmi4_data *rmi4_data,
 	}
 	extra_data = (struct synaptics_rmi4_f11_extra_data *)fhandler->extra;
 
+	query_0_5 = kzalloc(sizeof(*query_0_5), GFP_KERNEL);
+	if (!query_0_5) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	control_6_9 = kzalloc(sizeof(*control_6_9), GFP_KERNEL);
+	if (!control_6_9) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	query_12 = kzalloc(sizeof(*query_12), GFP_KERNEL);
+	if (!query_12) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	query_27 = kzalloc(sizeof(*query_27), GFP_KERNEL);
+	if (!query_27) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
 	retval = synaptics_rmi4_reg_read(rmi4_data,
 			fhandler->full_addr.query_base,
-			query_0_5.data,
-			sizeof(query_0_5.data));
+			query_0_5->data,
+			sizeof(query_0_5->data));
 	if (retval < 0)
-		return retval;
+		goto exit;
 
 	/* Maximum number of fingers supported */
-	if (query_0_5.num_of_fingers <= 4)
-		fhandler->num_of_data_points = query_0_5.num_of_fingers + 1;
-	else if (query_0_5.num_of_fingers == 5)
+	if (query_0_5->num_of_fingers <= 4)
+		fhandler->num_of_data_points = query_0_5->num_of_fingers + 1;
+	else if (query_0_5->num_of_fingers == 5)
 		fhandler->num_of_data_points = 10;
 
 	rmi4_data->num_of_fingers = fhandler->num_of_data_points;
 
 	retval = synaptics_rmi4_reg_read(rmi4_data,
 			fhandler->full_addr.ctrl_base + 6,
-			control_6_9.data,
-			sizeof(control_6_9.data));
+			control_6_9->data,
+			sizeof(control_6_9->data));
 	if (retval < 0)
-		return retval;
+		goto exit;
 
 	/* Maximum x and y */
-	rmi4_data->sensor_max_x = control_6_9.sensor_max_x_pos_7_0 |
-			(control_6_9.sensor_max_x_pos_11_8 << 8);
-	rmi4_data->sensor_max_y = control_6_9.sensor_max_y_pos_7_0 |
-			(control_6_9.sensor_max_y_pos_11_8 << 8);
+	rmi4_data->sensor_max_x = control_6_9->sensor_max_x_pos_7_0 |
+			(control_6_9->sensor_max_x_pos_11_8 << 8);
+	rmi4_data->sensor_max_y = control_6_9->sensor_max_y_pos_7_0 |
+			(control_6_9->sensor_max_y_pos_11_8 << 8);
 	dev_dbg(rmi4_data->pdev->dev.parent,
 			"%s: Function %02x max x = %d max y = %d\n",
 			__func__, fhandler->fn_number,
@@ -1995,82 +2286,82 @@ static int synaptics_rmi4_f11_init(struct synaptics_rmi4_data *rmi4_data,
 
 	fhandler->data = NULL;
 
-	offset = sizeof(query_0_5.data);
+	offset = sizeof(query_0_5->data);
 
 	/* query 6 */
-	if (query_0_5.has_rel)
+	if (query_0_5->has_rel)
 		offset += 1;
 
 	/* queries 7 8 */
-	if (query_0_5.has_gestures) {
+	if (query_0_5->has_gestures) {
 		retval = synaptics_rmi4_reg_read(rmi4_data,
 				fhandler->full_addr.query_base + offset,
 				query_7_8.data,
 				sizeof(query_7_8.data));
 		if (retval < 0)
-			return retval;
+			goto exit;
 
 		offset += sizeof(query_7_8.data);
 	}
 
 	/* query 9 */
-	if (query_0_5.has_query_9) {
+	if (query_0_5->has_query_9) {
 		retval = synaptics_rmi4_reg_read(rmi4_data,
 				fhandler->full_addr.query_base + offset,
 				query_9.data,
 				sizeof(query_9.data));
 		if (retval < 0)
-			return retval;
+			goto exit;
 
 		offset += sizeof(query_9.data);
 	}
 
 	/* query 10 */
-	if (query_0_5.has_gestures && query_7_8.has_touch_shapes)
+	if (query_0_5->has_gestures && query_7_8.has_touch_shapes)
 		offset += 1;
 
 	/* query 11 */
-	if (query_0_5.has_query_11)
+	if (query_0_5->has_query_11)
 		offset += 1;
 
 	/* query 12 */
-	if (query_0_5.has_query_12) {
+	if (query_0_5->has_query_12) {
 		retval = synaptics_rmi4_reg_read(rmi4_data,
 				fhandler->full_addr.query_base + offset,
-				query_12.data,
-				sizeof(query_12.data));
+				query_12->data,
+				sizeof(query_12->data));
 		if (retval < 0)
-			return retval;
+			goto exit;
 
-		offset += sizeof(query_12.data);
+		offset += sizeof(query_12->data);
 	}
 
 	/* query 13 */
-	if (query_0_5.has_jitter_filter)
+	if (query_0_5->has_jitter_filter)
 		offset += 1;
 
 	/* query 14 */
-	if (query_0_5.has_query_12 && query_12.has_general_information_2)
+	if (query_0_5->has_query_12 && query_12->has_general_information_2)
 		offset += 1;
 
 	/* queries 15 16 17 18 19 20 21 22 23 24 25 26*/
-	if (query_0_5.has_query_12 && query_12.has_physical_properties)
+	if (query_0_5->has_query_12 && query_12->has_physical_properties)
 		offset += 12;
 
 	/* query 27 */
-	if (query_0_5.has_query_27) {
+	if (query_0_5->has_query_27) {
 		retval = synaptics_rmi4_reg_read(rmi4_data,
 				fhandler->full_addr.query_base + offset,
-				query_27.data,
-				sizeof(query_27.data));
+				query_27->data,
+				sizeof(query_27->data));
 		if (retval < 0)
-			return retval;
+			goto exit;
 
-		rmi4_data->f11_wakeup_gesture = query_27.has_wakeup_gesture;
+		rmi4_data->f11_wakeup_gesture = query_27->has_wakeup_gesture;
 	}
 
 	if (!rmi4_data->f11_wakeup_gesture)
-		return retval;
+		goto exit;
 
 	/* data 0 */
 	fingers_supported = fhandler->num_of_data_points;
@@ -2080,82 +2371,87 @@ static int synaptics_rmi4_f11_init(struct synaptics_rmi4_data *rmi4_data,
 	offset += 5 * fingers_supported;
 
 	/* data 6 7 */
-	if (query_0_5.has_rel)
+	if (query_0_5->has_rel)
 		offset += 2 * fingers_supported;
 
 	/* data 8 */
-	if (query_0_5.has_gestures && query_7_8.data[0])
+	if (query_0_5->has_gestures && query_7_8.data[0])
 		offset += 1;
 
 	/* data 9 */
-	if (query_0_5.has_gestures && (query_7_8.data[0] || query_7_8.data[1]))
+	if (query_0_5->has_gestures && (query_7_8.data[0] || query_7_8.data[1]))
 		offset += 1;
 
 	/* data 10 */
-	if (query_0_5.has_gestures &&
+	if (query_0_5->has_gestures &&
 			(query_7_8.has_pinch || query_7_8.has_flick))
 		offset += 1;
 
 	/* data 11 12 */
-	if (query_0_5.has_gestures &&
+	if (query_0_5->has_gestures &&
 			(query_7_8.has_flick || query_7_8.has_rotate))
 		offset += 2;
 
 	/* data 13 */
-	if (query_0_5.has_gestures && query_7_8.has_touch_shapes)
+	if (query_0_5->has_gestures && query_7_8.has_touch_shapes)
 		offset += (fingers_supported + 3) / 4;
 
 	/* data 14 15 */
-	if (query_0_5.has_gestures &&
+	if (query_0_5->has_gestures &&
 			(query_7_8.has_scroll_zones ||
 			query_7_8.has_multi_finger_scroll ||
 			query_7_8.has_chiral_scroll))
 		offset += 2;
 
 	/* data 16 17 */
-	if (query_0_5.has_gestures &&
+	if (query_0_5->has_gestures &&
 			(query_7_8.has_scroll_zones &&
 			query_7_8.individual_scroll_zones))
 		offset += 2;
 
 	/* data 18 19 20 21 22 23 24 25 26 27 */
-	if (query_0_5.has_query_9 && query_9.has_contact_geometry)
+	if (query_0_5->has_query_9 && query_9.has_contact_geometry)
 		offset += 10 * fingers_supported;
 
 	/* data 28 */
-	if (query_0_5.has_bending_correction ||
-			query_0_5.has_large_object_suppression)
+	if (query_0_5->has_bending_correction ||
+			query_0_5->has_large_object_suppression)
 		offset += 1;
 
 	/* data 29 30 31 */
-	if (query_0_5.has_query_9 && query_9.has_pen_hover_discrimination)
+	if (query_0_5->has_query_9 && query_9.has_pen_hover_discrimination)
 		offset += 3;
 
 	/* data 32 */
-	if (query_0_5.has_query_12 &&
-			query_12.has_small_object_detection_tuning)
+	if (query_0_5->has_query_12 &&
+			query_12->has_small_object_detection_tuning)
 		offset += 1;
 
 	/* data 33 34 */
-	if (query_0_5.has_query_27 && query_27.f11_query27_b0)
+	if (query_0_5->has_query_27 && query_27->f11_query27_b0)
 		offset += 2;
 
 	/* data 35 */
-	if (query_0_5.has_query_12 && query_12.has_8bit_w)
+	if (query_0_5->has_query_12 && query_12->has_8bit_w)
 		offset += fingers_supported;
 
 	/* data 36 */
-	if (query_0_5.has_bending_correction)
+	if (query_0_5->has_bending_correction)
 		offset += 1;
 
 	/* data 37 */
-	if (query_0_5.has_query_27 && query_27.has_data_37)
+	if (query_0_5->has_query_27 && query_27->has_data_37)
 		offset += 1;
 
 	/* data 38 */
-	if (query_0_5.has_query_27 && query_27.has_wakeup_gesture)
+	if (query_0_5->has_query_27 && query_27->has_wakeup_gesture)
 		extra_data->data38_offset = offset;
 
+exit:
+	kfree(query_0_5);
+	kfree(query_12);
+	kfree(query_27);
+	kfree(control_6_9);
 	return retval;
 }
 
@@ -2280,8 +2576,8 @@ static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
 	unsigned char subpacket;
 	unsigned char ctrl_23_size;
 	unsigned char size_of_2d_data;
-	unsigned char size_of_query5;
-	unsigned char size_of_query8;
+	unsigned char *size_of_query5 = NULL;
+	unsigned char *size_of_query8 = NULL;
 	unsigned char ctrl_8_offset;
 	unsigned char ctrl_20_offset;
 	unsigned char ctrl_23_offset;
@@ -2311,6 +2607,18 @@ static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
 	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
 	size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);
 
+	size_of_query5 = kcalloc(1, sizeof(char), GFP_KERNEL);
+	if (!size_of_query5) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	size_of_query8 = kcalloc(1, sizeof(char), GFP_KERNEL);
+	if (!size_of_query8) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
 	query_5 = kzalloc(sizeof(*query_5), GFP_KERNEL);
 	if (!query_5) {
 		dev_err(rmi4_data->pdev->dev.parent,
@@ -2367,19 +2675,19 @@ static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
 
 	retval = synaptics_rmi4_reg_read(rmi4_data,
 			fhandler->full_addr.query_base + 4,
-			&size_of_query5,
-			sizeof(size_of_query5));
+			size_of_query5,
+			sizeof(*size_of_query5));
 	if (retval < 0)
 		goto exit;
 
-	if (size_of_query5 > sizeof(query_5->data))
-		size_of_query5 = sizeof(query_5->data);
+	if (*size_of_query5 > sizeof(query_5->data))
+		*size_of_query5 = sizeof(query_5->data);
 	memset(query_5->data, 0x00, sizeof(query_5->data));
 
 	retval = synaptics_rmi4_reg_read(rmi4_data,
 			fhandler->full_addr.query_base + 5,
 			query_5->data,
-			size_of_query5);
+			*size_of_query5);
 	if (retval < 0)
 		goto exit;
 
@@ -2494,26 +2802,26 @@ static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
 
 	retval = synaptics_rmi4_reg_read(rmi4_data,
 			fhandler->full_addr.query_base + 7,
-			&size_of_query8,
-			sizeof(size_of_query8));
+			size_of_query8,
+			sizeof(*size_of_query8));
 	if (retval < 0)
 		goto exit;
 
-	if (size_of_query8 > sizeof(query_8->data))
-		size_of_query8 = sizeof(query_8->data);
+	if (*size_of_query8 > sizeof(query_8->data))
+		*size_of_query8 = sizeof(query_8->data);
 	memset(query_8->data, 0x00, sizeof(query_8->data));
 
 	retval = synaptics_rmi4_reg_read(rmi4_data,
 			fhandler->full_addr.query_base + 8,
 			query_8->data,
-			size_of_query8);
+			*size_of_query8);
 	if (retval < 0)
 		goto exit;
 
 	/* Determine the presence of the Data0 register */
 	extra_data->data1_offset = query_8->data0_is_present;
 
-	if ((size_of_query8 >= 3) && (query_8->data15_is_present)) {
+	if ((*size_of_query8 >= 3) && (query_8->data15_is_present)) {
 		extra_data->data15_offset = query_8->data0_is_present +
 				query_8->data1_is_present +
 				query_8->data2_is_present +
@@ -2535,7 +2843,7 @@ static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
 	}
 
 #ifdef REPORT_2D_PRESSURE
-	if ((size_of_query8 >= 5) && (query_8->data29_is_present)) {
+	if ((*size_of_query8 >= 5) && (query_8->data29_is_present)) {
 		extra_data->data29_offset = query_8->data0_is_present +
 				query_8->data1_is_present +
 				query_8->data2_is_present +
@@ -2683,6 +2991,8 @@ static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
 	}
 
 exit:
+	kfree(size_of_query5);
+	kfree(size_of_query8);
 	kfree(query_5);
 	kfree(query_8);
 	kfree(ctrl_8);
@@ -2908,16 +3218,15 @@ static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
 {
 	int retval;
 	int timeout = CHECK_STATUS_TIMEOUT_MS;
-	struct synaptics_rmi4_f01_device_status status;
 
 	retval = synaptics_rmi4_reg_read(rmi4_data,
 			rmi4_data->f01_data_base_addr,
-			status.data,
-			sizeof(status.data));
+			rmi4_data->status.data,
+			sizeof(rmi4_data->status.data));
 	if (retval < 0)
 		return retval;
 
-	while (status.status_code == STATUS_CRC_IN_PROGRESS) {
+	while (rmi4_data->status.status_code == STATUS_CRC_IN_PROGRESS) {
 		if (timeout > 0)
 			msleep(20);
 		else
@@ -2925,8 +3234,8 @@ static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
 
 		retval = synaptics_rmi4_reg_read(rmi4_data,
 				rmi4_data->f01_data_base_addr,
-				status.data,
-				sizeof(status.data));
+				rmi4_data->status.data,
+				sizeof(rmi4_data->status.data));
 		if (retval < 0)
 			return retval;
 
@@ -2936,11 +3245,11 @@ static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
 	if (timeout != CHECK_STATUS_TIMEOUT_MS)
 		*was_in_bl_mode = true;
 
-	if (status.flash_prog == 1) {
+	if (rmi4_data->status.flash_prog == 1) {
 		rmi4_data->flash_prog_mode = true;
 		pr_notice("%s: In flash prog mode, status = 0x%02x\n",
 				__func__,
-				status.status_code);
+				rmi4_data->status.status_code);
 	} else {
 		rmi4_data->flash_prog_mode = false;
 	}
@@ -2951,32 +3260,39 @@ static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
 static int synaptics_rmi4_set_configured(struct synaptics_rmi4_data *rmi4_data)
 {
 	int retval;
-	unsigned char device_ctrl;
+	unsigned char *device_ctrl = NULL;
+
+	device_ctrl = kcalloc(1, sizeof(char), GFP_KERNEL);
+	if (!device_ctrl) {
+		retval = -ENOMEM;
+		goto exit;
+	}
 
 	retval = synaptics_rmi4_reg_read(rmi4_data,
 			rmi4_data->f01_ctrl_base_addr,
-			&device_ctrl,
-			sizeof(device_ctrl));
+			device_ctrl,
+			sizeof(*device_ctrl));
 	if (retval < 0) {
 		dev_err(rmi4_data->pdev->dev.parent,
 				"%s: Failed to set configured\n",
 				__func__);
-		return retval;
+		goto exit;
 	}
 
-	rmi4_data->no_sleep_setting = device_ctrl & NO_SLEEP_ON;
-	device_ctrl |= CONFIGURED;
+	rmi4_data->no_sleep_setting = *device_ctrl & NO_SLEEP_ON;
+	*device_ctrl |= CONFIGURED;
 
 	retval = synaptics_rmi4_reg_write(rmi4_data,
 			rmi4_data->f01_ctrl_base_addr,
-			&device_ctrl,
-			sizeof(device_ctrl));
+			device_ctrl,
+			sizeof(*device_ctrl));
 	if (retval < 0) {
 		dev_err(rmi4_data->pdev->dev.parent,
 				"%s: Failed to set configured\n",
 				__func__);
 	}
-
+exit:
+	kfree(device_ctrl);
 	return retval;
 }
 
@@ -3013,7 +3329,6 @@ static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
 	bool f01found;
 	bool f35found;
 	bool was_in_bl_mode;
-	struct synaptics_rmi4_fn_desc rmi_fd;
 	struct synaptics_rmi4_fn *fhandler;
 	struct synaptics_rmi4_device_info *rmi;
 
@@ -3034,8 +3349,8 @@ static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
 
 			retval = synaptics_rmi4_reg_read(rmi4_data,
 					pdt_entry_addr,
-					(unsigned char *)&rmi_fd,
-					sizeof(rmi_fd));
+					(unsigned char *)&rmi4_data->rmi_fd,
+					sizeof(rmi4_data->rmi_fd));
 			if (retval < 0)
 				return retval;
 
@@ -3043,7 +3358,7 @@ static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
 
 			fhandler = NULL;
 
-			if (rmi_fd.fn_number == 0) {
+			if (rmi4_data->rmi_fd.fn_number == 0) {
 				dev_dbg(rmi4_data->pdev->dev.parent,
 						"%s: Reached end of PDT\n",
 						__func__);
@@ -3052,28 +3367,30 @@ static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
 
 			dev_dbg(rmi4_data->pdev->dev.parent,
 					"%s: F%02x found (page %d)\n",
-					__func__, rmi_fd.fn_number,
+					__func__, rmi4_data->rmi_fd.fn_number,
 					page_number);
 
-			switch (rmi_fd.fn_number) {
+			switch (rmi4_data->rmi_fd.fn_number) {
 			case SYNAPTICS_RMI4_F01:
-				if (rmi_fd.intr_src_count == 0)
+				if (rmi4_data->rmi_fd.intr_src_count == 0)
 					break;
 
 				f01found = true;
 
 				retval = synaptics_rmi4_alloc_fh(&fhandler,
-						&rmi_fd, page_number);
+						&rmi4_data->rmi_fd,
+						page_number);
 				if (retval < 0) {
 					dev_err(rmi4_data->pdev->dev.parent,
-							"%s: Failed to alloc for F%d\n",
-							__func__,
-							rmi_fd.fn_number);
+						"%s: Failed to alloc for F%d\n",
+						__func__,
+						rmi4_data->rmi_fd.fn_number);
 					return retval;
 				}
 
 				retval = synaptics_rmi4_f01_init(rmi4_data,
-						fhandler, &rmi_fd, intr_count);
+						fhandler, &rmi4_data->rmi_fd,
+						intr_count);
 				if (retval < 0)
 					return retval;
 
@@ -3097,59 +3414,65 @@ static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
 
 				break;
 			case SYNAPTICS_RMI4_F11:
-				if (rmi_fd.intr_src_count == 0)
+				if (rmi4_data->rmi_fd.intr_src_count == 0)
 					break;
 
 				retval = synaptics_rmi4_alloc_fh(&fhandler,
-						&rmi_fd, page_number);
+						&rmi4_data->rmi_fd,
+						page_number);
 				if (retval < 0) {
 					dev_err(rmi4_data->pdev->dev.parent,
-							"%s: Failed to alloc for F%d\n",
-							__func__,
-							rmi_fd.fn_number);
+						"%s: Failed to alloc for F%d\n",
+						__func__,
+						rmi4_data->rmi_fd.fn_number);
 					return retval;
 				}
 
 				retval = synaptics_rmi4_f11_init(rmi4_data,
-						fhandler, &rmi_fd, intr_count);
+						fhandler, &rmi4_data->rmi_fd,
+						intr_count);
 				if (retval < 0)
 					return retval;
 				break;
 			case SYNAPTICS_RMI4_F12:
-				if (rmi_fd.intr_src_count == 0)
+				if (rmi4_data->rmi_fd.intr_src_count == 0)
 					break;
 
 				retval = synaptics_rmi4_alloc_fh(&fhandler,
-						&rmi_fd, page_number);
+						&rmi4_data->rmi_fd,
+						page_number);
 				if (retval < 0) {
 					dev_err(rmi4_data->pdev->dev.parent,
-							"%s: Failed to alloc for F%d\n",
-							__func__,
-							rmi_fd.fn_number);
+						"%s: Failed to alloc for F%d\n",
+						__func__,
+						rmi4_data->rmi_fd.fn_number);
 					return retval;
 				}
 
 				retval = synaptics_rmi4_f12_init(rmi4_data,
-						fhandler, &rmi_fd, intr_count);
+						fhandler, &rmi4_data->rmi_fd,
+						intr_count);
 				if (retval < 0)
 					return retval;
 				break;
 			case SYNAPTICS_RMI4_F1A:
-				if (rmi_fd.intr_src_count == 0)
+				if (rmi4_data->rmi_fd.intr_src_count == 0)
 					break;
 
 				retval = synaptics_rmi4_alloc_fh(&fhandler,
-						&rmi_fd, page_number);
+						&rmi4_data->rmi_fd,
+						page_number);
 				if (retval < 0) {
 					dev_err(rmi4_data->pdev->dev.parent,
-							"%s: Failed to alloc for F%d\n",
-							__func__,
-							rmi_fd.fn_number);
+						"%s: Failed to alloc for F%d\n",
+						__func__,
+						rmi4_data->rmi_fd.fn_number);
 					return retval;
 				}
 
 				retval = synaptics_rmi4_f1a_init(rmi4_data,
-						fhandler, &rmi_fd, intr_count);
+						fhandler, &rmi4_data->rmi_fd,
+						intr_count);
 				if (retval < 0) {
 #ifdef IGNORE_FN_INIT_FAILURE
 					kfree(fhandler);
@@ -3161,25 +3484,27 @@ static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
 				break;
 #ifdef USE_DATA_SERVER
 			case SYNAPTICS_RMI4_F21:
-				if (rmi_fd.intr_src_count == 0)
+				if (rmi4_data->rmi_fd.intr_src_count == 0)
 					break;
 
 				retval = synaptics_rmi4_alloc_fh(&fhandler,
-						&rmi_fd, page_number);
+						&rmi4_data->rmi_fd,
+						page_number);
 				if (retval < 0) {
 					dev_err(rmi4_data->pdev->dev.parent,
-							"%s: Failed to alloc for F%d\n",
-							__func__,
-							rmi_fd.fn_number);
+						"%s: Failed to alloc for F%d\n",
+						__func__,
+						rmi4_data->rmi_fd.fn_number);
 					return retval;
 				}
 
-				fhandler->fn_number = rmi_fd.fn_number;
+				fhandler->fn_number =
+					rmi4_data->rmi_fd.fn_number;
 				fhandler->num_of_data_sources =
-						rmi_fd.intr_src_count;
+					rmi4_data->rmi_fd.intr_src_count;
 
-				synaptics_rmi4_set_intr_mask(fhandler, &rmi_fd,
-						intr_count);
+				synaptics_rmi4_set_intr_mask(fhandler,
+						&rmi4_data->rmi_fd, intr_count);
 				break;
 #endif
 			case SYNAPTICS_RMI4_F35:
@@ -3188,16 +3513,16 @@ static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
 #ifdef F51_DISCRETE_FORCE
 			case SYNAPTICS_RMI4_F51:
 				rmi4_data->f51_query_base_addr =
-						rmi_fd.query_base_addr |
-						(page_number << 8);
+					rmi4_data->rmi_fd.query_base_addr |
+					(page_number << 8);
 				break;
 #endif
 			}
 
 			/* Accumulate the interrupt count */
-			intr_count += rmi_fd.intr_src_count;
+			intr_count += rmi4_data->rmi_fd.intr_src_count;
 
-			if (fhandler && rmi_fd.intr_src_count) {
+			if (fhandler && rmi4_data->rmi_fd.intr_src_count) {
 				list_add_tail(&fhandler->link,
 						&rmi->support_fn_list);
 			}
@@ -4114,39 +4439,47 @@ static int synaptics_rmi4_sleep_enable(struct synaptics_rmi4_data *rmi4_data,
 		bool enable)
 {
 	int retval;
-	unsigned char device_ctrl;
+	unsigned char *device_ctrl = NULL;
 	unsigned char no_sleep_setting = rmi4_data->no_sleep_setting;
 
+	device_ctrl = kcalloc(1, sizeof(char), GFP_KERNEL);
+	if (!device_ctrl) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
 	retval = synaptics_rmi4_reg_read(rmi4_data,
 			rmi4_data->f01_ctrl_base_addr,
-			&device_ctrl,
-			sizeof(device_ctrl));
+			device_ctrl,
+			sizeof(*device_ctrl));
 	if (retval < 0) {
 		dev_err(rmi4_data->pdev->dev.parent,
 				"%s: Failed to read device control\n",
 				__func__);
-		return retval;
+		goto exit;
 	}
 
-	device_ctrl = device_ctrl & ~MASK_3BIT;
+	*device_ctrl = *device_ctrl & ~MASK_3BIT;
 	if (enable)
-		device_ctrl = device_ctrl | SENSOR_SLEEP;
+		*device_ctrl = *device_ctrl | SENSOR_SLEEP;
 	else
-		device_ctrl = device_ctrl | no_sleep_setting | NORMAL_OPERATION;
+		*device_ctrl = *device_ctrl | no_sleep_setting |
+			NORMAL_OPERATION;
 
 	retval = synaptics_rmi4_reg_write(rmi4_data,
 			rmi4_data->f01_ctrl_base_addr,
-			&device_ctrl,
-			sizeof(device_ctrl));
+			device_ctrl,
+			sizeof(*device_ctrl));
 	if (retval < 0) {
 		dev_err(rmi4_data->pdev->dev.parent,
 				"%s: Failed to write device control\n",
 				__func__);
-		return retval;
+		goto exit;
 	}
 
 	rmi4_data->sensor_sleep = enable;
-
+exit:
+	kfree(device_ctrl);
 	return retval;
 }
 
@@ -4291,6 +4624,11 @@ static int synaptics_rmi4_probe(struct platform_device *pdev)
 			goto err_drm_reg;
 		}
 	}
+
+	/* Initialize secure touch */
+	synaptics_rmi4_secure_touch_init(rmi4_data);
+	synaptics_rmi4_secure_touch_stop(rmi4_data, true);
+
 	rmi4_data->rmi4_probe_wq = create_singlethread_workqueue(
 						"Synaptics_rmi4_probe_wq");
 	if (!rmi4_data->rmi4_probe_wq) {
@@ -4641,6 +4979,7 @@ static int synaptics_rmi4_dsi_panel_notifier_cb(struct notifier_block *self,
 
 	if (evdata && evdata->data && rmi4_data) {
 		if (event == DRM_PANEL_EARLY_EVENT_BLANK) {
+			synaptics_rmi4_secure_touch_stop(rmi4_data, false);
 			transition = *(int *)evdata->data;
 			if (transition == DRM_PANEL_BLANK_POWERDOWN) {
 				if (rmi4_data->initialized)
@@ -4679,6 +5018,13 @@ static int synaptics_rmi4_early_suspend(struct early_suspend *h)
 
 	if (rmi4_data->stay_awake)
 		return retval;
+	/*
+	 * During early suspend/late resume the driver doesn't access
+	 * xPU/SMMU-protected HW resources, so there is no compelling need
+	 * to block; notifying userspace that a power event has occurred is
+	 * enough. Hence the 'blocking' parameter can be set to false.
+	 */
+	synaptics_rmi4_secure_touch_stop(rmi4_data, false);
 
 	if (rmi4_data->enable_wakeup_gesture) {
 		if (rmi4_data->no_sleep_setting) {
@@ -4743,6 +5089,8 @@ static int synaptics_rmi4_late_resume(struct early_suspend *h)
 	if (rmi4_data->stay_awake)
 		return retval;
 
+	synaptics_rmi4_secure_touch_stop(rmi4_data, false);
+
 	if (rmi4_data->enable_wakeup_gesture) {
 		disable_irq_wake(rmi4_data->irq);
 		goto exit;
@@ -4791,6 +5139,8 @@ static int synaptics_rmi4_suspend(struct device *dev)
 	if (rmi4_data->stay_awake)
 		return 0;
 
+	synaptics_rmi4_secure_touch_stop(rmi4_data, true);
+
 	if (rmi4_data->enable_wakeup_gesture) {
 		if (rmi4_data->no_sleep_setting) {
 			synaptics_rmi4_reg_read(rmi4_data,
@@ -4862,6 +5212,7 @@ static int synaptics_rmi4_resume(struct device *dev)
 			rmi4_data->hw_if->board_data;
 	if (rmi4_data->stay_awake)
 		return 0;
+	synaptics_rmi4_secure_touch_stop(rmi4_data, true);
 
 	if (rmi4_data->enable_wakeup_gesture) {
 		disable_irq_wake(rmi4_data->irq);
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h
index 8f175e3..0a778b0 100755
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h
@@ -51,6 +51,13 @@
 
 #include <drm/drm_panel.h>
 
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX)
+#include <linux/completion.h>
+#include <linux/atomic.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#endif
+
 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38))
 #define KERNEL_ABOVE_2_6_38
 #endif
@@ -282,6 +289,17 @@ struct synaptics_rmi4_device_info {
 	struct list_head support_fn_list;
 };
 
+struct synaptics_rmi4_f01_device_status {
+	union {
+		struct {
+			unsigned char status_code:4;
+			unsigned char reserved:2;
+			unsigned char flash_prog:1;
+			unsigned char unconfigured:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
 /*
  * struct synaptics_rmi4_data - RMI4 device instance data
  * @pdev: pointer to platform device
@@ -356,6 +374,8 @@ struct synaptics_rmi4_data {
 	const struct synaptics_dsx_hw_interface *hw_if;
 	struct synaptics_rmi4_device_info rmi4_mod_info;
 	struct synaptics_rmi4_input_settings input_settings;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_f01_device_status status;
 	struct kobject *board_prop_dir;
 	struct regulator *pwr_reg;
 	struct regulator *bus_reg;
@@ -431,6 +451,15 @@ struct synaptics_rmi4_data {
 			bool enable);
 	void (*report_touch)(struct synaptics_rmi4_data *rmi4_data,
 			struct synaptics_rmi4_fn *fhandler);
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX)
+	atomic_t st_enabled;
+	atomic_t st_pending_irqs;
+	struct completion st_powerdown;
+	struct completion st_irq_processed;
+	bool st_initialized;
+	struct clk *core_clk;
+	struct clk *iface_clk;
+#endif
 };
 
 struct synaptics_dsx_bus_access {
@@ -439,6 +468,8 @@ struct synaptics_dsx_bus_access {
 		unsigned char *data, unsigned int length);
 	int (*write)(struct synaptics_rmi4_data *rmi4_data, unsigned short addr,
 		unsigned char *data, unsigned int length);
+	int (*get)(struct synaptics_rmi4_data *rmi4_data);
+	void (*put)(struct synaptics_rmi4_data *rmi4_data);
 };
 
 struct synaptics_dsx_hw_interface {
@@ -489,6 +520,16 @@ static inline int synaptics_rmi4_reg_write(
 	return rmi4_data->hw_if->bus_access->write(rmi4_data, addr, data, len);
 }
 
+static inline int synaptics_rmi4_bus_get(struct synaptics_rmi4_data *rmi4_data)
+{
+	return rmi4_data->hw_if->bus_access->get(rmi4_data);
+}
+
+static inline void synaptics_rmi4_bus_put(struct synaptics_rmi4_data *rmi4_data)
+{
+	rmi4_data->hw_if->bus_access->put(rmi4_data);
+}
+
 static inline ssize_t synaptics_rmi4_show_error(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
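
The synaptics_rmi4_f01_device_status union, moved into this header so the
shared rmi4_data copy can replace the old per-call stack instance, overlays
bitfields on the single F01 Data0 byte. A minimal decoding sketch, assuming
the little-endian bitfield layout the driver already relies on (the byte
value is made up):

	struct synaptics_rmi4_f01_device_status status;

	status.data[0] = 0x81;	/* example: unconfigured=1, status_code=1 */

	/* bits 0-3 status_code, bit 6 flash_prog, bit 7 unconfigured */
	if (status.unconfigured && !status.flash_prog)
		pr_notice("spontaneous reset detected\n");
	if (status.status_code == STATUS_CRC_IN_PROGRESS)
		; /* wait for CRC, as synaptics_rmi4_check_status() does */
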
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
index 2372368..6e78ebb 100755
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
@@ -1388,30 +1388,42 @@ static int fwu_parse_image_info(void)
 
 static int fwu_read_flash_status(void)
 {
-	int retval;
-	unsigned char status;
-	unsigned char command;
+	int retval = 0;
+	unsigned char *status = NULL;
+	unsigned char *command = NULL;
 	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
 
+	status = kcalloc(1, sizeof(char), GFP_KERNEL);
+	if (!status) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	command = kcalloc(1, sizeof(char), GFP_KERNEL);
+	if (!command) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
 	retval = synaptics_rmi4_reg_read(rmi4_data,
 			fwu->f34_fd.data_base_addr + fwu->off.flash_status,
-			&status,
-			sizeof(status));
+			status,
+			sizeof(*status));
 	if (retval < 0) {
 		dev_err(rmi4_data->pdev->dev.parent,
 				"%s: Failed to read flash status\n",
 				__func__);
-		return retval;
+		goto exit;
 	}
 
-	fwu->in_bl_mode = status >> 7;
+	fwu->in_bl_mode = *status >> 7;
 
 	if (fwu->bl_version == BL_V5)
-		fwu->flash_status = (status >> 4) & MASK_3BIT;
+		fwu->flash_status = (*status >> 4) & MASK_3BIT;
 	else if (fwu->bl_version == BL_V6)
-		fwu->flash_status = status & MASK_3BIT;
+		fwu->flash_status = *status & MASK_3BIT;
 	else if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
-		fwu->flash_status = status & MASK_5BIT;
+		fwu->flash_status = *status & MASK_5BIT;
 
 	if (fwu->write_bootloader)
 		fwu->flash_status = 0x00;
@@ -1429,26 +1441,28 @@ static int fwu_read_flash_status(void)
 
 	retval = synaptics_rmi4_reg_read(rmi4_data,
 			fwu->f34_fd.data_base_addr + fwu->off.flash_cmd,
-			&command,
-			sizeof(command));
+			command,
+			sizeof(*command));
 	if (retval < 0) {
 		dev_err(rmi4_data->pdev->dev.parent,
 				"%s: Failed to read flash command\n",
 				__func__);
-		return retval;
+		goto exit;
 	}
 
 	if (fwu->bl_version == BL_V5)
-		fwu->command = command & MASK_4BIT;
+		fwu->command = *command & MASK_4BIT;
 	else if (fwu->bl_version == BL_V6)
-		fwu->command = command & MASK_6BIT;
+		fwu->command = *command & MASK_6BIT;
 	else if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
-		fwu->command = command;
+		fwu->command = *command;
 
 	if (fwu->write_bootloader)
 		fwu->command = 0x00;
-
-	return 0;
+exit:
+	kfree(status);
+	kfree(command);
+	return retval;
 }
 
 static int fwu_wait_for_idle(int timeout_ms, bool poll)
@@ -2062,10 +2076,22 @@ static int fwu_read_f34_v5v6_queries(void)
 	unsigned char count;
 	unsigned char base;
 	unsigned char offset;
-	unsigned char buf[10];
-	struct f34_v5v6_flash_properties_2 properties_2;
+	unsigned char *buf = NULL;
+	struct f34_v5v6_flash_properties_2 *properties_2 = NULL;
 	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
 
+	buf = kcalloc(10, sizeof(char), GFP_KERNEL);
+	if (!buf) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	properties_2 = kzalloc(sizeof(*properties_2), GFP_KERNEL);
+	if (!properties_2) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
 	base = fwu->f34_fd.query_base_addr;
 
 	retval = synaptics_rmi4_reg_read(rmi4_data,
@@ -2076,7 +2102,7 @@ static int fwu_read_f34_v5v6_queries(void)
 		dev_err(rmi4_data->pdev->dev.parent,
 				"%s: Failed to read bootloader ID\n",
 				__func__);
-		return retval;
+		goto exit;
 	}
 
 	if (fwu->bl_version == BL_V5) {
@@ -2103,7 +2129,7 @@ static int fwu_read_f34_v5v6_queries(void)
 		dev_err(rmi4_data->pdev->dev.parent,
 				"%s: Failed to read block size info\n",
 				__func__);
-		return retval;
+		goto exit;
 	}
 
 	batohs(&fwu->block_size, &(buf[0]));
@@ -2124,7 +2150,7 @@ static int fwu_read_f34_v5v6_queries(void)
 		dev_err(rmi4_data->pdev->dev.parent,
 				"%s: Failed to read flash properties\n",
 				__func__);
-		return retval;
+		goto exit;
 	}
 
 	count = 4;
@@ -2146,7 +2172,7 @@ static int fwu_read_f34_v5v6_queries(void)
 		dev_err(rmi4_data->pdev->dev.parent,
 				"%s: Failed to read block count info\n",
 				__func__);
-		return retval;
+		goto exit;
 	}
 
 	batohs(&fwu->blkcount.ui_firmware, &(buf[0]));
@@ -2178,17 +2204,17 @@ static int fwu_read_f34_v5v6_queries(void)
 	if (fwu->flash_properties.has_query4) {
 		retval = synaptics_rmi4_reg_read(rmi4_data,
 				base + fwu->off.properties_2,
-				properties_2.data,
-				sizeof(properties_2.data));
+				properties_2->data,
+				sizeof(properties_2->data));
 		if (retval < 0) {
 			dev_err(rmi4_data->pdev->dev.parent,
 					"%s: Failed to read flash properties 2\n",
 					__func__);
-			return retval;
+			goto exit;
 		}
 		offset = fwu->off.properties_2 + 1;
 		count = 0;
-		if (properties_2.has_guest_code) {
+		if (properties_2->has_guest_code) {
 			retval = synaptics_rmi4_reg_read(rmi4_data,
 					base + offset + count,
 					buf,
@@ -2197,7 +2223,7 @@ static int fwu_read_f34_v5v6_queries(void)
 				dev_err(rmi4_data->pdev->dev.parent,
 						"%s: Failed to read guest code block count\n",
 						__func__);
-				return retval;
+				goto exit;
 			}
 
 			batohs(&fwu->blkcount.guest_code, &(buf[0]));
@@ -2205,7 +2231,7 @@ static int fwu_read_f34_v5v6_queries(void)
 			fwu->has_guest_code = true;
 		}
 #ifdef SYNA_TDDI
-		if (properties_2.has_force_config) {
+		if (properties_2->has_force_config) {
 			retval = synaptics_rmi4_reg_read(rmi4_data,
 					base + offset + count,
 					buf,
@@ -2214,13 +2240,13 @@ static int fwu_read_f34_v5v6_queries(void)
 				dev_err(rmi4_data->pdev->dev.parent,
 					"%s: Failed to read tddi force block count\n",
 					__func__);
-				return retval;
+				goto exit;
 			}
 			batohs(&fwu->blkcount.tddi_force_config, &(buf[0]));
 			count++;
 			fwu->has_force_config = true;
 		}
-		if (properties_2.has_lockdown_data) {
+		if (properties_2->has_lockdown_data) {
 			retval = synaptics_rmi4_reg_read(rmi4_data,
 					base + offset + count,
 					buf,
@@ -2229,13 +2255,13 @@ static int fwu_read_f34_v5v6_queries(void)
 				dev_err(rmi4_data->pdev->dev.parent,
 					"%s: Failed to read tddi lockdown block count\n",
 					__func__);
-				return retval;
+				goto exit;
 			}
 			batohs(&fwu->blkcount.tddi_lockdown_data, &(buf[0]));
 			count++;
 			fwu->has_lockdown_data = true;
 		}
-		if (properties_2.has_lcm_data) {
+		if (properties_2->has_lcm_data) {
 			retval = synaptics_rmi4_reg_read(rmi4_data,
 					base + offset + count,
 					buf,
@@ -2244,13 +2270,13 @@ static int fwu_read_f34_v5v6_queries(void)
 				dev_err(rmi4_data->pdev->dev.parent,
 					"%s: Failed to read tddi lcm block count\n",
 					__func__);
-				return retval;
+				goto exit;
 			}
 			batohs(&fwu->blkcount.tddi_lcm_data, &(buf[0]));
 			count++;
 			fwu->has_lcm_data = true;
 		}
-		if (properties_2.has_oem_data) {
+		if (properties_2->has_oem_data) {
 			retval = synaptics_rmi4_reg_read(rmi4_data,
 					base + offset + count,
 					buf,
@@ -2259,7 +2285,7 @@ static int fwu_read_f34_v5v6_queries(void)
 				dev_err(rmi4_data->pdev->dev.parent,
 					"%s: Failed to read tddi oem block count\n",
 					__func__);
-				return retval;
+				goto exit;
 			}
 			batohs(&fwu->blkcount.tddi_oem_data, &(buf[0]));
 			fwu->has_oem_data = true;
@@ -2268,8 +2294,10 @@ static int fwu_read_f34_v5v6_queries(void)
 	}
 
 	fwu->has_utility_param = false;
-
-	return 0;
+exit:
+	kfree(properties_2);
+	kfree(buf);
+	return retval;
 }
 
 static int fwu_read_f34_queries(void)
@@ -2790,7 +2818,6 @@ static int fwu_scan_pdt(void)
 	bool f01found = false;
 	bool f34found = false;
 	bool f35found = false;
-	struct synaptics_rmi4_fn_desc rmi_fd;
 	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
 
 	fwu->in_ub_mode = false;
@@ -2798,38 +2825,38 @@ static int fwu_scan_pdt(void)
 	for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
 		retval = synaptics_rmi4_reg_read(rmi4_data,
 				addr,
-				(unsigned char *)&rmi_fd,
-				sizeof(rmi_fd));
+				(unsigned char *)&rmi4_data->rmi_fd,
+				sizeof(rmi4_data->rmi_fd));
 		if (retval < 0)
 			return retval;
 
-		if (rmi_fd.fn_number) {
+		if (rmi4_data->rmi_fd.fn_number) {
 			dev_dbg(rmi4_data->pdev->dev.parent,
 					"%s: Found F%02x\n",
-					__func__, rmi_fd.fn_number);
-			switch (rmi_fd.fn_number) {
+					__func__, rmi4_data->rmi_fd.fn_number);
+			switch (rmi4_data->rmi_fd.fn_number) {
 			case SYNAPTICS_RMI4_F01:
 				f01found = true;
 
 				rmi4_data->f01_query_base_addr =
-						rmi_fd.query_base_addr;
+					rmi4_data->rmi_fd.query_base_addr;
 				rmi4_data->f01_ctrl_base_addr =
-						rmi_fd.ctrl_base_addr;
+					rmi4_data->rmi_fd.ctrl_base_addr;
 				rmi4_data->f01_data_base_addr =
-						rmi_fd.data_base_addr;
+					rmi4_data->rmi_fd.data_base_addr;
 				rmi4_data->f01_cmd_base_addr =
-						rmi_fd.cmd_base_addr;
+					rmi4_data->rmi_fd.cmd_base_addr;
 				break;
 			case SYNAPTICS_RMI4_F34:
 				f34found = true;
 				fwu->f34_fd.query_base_addr =
-						rmi_fd.query_base_addr;
+					rmi4_data->rmi_fd.query_base_addr;
 				fwu->f34_fd.ctrl_base_addr =
-						rmi_fd.ctrl_base_addr;
+					rmi4_data->rmi_fd.ctrl_base_addr;
 				fwu->f34_fd.data_base_addr =
-						rmi_fd.data_base_addr;
+					rmi4_data->rmi_fd.data_base_addr;
 
-				switch (rmi_fd.fn_version) {
+				switch (rmi4_data->rmi_fd.fn_version) {
 				case F34_V0:
 					fwu->bl_version = BL_V5;
 					break;
@@ -2847,7 +2874,7 @@ static int fwu_scan_pdt(void)
 				}
 
 				fwu->intr_mask = 0;
-				intr_src = rmi_fd.intr_src_count;
+				intr_src = rmi4_data->rmi_fd.intr_src_count;
 				intr_off = intr_count % 8;
 				for (ii = intr_off;
 						ii < (intr_src + intr_off);
@@ -2858,20 +2885,20 @@ static int fwu_scan_pdt(void)
 			case SYNAPTICS_RMI4_F35:
 				f35found = true;
 				fwu->f35_fd.query_base_addr =
-						rmi_fd.query_base_addr;
+					rmi4_data->rmi_fd.query_base_addr;
 				fwu->f35_fd.ctrl_base_addr =
-						rmi_fd.ctrl_base_addr;
+					rmi4_data->rmi_fd.ctrl_base_addr;
 				fwu->f35_fd.data_base_addr =
-						rmi_fd.data_base_addr;
+					rmi4_data->rmi_fd.data_base_addr;
 				fwu->f35_fd.cmd_base_addr =
-						rmi_fd.cmd_base_addr;
+					rmi4_data->rmi_fd.cmd_base_addr;
 				break;
 			}
 		} else {
 			break;
 		}
 
-		intr_count += rmi_fd.intr_src_count;
+		intr_count += rmi4_data->rmi_fd.intr_src_count;
 	}
 
 	if (!f01found || !f34found) {
@@ -5568,7 +5595,7 @@ static int synaptics_rmi4_fwu_init(struct synaptics_rmi4_data *rmi4_data)
 {
 	int retval;
 	unsigned char attr_count;
-	struct pdt_properties pdt_props;
+	struct pdt_properties *pdt_props = NULL;
 
 	if (fwu) {
 		dev_dbg(rmi4_data->pdev->dev.parent,
@@ -5577,6 +5604,12 @@ static int synaptics_rmi4_fwu_init(struct synaptics_rmi4_data *rmi4_data)
 		return 0;
 	}
 
+	pdt_props = kzalloc(sizeof(*pdt_props), GFP_KERNEL);
+	if (!pdt_props) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
 	fwu = kzalloc(sizeof(*fwu), GFP_KERNEL);
 	if (!fwu) {
 		dev_err(rmi4_data->pdev->dev.parent,
@@ -5599,13 +5632,13 @@ static int synaptics_rmi4_fwu_init(struct synaptics_rmi4_data *rmi4_data)
 
 	retval = synaptics_rmi4_reg_read(rmi4_data,
 			PDT_PROPS,
-			pdt_props.data,
-			sizeof(pdt_props.data));
+			pdt_props->data,
+			sizeof(pdt_props->data));
 	if (retval < 0) {
 		dev_dbg(rmi4_data->pdev->dev.parent,
 				"%s: Failed to read PDT properties, assuming 0x00\n",
 				__func__);
-	} else if (pdt_props.has_bsr) {
+	} else if (pdt_props->has_bsr) {
 		dev_err(rmi4_data->pdev->dev.parent,
 				"%s: Reflash for LTS not currently supported\n",
 				__func__);
@@ -5697,6 +5730,7 @@ static int synaptics_rmi4_fwu_init(struct synaptics_rmi4_data *rmi4_data)
 	fwu = NULL;
 
 exit:
+	kfree(pdt_props);
 	return retval;
 }
 
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
index 00ea777..9b75990 100644
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
@@ -273,11 +273,17 @@ static int synaptics_rmi4_i2c_set_page(struct synaptics_rmi4_data *rmi4_data,
 {
 	int retval = 0;
 	unsigned char retry;
-	unsigned char buf[PAGE_SELECT_LEN];
+	unsigned char *buf = NULL;
 	unsigned char page;
 	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
 	struct i2c_msg msg[2];
 
+	buf = kcalloc(PAGE_SELECT_LEN, sizeof(char), GFP_KERNEL);
+	if (!buf) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
 	msg[0].addr = hw_if.board_data->i2c_addr;
 	msg[0].flags = 0;
 	msg[0].len = PAGE_SELECT_LEN;
@@ -308,6 +314,8 @@ static int synaptics_rmi4_i2c_set_page(struct synaptics_rmi4_data *rmi4_data,
 		retval = PAGE_SELECT_LEN;
 	}
 
+exit:
+	kfree(buf);
 	return retval;
 }
 
@@ -316,7 +324,7 @@ static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
 {
 	int retval = 0;
 	unsigned char retry;
-	unsigned char buf;
+	unsigned char *buf = NULL;
 	unsigned char index = 0;
 	unsigned char xfer_msgs;
 	unsigned char remaining_msgs;
@@ -329,6 +337,12 @@ static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
 
 	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
 
+	buf = kcalloc(1, sizeof(char), GFP_KERNEL);
+	if (!buf) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
 	retval = synaptics_rmi4_i2c_set_page(rmi4_data, addr);
 	if (retval != PAGE_SELECT_LEN) {
 		retval = -EIO;
@@ -338,13 +352,13 @@ static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
 	msg[0].addr = hw_if.board_data->i2c_addr;
 	msg[0].flags = 0;
 	msg[0].len = 1;
-	msg[0].buf = &buf;
+	msg[0].buf = buf;
 	msg[rd_msgs].addr = hw_if.board_data->i2c_addr;
 	msg[rd_msgs].flags = I2C_M_RD;
 	msg[rd_msgs].len = (unsigned short)remaining_length;
 	msg[rd_msgs].buf = &data[data_offset];
 
-	buf = addr & MASK_8BIT;
+	buf[0] = addr & MASK_8BIT;
 
 	remaining_msgs = rd_msgs + 1;
 
@@ -383,8 +397,8 @@ static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
 	retval = length;
 
 exit:
+	kfree(buf);
 	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
-
 	return retval;
 }
 
@@ -504,10 +518,73 @@ static int check_default_tp(struct device_node *dt, const char *prop)
 	return ret;
 }
 
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX)
+static int synaptics_rmi4_clk_prepare_enable(
+		struct synaptics_rmi4_data *rmi4_data)
+{
+	int ret;
+
+	ret = clk_prepare_enable(rmi4_data->iface_clk);
+	if (ret) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"error on clk_prepare_enable(iface_clk):%d\n",
+				ret);
+		return ret;
+	}
+	ret = clk_prepare_enable(rmi4_data->core_clk);
+	if (ret) {
+		clk_disable_unprepare(rmi4_data->iface_clk);
+		dev_err(rmi4_data->pdev->dev.parent,
+				"error on clk_prepare_enable(core_clk):%d\n",
+				ret);
+	}
+	return ret;
+}
+
+static void synaptics_rmi4_clk_disable_unprepare(
+		struct synaptics_rmi4_data *rmi4_data)
+{
+	clk_disable_unprepare(rmi4_data->core_clk);
+	clk_disable_unprepare(rmi4_data->iface_clk);
+}
+
+static int synaptics_rmi4_i2c_get(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+	retval = pm_runtime_get_sync(i2c->adapter->dev.parent);
+	if (retval >= 0 && rmi4_data->core_clk != NULL &&
+			rmi4_data->iface_clk != NULL) {
+		retval = synaptics_rmi4_clk_prepare_enable(rmi4_data);
+		if (retval)
+			pm_runtime_put_sync(i2c->adapter->dev.parent);
+	}
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+static void synaptics_rmi4_i2c_put(struct synaptics_rmi4_data *rmi4_data)
+{
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+	if (rmi4_data->core_clk != NULL && rmi4_data->iface_clk != NULL)
+		synaptics_rmi4_clk_disable_unprepare(rmi4_data);
+	pm_runtime_put_sync(i2c->adapter->dev.parent);
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+}
+#endif
+
 static struct synaptics_dsx_bus_access bus_access = {
 	.type = BUS_I2C,
 	.read = synaptics_rmi4_i2c_read,
 	.write = synaptics_rmi4_i2c_write,
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX)
+	.get = synaptics_rmi4_i2c_get,
+	.put = synaptics_rmi4_i2c_put,
+#endif
 };
 
 static void synaptics_rmi4_i2c_dev_release(struct device *dev)
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c
index c17b692..3a0be3c 100755
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c
@@ -4673,25 +4673,31 @@ static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
 	unsigned char ii;
 	unsigned char rx_electrodes;
 	unsigned char tx_electrodes;
-	struct f55_control_43 ctrl_43;
+	struct f55_control_43 *ctrl_43 = NULL;
+
+	ctrl_43 = kzalloc(sizeof(*ctrl_43), GFP_KERNEL);
+	if (!ctrl_43) {
+		retval = -ENOMEM;
+		goto exit;
+	}
 
 	retval = test_f55_set_queries();
 	if (retval < 0) {
 		dev_err(rmi4_data->pdev->dev.parent,
 				"%s: Failed to read F55 query registers\n",
 				__func__);
-		return;
+		goto exit;
 	}
 
 	if (!f55->query.has_sensor_assignment)
-		return;
+		goto exit;
 
 	retval = test_f55_set_controls();
 	if (retval < 0) {
 		dev_err(rmi4_data->pdev->dev.parent,
 				"%s: Failed to set up F55 control registers\n",
 				__func__);
-		return;
+		goto exit;
 	}
 
 	tx_electrodes = f55->query.num_of_tx_electrodes;
@@ -4708,7 +4714,7 @@ static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
 		dev_err(rmi4_data->pdev->dev.parent,
 				"%s: Failed to read F55 tx assignment\n",
 				__func__);
-		return;
+		goto exit;
 	}
 
 	retval = synaptics_rmi4_reg_read(rmi4_data,
@@ -4719,7 +4725,7 @@ static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
 		dev_err(rmi4_data->pdev->dev.parent,
 				"%s: Failed to read F55 rx assignment\n",
 				__func__);
-		return;
+		goto exit;
 	}
 
 	f54->tx_assigned = 0;
@@ -4742,17 +4748,17 @@ static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
 	if (f55->extended_amp) {
 		retval = synaptics_rmi4_reg_read(rmi4_data,
 				f55->control_base_addr + f55->afe_mux_offset,
-				ctrl_43.data,
-				sizeof(ctrl_43.data));
+				ctrl_43->data,
+				sizeof(ctrl_43->data));
 		if (retval < 0) {
 			dev_err(rmi4_data->pdev->dev.parent,
 					"%s: Failed to read F55 AFE mux sizes\n",
 					__func__);
-			return;
+			goto exit;
 		}
 
-		f54->tx_assigned = ctrl_43.afe_l_mux_size +
-				ctrl_43.afe_r_mux_size;
+		f54->tx_assigned = ctrl_43->afe_l_mux_size +
+				ctrl_43->afe_r_mux_size;
 	}
 
 	/* force mapping */
@@ -4768,7 +4774,7 @@ static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
 			dev_err(rmi4_data->pdev->dev.parent,
 					"%s: Failed to read F55 force tx assignment\n",
 					__func__);
-			return;
+			goto exit;
 		}
 
 		retval = synaptics_rmi4_reg_read(rmi4_data,
@@ -4779,7 +4785,7 @@ static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
 			dev_err(rmi4_data->pdev->dev.parent,
 					"%s: Failed to read F55 force rx assignment\n",
 					__func__);
-			return;
+			goto exit;
 		}
 
 		for (ii = 0; ii < tx_electrodes; ii++) {
@@ -4792,6 +4798,10 @@ static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
 				f54->rx_assigned++;
 		}
 	}
+
+exit:
+	kfree(ctrl_43);
+	return;
 }
 
 static void test_f55_set_regs(struct synaptics_rmi4_data *rmi4_data,
@@ -4981,7 +4991,6 @@ static int test_scan_pdt(void)
 	unsigned short addr;
 	bool f54found = false;
 	bool f55found = false;
-	struct synaptics_rmi4_fn_desc rmi_fd;
 	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
 
 	for (page = 0; page < PAGES_TO_SERVICE; page++) {
@@ -4990,30 +4999,31 @@ static int test_scan_pdt(void)
 
 			retval = synaptics_rmi4_reg_read(rmi4_data,
 					addr,
-					(unsigned char *)&rmi_fd,
-					sizeof(rmi_fd));
+					(unsigned char *)&rmi4_data->rmi_fd,
+					sizeof(rmi4_data->rmi_fd));
 			if (retval < 0)
 				return retval;
 
 			addr &= ~(MASK_8BIT << 8);
 
-			if (!rmi_fd.fn_number)
+			if (!rmi4_data->rmi_fd.fn_number)
 				break;
 
-			switch (rmi_fd.fn_number) {
+			switch (rmi4_data->rmi_fd.fn_number) {
 			case SYNAPTICS_RMI4_F54:
 				test_f54_set_regs(rmi4_data,
-						&rmi_fd, intr_count, page);
+						&rmi4_data->rmi_fd, intr_count,
+						page);
 				f54found = true;
 				break;
 			case SYNAPTICS_RMI4_F55:
 				test_f55_set_regs(rmi4_data,
-						&rmi_fd, page);
+						&rmi4_data->rmi_fd, page);
 				f55found = true;
 				break;
 			case SYNAPTICS_RMI4_F21:
 				test_f21_set_regs(rmi4_data,
-						&rmi_fd, page);
+						&rmi4_data->rmi_fd, page);
 				break;
 			default:
 				break;
@@ -5022,7 +5032,7 @@ static int test_scan_pdt(void)
 			if (f54found && f55found)
 				goto pdt_done;
 
-			intr_count += rmi_fd.intr_src_count;
+			intr_count += rmi4_data->rmi_fd.intr_src_count;
 		}
 	}
 
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 56df94f..01d5f08 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -415,6 +415,16 @@
 
 	  If unsure, say N here.
 
+config ARM_SMMU_TESTBUS_DUMP
+	bool "ARM SMMU testbus dump"
+	depends on ARM_SMMU
+	help
+	  Enables testbus dump collection on the ARM SMMU right after a
+	  TLB sync timeout failure.
+	  Note: use this only on debug builds.
+
+	  If unsure, say N here.
+
 config QCOM_LAZY_MAPPING
 	bool "Reference counted iommu-mapping support"
 	depends on ION
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index e34995b..2d4b320 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -14,10 +14,10 @@
 obj-$(CONFIG_OF_IOMMU)	+= of_iommu.o
 obj-$(CONFIG_IOMMU_DEBUG) += iommu-debug.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
-obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
+obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
 obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-debug.o
 obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 8d9920f..1f2ed44 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1153,6 +1153,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
 	iommu_completion_wait(iommu);
 }
 
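+/* Flush all translation cache entries carrying the given domain ID. */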
+static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
+{
+	struct iommu_cmd cmd;
+
+	build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+			      dom_id, 1);
+	iommu_queue_command(iommu, &cmd);
+
+	iommu_completion_wait(iommu);
+}
+
 static void amd_iommu_flush_all(struct amd_iommu *iommu)
 {
 	struct iommu_cmd cmd;
@@ -1329,18 +1340,21 @@ static void domain_flush_devices(struct protection_domain *domain)
  * another level increases the size of the address space by 9 bits to a size up
  * to 64 bits.
  */
-static bool increase_address_space(struct protection_domain *domain,
+static void increase_address_space(struct protection_domain *domain,
 				   gfp_t gfp)
 {
+	unsigned long flags;
 	u64 *pte;
 
-	if (domain->mode == PAGE_MODE_6_LEVEL)
+	spin_lock_irqsave(&domain->lock, flags);
+
+	if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
 		/* address space already 64 bit large */
-		return false;
+		goto out;
 
 	pte = (void *)get_zeroed_page(gfp);
 	if (!pte)
-		return false;
+		goto out;
 
 	*pte             = PM_LEVEL_PDE(domain->mode,
 					iommu_virt_to_phys(domain->pt_root));
@@ -1348,7 +1362,10 @@ static bool increase_address_space(struct protection_domain *domain,
 	domain->mode    += 1;
 	domain->updated  = true;
 
-	return true;
+out:
+	spin_unlock_irqrestore(&domain->lock, flags);
+
+	return;
 }
 
 static u64 *alloc_pte(struct protection_domain *domain,
@@ -1838,6 +1855,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
 {
 	u64 pte_root = 0;
 	u64 flags = 0;
+	u32 old_domid;
 
 	if (domain->mode != PAGE_MODE_NONE)
 		pte_root = iommu_virt_to_phys(domain->pt_root);
@@ -1887,8 +1905,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
 	flags &= ~DEV_DOMID_MASK;
 	flags |= domain->id;
 
+	old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
 	amd_iommu_dev_table[devid].data[1]  = flags;
 	amd_iommu_dev_table[devid].data[0]  = pte_root;
+
+	/*
+	 * A kdump kernel might be replacing a domain ID that was copied from
+	 * the previous kernel. If so, it needs to flush the translation cache
+	 * entries for the old domain ID that is being overwritten.
+	 */
+	if (old_domid) {
+		struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+		amd_iommu_flush_tlb_domid(iommu, old_domid);
+	}
 }
 
 static void clear_dte_entry(u16 devid)
@@ -2533,7 +2563,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 
 			bus_addr  = address + s->dma_address + (j << PAGE_SHIFT);
 			phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
-			ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
+			ret = iommu_map_page(domain, bus_addr, phys_addr,
+					     PAGE_SIZE, prot,
+					     GFP_ATOMIC | __GFP_NOWARN);
 			if (ret)
 				goto out_unmap;
 
diff --git a/drivers/iommu/amd_iommu.h b/drivers/iommu/amd_iommu.h
new file mode 100644
index 0000000..12d540d
--- /dev/null
+++ b/drivers/iommu/amd_iommu.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef AMD_IOMMU_H
+#define AMD_IOMMU_H
+
+int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line);
+
+#ifdef CONFIG_DMI
+void amd_iommu_apply_ivrs_quirks(void);
+#else
+static inline void amd_iommu_apply_ivrs_quirks(void) { }
+#endif
+
+#endif
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 3a1d303..1e9a5da 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -39,6 +39,7 @@
 #include <asm/irq_remapping.h>
 
 #include <linux/crash_dump.h>
+#include "amd_iommu.h"
 #include "amd_iommu_proto.h"
 #include "amd_iommu_types.h"
 #include "irq_remapping.h"
@@ -1002,7 +1003,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
 	set_iommu_for_device(iommu, devid);
 }
 
-static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
+int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
 {
 	struct devid_map *entry;
 	struct list_head *list;
@@ -1153,6 +1154,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
 	if (ret)
 		return ret;
 
+	amd_iommu_apply_ivrs_quirks();
+
 	/*
 	 * First save the recommended feature enable bits from ACPI
 	 */
@@ -1710,7 +1713,7 @@ static const struct attribute_group *amd_iommu_groups[] = {
 	NULL,
 };
 
-static int iommu_init_pci(struct amd_iommu *iommu)
+static int __init iommu_init_pci(struct amd_iommu *iommu)
 {
 	int cap_ptr = iommu->cap_ptr;
 	u32 range, misc, low, high;
diff --git a/drivers/iommu/amd_iommu_quirks.c b/drivers/iommu/amd_iommu_quirks.c
new file mode 100644
index 0000000..c235f79
--- /dev/null
+++ b/drivers/iommu/amd_iommu_quirks.c
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * Quirks for AMD IOMMU
+ *
+ * Copyright (C) 2019 Kai-Heng Feng <kai.heng.feng@canonical.com>
+ */
+
+#ifdef CONFIG_DMI
+#include <linux/dmi.h>
+
+#include "amd_iommu.h"
+
+#define IVHD_SPECIAL_IOAPIC		1
+
+struct ivrs_quirk_entry {
+	u8 id;
+	u16 devid;
+};
+
+enum {
+	DELL_INSPIRON_7375 = 0,
+	DELL_LATITUDE_5495,
+	LENOVO_IDEAPAD_330S_15ARR,
+};
+
+static const struct ivrs_quirk_entry ivrs_ioapic_quirks[][3] __initconst = {
+	/* ivrs_ioapic[4]=00:14.0 ivrs_ioapic[5]=00:00.2 */
+	[DELL_INSPIRON_7375] = {
+		{ .id = 4, .devid = 0xa0 },
+		{ .id = 5, .devid = 0x2 },
+		{}
+	},
+	/* ivrs_ioapic[4]=00:14.0 */
+	[DELL_LATITUDE_5495] = {
+		{ .id = 4, .devid = 0xa0 },
+		{}
+	},
+	/* ivrs_ioapic[32]=00:14.0 */
+	[LENOVO_IDEAPAD_330S_15ARR] = {
+		{ .id = 32, .devid = 0xa0 },
+		{}
+	},
+	{}
+};
+
+static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d)
+{
+	const struct ivrs_quirk_entry *i;
+
+	for (i = d->driver_data; i->id != 0 && i->devid != 0; i++)
+		add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0);
+
+	return 0;
+}
+
+static const struct dmi_system_id ivrs_quirks[] __initconst = {
+	{
+		.callback = ivrs_ioapic_quirk_cb,
+		.ident = "Dell Inspiron 7375",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7375"),
+		},
+		.driver_data = (void *)&ivrs_ioapic_quirks[DELL_INSPIRON_7375],
+	},
+	{
+		.callback = ivrs_ioapic_quirk_cb,
+		.ident = "Dell Latitude 5495",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 5495"),
+		},
+		.driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
+	},
+	{
+		.callback = ivrs_ioapic_quirk_cb,
+		.ident = "Lenovo ideapad 330S-15ARR",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "81FB"),
+		},
+		.driver_data = (void *)&ivrs_ioapic_quirks[LENOVO_IDEAPAD_330S_15ARR],
+	},
+	{}
+};
+
+void __init amd_iommu_apply_ivrs_quirks(void)
+{
+	dmi_check_system(ivrs_quirks);
+}
+#endif
diff --git a/drivers/iommu/arm-smmu-debug.c b/drivers/iommu/arm-smmu-debug.c
new file mode 100644
index 0000000..37bff7f
--- /dev/null
+++ b/drivers/iommu/arm-smmu-debug.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include "arm-smmu-regs.h"
+#include "arm-smmu-debug.h"
+
+u32 arm_smmu_debug_tbu_testbus_select(void __iomem *tbu_base,
+		void __iomem *tcu_base, u32 testbus_version,
+		bool write, u32 val)
+{
+	void __iomem *base;
+	int offset;
+
+	if (testbus_version == 1) {
+		base = tcu_base;
+		offset = ARM_SMMU_TESTBUS_SEL_HLOS1_NS;
+	} else {
+		base = tbu_base;
+		offset = DEBUG_TESTBUS_SEL_TBU;
+	}
+
+	if (write) {
+		writel_relaxed(val, base + offset);
+		/* Make sure tbu select register is written to */
+		wmb();
+	} else {
+		return readl_relaxed(base + offset);
+	}
+	return 0;
+}
+
+u32 arm_smmu_debug_tbu_testbus_output(void __iomem *tbu_base,
+			u32 testbus_version)
+{
+	int offset = (testbus_version == 1) ?
+			CLIENT_DEBUG_SR_HALT_ACK : DEBUG_TESTBUS_TBU;
+
+	return readl_relaxed(tbu_base + offset);
+}
+
+u32 arm_smmu_debug_tcu_testbus_select(void __iomem *base,
+		void __iomem *tcu_base, enum tcu_testbus testbus,
+		bool write, u32 val)
+{
+	int offset;
+
+	if (testbus == CLK_TESTBUS) {
+		base = tcu_base;
+		offset = ARM_SMMU_TESTBUS_SEL_HLOS1_NS;
+	} else {
+		offset = ARM_SMMU_TESTBUS_SEL;
+	}
+
+	if (write) {
+		writel_relaxed(val, base + offset);
+		/* Make sure tcu select register is written to */
+		wmb();
+	} else {
+		return readl_relaxed(base + offset);
+	}
+
+	return 0;
+}
+
+u32 arm_smmu_debug_tcu_testbus_output(void __iomem *base)
+{
+	return readl_relaxed(base + ARM_SMMU_TESTBUS);
+}
+
+static void arm_smmu_debug_dump_tbu_qns4_testbus(struct device *dev,
+			void __iomem *tbu_base, void __iomem *tcu_base,
+			u32  testbus_version)
+{
+	int i;
+	u32 reg;
+
+	for (i = 0; i < TBU_QNS4_BRIDGE_SIZE; ++i) {
+		reg = arm_smmu_debug_tbu_testbus_select(tbu_base, tcu_base,
+				testbus_version, READ, 0);
+		reg = (reg & ~GENMASK(4, 0)) | i << 0;
+		arm_smmu_debug_tbu_testbus_select(tbu_base, tcu_base,
+				testbus_version, WRITE, reg);
+		dev_info(dev, "testbus_sel: 0x%lx Index: %d val: 0x%llx\n",
+			arm_smmu_debug_tbu_testbus_select(tbu_base, tcu_base,
+				testbus_version, READ, 0), i,
+			arm_smmu_debug_tbu_testbus_output(tbu_base,
+							testbus_version));
+	}
+}
+
+static void arm_smmu_debug_program_tbu_testbus(void __iomem *tbu_base,
+				void __iomem *tcu_base, u32 testbus_version,
+				int tbu_testbus)
+{
+	u32 reg;
+
+	reg = arm_smmu_debug_tbu_testbus_select(tbu_base, tcu_base,
+						testbus_version, READ, 0);
+	if (testbus_version == 1)
+		reg = (reg & ~GENMASK(9, 0));
+	else
+		reg = (reg & ~GENMASK(7, 0));
+
+	reg |= tbu_testbus;
+	arm_smmu_debug_tbu_testbus_select(tbu_base, tcu_base,
+						testbus_version, WRITE, reg);
+}
+
+void arm_smmu_debug_dump_tbu_testbus(struct device *dev, void __iomem *tbu_base,
+		void __iomem *tcu_base, int tbu_testbus_sel,
+		u32 testbus_version)
+{
+	if (tbu_testbus_sel & TBU_CLK_GATE_CONTROLLER_TESTBUS_SEL) {
+		dev_info(dev, "Dumping TBU clk gate controller:\n");
+		arm_smmu_debug_program_tbu_testbus(tbu_base, tcu_base,
+				testbus_version,
+				TBU_CLK_GATE_CONTROLLER_TESTBUS);
+		dev_info(dev, "testbus_sel: 0x%lx val: 0x%llx\n",
+			arm_smmu_debug_tbu_testbus_select(tbu_base, tcu_base,
+						testbus_version, READ, 0),
+			arm_smmu_debug_tbu_testbus_output(tbu_base,
+							testbus_version));
+	}
+
+	if (tbu_testbus_sel & TBU_QNS4_A2Q_TESTBUS_SEL) {
+		dev_info(dev, "Dumping TBU qns4 a2q test bus:\n");
+		arm_smmu_debug_program_tbu_testbus(tbu_base, tcu_base,
+				testbus_version, TBU_QNS4_A2Q_TESTBUS);
+		arm_smmu_debug_dump_tbu_qns4_testbus(dev, tbu_base,
+				tcu_base, testbus_version);
+	}
+
+	if (tbu_testbus_sel & TBU_QNS4_Q2A_TESTBUS_SEL) {
+		dev_info(dev, "Dumping qns4 q2a test bus:\n");
+		arm_smmu_debug_program_tbu_testbus(tbu_base, tcu_base,
+				testbus_version, TBU_QNS4_Q2A_TESTBUS);
+		arm_smmu_debug_dump_tbu_qns4_testbus(dev, tbu_base,
+				tcu_base, testbus_version);
+	}
+
+	if (tbu_testbus_sel & TBU_MULTIMASTER_QCHANNEL_TESTBUS_SEL) {
+		dev_info(dev, "Dumping multi master qchannel:\n");
+		arm_smmu_debug_program_tbu_testbus(tbu_base, tcu_base,
+				testbus_version,
+				TBU_MULTIMASTER_QCHANNEL_TESTBUS);
+		dev_info(dev, "testbus_sel: 0x%lx val: 0x%llx\n",
+			arm_smmu_debug_tbu_testbus_select(tbu_base,
+				tcu_base, testbus_version, READ, 0),
+			arm_smmu_debug_tbu_testbus_output(tbu_base,
+							testbus_version));
+	}
+}
+
+static void arm_smmu_debug_program_tcu_testbus(struct device *dev,
+		void __iomem *base, void __iomem *tcu_base,
+		unsigned long mask, int start, int end, int shift,
+		bool print)
+{
+	u32 reg;
+	int i;
+
+	for (i = start; i < end; i++) {
+		reg = arm_smmu_debug_tcu_testbus_select(base, tcu_base,
+				PTW_AND_CACHE_TESTBUS, READ, 0);
+		reg &= mask;
+		reg |= i << shift;
+		arm_smmu_debug_tcu_testbus_select(base, tcu_base,
+				PTW_AND_CACHE_TESTBUS, WRITE, reg);
+		if (print)
+			dev_info(dev, "testbus_sel: 0x%lx Index: %d val: 0x%lx\n",
+				 arm_smmu_debug_tcu_testbus_select(base,
+				 tcu_base, PTW_AND_CACHE_TESTBUS, READ, 0),
+				 i, arm_smmu_debug_tcu_testbus_output(base));
+	}
+}
+
+void arm_smmu_debug_dump_tcu_testbus(struct device *dev, void __iomem *base,
+			void __iomem *tcu_base, int tcu_testbus_sel)
+{
+	int i;
+
+	if (tcu_testbus_sel & TCU_CACHE_TESTBUS_SEL) {
+		dev_info(dev, "Dumping TCU cache testbus:\n");
+		arm_smmu_debug_program_tcu_testbus(dev, base, tcu_base,
+				TCU_CACHE_TESTBUS, 0, 1, 0, false);
+		arm_smmu_debug_program_tcu_testbus(dev, base, tcu_base,
+				~GENMASK(7, 0), 0, TCU_CACHE_LOOKUP_QUEUE_SIZE,
+				2, true);
+	}
+
+	if (tcu_testbus_sel & TCU_PTW_TESTBUS_SEL) {
+		dev_info(dev, "Dumping TCU PTW test bus:\n");
+		arm_smmu_debug_program_tcu_testbus(dev, base, tcu_base, 1,
+				TCU_PTW_TESTBUS, TCU_PTW_TESTBUS + 1, 0, false);
+
+		arm_smmu_debug_program_tcu_testbus(dev, base, tcu_base,
+				~GENMASK(7, 2), 0, TCU_PTW_INTERNAL_STATES,
+				2, true);
+
+		for (i = TCU_PTW_QUEUE_START;
+			i < TCU_PTW_QUEUE_START + TCU_PTW_QUEUE_SIZE; ++i) {
+			arm_smmu_debug_program_tcu_testbus(dev, base, tcu_base,
+					~GENMASK(7, 0), i, i + 1, 2, true);
+			arm_smmu_debug_program_tcu_testbus(dev, base, tcu_base,
+					~GENMASK(1, 0), TCU_PTW_TESTBUS_SEL2,
+					TCU_PTW_TESTBUS_SEL2 + 1, 0, false);
+			dev_info(dev, "testbus_sel: 0x%lx Index: %d val: 0x%lx\n",
+				 arm_smmu_debug_tcu_testbus_select(base,
+				 tcu_base, PTW_AND_CACHE_TESTBUS, READ, 0),
+				 i, arm_smmu_debug_tcu_testbus_output(base));
+		}
+	}
+
+	/* Program ARM_SMMU_TESTBUS_SEL_HLOS1_NS to select the TCU clk testbus */
+	arm_smmu_debug_tcu_testbus_select(base, tcu_base,
+			CLK_TESTBUS, WRITE, TCU_CLK_TESTBUS_SEL);
+	dev_info(dev, "Programming Tcu clk gate controller: testbus_sel: 0x%lx\n",
+		arm_smmu_debug_tcu_testbus_select(base, tcu_base,
+						CLK_TESTBUS, READ, 0));
+}
+
+void arm_smmu_debug_set_tnx_tcr_cntl(void __iomem *tbu_base, u64 val)
+{
+	writel_relaxed(val, tbu_base + ARM_SMMU_TNX_TCR_CNTL);
+}
+
+unsigned long arm_smmu_debug_get_tnx_tcr_cntl(void __iomem *tbu_base)
+{
+	return readl_relaxed(tbu_base + ARM_SMMU_TNX_TCR_CNTL);
+}
+
+void arm_smmu_debug_set_mask_and_match(void __iomem *tbu_base, u64 sel,
+					u64 mask, u64 match)
+{
+	writeq_relaxed(mask, tbu_base + ARM_SMMU_CAPTURE1_MASK(sel));
+	writeq_relaxed(match, tbu_base + ARM_SMMU_CAPTURE1_MATCH(sel));
+}
+
+void arm_smmu_debug_get_mask_and_match(void __iomem *tbu_base, u64 *mask,
+					u64 *match)
+{
+	int i;
+
+	for (i = 0; i < NO_OF_MASK_AND_MATCH; ++i) {
+		mask[i] = readq_relaxed(tbu_base +
+				ARM_SMMU_CAPTURE1_MASK(i+1));
+		match[i] = readq_relaxed(tbu_base +
+				ARM_SMMU_CAPTURE1_MATCH(i+1));
+	}
+}
+
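+/*
+ * Read back all capture-point snapshots. Capture points whose valid bit is
+ * clear in APPS_SMMU_TNX_TCR_CNTL_2 are filled with a 0xdededede poison
+ * pattern so that stale entries stand out in dumps.
+ */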
+void arm_smmu_debug_get_capture_snapshot(void __iomem *tbu_base,
+		u64 snapshot[NO_OF_CAPTURE_POINTS][REGS_PER_CAPTURE_POINT])
+{
+	int valid, i, j;
+
+	valid = readl_relaxed(tbu_base + APPS_SMMU_TNX_TCR_CNTL_2);
+
+	for (i = 0; i < NO_OF_CAPTURE_POINTS ; ++i) {
+		if (valid & (1 << i))
+			for (j = 0; j < REGS_PER_CAPTURE_POINT; ++j)
+				snapshot[i][j] = readq_relaxed(tbu_base +
+					ARM_SMMU_CAPTURE_SNAPSHOT(i, j));
+		else
+			for (j = 0; j < REGS_PER_CAPTURE_POINT; ++j)
+				snapshot[i][j] = 0xdededede;
+	}
+}
+
+void arm_smmu_debug_clear_intr_and_validbits(void __iomem *tbu_base)
+{
+	int val = 0;
+
+	val |= INTR_CLR;
+	val |= RESET_VALID;
+	writel_relaxed(val, tbu_base + ARM_SMMU_TNX_TCR_CNTL);
+}
diff --git a/drivers/iommu/arm-smmu-debug.h b/drivers/iommu/arm-smmu-debug.h
new file mode 100644
index 0000000..c2dc6d9
--- /dev/null
+++ b/drivers/iommu/arm-smmu-debug.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define ARM_SMMU_TESTBUS_SEL			0x25E4
+#define ARM_SMMU_TESTBUS			0x25E8
+#define ARM_SMMU_TESTBUS_SEL_HLOS1_NS		0x8
+#define DEBUG_TESTBUS_SEL_TBU			0x50
+#define DEBUG_TESTBUS_TBU			0x58
+#define CLIENT_DEBUG_SR_HALT_ACK		0x24
+
+#define TCU_PTW_TESTBUS				(0x1 << 8)
+#define TCU_CACHE_TESTBUS			(~TCU_PTW_TESTBUS)
+#define TCU_PTW_TESTBUS_SEL			(0x1 << 1)
+#define TCU_PTW_INTERNAL_STATES			3
+#define TCU_PTW_TESTBUS_SEL2			3
+#define TCU_PTW_QUEUE_START			32
+#define TCU_PTW_QUEUE_SIZE			32
+#define TCU_CACHE_TESTBUS_SEL			0x1
+#define TCU_CACHE_LOOKUP_QUEUE_SIZE		32
+#define TCU_CLK_TESTBUS_SEL			0x200
+
+#define TBU_CLK_GATE_CONTROLLER_TESTBUS_SEL	0x1
+#define TBU_QNS4_A2Q_TESTBUS_SEL		(0x1 << 1)
+#define TBU_QNS4_Q2A_TESTBUS_SEL		(0x1 << 2)
+#define TBU_MULTIMASTER_QCHANNEL_TESTBUS_SEL	(0x1 << 3)
+#define TBU_CLK_GATE_CONTROLLER_TESTBUS		(0x1 << 6)
+#define TBU_QNS4_A2Q_TESTBUS			(0x2 << 6)
+#define TBU_QNS4_Q2A_TESTBUS			(0x5 << 5)
+#define TBU_MULTIMASTER_QCHANNEL_TESTBUS	(0x3 << 6)
+#define TBU_QNS4_BRIDGE_SIZE			32
+
+enum tcu_testbus {
+	PTW_AND_CACHE_TESTBUS,
+	CLK_TESTBUS,
+};
+
+enum testbus_sel {
+	SEL_TCU,
+	SEL_TBU,
+};
+
+enum testbus_ops {
+	TESTBUS_SELECT,
+	TESTBUS_OUTPUT,
+};
+
+#define ARM_SMMU_TNX_TCR_CNTL		0x130
+#define ARM_SMMU_CAPTURE1_MASK(i)	(0x100 + 0x8 * ((i) - 1))
+#define ARM_SMMU_CAPTURE1_MATCH(i)	(0x118 + 0x8 * ((i) - 1))
+#define ARM_SMMU_CAPTURE_SNAPSHOT(i, j)	(0x138 + 0x10 * (i) + 0x8 * (j))
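+/*
+ * Mask/match registers are 1-based (sel = 1..NO_OF_MASK_AND_MATCH);
+ * capture snapshot indices (i, j) are 0-based.
+ */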
+#define APPS_SMMU_TNX_TCR_CNTL_2	0x178
+
+#define NO_OF_MASK_AND_MATCH		0x3
+#define NO_OF_CAPTURE_POINTS		0x4
+#define REGS_PER_CAPTURE_POINT		0x2
+#define INTR_CLR			(1 << 0)
+#define RESET_VALID			(1 << 7)
+
+#ifdef CONFIG_ARM_SMMU
+
+u32 arm_smmu_debug_tbu_testbus_select(void __iomem *tbu_base,
+		void __iomem *tcu_base, u32 testbus_version,
+		bool write, u32 reg);
+u32 arm_smmu_debug_tbu_testbus_output(void __iomem *tbu_base,
+					u32 testbus_version);
+u32 arm_smmu_debug_tcu_testbus_select(void __iomem *base,
+		void __iomem *tcu_base, enum tcu_testbus testbus,
+		bool write, u32 val);
+u32 arm_smmu_debug_tcu_testbus_output(void __iomem *base);
+void arm_smmu_debug_dump_tbu_testbus(struct device *dev, void __iomem *tbu_base,
+		void __iomem *tcu_base, int tbu_testbus_sel,
+		u32 testbus_version);
+void arm_smmu_debug_dump_tcu_testbus(struct device *dev, void __iomem *base,
+			void __iomem *tcu_base, int tcu_testbus_sel);
+void arm_smmu_debug_set_tnx_tcr_cntl(void __iomem *tbu_base, u64 val);
+unsigned long arm_smmu_debug_get_tnx_tcr_cntl(void __iomem *tbu_base);
+unsigned long arm_smmu_debug_get_tnx_tcr_cntl_2(void __iomem *tbu_base);
+void arm_smmu_debug_set_mask_and_match(void __iomem *tbu_base, u64 sel,
+					u64 mask, u64 match);
+void arm_smmu_debug_get_mask_and_match(void __iomem *tbu_base,
+					u64 *mask, u64 *match);
+void arm_smmu_debug_get_capture_snapshot(void __iomem *tbu_base,
+		u64 snapshot[NO_OF_CAPTURE_POINTS][REGS_PER_CAPTURE_POINT]);
+void arm_smmu_debug_clear_intr_and_validbits(void __iomem *tbu_base);
+#else
+static inline u32 arm_smmu_debug_tbu_testbus_select(void __iomem *tbu_base,
+		void __iomem *tcu_base, u32 testbus_version, bool write,
+		u32 val)
+{
+	return 0;
+}
+static inline u32 arm_smmu_debug_tbu_testbus_output(void __iomem *tbu_base,
+						u32 testbus_version)
+{
+	return 0;
+}
+static inline u32 arm_smmu_debug_tcu_testbus_select(void __iomem *base,
+		void __iomem *tcu_base, enum tcu_testbus testbus,
+		bool write, u32 val)
+{
+	return 0;
+}
+static inline u32 arm_smmu_debug_tcu_testbus_output(void __iomem *base)
+{
+	return 0;
+}
+static inline void arm_smmu_debug_dump_tbu_testbus(struct device *dev,
+			void __iomem *tbu_base, void __iomem *tcu_base,
+			int tbu_testbus_sel, u32 testbus_version)
+{
+}
+static inline void arm_smmu_debug_dump_tcu_testbus(struct device *dev,
+			void __iomem *base, void __iomem *tcu_base,
+			int tcu_testbus_sel)
+{
+}
+static inline void arm_smmu_debug_set_tnx_tcr_cntl(void __iomem *tbu_base,
+						u64 val)
+{
+}
+static inline unsigned long arm_smmu_debug_get_tnx_tcr_cntl(
+						void __iomem *tbu_base)
+{
+	return 0;
+}
+static inline unsigned long arm_smmu_debug_get_tnx_tcr_cntl_2(
+						void __iomem *tbu_base)
+{
+	return 0;
+}
+static inline void arm_smmu_debug_set_mask_and_match(void __iomem *tbu_base,
+					u64 sel, u64 mask, u64 match)
+{
+}
+static inline void arm_smmu_debug_get_mask_and_match(void __iomem *tbu_base,
+					u64 *mask, u64 *match)
+{
+}
+static inline void arm_smmu_debug_get_capture_snapshot(void __iomem *tbu_base,
+		u64 snapshot[NO_OF_CAPTURE_POINTS][REGS_PER_CAPTURE_POINT])
+{
+}
+static inline void arm_smmu_debug_clear_intr_and_validbits(
+						void __iomem *tbu_base)
+{
+}
+#endif
diff --git a/drivers/iommu/arm-smmu-regs.h b/drivers/iommu/arm-smmu-regs.h
index c2dffa7..0d9688a 100644
--- a/drivers/iommu/arm-smmu-regs.h
+++ b/drivers/iommu/arm-smmu-regs.h
@@ -192,8 +192,21 @@ enum arm_smmu_s2cr_privcfg {
 #define ARM_SMMU_CB_ATS1PR		0x800
 #define ARM_SMMU_CB_ATSR		0x8f0
 #define ARM_SMMU_STATS_SYNC_INV_TBU_ACK 0x25dc
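+/*
+ * Field layout of ARM_SMMU_STATS_SYNC_INV_TBU_ACK, as implied by the
+ * shift/mask values below: bits [25:17] hold the TBU sync ack vector,
+ * bit 16 the sync request, bits [9:1] the TBU inv ack vector and
+ * bit 0 the inv request.
+ */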
+#define TBU_SYNC_ACK_MASK		0x1ff
+#define TBU_SYNC_ACK_SHIFT		17
+#define TBU_SYNC_REQ_MASK		0x1
+#define TBU_SYNC_REQ_SHIFT		16
+#define TBU_INV_ACK_MASK		0x1ff
+#define TBU_INV_ACK_SHIFT		1
+#define TBU_INV_REQ_MASK		0x1
+#define TBU_INV_REQ_SHIFT		0
 #define ARM_SMMU_TBU_PWR_STATUS         0x2204
 #define ARM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR 0x2670
+#define TCU_SYNC_IN_PRGSS_MASK		0x1
+#define TCU_SYNC_IN_PRGSS_SHIFT		20
+#define TCU_INV_IN_PRGSS_MASK		0x1
+#define TCU_INV_IN_PRGSS_SHIFT		16
+#define TBUID_SHIFT			10
 
 #define SCTLR_MEM_ATTR_SHIFT		16
 #define SCTLR_SHCFG_SHIFT		22
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index fb793de..8456e14 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -66,6 +66,9 @@
 #include <asm/dma-iommu.h>
 #include "io-pgtable.h"
 #include "arm-smmu-regs.h"
+#include "arm-smmu-debug.h"
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
 
 /*
  * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
@@ -345,6 +348,18 @@ struct arm_smmu_cfg {
 #define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + \
 							(cfg)->cbndx + 1)
 
+#define TCU_TESTBUS_SEL_ALL 0x3
+#define TBU_TESTBUS_SEL_ALL 0xf
+
+static int tbu_testbus_sel = TBU_TESTBUS_SEL_ALL;
+static int tcu_testbus_sel = TCU_TESTBUS_SEL_ALL;
+static struct dentry *debugfs_testbus_dir;
+static DEFINE_SPINLOCK(testbus_lock);
+static struct dentry *debugfs_capturebus_dir;
+
+module_param_named(tcu_testbus_sel, tcu_testbus_sel, int, 0644);
+module_param_named(tbu_testbus_sel, tbu_testbus_sel, int, 0644);
+
 enum arm_smmu_domain_stage {
 	ARM_SMMU_DOMAIN_S1 = 0,
 	ARM_SMMU_DOMAIN_S2,
@@ -386,6 +401,40 @@ struct arm_smmu_option_prop {
 	const char *prop;
 };
 
+struct actlr_setting {
+	struct arm_smmu_smr smr;
+	u32 actlr;
+};
+
+struct qsmmuv500_archdata {
+	struct list_head		tbus;
+	void __iomem			*tcu_base;
+	u32				version;
+	struct actlr_setting		*actlrs;
+	u32				actlr_tbl_size;
+	u32				testbus_version;
+};
+
+#define get_qsmmuv500_archdata(smmu)				\
+	((struct qsmmuv500_archdata *)(smmu->archdata))
+
+struct qsmmuv500_tbu_device {
+	struct list_head		list;
+	struct device			*dev;
+	struct arm_smmu_device		*smmu;
+	void __iomem			*base;
+	void __iomem			*status_reg;
+
+	struct arm_smmu_power_resources *pwr;
+	u32				sid_start;
+	u32				num_sids;
+
+	/* Protects halt count */
+	spinlock_t			halt_lock;
+	u32				halt_count;
+	unsigned int			*irqs;
+};
+
 static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
 
 static bool using_legacy_binding, using_generic_binding;
@@ -1104,12 +1153,133 @@ static void arm_smmu_domain_power_off(struct iommu_domain *domain,
 	arm_smmu_power_off(smmu->pwr);
 }
 
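+/* Map a stream ID to the TBU whose SID range contains it; NULL if none. */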
+static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
+	struct arm_smmu_device *smmu, u32 sid)
+{
+	struct qsmmuv500_tbu_device *tbu = NULL;
+	struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
+
+	list_for_each_entry(tbu, &data->tbus, list) {
+		if (tbu->sid_start <= sid &&
+		    sid < tbu->sid_start + tbu->num_sids)
+			return tbu;
+	}
+	return NULL;
+}
+
+static void arm_smmu_testbus_dump(struct arm_smmu_device *smmu, u16 sid)
+{
+	if (smmu->model == QCOM_SMMUV500 &&
+	    IS_ENABLED(CONFIG_ARM_SMMU_TESTBUS_DUMP)) {
+		struct qsmmuv500_archdata *data;
+		struct qsmmuv500_tbu_device *tbu;
+
+		data = smmu->archdata;
+		tbu = qsmmuv500_find_tbu(smmu, sid);
+		spin_lock(&testbus_lock);
+		if (tbu)
+			arm_smmu_debug_dump_tbu_testbus(tbu->dev,
+							tbu->base,
+							data->tcu_base,
+							tbu_testbus_sel,
+							data->testbus_version);
+		else
+			arm_smmu_debug_dump_tcu_testbus(smmu->dev,
+							ARM_SMMU_GR0(smmu),
+							data->tcu_base,
+							tcu_testbus_sel);
+		spin_unlock(&testbus_lock);
+	}
+}
+
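+/*
+ * Decode the sync/invalidate status registers after a TLB sync timeout and
+ * report which TBUs have not acknowledged the outstanding request, dumping
+ * the relevant testbuses where a TBU can be identified.
+ */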
+static void __arm_smmu_tlb_sync_timeout(struct arm_smmu_device *smmu)
+{
+	u32 sync_inv_ack, tbu_pwr_status, sync_inv_progress;
+	u32 tbu_inv_pending = 0, tbu_sync_pending = 0;
+	u32 tbu_inv_acked = 0, tbu_sync_acked = 0;
+	u32 tcu_inv_pending = 0, tcu_sync_pending = 0;
+	u32 tbu_ids = 0;
+	phys_addr_t base_phys = smmu->phys_addr;
+
+
+				      DEFAULT_RATELIMIT_INTERVAL,
+				      DEFAULT_RATELIMIT_BURST);
+
+	sync_inv_ack = scm_io_read(base_phys + ARM_SMMU_STATS_SYNC_INV_TBU_ACK);
+	tbu_pwr_status = scm_io_read(base_phys + ARM_SMMU_TBU_PWR_STATUS);
+	sync_inv_progress = scm_io_read(base_phys +
+					ARM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR);
+
+	if (sync_inv_ack) {
+		tbu_inv_pending = (sync_inv_ack >> TBU_INV_REQ_SHIFT) &
+			TBU_INV_REQ_MASK;
+		tbu_inv_acked = (sync_inv_ack >> TBU_INV_ACK_SHIFT) &
+			TBU_INV_ACK_MASK;
+		tbu_sync_pending = (sync_inv_ack >> TBU_SYNC_REQ_SHIFT) &
+			TBU_SYNC_REQ_MASK;
+		tbu_sync_acked = (sync_inv_ack >> TBU_SYNC_ACK_SHIFT) &
+			TBU_SYNC_ACK_MASK;
+	}
+
+	if (tbu_pwr_status) {
+		if (tbu_sync_pending)
+			tbu_ids = tbu_pwr_status & ~tbu_sync_acked;
+		else if (tbu_inv_pending)
+			tbu_ids = tbu_pwr_status & ~tbu_inv_acked;
+	}
+
+	tcu_inv_pending = (sync_inv_progress >> TCU_INV_IN_PRGSS_SHIFT) &
+		TCU_INV_IN_PRGSS_MASK;
+	tcu_sync_pending = (sync_inv_progress >> TCU_SYNC_IN_PRGSS_SHIFT) &
+		TCU_SYNC_IN_PRGSS_MASK;
+
+	if (__ratelimit(&_rs)) {
+		unsigned long tbu_id, tbus_t = tbu_ids;
+
+		dev_err(smmu->dev,
+			"TLB sync timed out -- SMMU may be deadlocked\n"
+			"TBU ACK 0x%x TBU PWR 0x%x TCU sync_inv 0x%x\n",
+			sync_inv_ack, tbu_pwr_status, sync_inv_progress);
+		dev_err(smmu->dev,
+			"TCU invalidation %s, TCU sync %s\n",
+			tcu_inv_pending ? "pending" : "completed",
+			tcu_sync_pending ? "pending" : "completed");
+
+		dev_err(smmu->dev, "TBU PWR status 0x%x\n", tbu_pwr_status);
+
+		while (tbus_t) {
+			struct qsmmuv500_tbu_device *tbu;
+
+			tbu_id = __ffs(tbus_t);
+			tbus_t = tbus_t & ~(1 << tbu_id);
+			tbu = qsmmuv500_find_tbu(smmu,
+						 (u16)(tbu_id << TBUID_SHIFT));
+			if (tbu) {
+				dev_err(smmu->dev,
+					"TBU %s ack pending for TBU %s, %s\n",
+					tbu_sync_pending?"sync" : "inv",
+					dev_name(tbu->dev),
+					tbu_sync_pending ?
+					"check pending transactions on TBU"
+					: "check for TBU power status");
+				arm_smmu_testbus_dump(smmu,
+						(u16)(tbu_id << TBUID_SHIFT));
+			}
+		}
+
+		/* dump TCU testbus */
+		arm_smmu_testbus_dump(smmu, U16_MAX);
+	}
+
+	BUG_ON(IS_ENABLED(CONFIG_IOMMU_TLBSYNC_DEBUG));
+}
+
 /* Wait for any pending TLB invalidations to complete */
 static int __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
-				void __iomem *sync, void __iomem *status)
+			void __iomem *sync, void __iomem *status)
 {
 	unsigned int spin_cnt, delay;
-	u32 sync_inv_ack, tbu_pwr_status, sync_inv_progress;
 
 	writel_relaxed(QCOM_DUMMY_VAL, sync);
 	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
@@ -1120,17 +1290,8 @@ static int __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
 		}
 		udelay(delay);
 	}
-	sync_inv_ack = scm_io_read((unsigned long)(smmu->phys_addr +
-				     ARM_SMMU_STATS_SYNC_INV_TBU_ACK));
-	tbu_pwr_status = scm_io_read((unsigned long)(smmu->phys_addr +
-				     ARM_SMMU_TBU_PWR_STATUS));
-	sync_inv_progress = scm_io_read((unsigned long)(smmu->phys_addr +
-					ARM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR));
 	trace_tlbsync_timeout(smmu->dev, 0);
-	dev_err_ratelimited(smmu->dev,
-			    "TLB sync timed out -- SMMU may be deadlocked ack 0x%x pwr 0x%x sync and invalidation progress 0x%x\n",
-			    sync_inv_ack, tbu_pwr_status, sync_inv_progress);
-	BUG_ON(IS_ENABLED(CONFIG_IOMMU_TLBSYNC_DEBUG));
+	__arm_smmu_tlb_sync_timeout(smmu);
 	return -EINVAL;
 }
 
@@ -1141,50 +1302,29 @@ static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
 
 	spin_lock_irqsave(&smmu->global_sync_lock, flags);
 	if (__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
-					base + ARM_SMMU_GR0_sTLBGSTATUS))
+				base + ARM_SMMU_GR0_sTLBGSTATUS)) {
 		dev_err_ratelimited(smmu->dev,
 				    "TLB global sync failed!\n");
+	}
 	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
 }
 
-static void arm_smmu_tlb_inv_context_s1(void *cookie);
-
 static void arm_smmu_tlb_sync_context(void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	struct device *dev = smmu_domain->dev;
-	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
 	unsigned long flags;
-	size_t ret;
-	bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
-	ktime_t cur = ktime_get();
-
-	ret = arm_smmu_domain_power_on(&smmu_domain->domain,
-				       smmu_domain->smmu);
-	if (ret)
-		return;
-
-	trace_tlbi_start(dev, 0);
-
-	if (!use_tlbiall)
-		writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
-	else
-		writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
 
 	spin_lock_irqsave(&smmu_domain->sync_lock, flags);
 	if (__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
-				base + ARM_SMMU_CB_TLBSTATUS))
+				base + ARM_SMMU_CB_TLBSTATUS)) {
 		dev_err_ratelimited(smmu->dev,
-				    "TLB sync on cb%d failed for device %s\n",
-				    smmu_domain->cfg.cbndx,
-				    dev_name(smmu_domain->dev));
+				"TLB sync on cb%d failed for device %s\n",
+				smmu_domain->cfg.cbndx,
+				dev_name(smmu_domain->dev));
+	}
 	spin_unlock_irqrestore(&smmu_domain->sync_lock, flags);
-
-	trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
-
-	arm_smmu_domain_power_off(&smmu_domain->domain, smmu_domain->smmu);
 }
 
 static void arm_smmu_tlb_sync_vmid(void *cookie)
@@ -1196,7 +1336,23 @@ static void arm_smmu_tlb_sync_vmid(void *cookie)
 
 static void arm_smmu_tlb_inv_context_s1(void *cookie)
 {
-	return;
+	struct arm_smmu_domain *smmu_domain = cookie;
+	struct device *dev = smmu_domain->dev;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
+	bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
+	ktime_t cur = ktime_get();
+
+	trace_tlbi_start(dev, 0);
+
+	if (!use_tlbiall)
+		writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
+	else
+		writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
+
+	arm_smmu_tlb_sync_context(cookie);
+	trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
 }
 
 static void arm_smmu_tlb_inv_context_s2(void *cookie)
@@ -1490,7 +1646,6 @@ static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
 
 	phys = arm_smmu_iova_to_phys_hard(domain, iova);
 	smmu_domain->pgtbl_cfg.tlb->tlb_flush_all(smmu_domain);
-	smmu_domain->pgtbl_cfg.tlb->tlb_sync(smmu_domain);
 	phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
 
 	if (phys != phys_post_tlbiall) {
@@ -2547,7 +2702,6 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
 
 	/* Ensure there are no stale mappings for this context bank */
 	tlb->tlb_flush_all(smmu_domain);
-	tlb->tlb_sync(smmu_domain);
 }
 
 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
@@ -3034,12 +3188,17 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 	if (arm_smmu_is_slave_side_secure(smmu_domain))
 		return msm_secure_smmu_unmap(domain, iova, size);
 
+	ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
+	if (ret)
+		return ret;
+
 	arm_smmu_secure_domain_lock(smmu_domain);
 
 	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
 	ret = ops->unmap(ops, iova, size);
 	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 
+	arm_smmu_domain_power_off(domain, smmu_domain->smmu);
 	/*
 	 * While splitting up block mappings, we might allocate page table
 	 * memory during unmap, so the vmids needs to be assigned to the
@@ -3198,14 +3357,6 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
 	return ret;
 }
 
-static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
-{
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-
-	if (smmu_domain->tlb_ops)
-		smmu_domain->tlb_ops->tlb_sync(smmu_domain);
-}
-
 /*
  * This function can sleep, and cannot be called from atomic context. Will
  * power on register block if required. This restriction does not apply to the
@@ -3972,8 +4123,6 @@ static struct iommu_ops arm_smmu_ops = {
 	.map			= arm_smmu_map,
 	.unmap			= arm_smmu_unmap,
 	.map_sg			= arm_smmu_map_sg,
-	.flush_iotlb_all	= arm_smmu_iotlb_sync,
-	.iotlb_sync		= arm_smmu_iotlb_sync,
 	.iova_to_phys		= arm_smmu_iova_to_phys,
 	.iova_to_phys_hard	= arm_smmu_iova_to_phys_hard,
 	.add_device		= arm_smmu_add_device,
@@ -5070,37 +5219,6 @@ module_exit(arm_smmu_exit);
 
 #define TBU_DBG_TIMEOUT_US		100
 
-struct actlr_setting {
-	struct arm_smmu_smr smr;
-	u32 actlr;
-};
-
-struct qsmmuv500_archdata {
-	struct list_head		tbus;
-	void __iomem			*tcu_base;
-	u32				version;
-	struct actlr_setting		*actlrs;
-	u32				actlr_tbl_size;
-};
-#define get_qsmmuv500_archdata(smmu)				\
-	((struct qsmmuv500_archdata *)(smmu->archdata))
-
-struct qsmmuv500_tbu_device {
-	struct list_head		list;
-	struct device			*dev;
-	struct arm_smmu_device		*smmu;
-	void __iomem			*base;
-	void __iomem			*status_reg;
-
-	struct arm_smmu_power_resources *pwr;
-	u32				sid_start;
-	u32				num_sids;
-
-	/* Protects halt count */
-	spinlock_t			halt_lock;
-	u32				halt_count;
-};
-
 struct qsmmuv500_group_iommudata {
 	bool has_actlr;
 	u32 actlr;
@@ -5131,8 +5249,8 @@ static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu,
 				struct arm_smmu_domain *smmu_domain)
 {
 	unsigned long flags;
-	u32 halt, fsr, sctlr_orig, sctlr, status;
-	void __iomem *base, *cb_base;
+	u32 halt, fsr, status;
+	void __iomem *tbu_base, *cb_base;
 
 	if (of_property_read_bool(tbu->dev->of_node,
 						"qcom,opt-out-tbu-halting")) {
@@ -5148,47 +5266,49 @@ static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu,
 	}
 
 	cb_base = ARM_SMMU_CB(smmu_domain->smmu, smmu_domain->cfg.cbndx);
-	base = tbu->base;
-	halt = readl_relaxed(base + DEBUG_SID_HALT_REG);
+	tbu_base = tbu->base;
+	halt = readl_relaxed(tbu_base + DEBUG_SID_HALT_REG);
 	halt |= DEBUG_SID_HALT_VAL;
-	writel_relaxed(halt, base + DEBUG_SID_HALT_REG);
-
-	if (!readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
-					(status & DEBUG_SR_HALT_ACK_VAL),
-					0, TBU_DBG_TIMEOUT_US))
-		goto out;
+	writel_relaxed(halt, tbu_base + DEBUG_SID_HALT_REG);
 
 	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
-	if (!(fsr & FSR_FAULT)) {
-		dev_err(tbu->dev, "Couldn't halt TBU!\n");
-		spin_unlock_irqrestore(&tbu->halt_lock, flags);
-		return -ETIMEDOUT;
+	if ((fsr & FSR_FAULT) && (fsr & FSR_SS)) {
+		u32 sctlr_orig, sctlr;
+		/*
+		 * We are in a fault; Our request to halt the bus will not
+		 * complete until transactions in front of us (such as the fault
+		 * itself) have completed. Disable iommu faults and terminate
+		 * any existing transactions.
+		 */
+		sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
+		sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
+		writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
+
+		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+		/*
+		 * Barrier required to ensure that the FSR is cleared
+		 * before resuming SMMU operation
+		 */
+		wmb();
+		writel_relaxed(RESUME_TERMINATE, cb_base +
+			       ARM_SMMU_CB_RESUME);
+
+		writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
 	}
 
-	/*
-	 * We are in a fault; Our request to halt the bus will not complete
-	 * until transactions in front of us (such as the fault itself) have
-	 * completed. Disable iommu faults and terminate any existing
-	 * transactions.
-	 */
-	sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
-	sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
-	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
-
-	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
-	writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
-
-	if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
+	if (readl_poll_timeout_atomic(tbu_base + DEBUG_SR_HALT_ACK_REG, status,
 					(status & DEBUG_SR_HALT_ACK_VAL),
 					0, TBU_DBG_TIMEOUT_US)) {
-		dev_err(tbu->dev, "Couldn't halt TBU from fault context!\n");
-		writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
+		dev_err(tbu->dev, "Couldn't halt TBU!\n");
+
+		halt = readl_relaxed(tbu_base + DEBUG_SID_HALT_REG);
+		halt &= ~DEBUG_SID_HALT_VAL;
+		writel_relaxed(halt, tbu_base + DEBUG_SID_HALT_REG);
+
 		spin_unlock_irqrestore(&tbu->halt_lock, flags);
 		return -ETIMEDOUT;
 	}
 
-	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
-out:
 	tbu->halt_count = 1;
 	spin_unlock_irqrestore(&tbu->halt_lock, flags);
 	return 0;
@@ -5221,19 +5341,6 @@ static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
 	spin_unlock_irqrestore(&tbu->halt_lock, flags);
 }
 
-static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
-	struct arm_smmu_device *smmu, u32 sid)
-{
-	struct qsmmuv500_tbu_device *tbu = NULL;
-	struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
-
-	list_for_each_entry(tbu, &data->tbus, list) {
-		if (tbu->sid_start <= sid &&
-		    sid < tbu->sid_start + tbu->num_sids)
-			return tbu;
-	}
-	return NULL;
-}
 
 static int qsmmuv500_ecats_lock(struct arm_smmu_domain *smmu_domain,
 				struct qsmmuv500_tbu_device *tbu,
@@ -5319,6 +5426,25 @@ static phys_addr_t qsmmuv500_iova_to_phys(
 	sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
 	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
 
+	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+	if (fsr & FSR_FAULT) {
+		/* Clear pending interrupts */
+		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+		/*
+		 * Barrier required to ensure that the FSR is cleared
+		 * before resuming SMMU operation.
+		 */
+		wmb();
+
+		/*
+		 * TBU halt takes care of resuming any stalled transaction.
+		 * Kept here for completeness' sake.
+		 */
+		if (fsr & FSR_SS)
+			writel_relaxed(RESUME_TERMINATE, cb_base +
+				       ARM_SMMU_CB_RESUME);
+	}
+
 	/* Only one concurrent atos operation */
 	ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
 	if (ret)
@@ -5363,10 +5489,12 @@ static phys_addr_t qsmmuv500_iova_to_phys(
 
 	val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
 	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
-	if (fsr & FSR_FAULT) {
+	if (val & DEBUG_PAR_FAULT_VAL) {
 		dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx, SID=0x%x\n",
-				fsr, sid);
+			fsr, sid);
 
+		dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
+			val);
 		/* Clear pending interrupts */
 		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
 		/*
@@ -5374,12 +5502,11 @@ static phys_addr_t qsmmuv500_iova_to_phys(
 		 * before resuming SMMU operation.
 		 */
 		wmb();
-		writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
 
-		/* Check if ECATS translation failed */
-		if (val & DEBUG_PAR_FAULT_VAL)
-			dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
-					val);
+		if (fsr & FSR_SS)
+			writel_relaxed(RESUME_TERMINATE, cb_base +
+				       ARM_SMMU_CB_RESUME);
+
 		ret = -EINVAL;
 	}
 
@@ -5571,6 +5698,584 @@ static int qsmmuv500_read_actlr_tbl(struct arm_smmu_device *smmu)
 	return 0;
 }
 
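+/*
+ * Read the optional qcom,testbus-version DT property; version 1 routes the
+ * TBU testbus select through the TCU HLOS1_NS register space.
+ */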
+static int qsmmuv500_get_testbus_version(struct arm_smmu_device *smmu)
+{
+	struct device *dev = smmu->dev;
+	struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
+	u32 testbus_version;
+	const __be32 *cell;
+
+	cell = of_get_property(dev->of_node, "qcom,testbus-version", NULL);
+	if (!cell)
+		return 0;
+
+	testbus_version = of_read_number(cell, 1);
+
+	data->testbus_version = testbus_version;
+	return 0;
+}
+
+static ssize_t arm_smmu_debug_testbus_read(struct file *file,
+		char __user *ubuf, size_t count, loff_t *offset,
+		enum testbus_sel tbu, enum testbus_ops ops)
+{
+	char buf[100];
+	ssize_t retval;
+	size_t buflen;
+	int buf_len = sizeof(buf);
+
+	if (*offset)
+		return 0;
+
+	memset(buf, 0, buf_len);
+
+	if (tbu == SEL_TBU) {
+		struct qsmmuv500_tbu_device *tbu = file->private_data;
+		struct arm_smmu_device *smmu = tbu->smmu;
+		void __iomem *tbu_base = tbu->base;
+		struct qsmmuv500_archdata *data = smmu->archdata;
+		void __iomem *tcu_base = data->tcu_base;
+		u32 testbus_version = data->testbus_version;
+		struct arm_smmu_power_resources *pwr;
+		long val;
+
+		if (testbus_version == 1)
+			pwr = smmu->pwr;
+		else
+			pwr = tbu->pwr;
+
+		arm_smmu_power_on(pwr);
+
+		if (ops == TESTBUS_SELECT)
+			val = arm_smmu_debug_tbu_testbus_select(tbu_base,
+					tcu_base, testbus_version, READ, 0);
+		else
+			val = arm_smmu_debug_tbu_testbus_output(tbu_base,
+							testbus_version);
+		arm_smmu_power_off(pwr);
+
+		snprintf(buf, buf_len, "0x%0x\n", val);
+	} else {
+
+		struct arm_smmu_device *smmu = file->private_data;
+		struct qsmmuv500_archdata *data = smmu->archdata;
+		void __iomem *base = ARM_SMMU_GR0(smmu);
+		void __iomem *tcu_base = data->tcu_base;
+
+		arm_smmu_power_on(smmu->pwr);
+
+		if (ops == TESTBUS_SELECT) {
+			snprintf(buf, buf_len, "TCU clk testbus sel: 0x%0x\n",
+				arm_smmu_debug_tcu_testbus_select(base,
+					tcu_base, CLK_TESTBUS, READ, 0));
+			snprintf(buf + strlen(buf), buf_len - strlen(buf),
+				 "TCU testbus sel : 0x%0x\n",
+				 arm_smmu_debug_tcu_testbus_select(base,
+					 tcu_base, PTW_AND_CACHE_TESTBUS,
+					 READ, 0));
+		} else {
+			snprintf(buf, buf_len, "0x%0x\n",
+				 arm_smmu_debug_tcu_testbus_output(base));
+		}
+
+		arm_smmu_power_off(smmu->pwr);
+	}
+	buflen = min(count, strlen(buf));
+	if (copy_to_user(ubuf, buf, buflen)) {
+		pr_err_ratelimited("Couldn't copy_to_user\n");
+		retval = -EFAULT;
+	} else {
+		*offset = 1;
+		retval = buflen;
+	}
+
+	return retval;
+}
+
+static ssize_t arm_smmu_debug_tcu_testbus_sel_write(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *offset)
+{
+	struct arm_smmu_device *smmu = file->private_data;
+	struct qsmmuv500_archdata *data = smmu->archdata;
+	void __iomem *tcu_base = data->tcu_base;
+	void __iomem *base = ARM_SMMU_GR0(smmu);
+	char *comma;
+	char buf[100];
+	u64 sel, val;
+
+	if (count >= 100) {
+		pr_err_ratelimited("Value too large\n");
+		return -EINVAL;
+	}
+
+	memset(buf, 0, 100);
+
+	if (copy_from_user(buf, ubuf, count)) {
+		pr_err_ratelimited("Couldn't copy from user\n");
+		return -EFAULT;
+	}
+
+	comma = strnchr(buf, count, ',');
+	if (!comma)
+		goto invalid_format;
+
+	/* split up the words */
+	*comma = '\0';
+
+	if (kstrtou64(buf, 0, &sel))
+		goto invalid_format;
+
+	if (kstrtou64(comma + 1, 0, &val))
+		goto invalid_format;
+
+	arm_smmu_power_on(smmu->pwr);
+
+	if (sel == 1)
+		arm_smmu_debug_tcu_testbus_select(base,
+				tcu_base, CLK_TESTBUS, WRITE, val);
+	else if (sel == 2)
+		arm_smmu_debug_tcu_testbus_select(base,
+				tcu_base, PTW_AND_CACHE_TESTBUS, WRITE, val);
+	else
+		goto invalid_format;
+
+	arm_smmu_power_off(smmu->pwr);
+
+	return count;
+
+invalid_format:
+	pr_err_ratelimited("Invalid format. Expected: <1, testbus select> for tcu CLK testbus (or) <2, testbus select> for tcu PTW/CACHE testbuses\n");
+	return -EINVAL;
+}
+
+static ssize_t arm_smmu_debug_tcu_testbus_sel_read(struct file *file,
+		char __user *ubuf, size_t count, loff_t *offset)
+{
+	return arm_smmu_debug_testbus_read(file, ubuf,
+			count, offset, SEL_TCU, TESTBUS_SELECT);
+}
+
+static const struct file_operations arm_smmu_debug_tcu_testbus_sel_fops = {
+	.open	= simple_open,
+	.write	= arm_smmu_debug_tcu_testbus_sel_write,
+	.read	= arm_smmu_debug_tcu_testbus_sel_read,
+};
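+/*
+ * Illustrative usage from userspace (paths assume the iommu debugfs root
+ * used above; values are examples only):
+ *   echo 1,<sel> > .../iommu/testbus/<smmu>/tcu_testbus_sel  # CLK testbus
+ *   echo 2,<sel> > .../iommu/testbus/<smmu>/tcu_testbus_sel  # PTW/CACHE
+ *   cat .../iommu/testbus/<smmu>/tcu_testbus_output
+ */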
+
+static ssize_t arm_smmu_debug_tcu_testbus_read(struct file *file,
+		char __user *ubuf, size_t count, loff_t *offset)
+{
+	return arm_smmu_debug_testbus_read(file, ubuf,
+			count, offset, SEL_TCU, TESTBUS_OUTPUT);
+}
+
+static const struct file_operations arm_smmu_debug_tcu_testbus_fops = {
+	.open	= simple_open,
+	.read	= arm_smmu_debug_tcu_testbus_read,
+};
+
+static int qsmmuv500_tcu_testbus_init(struct arm_smmu_device *smmu)
+{
+	struct dentry *testbus_dir;
+
+	if (!iommu_debugfs_top)
+		return 0;
+
+	if (!debugfs_testbus_dir) {
+		debugfs_testbus_dir = debugfs_create_dir("testbus",
+						       iommu_debugfs_top);
+		if (!debugfs_testbus_dir) {
+			pr_err_ratelimited("Couldn't create iommu/testbus debugfs directory\n");
+			return -ENODEV;
+		}
+	}
+
+	testbus_dir = debugfs_create_dir(dev_name(smmu->dev),
+				debugfs_testbus_dir);
+
+	if (!testbus_dir) {
+		pr_err_ratelimited("Couldn't create iommu/testbus/%s debugfs directory\n",
+		       dev_name(smmu->dev));
+		goto err;
+	}
+
+	if (!debugfs_create_file("tcu_testbus_sel", 0400, testbus_dir, smmu,
+			&arm_smmu_debug_tcu_testbus_sel_fops)) {
+		pr_err_ratelimited("Couldn't create iommu/testbus/%s/tcu_testbus_sel debugfs file\n",
+		       dev_name(smmu->dev));
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("tcu_testbus_output", 0400, testbus_dir, smmu,
+			&arm_smmu_debug_tcu_testbus_fops)) {
+		pr_err_ratelimited("Couldn't create iommu/testbus/%s/tcu_testbus_output debugfs file\n",
+		       dev_name(smmu->dev));
+		goto err_rmdir;
+	}
+
+	return 0;
+err_rmdir:
+	debugfs_remove_recursive(testbus_dir);
+err:
+	return 0;
+}
+
+static ssize_t arm_smmu_debug_tbu_testbus_sel_write(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *offset)
+{
+	struct qsmmuv500_tbu_device *tbu = file->private_data;
+	void __iomem *tbu_base = tbu->base;
+	struct arm_smmu_device *smmu = tbu->smmu;
+	struct arm_smmu_power_resources *pwr;
+	struct qsmmuv500_archdata *data = smmu->archdata;
+	void __iomem *tcu_base = data->tcu_base;
+	u32 testbus_version = data->testbus_version;
+	u64 val;
+
+	if (kstrtoull_from_user(ubuf, count, 0, &val)) {
+		pr_err_ratelimited("Invalid format for tbu testbus select\n");
+		return -EINVAL;
+	}
+
+	if (testbus_version == 1)
+		pwr = smmu->pwr;
+	else
+		pwr = tbu->pwr;
+
+	arm_smmu_power_on(pwr);
+	arm_smmu_debug_tbu_testbus_select(tbu_base, tcu_base,
+			testbus_version, WRITE, val);
+	arm_smmu_power_off(pwr);
+
+	return count;
+}
+
+static ssize_t arm_smmu_debug_tbu_testbus_sel_read(struct file *file,
+		char __user *ubuf, size_t count, loff_t *offset)
+{
+	return arm_smmu_debug_testbus_read(file, ubuf,
+			count, offset, SEL_TBU, TESTBUS_SELECT);
+}
+
+static const struct file_operations arm_smmu_debug_tbu_testbus_sel_fops = {
+	.open	= simple_open,
+	.write	= arm_smmu_debug_tbu_testbus_sel_write,
+	.read	= arm_smmu_debug_tbu_testbus_sel_read,
+};
+
+static ssize_t arm_smmu_debug_tbu_testbus_read(struct file *file,
+		char __user *ubuf, size_t count, loff_t *offset)
+{
+	return arm_smmu_debug_testbus_read(file, ubuf,
+			count, offset, SEL_TBU, TESTBUS_OUTPUT);
+}
+
+static const struct file_operations arm_smmu_debug_tbu_testbus_fops = {
+	.open	= simple_open,
+	.read	= arm_smmu_debug_tbu_testbus_read,
+};
+
+static int qsmmuv500_tbu_testbus_init(struct qsmmuv500_tbu_device *tbu)
+{
+	struct dentry *testbus_dir;
+
+	if (!iommu_debugfs_top)
+		return 0;
+
+	if (!debugfs_testbus_dir) {
+		debugfs_testbus_dir = debugfs_create_dir("testbus",
+						       iommu_debugfs_top);
+		if (!debugfs_testbus_dir) {
+			pr_err_ratelimited("Couldn't create iommu/testbus debugfs directory\n");
+			return -ENODEV;
+		}
+	}
+
+	testbus_dir = debugfs_create_dir(dev_name(tbu->dev),
+				debugfs_testbus_dir);
+
+	if (!testbus_dir) {
+		pr_err_ratelimited("Couldn't create iommu/testbus/%s debugfs directory\n",
+				dev_name(tbu->dev));
+		goto err;
+	}
+
+	if (!debugfs_create_file("tbu_testbus_sel", 0400, testbus_dir, tbu,
+			&arm_smmu_debug_tbu_testbus_sel_fops)) {
+		pr_err_ratelimited("Couldn't create iommu/testbus/%s/tbu_testbus_sel debugfs file\n",
+				dev_name(tbu->dev));
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("tbu_testbus_output", 0400, testbus_dir, tbu,
+			&arm_smmu_debug_tbu_testbus_fops)) {
+		pr_err_ratelimited("Couldn't create iommu/testbus/%s/tbu_testbus_output debugfs file\n",
+				dev_name(tbu->dev));
+		goto err_rmdir;
+	}
+
+	return 0;
+err_rmdir:
+	debugfs_remove_recursive(testbus_dir);
+err:
+	return 0;
+}
+
+static ssize_t arm_smmu_debug_capturebus_snapshot_read(struct file *file,
+		char __user *ubuf, size_t count, loff_t *offset)
+{
+	struct qsmmuv500_tbu_device *tbu = file->private_data;
+	struct arm_smmu_device *smmu = tbu->smmu;
+	void __iomem *tbu_base = tbu->base;
+	u64 snapshot[NO_OF_CAPTURE_POINTS][REGS_PER_CAPTURE_POINT];
+	char buf[400];
+	ssize_t retval;
+	size_t buflen;
+	int buf_len = sizeof(buf);
+	int i, j;
+
+	if (*offset)
+		return 0;
+
+	memset(buf, 0, buf_len);
+
+	arm_smmu_power_on(smmu->pwr);
+	arm_smmu_power_on(tbu->pwr);
+
+	arm_smmu_debug_get_capture_snapshot(tbu_base, snapshot);
+
+	arm_smmu_power_off(tbu->pwr);
+	arm_smmu_power_off(smmu->pwr);
+
+	for (i = 0; i < NO_OF_CAPTURE_POINTS ; ++i) {
+		for (j = 0; j < REGS_PER_CAPTURE_POINT; ++j) {
+			snprintf(buf + strlen(buf), buf_len - strlen(buf),
+				 "Capture_%d_Snapshot_%d : 0x%0llx\n",
+				  i+1, j+1, snapshot[i][j]);
+		}
+	}
+
+	buflen = min(count, strlen(buf));
+	if (copy_to_user(ubuf, buf, buflen)) {
+		pr_err_ratelimited("Couldn't copy_to_user\n");
+		retval = -EFAULT;
+	} else {
+		*offset = 1;
+		retval = buflen;
+	}
+
+	return retval;
+}
+
+static const struct file_operations arm_smmu_debug_capturebus_snapshot_fops = {
+	.open	= simple_open,
+	.read	= arm_smmu_debug_capturebus_snapshot_read,
+};
+
+static ssize_t arm_smmu_debug_capturebus_config_write(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *offset)
+{
+	struct qsmmuv500_tbu_device *tbu = file->private_data;
+	struct arm_smmu_device *smmu = tbu->smmu;
+	void __iomem *tbu_base = tbu->base;
+	char *comma1, *comma2;
+	char buf[100];
+	u64 sel, mask, match, val;
+
+	if (count >= 100) {
+		pr_err_ratelimited("Input too large\n");
+		goto invalid_format;
+	}
+
+	memset(buf, 0, 100);
+
+	if (copy_from_user(buf, ubuf, count)) {
+		pr_err_ratelimited("Couldn't copy from user\n");
+		return -EFAULT;
+	}
+
+	comma1 = strnchr(buf, count, ',');
+	if (!comma1)
+		goto invalid_format;
+
+	*comma1  = '\0';
+
+	if (kstrtou64(buf, 0, &sel))
+		goto invalid_format;
+
+	if (sel > 4) {
+		goto invalid_format;
+	} else if (sel == 4) {
+		if (kstrtou64(comma1 + 1, 0, &val))
+			goto invalid_format;
+		goto program_capturebus;
+	}
+
+	comma2 = strnchr(comma1 + 1, count, ',');
+	if (!comma2)
+		goto invalid_format;
+
+	/* split up the words */
+	*comma2 = '\0';
+
+	if (kstrtou64(comma1 + 1, 0, &mask))
+		goto invalid_format;
+
+	if (kstrtou64(comma2 + 1, 0, &match))
+		goto invalid_format;
+
+program_capturebus:
+	arm_smmu_power_on(smmu->pwr);
+	arm_smmu_power_on(tbu->pwr);
+
+	if (sel == 4)
+		arm_smmu_debug_set_tnx_tcr_cntl(tbu_base, val);
+	else
+		arm_smmu_debug_set_mask_and_match(tbu_base, sel, mask, match);
+
+	arm_smmu_power_off(tbu->pwr);
+	arm_smmu_power_off(smmu->pwr);
+	return count;
+
+invalid_format:
+	pr_err_ratelimited("Invalid format. Expected: <1/2/3,Mask,Match> (or) <4,TNX_TCR_CNTL>>\n");
+	return -EINVAL;
+}
+static ssize_t arm_smmu_debug_capturebus_config_read(struct file *file,
+		char __user *ubuf, size_t count, loff_t *offset)
+{
+	struct qsmmuv500_tbu_device *tbu = file->private_data;
+	struct arm_smmu_device *smmu = tbu->smmu;
+	void __iomem *tbu_base = tbu->base;
+	unsigned long val;
+	u64 mask[NO_OF_MASK_AND_MATCH], match[NO_OF_MASK_AND_MATCH];
+	char buf[400];
+	ssize_t retval;
+	size_t buflen;
+	int buf_len = sizeof(buf);
+	int i;
+
+	if (*offset)
+		return 0;
+
+	memset(buf, 0, buf_len);
+
+	arm_smmu_power_on(smmu->pwr);
+	arm_smmu_power_on(tbu->pwr);
+
+	arm_smmu_debug_get_mask_and_match(tbu_base,
+					mask, match);
+	val = arm_smmu_debug_get_tnx_tcr_cntl(tbu_base);
+
+	arm_smmu_power_off(tbu->pwr);
+	arm_smmu_power_off(smmu->pwr);
+
+	for (i = 0; i < NO_OF_MASK_AND_MATCH; ++i) {
+		snprintf(buf + strlen(buf), buf_len - strlen(buf),
+				"Mask_%d : 0x%0llx\t", i+1, mask[i]);
+		snprintf(buf + strlen(buf), buf_len - strlen(buf),
+				"Match_%d : 0x%0llx\n", i+1, match[i]);
+	}
+	snprintf(buf + strlen(buf), buf_len - strlen(buf), "0x%0lx\n", val);
+
+	buflen = min(count, strlen(buf));
+	if (copy_to_user(ubuf, buf, buflen)) {
+		pr_err_ratelimited("Couldn't copy_to_user\n");
+		retval = -EFAULT;
+	} else {
+		*offset = 1;
+		retval = buflen;
+	}
+
+	return retval;
+}
+
+static const struct file_operations arm_smmu_debug_capturebus_config_fops = {
+	.open	= simple_open,
+	.write	= arm_smmu_debug_capturebus_config_write,
+	.read	= arm_smmu_debug_capturebus_config_read,
+};
+
+static int qsmmuv500_capturebus_init(struct qsmmuv500_tbu_device *tbu)
+{
+	struct dentry *capturebus_dir;
+
+	if (!iommu_debugfs_top)
+		return 0;
+
+	if (!debugfs_capturebus_dir) {
+		debugfs_capturebus_dir = debugfs_create_dir(
+					 "capturebus", iommu_debugfs_top);
+		if (!debugfs_capturebus_dir) {
+			pr_err_ratelimited("Couldn't create iommu/capturebus debugfs directory\n");
+			return -ENODEV;
+		}
+	}
+	capturebus_dir = debugfs_create_dir(dev_name(tbu->dev),
+				debugfs_capturebus_dir);
+	if (!capturebus_dir) {
+		pr_err_ratelimited("Couldn't create iommu/capturebus/%s debugfs directory\n",
+				dev_name(tbu->dev));
+		goto err;
+	}
+
+	if (!debugfs_create_file("config", 0400, capturebus_dir, tbu,
+			&arm_smmu_debug_capturebus_config_fops)) {
+		pr_err_ratelimited("Couldn't create iommu/capturebus/%s/config debugfs file\n",
+				dev_name(tbu->dev));
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("snapshot", 0400, capturebus_dir, tbu,
+			&arm_smmu_debug_capturebus_snapshot_fops)) {
+		pr_err_ratelimited("Couldn't create iommu/capturebus/%s/snapshot debugfs file\n",
+				dev_name(tbu->dev));
+		goto err_rmdir;
+	}
+	return 0;
+err_rmdir:
+	debugfs_remove_recursive(capturebus_dir);
+err:
+	return 0;
+}
+
+static irqreturn_t arm_smmu_debug_capture_bus_match(int irq, void *dev)
+{
+	struct qsmmuv500_tbu_device *tbu = dev;
+	struct arm_smmu_device *smmu = tbu->smmu;
+	void __iomem *tbu_base = tbu->base;
+	u64 mask[NO_OF_MASK_AND_MATCH], match[NO_OF_MASK_AND_MATCH];
+	u64 snapshot[NO_OF_CAPTURE_POINTS][REGS_PER_CAPTURE_POINT];
+	unsigned long val;
+	int i, j;
+
+	if (arm_smmu_power_on(smmu->pwr) || arm_smmu_power_on(tbu->pwr))
+		return IRQ_NONE;
+
+	val = arm_smmu_debug_get_tnx_tcr_cntl(tbu_base);
+	arm_smmu_debug_get_mask_and_match(tbu_base, mask, match);
+	arm_smmu_debug_get_capture_snapshot(tbu_base, snapshot);
+	arm_smmu_debug_clear_intr_and_validbits(tbu_base);
+
+	arm_smmu_power_off(tbu->pwr);
+	arm_smmu_power_off(smmu->pwr);
+
+	dev_info(tbu->dev, "TNX_TCR_CNTL : 0x%0llx\n", val);
+
+	for (i = 0; i < NO_OF_MASK_AND_MATCH; ++i) {
+		dev_info(tbu->dev,
+				"Mask_%d : 0x%0llx\n", i+1, mask[i]);
+		dev_info(tbu->dev,
+				"Match_%d : 0x%0llx\n", i+1, match[i]);
+	}
+
+	for (i = 0; i < NO_OF_CAPTURE_POINTS ; ++i) {
+		for (j = 0; j < REGS_PER_CAPTURE_POINT; ++j) {
+			dev_info(tbu->dev,
+					"Capture_%d_Snapshot_%d : 0x%0llx\n",
+					i+1, j+1, snapshot[i][j]);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
 static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
 {
 	struct resource *res;
@@ -5600,6 +6305,12 @@ static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
 	data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
 	smmu->archdata = data;
 
+	ret = qsmmuv500_get_testbus_version(smmu);
+	if (ret)
+		return ret;
+
+	qsmmuv500_tcu_testbus_init(smmu);
+
 	if (arm_smmu_is_static_cb(smmu))
 		return 0;
 
@@ -5649,7 +6360,7 @@ static int qsmmuv500_tbu_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct qsmmuv500_tbu_device *tbu;
 	const __be32 *cell;
-	int len;
+	int len, i, err, num_irqs = 0;
 
 	tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
 	if (!tbu)
@@ -5676,11 +6387,43 @@ static int qsmmuv500_tbu_probe(struct platform_device *pdev)
 	tbu->sid_start = of_read_number(cell, 1);
 	tbu->num_sids = of_read_number(cell + 1, 1);
 
+	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs)))
+		num_irqs++;
+
+	tbu->irqs = devm_kzalloc(dev, sizeof(*tbu->irqs) * num_irqs,
+				  GFP_KERNEL);
+	if (!tbu->irqs)
+		return -ENOMEM;
+
+	for (i = 0; i < num_irqs; ++i) {
+		int irq = platform_get_irq(pdev, i);
+
+		if (irq < 0) {
+			dev_err(dev, "failed to get irq index %d\n", i);
+			return -ENODEV;
+		}
+		tbu->irqs[i] = irq;
+
+		err = devm_request_threaded_irq(tbu->dev, tbu->irqs[i],
+					NULL, arm_smmu_debug_capture_bus_match,
+					IRQF_ONESHOT | IRQF_SHARED,
+					"capture bus", tbu);
+		if (err) {
+			dev_err(dev, "failed to request capture bus irq%d (%u)\n",
+				i, tbu->irqs[i]);
+			return err;
+		}
+	}
+
 	tbu->pwr = arm_smmu_init_power_resources(pdev);
 	if (IS_ERR(tbu->pwr))
 		return PTR_ERR(tbu->pwr);
 
 	dev_set_drvdata(dev, tbu);
+
+	qsmmuv500_tbu_testbus_init(tbu);
+	qsmmuv500_capturebus_init(tbu);
+
 	return 0;
 }
 
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 8da68f3..7b65083 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -698,7 +698,7 @@ int iommu_dma_finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 		 * - and wouldn't make the resulting output segment too long
 		 */
 		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
-		    (cur_len + s_length <= max_len)) {
+		    (max_len - cur_len >= s_length)) {
 			/* ...then concatenate it with the previous one */
 			cur_len += s_length;
 		} else {
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index ec88a51..2fcc75f 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -11,6 +11,7 @@
 #include <asm/cacheflush.h>
 #include <asm/dma-iommu.h>
 #include <linux/slab.h>
+#include <linux/genalloc.h>
 #include <linux/vmalloc.h>
 #include <linux/pci.h>
 #include <linux/dma-iommu.h>
@@ -23,6 +24,18 @@
 #define FAST_PAGE_SIZE (1UL << FAST_PAGE_SHIFT)
 #define FAST_PAGE_MASK (~(PAGE_SIZE - 1))
 
+#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
+static struct gen_pool *atomic_pool __ro_after_init;
+
+static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
+
+static int __init early_coherent_pool(char *p)
+{
+	atomic_pool_size = memparse(p, &p);
+	return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
+
 static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
 				 bool coherent)
 {
@@ -33,6 +46,48 @@ static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
 	return prot;
 }
 
+static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
+{
+	unsigned long val;
+	void *ptr = NULL;
+
+	if (!atomic_pool) {
+		WARN(1, "coherent pool not initialised!\n");
+		return NULL;
+	}
+
+	val = gen_pool_alloc(atomic_pool, size);
+	if (val) {
+		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
+
+		*ret_page = phys_to_page(phys);
+		ptr = (void *)val;
+		memset(ptr, 0, size);
+	}
+
+	return ptr;
+}
+
+static phys_addr_t __atomic_get_phys(void *addr)
+{
+	return gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
+}
+
+static bool __in_atomic_pool(void *start, size_t size)
+{
+	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
+}
+
+static int __free_from_pool(void *start, size_t size)
+{
+	if (!__in_atomic_pool(start, size))
+		return 0;
+
+	gen_pool_free(atomic_pool, (unsigned long)start, size);
+
+	return 1;
+}
+
 static bool is_dma_coherent(struct device *dev, unsigned long attrs)
 {
 	bool is_coherent;
@@ -126,7 +181,6 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
 		nbits, align);
 	if (unlikely(bit > mapping->num_4k_pages)) {
 		/* try wrapping */
-		mapping->next_start = 0; /* TODO: SHOULD I REALLY DO THIS?!? */
 		bit = bitmap_find_next_zero_area(
 			mapping->bitmap, mapping->num_4k_pages, 0, nbits,
 			align);
@@ -153,7 +207,6 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
 		bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
 
 		iommu_tlbiall(mapping->domain);
-		iommu_tlb_sync(mapping->domain);
 		mapping->have_stale_tlbs = false;
 		av8l_fast_clear_stale_ptes(mapping->pgtbl_ops, skip_sync);
 	}
@@ -248,6 +301,69 @@ static bool __bit_is_sooner(unsigned long candidate,
 	return true;
 }
 
+
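+/*
+ * Pre-allocate a non-cacheable gen_pool backing atomic (non-blocking)
+ * coherent allocations, carved from CMA when available and otherwise
+ * from the page allocator. Sized by the "coherent_pool=" early param,
+ * defaulting to 256 KiB.
+ */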
+static int __init atomic_pool_init(void)
+{
+	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
+	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
+	struct page *page;
+	void *addr;
+	unsigned int pool_size_order = get_order(atomic_pool_size);
+
+	if (dev_get_cma_area(NULL))
+		page = dma_alloc_from_contiguous(NULL, nr_pages,
+						 pool_size_order, false);
+	else
+		page = alloc_pages(GFP_DMA32, pool_size_order);
+
+	if (page) {
+		int ret;
+		void *page_addr = page_address(page);
+
+		memset(page_addr, 0, atomic_pool_size);
+		__dma_flush_area(page_addr, atomic_pool_size);
+
+		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
+		if (!atomic_pool)
+			goto free_page;
+
+		addr = dma_common_contiguous_remap(page, atomic_pool_size,
+					VM_USERMAP, prot, atomic_pool_init);
+
+		if (!addr)
+			goto destroy_genpool;
+
+		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
+					page_to_phys(page),
+					atomic_pool_size, -1);
+		if (ret)
+			goto remove_mapping;
+
+		gen_pool_set_algo(atomic_pool,
+				  gen_pool_first_fit_order_align,
+				  NULL);
+
+		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
+			atomic_pool_size / 1024);
+		return 0;
+	}
+	goto out;
+
+remove_mapping:
+	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP, false);
+destroy_genpool:
+	gen_pool_destroy(atomic_pool);
+	atomic_pool = NULL;
+free_page:
+	if (!dma_release_from_contiguous(NULL, page, nr_pages))
+		__free_pages(page, pool_size_order);
+out:
+	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
+		atomic_pool_size / 1024);
+	return -ENOMEM;
+}
+arch_initcall(atomic_pool_init);
+
 static void __fast_smmu_free_iova(struct dma_fast_smmu_mapping *mapping,
 				  dma_addr_t iova, size_t size)
 {
@@ -499,11 +615,56 @@ static void __fast_smmu_free_pages(struct page **pages, int count)
 {
 	int i;
 
+	if (!pages)
+		return;
 	for (i = 0; i < count; i++)
 		__free_page(pages[i]);
 	kvfree(pages);
 }
 
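+/*
+ * Non-blocking allocation path: coherent devices take pages straight
+ * from the page allocator, non-coherent ones carve from the
+ * preallocated atomic pool; either way the pages are then mapped into
+ * the fast-SMMU IOVA space.
+ */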
+static void *fast_smmu_alloc_atomic(struct dma_fast_smmu_mapping *mapping,
+				    size_t size, gfp_t gfp, unsigned long attrs,
+				    dma_addr_t *handle, bool coherent)
+{
+	void *addr;
+	unsigned long flags;
+	struct page *page;
+	dma_addr_t dma_addr;
+	int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+
+	if (coherent) {
+		page = alloc_pages(gfp, get_order(size));
+		addr = page ? page_address(page) : NULL;
+	} else
+		addr = __alloc_from_pool(size, &page, gfp);
+	if (!addr)
+		return NULL;
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	dma_addr = __fast_smmu_alloc_iova(mapping, attrs, size);
+	if (dma_addr == DMA_ERROR_CODE) {
+		dev_err(mapping->dev, "no iova\n");
+		spin_unlock_irqrestore(&mapping->lock, flags);
+		goto out_free_page;
+	}
+	if (unlikely(av8l_fast_map_public(mapping->pgtbl_ops, dma_addr,
+					  page_to_phys(page), size, prot))) {
+		dev_err(mapping->dev, "no map public\n");
+		goto out_free_iova;
+	}
+	spin_unlock_irqrestore(&mapping->lock, flags);
+	*handle = dma_addr;
+	return addr;
+
+out_free_iova:
+	__fast_smmu_free_iova(mapping, dma_addr, size);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+out_free_page:
+	if (coherent)
+		__free_pages(page, get_order(size));
+	else
+		__free_from_pool(addr, size);
+	return NULL;
+}
+
 static struct page **__fast_smmu_alloc_pages(unsigned int count, gfp_t gfp)
 {
 	struct page **pages;
@@ -531,6 +692,54 @@ static struct page **__fast_smmu_alloc_pages(unsigned int count, gfp_t gfp)
 	return pages;
 }
 
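+/*
+ * DMA_ATTR_FORCE_CONTIGUOUS path: allocate a physically contiguous CMA
+ * region, map it at an IOVA, and remap it into the vmalloc area for the
+ * CPU. Non-coherent callers get the region flushed before first use.
+ */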
+static void *__fast_smmu_alloc_contiguous(struct device *dev, size_t size,
+			dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
+{
+	struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev);
+	bool is_coherent = is_dma_coherent(dev, attrs);
+	int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, is_coherent, attrs);
+	pgprot_t remap_prot = __get_dma_pgprot(attrs, PAGE_KERNEL, is_coherent);
+	struct page *page;
+	dma_addr_t iova;
+	unsigned long flags;
+	void *coherent_addr;
+
+	page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+					get_order(size), gfp & __GFP_NOWARN);
+	if (!page)
+		return NULL;
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	iova = __fast_smmu_alloc_iova(mapping, attrs, size);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+	if (iova == DMA_ERROR_CODE)
+		goto release_page;
+
+	if (av8l_fast_map_public(mapping->pgtbl_ops, iova, page_to_phys(page),
+				 size, prot))
+		goto release_iova;
+
+	coherent_addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
+				remap_prot, __fast_smmu_alloc_contiguous);
+	if (!coherent_addr)
+		goto release_mapping;
+
+	if (!is_coherent)
+		__dma_flush_area(page_to_virt(page), size);
+
+	*handle = iova;
+	return coherent_addr;
+
+release_mapping:
+	av8l_fast_unmap_public(mapping->pgtbl_ops, iova, size);
+release_iova:
+	__fast_smmu_free_iova(mapping, iova, size);
+release_page:
+	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+	return NULL;
+}
+
 static void *fast_smmu_alloc(struct device *dev, size_t size,
 			     dma_addr_t *handle, gfp_t gfp,
 			     unsigned long attrs)
@@ -558,6 +767,14 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
 	}
 
 	*handle = DMA_ERROR_CODE;
+	size = ALIGN(size, SZ_4K);
+
+	if (!gfpflags_allow_blocking(gfp))
+		return fast_smmu_alloc_atomic(mapping, size, gfp, attrs, handle,
+					      is_coherent);
+	else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
+		return __fast_smmu_alloc_contiguous(dev, size, handle, gfp,
+						    attrs);
 
 	pages = __fast_smmu_alloc_pages(count, gfp);
 	if (!pages) {
@@ -565,7 +782,6 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
 		return NULL;
 	}
 
-	size = ALIGN(size, SZ_4K);
 	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, gfp)) {
 		dev_err(dev, "no sg tablen\n");
 		goto out_free_pages;
@@ -633,28 +849,55 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
 }
 
 static void fast_smmu_free(struct device *dev, size_t size,
-			   void *vaddr, dma_addr_t dma_handle,
+			   void *cpu_addr, dma_addr_t dma_handle,
 			   unsigned long attrs)
 {
 	struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev);
 	struct vm_struct *area;
-	struct page **pages;
-	size_t count = ALIGN(size, SZ_4K) >> FAST_PAGE_SHIFT;
 	unsigned long flags;
 
-	size = ALIGN(size, SZ_4K);
+	size = ALIGN(size, FAST_PAGE_SIZE);
 
-	area = find_vm_area(vaddr);
-	if (WARN_ON_ONCE(!area))
-		return;
-
-	pages = area->pages;
-	dma_common_free_remap(vaddr, size, VM_USERMAP, false);
 	spin_lock_irqsave(&mapping->lock, flags);
 	av8l_fast_unmap_public(mapping->pgtbl_ops, dma_handle, size);
 	__fast_smmu_free_iova(mapping, dma_handle, size);
 	spin_unlock_irqrestore(&mapping->lock, flags);
-	__fast_smmu_free_pages(pages, count);
+
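+	/*
+	 * Pick the release path matching how fast_smmu_alloc() built the
+	 * CPU mapping: a vmalloc'd page array, CMA plus remap, plain
+	 * pages, or a chunk of the atomic pool (whose remap is permanent).
+	 */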
+	area = find_vm_area(cpu_addr);
+	if (area && area->pages) {
+		struct page **pages = area->pages;
+
+		dma_common_free_remap(cpu_addr, size, VM_USERMAP, false);
+		__fast_smmu_free_pages(pages, size >> FAST_PAGE_SHIFT);
+	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+		struct page *page = vmalloc_to_page(cpu_addr);
+
+		dma_common_free_remap(cpu_addr, size, VM_USERMAP, false);
+		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+	} else if (!is_vmalloc_addr(cpu_addr)) {
+		__free_pages(virt_to_page(cpu_addr), get_order(size));
+	} else if (__in_atomic_pool(cpu_addr, size)) {
+		/* Keep the atomic pool's permanent remap; just free the chunk. */
+		__free_from_pool(cpu_addr, size);
+	}
+}
+
+/* __swiotlb_mmap_pfn is not currently exported. */
+static int fast_smmu_mmap_pfn(struct vm_area_struct *vma, unsigned long pfn,
+			     size_t size)
+{
+	int ret = -ENXIO;
+	unsigned long nr_vma_pages = vma_pages(vma);
+	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long off = vma->vm_pgoff;
+
+	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start, pfn + off,
+				      vma->vm_end - vma->vm_start,
+				      vma->vm_page_prot);
+	}
+
+	return ret;
 }
 
 static int fast_smmu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
@@ -662,27 +905,26 @@ static int fast_smmu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 				size_t size, unsigned long attrs)
 {
 	struct vm_struct *area;
-	unsigned long uaddr = vma->vm_start;
-	struct page **pages;
-	int i, nr_pages, ret = 0;
 	bool coherent = is_dma_coherent(dev, attrs);
+	unsigned long pfn = 0;
 
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
 					     coherent);
 	area = find_vm_area(cpu_addr);
-	if (!area)
-		return -EINVAL;
+	if (area && area->pages)
+		return iommu_dma_mmap(area->pages, size, vma);
+	else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
+		pfn = vmalloc_to_pfn(cpu_addr);
+	else if (!is_vmalloc_addr(cpu_addr))
+		pfn = page_to_pfn(virt_to_page(cpu_addr));
+	else if (__in_atomic_pool(cpu_addr, size))
+		pfn = __atomic_get_phys(cpu_addr) >> PAGE_SHIFT;
 
-	pages = area->pages;
-	nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	for (i = vma->vm_pgoff; i < nr_pages && uaddr < vma->vm_end; i++) {
-		ret = vm_insert_page(vma, uaddr, pages[i]);
-		if (ret)
-			break;
-		uaddr += PAGE_SIZE;
-	}
 
-	return ret;
+	if (pfn)
+		return fast_smmu_mmap_pfn(vma, pfn, size);
+
+	return -EINVAL;
 }
 
 static int fast_smmu_get_sgtable(struct device *dev, struct sg_table *sgt,
@@ -691,13 +933,27 @@ static int fast_smmu_get_sgtable(struct device *dev, struct sg_table *sgt,
 {
 	unsigned int n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct vm_struct *area;
+	struct page *page = NULL;
+	int ret = -ENXIO;
 
 	area = find_vm_area(cpu_addr);
-	if (!area || !area->pages)
-		return -EINVAL;
+	if (area && area->pages)
+		return sg_alloc_table_from_pages(sgt, area->pages, n_pages, 0,
+						 size, GFP_KERNEL);
+	else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
+		page = vmalloc_to_page(cpu_addr);
+	else if (!is_vmalloc_addr(cpu_addr))
+		page = virt_to_page(cpu_addr);
+	else if (__in_atomic_pool(cpu_addr, size))
+		page = phys_to_page(__atomic_get_phys(cpu_addr));
 
-	return sg_alloc_table_from_pages(sgt, area->pages, n_pages, 0, size,
-					GFP_KERNEL);
+	if (page) {
+		ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+		if (!ret)
+			sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+	}
+
+	return ret;
 }
 
 static dma_addr_t fast_smmu_dma_map_resource(
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index 688e037..541abb2 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -86,7 +86,6 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops)
 
 	iop = container_of(ops, struct io_pgtable, ops);
 	io_pgtable_tlb_flush_all(iop);
-	io_pgtable_tlb_sync(iop);
 	io_pgtable_init_table[iop->fmt]->free(iop);
 }
 
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 5481f0c..dee31ad 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -1538,6 +1538,8 @@ static ssize_t iommu_debug_atos_write(struct file *file,
 {
 	struct iommu_debug_device *ddev = file->private_data;
 	dma_addr_t iova;
+	phys_addr_t phys;
+	unsigned long pfn;
 
 	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
 		pr_err_ratelimited("Invalid format for iova\n");
@@ -1546,6 +1548,13 @@ static ssize_t iommu_debug_atos_write(struct file *file,
 	}
 
 	ddev->iova = iova;
+	phys = iommu_iova_to_phys(ddev->domain, ddev->iova);
+	pfn = __phys_to_pfn(phys);
+	if (!pfn_valid(pfn)) {
+		dev_err(ddev->dev, "Invalid ATOS operation page %pa\n", &phys);
+		return -EINVAL;
+	}
+
 	pr_err_ratelimited("Saved iova=%pa for future ATOS commands\n", &iova);
 	return count;
 }
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 14651aa..74f0dbf 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -148,8 +148,9 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 	struct iova *cached_iova;
 
 	cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
-	if (free->pfn_hi < iovad->dma_32bit_pfn &&
-	    free->pfn_lo >= cached_iova->pfn_lo)
+	if (free == cached_iova ||
+	    (free->pfn_hi < iovad->dma_32bit_pfn &&
+	     free->pfn_lo >= cached_iova->pfn_lo))
 		iovad->cached32_node = rb_next(&free->node);
 
 	cached_iova = rb_entry(iovad->cached_node, struct iova, node);
@@ -597,7 +598,9 @@ void queue_iova(struct iova_domain *iovad,
 
 	spin_unlock_irqrestore(&fq->lock, flags);
 
-	if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
+	/* Avoid false sharing as much as possible. */
+	if (!atomic_read(&iovad->fq_timer_on) &&
+	    !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1))
 		mod_timer(&iovad->fq_timer,
 			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
 }
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 3295f30..3fb07b1 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -391,6 +391,17 @@
 	  Power Domain Controller driver to manage and configure wakeup
 	  IRQs for Qualcomm Technologies Inc (QTI) mobile chips.
 
+config QCOM_MPM
+	bool "QCOM MPM"
+	depends on ARCH_QCOM
+	select IRQ_DOMAIN
+	select IRQ_DOMAIN_HIERARCHY
+	help
+	  MSM Power Manager driver to manage and configure wakeup
+	  IRQs for Qualcomm Technologies Inc (QTI) mobile chips.
+	  Say yes here to enable the MSM Power Manager interrupt
+	  controller for use as a wakeup interrupt controller.
+
 endmenu
 
 config SIFIVE_PLIC
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index b0d063f..73f2dfd 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -88,4 +88,5 @@
 obj-$(CONFIG_GOLDFISH_PIC) 		+= irq-goldfish-pic.o
 obj-$(CONFIG_NDS32)			+= irq-ativic32.o
 obj-$(CONFIG_QCOM_PDC)			+= qcom-pdc.o
+obj-$(CONFIG_QCOM_MPM)			+= qcom-mpm.o qcom-mpm-bengal.o
 obj-$(CONFIG_SIFIVE_PLIC)		+= irq-sifive-plic.o
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index ee30e89..e7549a2b 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2514,14 +2514,13 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
 	struct its_node *its = its_dev->its;
 	int i;
 
+	bitmap_release_region(its_dev->event_map.lpi_map,
+			      its_get_event_id(irq_domain_get_irq_data(domain, virq)),
+			      get_count_order(nr_irqs));
+
 	for (i = 0; i < nr_irqs; i++) {
 		struct irq_data *data = irq_domain_get_irq_data(domain,
 								virq + i);
-		u32 event = its_get_event_id(data);
-
-		/* Mark interrupt index as unused */
-		clear_bit(event, its_dev->event_map.lpi_map);
-
 		/* Nuke the entry in the domain */
 		irq_domain_reset_irq_data(data);
 	}
@@ -2883,7 +2882,7 @@ static int its_vpe_init(struct its_vpe *vpe)
 
 	if (!its_alloc_vpe_table(vpe_id)) {
 		its_vpe_id_free(vpe_id);
-		its_free_pending_table(vpe->vpt_page);
+		its_free_pending_table(vpt_page);
 		return -ENOMEM;
 	}
 
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 4760307..cef8f5e 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
 	.irq_unmask		= imx_gpcv2_irq_unmask,
 	.irq_set_wake		= imx_gpcv2_irq_set_wake,
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_set_type		= irq_chip_set_type_parent,
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= irq_chip_set_affinity_parent,
 #endif
diff --git a/drivers/irqchip/qcom-mpm-bengal.c b/drivers/irqchip/qcom-mpm-bengal.c
new file mode 100644
index 0000000..9250a7a
--- /dev/null
+++ b/drivers/irqchip/qcom-mpm-bengal.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <soc/qcom/mpm.h>
+const struct mpm_pin mpm_bengal_gic_chip_data[] = {
+	{2, 222},
+	{12, 454}, /* b3_lfps_rxterm_irq */
+	{86, 215}, /* mpm_wake,spmi_m */
+	{90, 292}, /* eud_p0_dpse_int_mx */
+	{91, 292}, /* eud_p0_dmse_int_mx */
+	{-1},
+};
diff --git a/drivers/irqchip/qcom-mpm.c b/drivers/irqchip/qcom-mpm.c
new file mode 100644
index 0000000..c2ed53a
--- /dev/null
+++ b/drivers/irqchip/qcom-mpm.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/tick.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/spinlock.h>
+#include <linux/of_irq.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/cpu_pm.h>
+#include <asm/arch_timer.h>
+#include <soc/qcom/rpm-notifier.h>
+#include <soc/qcom/lpm_levels.h>
+#include <soc/qcom/mpm.h>
+#define CREATE_TRACE_POINTS
+#include "trace/events/mpm.h"
+
+#define ARCH_TIMER_HZ (19200000)
+#define MAX_MPM_PIN_PER_IRQ 2
+#define CLEAR_INTR(reg, intr) (reg & ~(1 << intr))
+#define ENABLE_INTR(reg, intr) (reg | (1 << intr))
+#define CLEAR_TYPE(reg, type) (reg & ~(1 << type))
+#define ENABLE_TYPE(reg, type) (reg | (1 << type))
+#define MPM_REG_ENABLE 0
+#define MPM_REG_FALLING_EDGE 1
+#define MPM_REG_RISING_EDGE 2
+#define MPM_REG_POLARITY 3
+#define MPM_REG_STATUS 4
+#define MPM_GPIO 0
+#define MPM_GIC 1
+
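+/*
+ * vMPM register layout: each logical register (enable, edge, polarity,
+ * status) spans QCOM_MPM_REG_WIDTH 32-bit words; the "+ 2" below skips
+ * the 64-bit wakeup timer occupying the first two words of the mapping.
+ */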
+#define QCOM_MPM_REG_WIDTH  DIV_ROUND_UP(num_mpm_irqs, 32)
+#define MPM_REGISTER(reg, index) ((reg * QCOM_MPM_REG_WIDTH + index + 2) * (4))
+#define GPIO_NO_WAKE_IRQ	~0U
+
+struct msm_mpm_device_data {
+	struct device *dev;
+	void __iomem *mpm_request_reg_base;
+	void __iomem *mpm_ipc_reg;
+	irq_hw_number_t ipc_irq;
+	struct irq_domain *gic_chip_domain;
+	struct irq_domain *gpio_chip_domain;
+};
+
+static int msm_pm_sleep_time_override;
+static int num_mpm_irqs = 64;
+module_param_named(sleep_time_override,
+	msm_pm_sleep_time_override, int, 0664);
+static struct msm_mpm_device_data msm_mpm_dev_data;
+static unsigned int *mpm_to_irq;
+static DEFINE_SPINLOCK(mpm_lock);
+
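+/*
+ * Look up the vMPM pin(s) backing an interrupt. A GIC hwirq may map to
+ * up to MAX_MPM_PIN_PER_IRQ pins via the SoC's pin table; GPIO hwirqs
+ * map to their pin directly.
+ */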
+static void msm_get_mpm_pin(struct irq_data *d, int *mpm_pin, bool is_mpmgic)
+{
+	struct mpm_pin *mpm_data = NULL;
+	int i = 0, j = 0;
+
+	if (!d)
+		return;
+
+	if (is_mpmgic && d->domain->host_data) {
+		mpm_data = d->domain->host_data;
+		for (i = 0; (mpm_data[i].pin >= 0) &&
+				(j < MAX_MPM_PIN_PER_IRQ); i++) {
+			if (mpm_data[i].hwirq == d->hwirq) {
+				mpm_pin[j] = mpm_data[i].pin;
+				mpm_to_irq[mpm_data[i].pin] = d->irq;
+				j++;
+			}
+		}
+	} else if (!is_mpmgic) {
+		mpm_pin[j] = d->hwirq;
+		mpm_to_irq[d->hwirq] = d->irq;
+	}
+}
+
+static inline uint32_t msm_mpm_read(unsigned int reg, unsigned int subreg_index)
+{
+	unsigned int offset = MPM_REGISTER(reg, subreg_index);
+
+	return readl_relaxed(msm_mpm_dev_data.mpm_request_reg_base + offset);
+}
+
+static inline void msm_mpm_write(unsigned int reg,
+					unsigned int subreg_index,
+					uint32_t value)
+{
+	void __iomem *mpm_reg_base = msm_mpm_dev_data.mpm_request_reg_base;
+	/*
+	 * Add 2 to offset to account for the 64 bit timer in the vMPM
+	 * mapping
+	 */
+	unsigned int offset = MPM_REGISTER(reg, subreg_index);
+	u32 r_value;
+
+	writel_relaxed(value, mpm_reg_base + offset);
+
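+	/* Poll the readback until the new value is visible. */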
+	do {
+		r_value = readl_relaxed(mpm_reg_base + offset);
+		udelay(5);
+	} while (r_value != value);
+}
+
+static inline void msm_mpm_enable_irq(struct irq_data *d, bool on,
+							bool is_mpmgic)
+{
+	int mpm_pin[MAX_MPM_PIN_PER_IRQ] = {-1, -1};
+	unsigned long flags;
+	int i = 0;
+	u32 enable;
+	unsigned int index, mask;
+	unsigned int reg;
+
+	reg = MPM_REG_ENABLE;
+	msm_get_mpm_pin(d, mpm_pin, is_mpmgic);
+	for (i = 0; i < MAX_MPM_PIN_PER_IRQ; i++) {
+		if (mpm_pin[i] < 0)
+			return;
+
+		index = mpm_pin[i]/32;
+		mask = mpm_pin[i]%32;
+		spin_lock_irqsave(&mpm_lock, flags);
+		enable = msm_mpm_read(reg, index);
+
+		if (on)
+			enable = ENABLE_INTR(enable, mask);
+		else
+			enable = CLEAR_INTR(enable, mask);
+
+		msm_mpm_write(reg, index, enable);
+		spin_unlock_irqrestore(&mpm_lock, flags);
+	}
+}
+
+static void msm_mpm_program_set_type(bool set, unsigned int reg,
+					unsigned int index, unsigned int mask)
+{
+	u32 type;
+
+	type = msm_mpm_read(reg, index);
+	if (set)
+		type = ENABLE_TYPE(type, mask);
+	else
+		type = CLEAR_TYPE(type, mask);
+
+	msm_mpm_write(reg, index, type);
+}
+
+static void msm_mpm_set_type(struct irq_data *d,
+					unsigned int flowtype, bool is_mpmgic)
+{
+	int mpm_pin[MAX_MPM_PIN_PER_IRQ] = {-1, -1};
+	unsigned long flags;
+	int i = 0;
+	unsigned int index, mask;
+	unsigned int reg = 0;
+
+	msm_get_mpm_pin(d, mpm_pin, is_mpmgic);
+	for (i = 0; i < MAX_MPM_PIN_PER_IRQ; i++) {
+		if (mpm_pin[i] < 0)
+			return;
+
+		index = mpm_pin[i]/32;
+		mask = mpm_pin[i]%32;
+
+		spin_lock_irqsave(&mpm_lock, flags);
+		reg = MPM_REG_RISING_EDGE;
+		if (flowtype & IRQ_TYPE_EDGE_RISING)
+			msm_mpm_program_set_type(1, reg, index, mask);
+		else
+			msm_mpm_program_set_type(0, reg, index, mask);
+
+		reg = MPM_REG_FALLING_EDGE;
+		if (flowtype & IRQ_TYPE_EDGE_FALLING)
+			msm_mpm_program_set_type(1, reg, index, mask);
+		else
+			msm_mpm_program_set_type(0, reg, index, mask);
+
+		reg = MPM_REG_POLARITY;
+		if (flowtype & IRQ_TYPE_LEVEL_HIGH)
+			msm_mpm_program_set_type(1, reg, index, mask);
+		else
+			msm_mpm_program_set_type(0, reg, index, mask);
+		spin_unlock_irqrestore(&mpm_lock, flags);
+	}
+}
+
+static void msm_mpm_gpio_chip_mask(struct irq_data *d)
+{
+	if (d->hwirq == GPIO_NO_WAKE_IRQ)
+		return;
+
+	msm_mpm_enable_irq(d, false, MPM_GPIO);
+}
+
+static void msm_mpm_gpio_chip_unmask(struct irq_data *d)
+{
+	if (d->hwirq == GPIO_NO_WAKE_IRQ)
+		return;
+
+	msm_mpm_enable_irq(d, true, MPM_GPIO);
+}
+
+static int msm_mpm_gpio_chip_set_type(struct irq_data *d, unsigned int type)
+{
+	if (d->hwirq == GPIO_NO_WAKE_IRQ)
+		return 0;
+
+	msm_mpm_set_type(d, type, MPM_GPIO);
+
+	return 0;
+}
+
+static void msm_mpm_gic_chip_mask(struct irq_data *d)
+{
+	msm_mpm_enable_irq(d, false, MPM_GIC);
+	irq_chip_mask_parent(d);
+}
+
+static void msm_mpm_gic_chip_unmask(struct irq_data *d)
+{
+	msm_mpm_enable_irq(d, true, MPM_GIC);
+	irq_chip_unmask_parent(d);
+}
+
+static int msm_mpm_gic_chip_set_type(struct irq_data *d, unsigned int type)
+{
+	msm_mpm_set_type(d, type, MPM_GIC);
+	return irq_chip_set_type_parent(d, type);
+}
+
+static struct irq_chip msm_mpm_gic_chip = {
+	.name		= "mpm-gic",
+	.irq_eoi	= irq_chip_eoi_parent,
+	.irq_mask	= msm_mpm_gic_chip_mask,
+	.irq_disable	= msm_mpm_gic_chip_mask,
+	.irq_unmask	= msm_mpm_gic_chip_unmask,
+	.irq_retrigger	= irq_chip_retrigger_hierarchy,
+	.irq_set_type	= msm_mpm_gic_chip_set_type,
+	.flags		= IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+};
+
+static struct irq_chip msm_mpm_gpio_chip = {
+	.name		= "mpm-gpio",
+	.irq_mask	= msm_mpm_gpio_chip_mask,
+	.irq_disable	= msm_mpm_gpio_chip_mask,
+	.irq_unmask	= msm_mpm_gpio_chip_unmask,
+	.irq_set_type	= msm_mpm_gpio_chip_set_type,
+	.flags		= IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
+	.irq_retrigger          = irq_chip_retrigger_hierarchy,
+};
+
+static int msm_mpm_gpio_chip_translate(struct irq_domain *d,
+		struct irq_fwspec *fwspec,
+		unsigned long *hwirq,
+		unsigned int *type)
+{
+	if (is_of_node(fwspec->fwnode)) {
+		if (fwspec->param_count != 2)
+			return -EINVAL;
+		*hwirq = fwspec->param[0];
+		*type = fwspec->param[1];
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int msm_mpm_gpio_chip_alloc(struct irq_domain *domain,
+		unsigned int virq,
+		unsigned int nr_irqs,
+		void *data)
+{
+	int ret = 0;
+	struct irq_fwspec *fwspec = data;
+	irq_hw_number_t hwirq;
+	unsigned int type = IRQ_TYPE_NONE;
+
+	ret = msm_mpm_gpio_chip_translate(domain, fwspec, &hwirq, &type);
+	if (ret)
+		return ret;
+
+	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+				&msm_mpm_gpio_chip, NULL);
+
+	return 0;
+}
+
+static const struct irq_domain_ops msm_mpm_gpio_chip_domain_ops = {
+	.translate	= msm_mpm_gpio_chip_translate,
+	.alloc		= msm_mpm_gpio_chip_alloc,
+	.free		= irq_domain_free_irqs_common,
+};
+
+static int msm_mpm_gic_chip_translate(struct irq_domain *d,
+					struct irq_fwspec *fwspec,
+					unsigned long *hwirq,
+					unsigned int *type)
+{
+	if (is_of_node(fwspec->fwnode)) {
+		if (fwspec->param_count < 3)
+			return -EINVAL;
+
+		switch (fwspec->param[0]) {
+		case 0:			/* SPI */
+			*hwirq = fwspec->param[1] + 32;
+			break;
+		case 1:			/* PPI */
+			*hwirq = fwspec->param[1] + 16;
+			break;
+		case GIC_IRQ_TYPE_LPI:	/* LPI */
+			*hwirq = fwspec->param[1];
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+		return 0;
+	}
+
+	if (is_fwnode_irqchip(fwspec->fwnode)) {
+		if (fwspec->param_count != 2)
+			return -EINVAL;
+
+		*hwirq = fwspec->param[0];
+		*type = fwspec->param[1];
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int msm_mpm_gic_chip_alloc(struct irq_domain *domain,
+					unsigned int virq,
+					unsigned int nr_irqs,
+					void *data)
+{
+	struct irq_fwspec *fwspec = data;
+	struct irq_fwspec parent_fwspec;
+	irq_hw_number_t hwirq;
+	unsigned int type;
+	int  ret;
+
+	ret = msm_mpm_gic_chip_translate(domain, fwspec, &hwirq, &type);
+	if (ret)
+		return ret;
+
+	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+						&msm_mpm_gic_chip, NULL);
+
+	parent_fwspec = *fwspec;
+	parent_fwspec.fwnode = domain->parent->fwnode;
+	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+					    &parent_fwspec);
+}
+
+static const struct irq_domain_ops msm_mpm_gic_chip_domain_ops = {
+	.translate	= msm_mpm_gic_chip_translate,
+	.alloc		= msm_mpm_gic_chip_alloc,
+	.free		= irq_domain_free_irqs_common,
+};
+
+static inline void msm_mpm_send_interrupt(void)
+{
+	writel_relaxed(2, msm_mpm_dev_data.mpm_ipc_reg);
+	/* Ensure the write is complete before returning. */
+	wmb();
+}
+
+static inline void msm_mpm_timer_write(uint32_t *expiry)
+{
+	writel_relaxed(expiry[0], msm_mpm_dev_data.mpm_request_reg_base);
+	writel_relaxed(expiry[1], msm_mpm_dev_data.mpm_request_reg_base + 0x4);
+}
+
+static void msm_mpm_enter_sleep(struct cpumask *cpumask)
+{
+	msm_mpm_send_interrupt();
+	irq_set_affinity(msm_mpm_dev_data.ipc_irq, cpumask);
+}
+
+static void system_pm_exit_sleep(bool success)
+{
+	msm_rpm_exit_sleep();
+}
+
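+/*
+ * Convert a sleep duration in microseconds into an absolute 19.2 MHz
+ * arch-timer deadline. The nsec term always divides down to zero, so
+ * the duration is effectively truncated to whole seconds; a zero
+ * duration yields the "no wakeup" value of ~0ULL.
+ */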
+static u64 us_to_ticks(uint64_t sleep_val)
+{
+	uint64_t sec, nsec;
+	u64 wakeup;
+
+	sec = sleep_val;
+	do_div(sec, USEC_PER_SEC);
+	nsec = sleep_val - sec * USEC_PER_SEC;
+
+	if (nsec > 0) {
+		nsec = nsec * NSEC_PER_USEC;
+		do_div(nsec, NSEC_PER_SEC);
+	}
+
+	sleep_val = sec + nsec;
+
+	wakeup = (u64)sleep_val * ARCH_TIMER_HZ;
+
+	if (sleep_val)
+		wakeup += arch_counter_get_cntvct();
+	else
+		wakeup = (~0ULL);
+
+	return wakeup;
+}
+
+static int system_pm_update_wakeup(bool from_idle)
+{
+	uint64_t wake_time;
+	uint32_t lo = ~0U, hi = ~0U;
+	u64 wakeup;
+
+	if (unlikely(!from_idle && msm_pm_sleep_time_override)) {
+		wake_time = msm_pm_sleep_time_override * USEC_PER_SEC;
+		wakeup = us_to_ticks(wake_time);
+	} else {
+		/* Read the hardware to get the most accurate value */
+
+		arch_timer_mem_get_cval(&lo, &hi);
+		wakeup = lo;
+		wakeup |= ((uint64_t)(hi) << 32);
+	}
+
+	msm_mpm_timer_write((uint32_t *)&wakeup);
+	trace_mpm_wakeup_time(from_idle, wakeup, arch_counter_get_cntvct());
+
+	return 0;
+}
+
+static int system_pm_enter_sleep(struct cpumask *mask)
+{
+	int ret = 0, i = 0;
+
+	ret = msm_rpm_enter_sleep(0, mask);
+	if (ret)
+		return ret;
+
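+	/* Clear stale wakeup status bits before arming the MPM for sleep. */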
+	for (i = 0; i < QCOM_MPM_REG_WIDTH; i++)
+		msm_mpm_write(MPM_REG_STATUS, i, 0);
+
+	msm_mpm_enter_sleep(mask);
+
+	return ret;
+}
+
+static bool system_pm_sleep_allowed(void)
+{
+	return !msm_rpm_waiting_for_ack();
+}
+
+static struct system_pm_ops pm_ops = {
+	.enter = system_pm_enter_sleep,
+	.exit = system_pm_exit_sleep,
+	.update_wakeup = system_pm_update_wakeup,
+	.sleep_allowed = system_pm_sleep_allowed,
+};
+
+/*
+ * Triggered by the RPM when the system resumes from deep sleep. Replays
+ * wakeup-enabled MPM pins that are pending as GIC interrupts; only
+ * edge-type interrupts need retriggering, since level-type ones
+ * re-assert on their own.
+ */
+static irqreturn_t msm_mpm_irq(int irq, void *dev_id)
+{
+	unsigned long pending;
+	uint32_t value[3];
+	int i, k, apps_irq;
+	unsigned int mpm_irq;
+	struct irq_desc *desc = NULL;
+	unsigned int reg = MPM_REG_ENABLE;
+
+	for (i = 0; i < QCOM_MPM_REG_WIDTH; i++) {
+		value[i] = msm_mpm_read(reg, i);
+		trace_mpm_wakeup_enable_irqs(i, value[i]);
+	}
+
+	for (i = 0; i < QCOM_MPM_REG_WIDTH; i++) {
+		pending = msm_mpm_read(MPM_REG_STATUS, i);
+		pending &= (unsigned long)value[i];
+
+		trace_mpm_wakeup_pending_irqs(i, pending);
+		for_each_set_bit(k, &pending, 32) {
+			mpm_irq = 32 * i + k;
+			apps_irq = mpm_to_irq[mpm_irq];
+			desc = apps_irq ?
+				irq_to_desc(apps_irq) : NULL;
+
+			if (desc && !irqd_is_level_type(&desc->irq_data))
+				irq_set_irqchip_state(apps_irq,
+						IRQCHIP_STATE_PENDING, true);
+
+		}
+
+	}
+	return IRQ_HANDLED;
+}
+
+static int msm_mpm_init(struct device_node *node)
+{
+	struct msm_mpm_device_data *dev = &msm_mpm_dev_data;
+	int ret = 0;
+	int irq, index;
+
+	index = of_property_match_string(node, "reg-names", "vmpm");
+	if (index < 0) {
+		ret = -EADDRNOTAVAIL;
+		goto reg_base_err;
+	}
+
+	dev->mpm_request_reg_base = of_iomap(node, index);
+	if (!dev->mpm_request_reg_base) {
+		pr_err("Unable to iomap\n");
+		ret = -EADDRNOTAVAIL;
+		goto reg_base_err;
+	}
+
+	index = of_property_match_string(node, "reg-names", "ipc");
+	if (index < 0) {
+		ret = -EADDRNOTAVAIL;
+		goto reg_base_err;
+	}
+
+	dev->mpm_ipc_reg = of_iomap(node, index);
+	if (!dev->mpm_ipc_reg) {
+		pr_err("Unable to iomap IPC register\n");
+		ret = -EADDRNOTAVAIL;
+		goto ipc_reg_err;
+	}
+
+	irq = of_irq_get(node, 0);
+	if (irq <= 0) {
+		pr_err("no IRQ resource info\n");
+		ret = irq;
+		goto ipc_irq_err;
+	}
+	dev->ipc_irq = irq;
+
+	ret = request_irq(dev->ipc_irq, msm_mpm_irq,
+		IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND, "mpm",
+		msm_mpm_irq);
+	if (ret) {
+		pr_err("request_irq failed errno: %d\n", ret);
+		goto ipc_irq_err;
+	}
+
+	ret = irq_set_irq_wake(dev->ipc_irq, 1);
+	if (ret) {
+		pr_err("failed to set wakeup irq %lu: %d\n",
+			dev->ipc_irq, ret);
+		goto set_wake_irq_err;
+	}
+
+	return register_system_pm_ops(&pm_ops);
+
+set_wake_irq_err:
+	free_irq(dev->ipc_irq, msm_mpm_irq);
+ipc_irq_err:
+	iounmap(dev->mpm_ipc_reg);
+ipc_reg_err:
+	iounmap(dev->mpm_request_reg_base);
+reg_base_err:
+	return ret;
+}
+
+static const struct of_device_id mpm_gic_chip_data_table[] = {
+	{
+		.compatible = "qcom,mpm-gic-bengal",
+		.data = mpm_bengal_gic_chip_data,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, mpm_gic_chip_data_table);
+
+static int __init mpm_gic_chip_init(struct device_node *node,
+					struct device_node *parent)
+{
+	struct irq_domain *parent_domain;
+	const struct of_device_id *id;
+	int ret;
+
+	if (!parent) {
+		pr_err("%s(): no parent for mpm-gic\n", node->full_name);
+		return -ENXIO;
+	}
+
+	parent_domain = irq_find_host(parent);
+	if (!parent_domain) {
+		pr_err("unable to obtain gic parent domain\n");
+		return -ENXIO;
+	}
+
+	of_property_read_u32(node, "qcom,num-mpm-irqs", &num_mpm_irqs);
+
+	mpm_to_irq = kcalloc(num_mpm_irqs, sizeof(*mpm_to_irq), GFP_KERNEL);
+	if (!mpm_to_irq)
+		return -ENOMEM;
+
+	id = of_match_node(mpm_gic_chip_data_table, node);
+	if (!id) {
+		pr_err("can not find mpm_gic_data_table of_node\n");
+		ret = -ENODEV;
+		goto mpm_map_err;
+	}
+
+	msm_mpm_dev_data.gic_chip_domain = irq_domain_add_hierarchy(
+			parent_domain, 0, num_mpm_irqs, node,
+			&msm_mpm_gic_chip_domain_ops, (void *)id->data);
+	if (!msm_mpm_dev_data.gic_chip_domain) {
+		pr_err("gic domain add failed\n");
+		ret = -ENOMEM;
+		goto mpm_map_err;
+	}
+
+	msm_mpm_dev_data.gic_chip_domain->name = "qcom,mpm-gic";
+
+	ret = msm_mpm_init(node);
+	if (!ret)
+		return ret;
+	irq_domain_remove(msm_mpm_dev_data.gic_chip_domain);
+
+mpm_map_err:
+	kfree(mpm_to_irq);
+	return ret;
+}
+
+IRQCHIP_DECLARE(mpm_gic_chip, "qcom,mpm-gic", mpm_gic_chip_init);
+
+static int __init mpm_gpio_chip_init(struct device_node *node,
+					struct device_node *parent)
+{
+	msm_mpm_dev_data.gpio_chip_domain = irq_domain_create_linear(
+			of_node_to_fwnode(node), num_mpm_irqs,
+			&msm_mpm_gpio_chip_domain_ops, NULL);
+
+	if (!msm_mpm_dev_data.gpio_chip_domain)
+		return -ENOMEM;
+
+	irq_domain_update_bus_token(msm_mpm_dev_data.gpio_chip_domain,
+							DOMAIN_BUS_WAKEUP);
+	msm_mpm_dev_data.gpio_chip_domain->name = "qcom,mpm-gpio";
+
+	return 0;
+}
+
+IRQCHIP_DECLARE(mpm_gpio_chip, "qcom,mpm-gpio", mpm_gpio_chip_init);
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index ef5560b..21786a4 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -688,6 +688,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
 	if (!cdev->ap.applid)
 		return -ENODEV;
 
+	if (count < CAPIMSG_BASELEN)
+		return -EINVAL;
+
 	skb = alloc_skb(count, GFP_USER);
 	if (!skb)
 		return -ENOMEM;
@@ -698,7 +701,8 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
 	}
 	mlen = CAPIMSG_LEN(skb->data);
 	if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
-		if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
+		if (count < CAPI_DATA_B3_REQ_LEN ||
+		    (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
 			kfree_skb(skb);
 			return -EINVAL;
 		}
@@ -711,6 +715,10 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
 	CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
 
 	if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
+		if (count < CAPI_DISCONNECT_B3_RESP_LEN) {
+			kfree_skb(skb);
+			return -EINVAL;
+		}
 		mutex_lock(&cdev->lock);
 		capincci_free(cdev, CAPIMSG_NCCI(skb->data));
 		mutex_unlock(&cdev->lock);
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 060dc7f..c952002 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -1406,6 +1406,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
 				printk(KERN_DEBUG
 				       "%s: %s: alloc urb for fifo %i failed",
 				       hw->name, __func__, fifo->fifonum);
+				continue;
 			}
 			fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
 			fifo->iso[i].indx = i;
@@ -1704,13 +1705,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
 static int
 setup_hfcsusb(struct hfcsusb *hw)
 {
+	void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
 	u_char b;
+	int ret;
 
 	if (debug & DBG_HFC_CALL_TRACE)
 		printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
 
+	if (!dmabuf)
+		return -ENOMEM;
+
+	ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
+
+	memcpy(&b, dmabuf, sizeof(u_char));
+	kfree(dmabuf);
+
 	/* check the chip id */
-	if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
+	if (ret != 1) {
 		printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
 		       hw->name, __func__);
 		return 1;
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index a73337b..db588a7 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -764,6 +764,8 @@ base_sock_create(struct net *net, struct socket *sock, int protocol, int kern)
 
 	if (sock->type != SOCK_RAW)
 		return -ESOCKTNOSUPPORT;
+	if (!capable(CAP_NET_RAW))
+		return -EPERM;
 
 	sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern);
 	if (!sk)
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index d839811..1391b90 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -178,6 +178,7 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
 	list_del(&led_cdev->trig_list);
 	write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock, flags);
 	led_set_brightness(led_cdev, LED_OFF);
+	kfree(event);
 
 	return ret;
 }
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index 2a9009f..18edc8b 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -263,7 +263,11 @@ static void lp5562_firmware_loaded(struct lp55xx_chip *chip)
 {
 	const struct firmware *fw = chip->fw;
 
-	if (fw->size > LP5562_PROGRAM_LENGTH) {
+	/*
+	 * The firmware is encoded in ASCII hex characters, two characters
+	 * per byte.
+	 */
+	if (fw->size > (LP5562_PROGRAM_LENGTH * 2)) {
 		dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
 			fw->size);
 		return;
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 7a669e8..4a2d664 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -27,132 +27,162 @@
 #include "leds.h"
 
 #define	FLASH_LED_REG_LED_STATUS1(base)		(base + 0x08)
+
 #define	FLASH_LED_REG_LED_STATUS2(base)		(base + 0x09)
+#define	FLASH_LED_VPH_DROOP_FAULT_MASK		BIT(4)
+
 #define	FLASH_LED_REG_INT_RT_STS(base)		(base + 0x10)
+
 #define	FLASH_LED_REG_SAFETY_TMR(base)		(base + 0x40)
+#define	FLASH_LED_SAFETY_TMR_ENABLE		BIT(7)
+
 #define	FLASH_LED_REG_TGR_CURRENT(base)		(base + 0x43)
+
 #define	FLASH_LED_REG_MOD_CTRL(base)		(base + 0x46)
+#define	FLASH_LED_MOD_CTRL_MASK			BIT(7)
+#define	FLASH_LED_MOD_ENABLE			BIT(7)
+
 #define	FLASH_LED_REG_IRES(base)		(base + 0x47)
+
 #define	FLASH_LED_REG_STROBE_CFG(base)		(base + 0x48)
+#define	FLASH_LED_STROBE_MASK			GENMASK(1, 0)
+
 #define	FLASH_LED_REG_STROBE_CTRL(base)		(base + 0x49)
+#define	FLASH_LED_HW_SW_STROBE_SEL_BIT		BIT(2)
+#define	FLASH_HW_STROBE_MASK			GENMASK(2, 0)
+
 #define	FLASH_LED_EN_LED_CTRL(base)		(base + 0x4C)
+#define	FLASH_LED_ENABLE			BIT(0)
+
 #define	FLASH_LED_REG_HDRM_PRGM(base)		(base + 0x4D)
+#define	FLASH_LED_HDRM_VOL_MASK			GENMASK(7, 4)
+#define	FLASH_LED_HDRM_VOL_SHIFT		4
+
 #define	FLASH_LED_REG_HDRM_AUTO_MODE_CTRL(base)	(base + 0x50)
 #define	FLASH_LED_REG_WARMUP_DELAY(base)	(base + 0x51)
-#define	FLASH_LED_REG_ISC_DELAY(base)		(base + 0x52)
-#define	FLASH_LED_REG_THERMAL_RMP_DN_RATE(base)	(base + 0x55)
-#define	FLASH_LED_REG_THERMAL_THRSH1(base)	(base + 0x56)
-#define	FLASH_LED_REG_THERMAL_THRSH2(base)	(base + 0x57)
-#define	FLASH_LED_REG_THERMAL_THRSH3(base)	(base + 0x58)
-#define	FLASH_LED_REG_THERMAL_HYSTERESIS(base)	(base + 0x59)
-#define	FLASH_LED_REG_THERMAL_DEBOUNCE(base)	(base + 0x5A)
-#define	FLASH_LED_REG_VPH_DROOP_THRESHOLD(base)	(base + 0x61)
-#define	FLASH_LED_REG_VPH_DROOP_DEBOUNCE(base)	(base + 0x62)
-#define	FLASH_LED_REG_ILED_GRT_THRSH(base)	(base + 0x67)
-#define	FLASH_LED_REG_LED1N2_ICLAMP_LOW(base)	(base + 0x68)
-#define	FLASH_LED_REG_LED1N2_ICLAMP_MID(base)	(base + 0x69)
-#define	FLASH_LED_REG_LED3_ICLAMP_LOW(base)	(base + 0x6A)
-#define	FLASH_LED_REG_LED3_ICLAMP_MID(base)	(base + 0x6B)
-#define	FLASH_LED_REG_MITIGATION_SEL(base)	(base + 0x6E)
-#define	FLASH_LED_REG_MITIGATION_SW(base)	(base + 0x6F)
-#define	FLASH_LED_REG_LMH_LEVEL(base)		(base + 0x70)
-#define	FLASH_LED_REG_MULTI_STROBE_CTRL(base)	(base + 0x71)
-#define	FLASH_LED_REG_LPG_INPUT_CTRL(base)	(base + 0x72)
-#define	FLASH_LED_REG_CURRENT_DERATE_EN(base)	(base + 0x76)
 
-#define	FLASH_LED_HDRM_VOL_MASK			GENMASK(7, 4)
-#define	FLASH_LED_CURRENT_MASK			GENMASK(6, 0)
-#define	FLASH_LED_STROBE_MASK			GENMASK(1, 0)
-#define	FLASH_HW_STROBE_MASK			GENMASK(2, 0)
+#define	FLASH_LED_REG_ISC_DELAY(base)		(base + 0x52)
 #define	FLASH_LED_ISC_WARMUP_DELAY_MASK		GENMASK(1, 0)
-#define	FLASH_LED_CURRENT_DERATE_EN_MASK	GENMASK(2, 0)
-#define	FLASH_LED_VPH_DROOP_DEBOUNCE_MASK	GENMASK(1, 0)
-#define	FLASH_LED_CHGR_MITIGATION_SEL_MASK	GENMASK(5, 4)
-#define	FLASH_LED_LMH_MITIGATION_SEL_MASK	GENMASK(1, 0)
-#define	FLASH_LED_ILED_GRT_THRSH_MASK		GENMASK(5, 0)
-#define	FLASH_LED_LMH_LEVEL_MASK		GENMASK(1, 0)
-#define	FLASH_LED_VPH_DROOP_HYSTERESIS_MASK	GENMASK(5, 4)
-#define	FLASH_LED_VPH_DROOP_THRESHOLD_MASK	GENMASK(2, 0)
-#define	FLASH_LED_THERMAL_HYSTERESIS_MASK	GENMASK(1, 0)
-#define	FLASH_LED_THERMAL_DEBOUNCE_MASK		GENMASK(1, 0)
-#define	FLASH_LED_THERMAL_THRSH_MASK		GENMASK(2, 0)
-#define	FLASH_LED_MOD_CTRL_MASK			BIT(7)
-#define	FLASH_LED_HW_SW_STROBE_SEL_BIT		BIT(2)
-#define	FLASH_LED_VPH_DROOP_FAULT_MASK		BIT(4)
-#define	FLASH_LED_LMH_MITIGATION_EN_MASK	BIT(0)
-#define	FLASH_LED_CHGR_MITIGATION_EN_MASK	BIT(4)
+#define	FLASH_LED_ISC_WARMUP_DELAY_SHIFT		6
+
+#define	FLASH_LED_REG_THERMAL_RMP_DN_RATE(base)	(base + 0x55)
 #define	THERMAL_OTST1_RAMP_CTRL_MASK		BIT(7)
 #define	THERMAL_OTST1_RAMP_CTRL_SHIFT		7
 #define	THERMAL_DERATE_SLOW_SHIFT		4
 #define	THERMAL_DERATE_SLOW_MASK		GENMASK(6, 4)
 #define	THERMAL_DERATE_FAST_MASK		GENMASK(2, 0)
-#define	LED1N2_FLASH_ONCE_ONLY_BIT		BIT(0)
+
+#define	FLASH_LED_REG_THERMAL_THRSH1(base)	(base + 0x56)
+#define	FLASH_LED_THERMAL_THRSH_MASK		GENMASK(2, 0)
+
+#define	FLASH_LED_REG_THERMAL_THRSH2(base)	(base + 0x57)
+#define	FLASH_LED_REG_THERMAL_THRSH3(base)	(base + 0x58)
+
+#define	FLASH_LED_REG_THERMAL_HYSTERESIS(base)	(base + 0x59)
+#define	FLASH_LED_THERMAL_HYSTERESIS_MASK	GENMASK(1, 0)
+
+#define	FLASH_LED_REG_THERMAL_DEBOUNCE(base)	(base + 0x5A)
+#define	FLASH_LED_THERMAL_DEBOUNCE_MASK		GENMASK(1, 0)
+
+#define	FLASH_LED_REG_VPH_DROOP_THRESHOLD(base)	(base + 0x61)
+#define	FLASH_LED_VPH_DROOP_HYSTERESIS_MASK	GENMASK(5, 4)
+#define	FLASH_LED_VPH_DROOP_THRESHOLD_MASK	GENMASK(2, 0)
+#define	FLASH_LED_VPH_DROOP_HYST_SHIFT		4
+
+#define	FLASH_LED_REG_VPH_DROOP_DEBOUNCE(base)	(base + 0x62)
+#define	FLASH_LED_VPH_DROOP_DEBOUNCE_MASK	GENMASK(1, 0)
+
+#define	FLASH_LED_REG_ILED_GRT_THRSH(base)	(base + 0x67)
+#define	FLASH_LED_ILED_GRT_THRSH_MASK		GENMASK(5, 0)
+
+#define	FLASH_LED_REG_LED1N2_ICLAMP_LOW(base)	(base + 0x68)
+#define	FLASH_LED_REG_LED1N2_ICLAMP_MID(base)	(base + 0x69)
+#define	FLASH_LED_REG_LED3_ICLAMP_LOW(base)	(base + 0x6A)
+
+#define	FLASH_LED_REG_LED3_ICLAMP_MID(base)	(base + 0x6B)
+#define	FLASH_LED_CURRENT_MASK			GENMASK(6, 0)
+
+#define	FLASH_LED_REG_MITIGATION_SEL(base)	(base + 0x6E)
+#define	FLASH_LED_CHGR_MITIGATION_SEL_MASK	GENMASK(5, 4)
+#define	FLASH_LED_LMH_MITIGATION_SEL_MASK	GENMASK(1, 0)
+
+#define	FLASH_LED_REG_MITIGATION_SW(base)	(base + 0x6F)
+#define	FLASH_LED_LMH_MITIGATION_EN_MASK	BIT(0)
+#define	FLASH_LED_CHGR_MITIGATION_EN_MASK	BIT(4)
+#define	FLASH_LED_CHGR_MITIGATION_ENABLE	BIT(4)
+
+#define	FLASH_LED_REG_LMH_LEVEL(base)		(base + 0x70)
+#define	FLASH_LED_LMH_LEVEL_MASK		GENMASK(1, 0)
+
+#define	FLASH_LED_REG_MULTI_STROBE_CTRL(base)	(base + 0x71)
 #define	LED3_FLASH_ONCE_ONLY_BIT		BIT(1)
+#define	LED1N2_FLASH_ONCE_ONLY_BIT		BIT(0)
+
+#define	FLASH_LED_REG_LPG_INPUT_CTRL(base)	(base + 0x72)
 #define	LPG_INPUT_SEL_BIT			BIT(0)
 
+#define	FLASH_LED_REG_CURRENT_DERATE_EN(base)	(base + 0x76)
+#define	FLASH_LED_CURRENT_DERATE_EN_MASK	GENMASK(2, 0)
+
 #define	VPH_DROOP_DEBOUNCE_US_TO_VAL(val_us)	(val_us / 8)
 #define	VPH_DROOP_HYST_MV_TO_VAL(val_mv)	(val_mv / 25)
 #define	VPH_DROOP_THRESH_VAL_TO_UV(val)		((val + 25) * 100000)
 #define	MITIGATION_THRSH_MA_TO_VAL(val_ma)	(val_ma / 100)
 #define	THERMAL_HYST_TEMP_TO_VAL(val, divisor)	(val / divisor)
 
-#define	FLASH_LED_ISC_WARMUP_DELAY_SHIFT	6
-#define	FLASH_LED_WARMUP_DELAY_DEFAULT		2
-#define	FLASH_LED_ISC_DELAY_DEFAULT		3
-#define	FLASH_LED_VPH_DROOP_DEBOUNCE_DEFAULT	2
-#define	FLASH_LED_VPH_DROOP_HYST_SHIFT		4
-#define	FLASH_LED_VPH_DROOP_HYST_DEFAULT	2
-#define	FLASH_LED_VPH_DROOP_THRESH_DEFAULT	5
-#define	FLASH_LED_DEBOUNCE_MAX			3
-#define	FLASH_LED_HYSTERESIS_MAX		3
-#define	FLASH_LED_VPH_DROOP_THRESH_MAX		7
-#define	THERMAL_DERATE_SLOW_MAX			314592
-#define	THERMAL_DERATE_FAST_MAX			512
-#define	THERMAL_DEBOUNCE_TIME_MAX		64
-#define	THERMAL_DERATE_HYSTERESIS_MAX		3
-#define	FLASH_LED_THERMAL_THRSH_MIN		3
-#define	FLASH_LED_THERMAL_THRSH_MAX		7
-#define	FLASH_LED_THERMAL_OTST_LEVELS		3
-#define	FLASH_LED_VLED_MAX_DEFAULT_UV		3500000
-#define	FLASH_LED_IBATT_OCP_THRESH_DEFAULT_UA	4500000
-#define	FLASH_LED_RPARA_DEFAULT_UOHM		0
-#define	FLASH_LED_SAFETY_TMR_ENABLE		BIT(7)
-#define	FLASH_LED_LMH_LEVEL_DEFAULT		0
-#define	FLASH_LED_LMH_MITIGATION_ENABLE		1
-#define	FLASH_LED_LMH_MITIGATION_DISABLE	0
-#define	FLASH_LED_CHGR_MITIGATION_ENABLE	BIT(4)
-#define	FLASH_LED_CHGR_MITIGATION_DISABLE	0
-#define	FLASH_LED_LMH_MITIGATION_SEL_DEFAULT	2
-#define	FLASH_LED_MITIGATION_SEL_MAX		2
-#define	FLASH_LED_CHGR_MITIGATION_SEL_SHIFT	4
-#define	FLASH_LED_CHGR_MITIGATION_THRSH_DEFAULT	0xA
-#define	FLASH_LED_CHGR_MITIGATION_THRSH_MAX	0x1F
-#define	FLASH_LED_LMH_OCV_THRESH_DEFAULT_UV	3700000
-#define	FLASH_LED_LMH_RBATT_THRESH_DEFAULT_UOHM	400000
-#define	FLASH_LED_IRES_BASE			3
-#define	FLASH_LED_IRES_DIVISOR			2500
-#define	FLASH_LED_IRES_MIN_UA			5000
-#define	FLASH_LED_IRES_DEFAULT_UA		12500
-#define	FLASH_LED_IRES_DEFAULT_VAL		0x00
-#define	FLASH_LED_HDRM_VOL_SHIFT		4
-#define	FLASH_LED_HDRM_VOL_DEFAULT_MV		0x80
-#define	FLASH_LED_HDRM_VOL_HI_LO_WIN_DEFAULT_MV	0x04
-#define	FLASH_LED_HDRM_VOL_BASE_MV		125
-#define	FLASH_LED_HDRM_VOL_STEP_MV		25
-#define	FLASH_LED_STROBE_CFG_DEFAULT		0x00
-#define	FLASH_LED_HW_STROBE_OPTION_1		0x00
-#define	FLASH_LED_HW_STROBE_OPTION_2		0x01
-#define	FLASH_LED_HW_STROBE_OPTION_3		0x02
-#define	FLASH_LED_ENABLE			BIT(0)
-#define	FLASH_LED_MOD_ENABLE			BIT(7)
-#define	FLASH_LED_DISABLE			0x00
-#define	FLASH_LED_SAFETY_TMR_DISABLED		0x13
-#define	FLASH_LED_MAX_TOTAL_CURRENT_MA		3750
-#define	FLASH_LED_IRES5P0_MAX_CURR_MA		640
-#define	FLASH_LED_IRES7P5_MAX_CURR_MA		960
-#define	FLASH_LED_IRES10P0_MAX_CURR_MA		1280
-#define	FLASH_LED_IRES12P5_MAX_CURR_MA		1600
-#define	MAX_IRES_LEVELS				4
+#define	FLASH_LED_WARMUP_DELAY_DEFAULT			2
+#define	FLASH_LED_ISC_DELAY_DEFAULT			3
+#define	FLASH_LED_VPH_DROOP_DEBOUNCE_DEFAULT		2
+#define	FLASH_LED_VPH_DROOP_HYST_DEFAULT		2
+#define	FLASH_LED_VPH_DROOP_THRESH_DEFAULT		5
+#define	BHARGER_FLASH_LED_VPH_DROOP_THRESH_DEFAULT	7
+#define	FLASH_LED_DEBOUNCE_MAX				3
+#define	FLASH_LED_HYSTERESIS_MAX			3
+#define	FLASH_LED_VPH_DROOP_THRESH_MAX			7
+#define	THERMAL_DERATE_SLOW_MAX				314592
+#define	THERMAL_DERATE_FAST_MAX				512
+#define	THERMAL_DEBOUNCE_TIME_MAX			64
+#define	THERMAL_DERATE_HYSTERESIS_MAX			3
+#define	FLASH_LED_THERMAL_THRSH_MIN			3
+#define	FLASH_LED_THERMAL_THRSH_MAX			7
+#define	FLASH_LED_THERMAL_OTST_LEVELS			3
+#define	FLASH_LED_VLED_MAX_DEFAULT_UV			3500000
+#define	FLASH_LED_IBATT_OCP_THRESH_DEFAULT_UA		4500000
+#define	FLASH_LED_RPARA_DEFAULT_UOHM			0
+#define	FLASH_LED_LMH_LEVEL_DEFAULT			0
+#define	FLASH_LED_LMH_MITIGATION_ENABLE			1
+#define	FLASH_LED_LMH_MITIGATION_DISABLE		0
+#define	FLASH_LED_CHGR_MITIGATION_DISABLE		0
+#define	FLASH_LED_LMH_MITIGATION_SEL_DEFAULT		2
+#define	FLASH_LED_MITIGATION_SEL_MAX			2
+#define	FLASH_LED_CHGR_MITIGATION_SEL_SHIFT		4
+#define	FLASH_LED_CHGR_MITIGATION_THRSH_DEFAULT		0xA
+#define	FLASH_LED_CHGR_MITIGATION_THRSH_MAX		0x1F
+#define	FLASH_LED_LMH_OCV_THRESH_DEFAULT_UV		3700000
+#define	FLASH_LED_LMH_RBATT_THRESH_DEFAULT_UOHM		400000
+#define	FLASH_LED_IRES_BASE				3
+#define	FLASH_LED_IRES_DIVISOR				2500
+#define	FLASH_LED_IRES_MIN_UA				5000
+#define	FLASH_LED_IRES_DEFAULT_UA			12500
+#define	FLASH_LED_IRES_DEFAULT_VAL			0x00
+#define	FLASH_LED_HDRM_VOL_DEFAULT_MV			0x80
+#define	FLASH_LED_HDRM_VOL_HI_LO_WIN_DEFAULT_MV		0x04
+#define	FLASH_LED_HDRM_VOL_BASE_MV			125
+#define	FLASH_LED_HDRM_VOL_STEP_MV			25
+#define	FLASH_LED_STROBE_CFG_DEFAULT			0x00
+#define	FLASH_LED_HW_STROBE_OPTION_1			0x00
+#define	FLASH_LED_HW_STROBE_OPTION_2			0x01
+#define	FLASH_LED_HW_STROBE_OPTION_3			0x02
+#define	FLASH_LED_DISABLE				0x00
+#define	FLASH_LED_SAFETY_TMR_DISABLED			0x13
+#define	FLASH_LED_MAX_TOTAL_CURRENT_MA			3750
+#define	FLASH_LED_IRES5P0_MAX_CURR_MA			640
+#define	FLASH_LED_IRES7P5_MAX_CURR_MA			960
+#define	FLASH_LED_IRES10P0_MAX_CURR_MA			1280
+#define	FLASH_LED_IRES12P5_MAX_CURR_MA			1600
+#define	MAX_IRES_LEVELS					4
+#define	FLASH_BST_PWM_OVRHD_MIN_UV			300000
+#define	FLASH_BST_PWM_OVRHD_MAX_UV			600000
 
 /* notifier call chain for flash-led irqs */
 static ATOMIC_NOTIFIER_HEAD(irq_notifier_list);
@@ -252,6 +282,7 @@ struct flash_led_platform_data {
 	u32			led1n2_iclamp_mid_ma;
 	u32			led3_iclamp_low_ma;
 	u32			led3_iclamp_mid_ma;
+	u32			bst_pwm_ovrhd_uv;
 	u8			isc_delay;
 	u8			warmup_delay;
 	u8			current_derate_en_cfg;
@@ -277,6 +308,8 @@ struct qpnp_flash_led {
 	struct flash_node_data		*fnode;
 	struct flash_switch_data	*snode;
 	struct power_supply		*bms_psy;
+	struct power_supply		*main_psy;
+	struct power_supply		*usb_psy;
 	struct notifier_block		nb;
 	spinlock_t			lock;
 	int				num_fnodes;
@@ -782,10 +815,13 @@ static int get_property_from_fg(struct qpnp_flash_led *led,
 	return rc;
 }
 
-#define VOLTAGE_HDRM_DEFAULT_MV	350
+#define VOLTAGE_HDRM_DEFAULT_MV		350
+#define BHARGER_VOLTAGE_HDRM_DEFAULT_MV	400
+#define BHARGER_HEADROOM_OFFSET_MV	50
 static int qpnp_flash_led_get_voltage_headroom(struct qpnp_flash_led *led)
 {
 	int i, voltage_hdrm_mv = 0, voltage_hdrm_max = 0;
+	u8 pmic_subtype = led->pdata->pmic_rev_id->pmic_subtype;
 
 	for (i = 0; i < led->num_fnodes; i++) {
 		if (led->fnode[i].led_on) {
@@ -809,13 +845,18 @@ static int qpnp_flash_led_get_voltage_headroom(struct qpnp_flash_led *led)
 					voltage_hdrm_mv = 350;
 			}
 
+			if (pmic_subtype == PMI632_SUBTYPE)
+				voltage_hdrm_mv += BHARGER_HEADROOM_OFFSET_MV;
+
 			voltage_hdrm_max = max(voltage_hdrm_max,
 						voltage_hdrm_mv);
 		}
 	}
 
 	if (!voltage_hdrm_max)
-		return VOLTAGE_HDRM_DEFAULT_MV;
+		return (pmic_subtype == PMI632_SUBTYPE) ?
+			BHARGER_VOLTAGE_HDRM_DEFAULT_MV :
+			VOLTAGE_HDRM_DEFAULT_MV;
 
 	return voltage_hdrm_max;
 }
@@ -826,7 +867,7 @@ static int qpnp_flash_led_get_voltage_headroom(struct qpnp_flash_led *led)
 #define BOB_EFFICIENCY		900LL
 #define VIN_FLASH_MIN_UV	3300000LL
 static int qpnp_flash_led_calc_max_current(struct qpnp_flash_led *led,
-					int *max_current)
+					      int *max_current)
 {
 	int ocv_uv, ibat_now, voltage_hdrm_mv, rc;
 	int rbatt_uohm = 0;
@@ -927,8 +968,181 @@ static int qpnp_flash_led_calc_max_current(struct qpnp_flash_led *led,
 	return 0;
 }
 
+static int is_main_psy_available(struct qpnp_flash_led *led)
+{
+	if (!led->main_psy) {
+		led->main_psy = power_supply_get_by_name("main");
+		if (!led->main_psy) {
+			pr_err_ratelimited("Couldn't get main_psy\n");
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
+static int is_usb_psy_available(struct qpnp_flash_led *led)
+{
+	if (!led->usb_psy) {
+		led->usb_psy = power_supply_get_by_name("usb");
+		if (!led->usb_psy) {
+			pr_err_ratelimited("Couldn't get usb_psy\n");
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
+#define CHGBST_EFFICIENCY		800LL
+#define CHGBST_FLASH_VDIP_MARGIN	10000
+#define VIN_FLASH_UV			5000000
+#define BHARGER_FLASH_LED_MAX_TOTAL_CURRENT_MA		1500
+#define BHARGER_FLASH_LED_WITH_OTG_MAX_TOTAL_CURRENT_MA	1100
+static int qpnp_flash_led_calc_bharger_max_current(struct qpnp_flash_led *led,
+						    int *max_current)
+{
+	union power_supply_propval pval = {0, };
+	int ocv_uv, ibat_now, voltage_hdrm_mv, flash_led_max_total_curr_ma, rc;
+	int rbatt_uohm = 0, usb_present, otg_enable;
+	int64_t ibat_flash_ua, avail_flash_ua, avail_flash_power_fw;
+	int64_t ibat_safe_ua, vin_flash_uv, vph_flash_uv, vph_flash_vdip;
+	int64_t bst_pwm_ovrhd_uv;
+
+	rc = is_usb_psy_available(led);
+	if (rc < 0)
+		return rc;
+
+	rc = power_supply_get_property(led->usb_psy, POWER_SUPPLY_PROP_SCOPE,
+					&pval);
+	if (rc < 0) {
+		pr_err("usb psy does not support usb present, rc=%d\n", rc);
+		return rc;
+	}
+	otg_enable = pval.intval;
+
+	/* RESISTANCE = esr_uohm + rslow_uohm */
+	rc = get_property_from_fg(led, POWER_SUPPLY_PROP_RESISTANCE,
+			&rbatt_uohm);
+	if (rc < 0) {
+		pr_err("bms psy does not support resistance, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* If no battery is connected, return max possible flash current */
+	if (!rbatt_uohm) {
+		*max_current = (otg_enable == POWER_SUPPLY_SCOPE_SYSTEM) ?
+			       BHARGER_FLASH_LED_WITH_OTG_MAX_TOTAL_CURRENT_MA :
+			       BHARGER_FLASH_LED_MAX_TOTAL_CURRENT_MA;
+		return 0;
+	}
+
+	rc = get_property_from_fg(led, POWER_SUPPLY_PROP_VOLTAGE_OCV, &ocv_uv);
+	if (rc < 0) {
+		pr_err("bms psy does not support OCV, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = get_property_from_fg(led, POWER_SUPPLY_PROP_CURRENT_NOW,
+			&ibat_now);
+	if (rc < 0) {
+		pr_err("bms psy does not support current, rc=%d\n", rc);
+		return rc;
+	}
+
+	bst_pwm_ovrhd_uv = led->pdata->bst_pwm_ovrhd_uv;
+
+	rc = power_supply_get_property(led->usb_psy, POWER_SUPPLY_PROP_PRESENT,
+							&pval);
+	if (rc < 0) {
+		pr_err("usb psy does not support usb present, rc=%d\n", rc);
+		return rc;
+	}
+	usb_present = pval.intval;
+
+	rbatt_uohm += led->pdata->rpara_uohm;
+	voltage_hdrm_mv = qpnp_flash_led_get_voltage_headroom(led);
+	vph_flash_vdip =
+		VPH_DROOP_THRESH_VAL_TO_UV(led->pdata->vph_droop_threshold)
+						+ CHGBST_FLASH_VDIP_MARGIN;
+
+	/* Check if LMH_MITIGATION needs to be triggered */
+	if (!led->trigger_lmh && (ocv_uv < led->pdata->lmh_ocv_threshold_uv ||
+			rbatt_uohm > led->pdata->lmh_rbatt_threshold_uohm)) {
+		led->trigger_lmh = true;
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_MITIGATION_SW(led->base),
+				FLASH_LED_LMH_MITIGATION_EN_MASK,
+				FLASH_LED_LMH_MITIGATION_ENABLE);
+		if (rc < 0) {
+			pr_err("trigger lmh mitigation failed, rc=%d\n", rc);
+			return rc;
+		}
+
+		/* Wait for LMH mitigation to take effect */
+		udelay(100);
+
+		return qpnp_flash_led_calc_bharger_max_current(led,
+							       max_current);
+	}
+
+	/*
+	 * Calculate the maximum current that can be pulled out of the battery
+	 * before the battery voltage dips below a safe threshold.
+	 */
+	ibat_safe_ua = div_s64((ocv_uv - vph_flash_vdip) * UCONV,
+				rbatt_uohm);
+
+	if (ibat_safe_ua <= led->pdata->ibatt_ocp_threshold_ua) {
+		/*
+		 * If the calculated current is below the OCP threshold, then
+		 * use it as the possible flash current.
+		 */
+		ibat_flash_ua = ibat_safe_ua - ibat_now;
+		vph_flash_uv = vph_flash_vdip;
+	} else {
+		/*
+		 * If the calculated current is above the OCP threshold, then
+		 * use the OCP threshold instead.
+		 *
+		 * Any higher current would trip the battery OCP.
+		 */
+		ibat_flash_ua = led->pdata->ibatt_ocp_threshold_ua - ibat_now;
+		vph_flash_uv = ocv_uv - div64_s64((int64_t)rbatt_uohm
+				* led->pdata->ibatt_ocp_threshold_ua, UCONV);
+	}
+
+	/* when USB is present or OTG is enabled, VIN_FLASH is always at 5V */
+	if (usb_present || (otg_enable == POWER_SUPPLY_SCOPE_SYSTEM))
+		vin_flash_uv = VIN_FLASH_UV;
+	else
+		/* Calculate the input voltage of the flash module. */
+		vin_flash_uv = max((led->pdata->vled_max_uv +
+				   (voltage_hdrm_mv * MCONV)),
+				    vph_flash_uv + bst_pwm_ovrhd_uv);
+
+	/* Calculate the available power for the flash module. */
+	avail_flash_power_fw = CHGBST_EFFICIENCY * vph_flash_uv * ibat_flash_ua;
+	/*
+	 * Calculate the available amount of current the flash module can draw
+	 * before collapsing the battery (available power / flash input voltage).
+	 */
+	avail_flash_ua = div64_s64(avail_flash_power_fw, vin_flash_uv * MCONV);
+	flash_led_max_total_curr_ma = (otg_enable == POWER_SUPPLY_SCOPE_SYSTEM) ?
+			       BHARGER_FLASH_LED_WITH_OTG_MAX_TOTAL_CURRENT_MA :
+			       BHARGER_FLASH_LED_MAX_TOTAL_CURRENT_MA;
+	*max_current = min(flash_led_max_total_curr_ma,
+			(int)(div64_s64(avail_flash_ua, MCONV)));
+
+	pr_debug("avail_iflash=%lld, ocv=%d, ibat=%d, rbatt=%d, trigger_lmh=%d max_current=%lld usb_present=%d otg_enable=%d\n",
+		avail_flash_ua, ocv_uv, ibat_now, rbatt_uohm, led->trigger_lmh,
+		(*max_current * MCONV), usb_present, otg_enable);
+	return 0;
+}
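+
+/*
+ * Worked example for the math above (illustrative values only; assumes
+ * UCONV = 1000000 and MCONV = 1000 as defined earlier in this file):
+ * ocv = 3.8 V, rbatt = 150 mohm, vph_flash_vdip = 3.11 V, ibat_now = 0.5 A,
+ * OCP threshold = 4.5 A (default), headroom = 400 mV.
+ * ibat_safe_ua = (3800000 - 3110000) * UCONV / 150000 = 4.6 A, above the
+ * OCP threshold, so ibat_flash_ua = 4500000 - 500000 = 4 A and
+ * vph_flash_uv = 3800000 - (150000 * 4500000) / UCONV = 3.125 V.
+ * With no USB/OTG, vin_flash_uv = max(3500000 + 400 * MCONV,
+ * 3125000 + 300000) = 3.9 V, so
+ * avail_flash_ua = (800 * 3125000 * 4000000) / (3900000 * 1000) ~= 2.56 A
+ * and max_current = min(1500 mA, 2564 mA) = 1500 mA.
+ */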
+
 static int qpnp_flash_led_calc_thermal_current_lim(struct qpnp_flash_led *led,
-						int *thermal_current_lim)
+						   int *thermal_current_lim)
 {
 	int rc;
 	u8 thermal_thrsh1, thermal_thrsh2, thermal_thrsh3, otst_status;
@@ -1018,9 +1232,16 @@ static int qpnp_flash_led_get_max_avail_current(struct qpnp_flash_led *led,
 						int *max_avail_current)
 {
 	int thermal_current_lim = 0, rc;
+	u8 pmic_subtype = led->pdata->pmic_rev_id->pmic_subtype;
 
 	led->trigger_lmh = false;
-	rc = qpnp_flash_led_calc_max_current(led, max_avail_current);
+
+	if (pmic_subtype == PMI632_SUBTYPE)
+		rc = qpnp_flash_led_calc_bharger_max_current(led,
+							max_avail_current);
+	else
+		rc = qpnp_flash_led_calc_max_current(led, max_avail_current);
+
 	if (rc < 0) {
 		pr_err("Couldn't calculate max_avail_current, rc=%d\n", rc);
 		return rc;
@@ -1062,6 +1283,7 @@ static void qpnp_flash_led_node_set(struct flash_node_data *fnode, int value)
 	int prgm_current_ma = value;
 	int min_ma = fnode->ires_ua / 1000;
 	struct qpnp_flash_led *led = dev_get_drvdata(&fnode->pdev->dev);
+	u8 pmic_subtype = led->pdata->pmic_rev_id->pmic_subtype;
 
 	if (value <= 0)
 		prgm_current_ma = 0;
@@ -1096,7 +1318,8 @@ static void qpnp_flash_led_node_set(struct flash_node_data *fnode, int value)
 	if (prgm_current_ma)
 		fnode->led_on = true;
 
-	if (led->pdata->chgr_mitigation_sel == FLASH_SW_CHARGER_MITIGATION) {
+	if (pmic_subtype != PMI632_SUBTYPE &&
+	       led->pdata->chgr_mitigation_sel == FLASH_SW_CHARGER_MITIGATION) {
 		qpnp_flash_led_aggregate_max_current(fnode);
 		led->trigger_chgr = false;
 		if (led->total_current_ma >= 1000)
@@ -1126,7 +1349,7 @@ static int qpnp_flash_led_switch_disable(struct flash_switch_data *snode)
 		}
 	}
 
-	if (!led->trigger_chgr) {
+	if (led->pdata->chgr_mitigation_sel && !led->trigger_chgr) {
 		rc = qpnp_flash_led_masked_write(led,
 				FLASH_LED_REG_MITIGATION_SW(led->base),
 				FLASH_LED_CHGR_MITIGATION_EN_MASK,
@@ -1257,6 +1480,78 @@ static int qpnp_flash_led_symmetry_config(struct flash_switch_data *snode)
 	return 0;
 }
 
+#define FLASH_LED_MODULE_EN_TIME_MS	300
+static int qpnp_flash_poll_vreg_ok(struct qpnp_flash_led *led)
+{
+	int rc, i;
+	union power_supply_propval pval = {0, };
+
+	rc = is_main_psy_available(led);
+	if (rc < 0)
+		return rc;
+
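+	/* poll vreg_ok for up to FLASH_LED_MODULE_EN_TIME_MS (60 * 5 ms) */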
+	for (i = 0; i < 60; i++) {
+		/* wait for the flash vreg_ok to be set */
+		mdelay(5);
+
+		rc = power_supply_get_property(led->main_psy,
+					POWER_SUPPLY_PROP_FLASH_TRIGGER, &pval);
+		if (rc < 0) {
+			pr_err("main psy doesn't support reading prop %d rc = %d\n",
+				POWER_SUPPLY_PROP_FLASH_TRIGGER, rc);
+			return rc;
+		}
+
+		if (pval.intval > 0) {
+			pr_debug("Flash trigger set\n");
+			break;
+		}
+
+		if (pval.intval < 0) {
+			pr_err("Error during flash trigger %d\n", pval.intval);
+			return pval.intval;
+		}
+	}
+
+	if (!pval.intval) {
+		pr_err("Failed to enable the module\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int qpnp_flash_led_module_enable(struct flash_switch_data *snode)
+{
+	struct qpnp_flash_led *led = dev_get_drvdata(&snode->pdev->dev);
+	u8 pmic_subtype = led->pdata->pmic_rev_id->pmic_subtype;
+	int rc = 0;
+
+	if (led->enable == 0) {
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_MOD_CTRL(led->base),
+				FLASH_LED_MOD_CTRL_MASK, FLASH_LED_MOD_ENABLE);
+		if (rc < 0)
+			return rc;
+
+		if (pmic_subtype == PMI632_SUBTYPE) {
+			rc = qpnp_flash_poll_vreg_ok(led);
+			if (rc < 0) {
+				/* Disable the module */
+				qpnp_flash_led_masked_write(led,
+					FLASH_LED_REG_MOD_CTRL(led->base),
+					FLASH_LED_MOD_CTRL_MASK,
+					FLASH_LED_DISABLE);
+
+				return rc;
+			}
+		}
+	}
+	led->enable++;
+
+	return rc;
+}
+
 static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on)
 {
 	struct qpnp_flash_led *led = dev_get_drvdata(&snode->pdev->dev);
@@ -1348,14 +1643,9 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on)
 		}
 	}
 
-	if (led->enable == 0) {
-		rc = qpnp_flash_led_masked_write(led,
-				FLASH_LED_REG_MOD_CTRL(led->base),
-				FLASH_LED_MOD_CTRL_MASK, FLASH_LED_MOD_ENABLE);
-		if (rc < 0)
-			return rc;
-	}
-	led->enable++;
+	rc = qpnp_flash_led_module_enable(snode);
+	if (rc < 0)
+		return rc;
 
 	if (led->trigger_lmh) {
 		rc = qpnp_flash_led_masked_write(led,
@@ -1370,7 +1660,7 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on)
 		udelay(500);
 	}
 
-	if (led->trigger_chgr) {
+	if (led->pdata->chgr_mitigation_sel && led->trigger_chgr) {
 		rc = qpnp_flash_led_masked_write(led,
 				FLASH_LED_REG_MITIGATION_SW(led->base),
 				FLASH_LED_CHGR_MITIGATION_EN_MASK,
@@ -1391,12 +1681,91 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on)
 	return 0;
 }
 
+static int qpnp_flash_led_regulator_control(struct led_classdev *led_cdev,
+					int options, int *max_current)
+{
+	int rc;
+	u8 pmic_subtype;
+	struct flash_switch_data *snode;
+	struct qpnp_flash_led *led;
+	union power_supply_propval ret = {0, };
+
+	snode = container_of(led_cdev, struct flash_switch_data, cdev);
+	led = dev_get_drvdata(&snode->pdev->dev);
+	pmic_subtype = led->pdata->pmic_rev_id->pmic_subtype;
+
+	if (pmic_subtype == PMI632_SUBTYPE) {
+		rc = is_main_psy_available(led);
+		if (rc < 0)
+			return rc;
+
+		rc = is_usb_psy_available(led);
+		if (rc < 0)
+			return rc;
+	}
+
+	if (!(options & FLASH_LED_PREPARE_OPTIONS_MASK)) {
+		pr_err("Invalid options %d\n", options);
+		return -EINVAL;
+	}
+
+	if (options & ENABLE_REGULATOR) {
+		if (pmic_subtype == PMI632_SUBTYPE) {
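+			/*
+			 * PMI632 has no dedicated flash regulator; signal
+			 * the charger boost via FLASH_ACTIVE instead.
+			 */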
+			ret.intval = 1;
+			rc = power_supply_set_property(led->main_psy,
+					 POWER_SUPPLY_PROP_FLASH_ACTIVE,
+					 &ret);
+			if (rc < 0) {
+				pr_err("Failed to set FLASH_ACTIVE on charger rc=%d\n",
+									rc);
+				return rc;
+			}
+			pr_debug("FLASH_ACTIVE = 1\n");
+		} else {
+			rc = qpnp_flash_led_regulator_enable(led, snode, true);
+			if (rc < 0) {
+				pr_err("enable regulator failed, rc=%d\n", rc);
+				return rc;
+			}
+		}
+	}
+
+	if (options & DISABLE_REGULATOR) {
+		if (pmic_subtype == PMI632_SUBTYPE) {
+			ret.intval = 0;
+			rc = power_supply_set_property(led->main_psy,
+					POWER_SUPPLY_PROP_FLASH_ACTIVE,
+					&ret);
+			if (rc < 0) {
+				pr_err("Failed to set FLASH_ACTIVE on charger rc=%d\n",
+									rc);
+				return rc;
+			}
+			pr_debug("FLASH_ACTIVE = 0\n");
+		} else {
+			rc = qpnp_flash_led_regulator_enable(led, snode, false);
+			if (rc < 0) {
+				pr_err("disable regulator failed, rc=%d\n", rc);
+				return rc;
+			}
+		}
+	}
+
+	if (options & QUERY_MAX_AVAIL_CURRENT) {
+		rc = qpnp_flash_led_get_max_avail_current(led, max_current);
+		if (rc < 0) {
+			pr_err("query max current failed, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
 int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
 					int *max_current)
 {
 	struct led_classdev *led_cdev;
-	struct flash_switch_data *snode;
-	struct qpnp_flash_led *led;
 	int rc;
 
 	if (!trig) {
@@ -1410,39 +1779,9 @@ int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
 		return -EINVAL;
 	}
 
-	snode = container_of(led_cdev, struct flash_switch_data, cdev);
-	led = dev_get_drvdata(&snode->pdev->dev);
+	rc = qpnp_flash_led_regulator_control(led_cdev, options, max_current);
 
-	if (!(options & FLASH_LED_PREPARE_OPTIONS_MASK)) {
-		pr_err("Invalid options %d\n", options);
-		return -EINVAL;
-	}
-
-	if (options & ENABLE_REGULATOR) {
-		rc = qpnp_flash_led_regulator_enable(led, snode, true);
-		if (rc < 0) {
-			pr_err("enable regulator failed, rc=%d\n", rc);
-			return rc;
-		}
-	}
-
-	if (options & DISABLE_REGULATOR) {
-		rc = qpnp_flash_led_regulator_enable(led, snode, false);
-		if (rc < 0) {
-			pr_err("disable regulator failed, rc=%d\n", rc);
-			return rc;
-		}
-	}
-
-	if (options & QUERY_MAX_AVAIL_CURRENT) {
-		rc = qpnp_flash_led_get_max_avail_current(led, max_current);
-		if (rc < 0) {
-			pr_err("query max current failed, rc=%d\n", rc);
-			return rc;
-		}
-	}
-
-	return 0;
+	return rc;
 }
 
 static void qpnp_flash_led_brightness_set(struct led_classdev *led_cdev,
@@ -1484,6 +1823,29 @@ static void qpnp_flash_led_brightness_set(struct led_classdev *led_cdev,
 	spin_unlock(&led->lock);
 }
 
+static ssize_t qpnp_flash_led_prepare_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int rc, options, max_current;
+	u32 val;
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+	rc = kstrtouint(buf, 0, &val);
+	if (rc < 0)
+		return rc;
+
+	if (val != 0 && val != 1)
+		return count;
+
+	options = val ? ENABLE_REGULATOR : DISABLE_REGULATOR;
+
+	rc = qpnp_flash_led_regulator_control(led_cdev, options, &max_current);
+	if (rc < 0)
+		return rc;
+
+	return count;
+}
+
 /* sysfs show function for flash_max_current */
 static ssize_t qpnp_flash_led_max_current_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
@@ -1506,6 +1868,7 @@ static ssize_t qpnp_flash_led_max_current_show(struct device *dev,
 /* sysfs attributes exported by flash_led */
 static struct device_attribute qpnp_flash_led_attrs[] = {
 	__ATTR(max_current, 0664, qpnp_flash_led_max_current_show, NULL),
+	__ATTR(enable, 0664, NULL, qpnp_flash_led_prepare_store),
 };
 
 static int flash_led_psy_notifier_call(struct notifier_block *nb,
@@ -1723,22 +2086,11 @@ static int qpnp_flash_led_parse_strobe_sel_dt(struct qpnp_flash_led *led,
 	return 0;
 }
 
-static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
-			struct flash_node_data *fnode, struct device_node *node)
+static int qpnp_flash_led_parse_label_dt(struct flash_node_data *fnode,
+					 struct device_node *node)
 {
 	const char *temp_string;
-	int rc, min_ma;
-	u32 val;
-
-	fnode->pdev = led->pdev;
-	fnode->cdev.brightness_set = qpnp_flash_led_brightness_set;
-	fnode->cdev.brightness_get = qpnp_flash_led_brightness_get;
-
-	rc = of_property_read_string(node, "qcom,led-name", &fnode->cdev.name);
-	if (rc < 0) {
-		pr_err("Unable to read flash LED names\n");
-		return rc;
-	}
+	int rc;
 
 	rc = of_property_read_string(node, "label", &temp_string);
 	if (!rc) {
@@ -1755,9 +2107,38 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
 		return rc;
 	}
 
+	return rc;
+}
+
+static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
+			struct flash_node_data *fnode, struct device_node *node)
+{
+	int rc, min_ma;
+	u32 val;
+	u8 pmic_subtype = led->pdata->pmic_rev_id->pmic_subtype;
+
+	fnode->pdev = led->pdev;
+	fnode->cdev.brightness_set = qpnp_flash_led_brightness_set;
+	fnode->cdev.brightness_get = qpnp_flash_led_brightness_get;
+
+	rc = of_property_read_string(node, "qcom,led-name", &fnode->cdev.name);
+	if (rc < 0) {
+		pr_err("Unable to read flash LED names\n");
+		return rc;
+	}
+
+	rc = qpnp_flash_led_parse_label_dt(fnode, node);
+	if (rc < 0)
+		return rc;
+
 	rc = of_property_read_u32(node, "qcom,id", &val);
 	if (!rc) {
 		fnode->id = (u8)val;
+
+		if (pmic_subtype == PMI632_SUBTYPE && fnode->id > LED2) {
+			pr_err("Flash node id = %d not supported\n", fnode->id);
+			return -EINVAL;
+		}
 	} else {
 		pr_err("Unable to read flash LED ID\n");
 		return rc;
@@ -1991,6 +2372,7 @@ static int qpnp_flash_led_parse_thermal_config_dt(struct qpnp_flash_led *led,
 {
 	int rc;
 	u32 val;
+	u8 pmic_subtype = led->pdata->pmic_rev_id->pmic_subtype;
 
 	led->pdata->thermal_derate_en =
 		of_property_read_bool(node, "qcom,thermal-derate-en");
@@ -2069,7 +2451,7 @@ static int qpnp_flash_led_parse_thermal_config_dt(struct qpnp_flash_led *led,
 	led->pdata->thermal_hysteresis = -EINVAL;
 	rc = of_property_read_u32(node, "qcom,thermal-hysteresis", &val);
 	if (!rc) {
-		if (led->pdata->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+		if (pmic_subtype == PM660L_SUBTYPE)
 			val = THERMAL_HYST_TEMP_TO_VAL(val, 20);
 		else
 			val = THERMAL_HYST_TEMP_TO_VAL(val, 15);
@@ -2126,6 +2508,7 @@ static int qpnp_flash_led_parse_vph_droop_config_dt(struct qpnp_flash_led *led,
 {
 	int rc;
 	u32 val;
+	u8 pmic_subtype = led->pdata->pmic_rev_id->pmic_subtype;
 
 	led->pdata->vph_droop_debounce = FLASH_LED_VPH_DROOP_DEBOUNCE_DEFAULT;
 	rc = of_property_read_u32(node, "qcom,vph-droop-debounce-us", &val);
@@ -2142,7 +2525,13 @@ static int qpnp_flash_led_parse_vph_droop_config_dt(struct qpnp_flash_led *led,
 		return -EINVAL;
 	}
 
-	led->pdata->vph_droop_threshold = FLASH_LED_VPH_DROOP_THRESH_DEFAULT;
+	if (pmic_subtype == PMI632_SUBTYPE)
+		led->pdata->vph_droop_threshold =
+				    BHARGER_FLASH_LED_VPH_DROOP_THRESH_DEFAULT;
+	else
+		led->pdata->vph_droop_threshold =
+					FLASH_LED_VPH_DROOP_THRESH_DEFAULT;
+
 	rc = of_property_read_u32(node, "qcom,vph-droop-threshold-mv", &val);
 	if (!rc) {
 		led->pdata->vph_droop_threshold =
@@ -2270,22 +2659,162 @@ static int qpnp_flash_led_parse_lmh_config_dt(struct qpnp_flash_led *led,
 	return 0;
 }
 
-static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led,
-						struct device_node *node)
+static int qpnp_flash_led_parse_iled_threshold_dt(struct qpnp_flash_led *led,
+						    struct device_node *node)
 {
-	struct device_node *revid_node;
 	int rc;
 	u32 val;
-	bool short_circuit_det, open_circuit_det, vph_droop_det;
 
-	rc = of_property_read_u32(node, "reg", &val);
-	if (rc < 0) {
-		pr_err("Couldn't find reg in node %s, rc = %d\n",
-			node->full_name, rc);
+	led->pdata->iled_thrsh_val = FLASH_LED_CHGR_MITIGATION_THRSH_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,iled-thrsh-ma", &val);
+	if (!rc) {
+		led->pdata->iled_thrsh_val = MITIGATION_THRSH_MA_TO_VAL(val);
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to parse iled_thrsh_val, rc=%d\n", rc);
 		return rc;
 	}
 
-	led->base = val;
+	if (led->pdata->iled_thrsh_val > FLASH_LED_CHGR_MITIGATION_THRSH_MAX) {
+		pr_err("Invalid iled_thrsh_val specified\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qpnp_flash_led_parse_chgr_mitigation_dt(struct qpnp_flash_led *led,
+						    struct device_node *node)
+{
+	u8 pmic_subtype = led->pdata->pmic_rev_id->pmic_subtype;
+	int rc;
+	u32 val;
+
+	if (pmic_subtype == PMI632_SUBTYPE)
+		led->pdata->chgr_mitigation_sel =
+					FLASH_DISABLE_CHARGER_MITIGATION;
+	else
+		led->pdata->chgr_mitigation_sel = FLASH_SW_CHARGER_MITIGATION;
+
+	rc = of_property_read_u32(node, "qcom,chgr-mitigation-sel", &val);
+	if (!rc) {
+		led->pdata->chgr_mitigation_sel = val;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to parse chgr_mitigation_sel, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (led->pdata->chgr_mitigation_sel > FLASH_LED_MITIGATION_SEL_MAX) {
+		pr_err("Invalid chgr_mitigation_sel specified\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qpnp_flash_led_parse_battery_prop_dt(struct qpnp_flash_led *led,
+						    struct device_node *node)
+{
+	int rc;
+	u32 val;
+
+	led->pdata->ibatt_ocp_threshold_ua =
+		FLASH_LED_IBATT_OCP_THRESH_DEFAULT_UA;
+	rc = of_property_read_u32(node, "qcom,ibatt-ocp-threshold-ua", &val);
+	if (!rc) {
+		led->pdata->ibatt_ocp_threshold_ua = val;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to parse ibatt_ocp threshold, rc=%d\n", rc);
+		return rc;
+	}
+
+	led->pdata->rpara_uohm = FLASH_LED_RPARA_DEFAULT_UOHM;
+	rc = of_property_read_u32(node, "qcom,rparasitic-uohm", &val);
+	if (!rc) {
+		led->pdata->rpara_uohm = val;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to parse rparasitic, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static void qpnp_flash_led_parse_fault_detection_dt(struct qpnp_flash_led *led,
+						    struct device_node *node)
+{
+	bool short_circuit_det, open_circuit_det, vph_droop_det;
+
+	short_circuit_det =
+		of_property_read_bool(node, "qcom,short-circuit-det");
+	open_circuit_det = of_property_read_bool(node, "qcom,open-circuit-det");
+	vph_droop_det = of_property_read_bool(node, "qcom,vph-droop-det");
+	led->pdata->current_derate_en_cfg = (vph_droop_det << 2) |
+				(open_circuit_det << 1) | short_circuit_det;
+}
+
+static int qpnp_flash_led_parse_warmup_delay_dt(struct qpnp_flash_led *led,
+						 struct device_node *node)
+{
+	int rc;
+	u32 val;
+
+	led->pdata->warmup_delay = FLASH_LED_WARMUP_DELAY_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,warmup-delay-us", &val);
+	if (!rc) {
+		led->pdata->warmup_delay =
+				val >> FLASH_LED_ISC_WARMUP_DELAY_SHIFT;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read WARMUP delay, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static void qpnp_flash_led_parse_irqs_dt(struct qpnp_flash_led *led,
+					 struct device_node *node)
+{
+	led->pdata->all_ramp_up_done_irq =
+		of_irq_get_byname(node, "all-ramp-up-done-irq");
+	if (led->pdata->all_ramp_up_done_irq < 0)
+		pr_debug("all-ramp-up-done-irq not used\n");
+
+	led->pdata->all_ramp_down_done_irq =
+		of_irq_get_byname(node, "all-ramp-down-done-irq");
+	if (led->pdata->all_ramp_down_done_irq < 0)
+		pr_debug("all-ramp-down-done-irq not used\n");
+
+	led->pdata->led_fault_irq =
+		of_irq_get_byname(node, "led-fault-irq");
+	if (led->pdata->led_fault_irq < 0)
+		pr_debug("led-fault-irq not used\n");
+}
+
+static int qpnp_flash_led_parse_isc_delay_dt(struct qpnp_flash_led *led,
+				       struct device_node *node)
+{
+	int rc;
+	u32 val;
+
+	led->pdata->isc_delay = FLASH_LED_ISC_DELAY_DEFAULT;
+	rc = of_property_read_u32(node, "qcom,isc-delay-us", &val);
+	if (!rc) {
+		led->pdata->isc_delay =
+				val >> FLASH_LED_ISC_WARMUP_DELAY_SHIFT;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read ISC delay, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qpnp_flash_led_parse_revid_dt(struct qpnp_flash_led *led,
+					 struct device_node *node)
+{
+	struct device_node *revid_node;
+
 	revid_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
 	if (!revid_node) {
 		pr_err("Missing qcom,pmic-revid property - driver failed\n");
@@ -2312,35 +2841,38 @@ static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led,
 	if (led->pdata->pmic_rev_id->pmic_subtype == PM8150L_SUBTYPE)
 		led->wa_flags |= PM8150L_IRES_WA;
 
+	return 0;
+}
+
+static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led,
+						struct device_node *node)
+{
+	int rc;
+	u32 val;
+
+	rc = of_property_read_u32(node, "reg", &val);
+	if (rc < 0) {
+		pr_err("Couldn't find reg in node %s, rc = %d\n",
+			node->full_name, rc);
+		return rc;
+	}
+
+	led->base = val;
+	rc = qpnp_flash_led_parse_revid_dt(led, node);
+	if (rc < 0)
+		return rc;
+
 	led->pdata->hdrm_auto_mode_en = of_property_read_bool(node,
 							"qcom,hdrm-auto-mode");
-
-	led->pdata->isc_delay = FLASH_LED_ISC_DELAY_DEFAULT;
-	rc = of_property_read_u32(node, "qcom,isc-delay-us", &val);
-	if (!rc) {
-		led->pdata->isc_delay =
-				val >> FLASH_LED_ISC_WARMUP_DELAY_SHIFT;
-	} else if (rc != -EINVAL) {
-		pr_err("Unable to read ISC delay, rc=%d\n", rc);
+	rc = qpnp_flash_led_parse_isc_delay_dt(led, node);
+	if (rc < 0)
 		return rc;
-	}
 
-	led->pdata->warmup_delay = FLASH_LED_WARMUP_DELAY_DEFAULT;
-	rc = of_property_read_u32(node, "qcom,warmup-delay-us", &val);
-	if (!rc) {
-		led->pdata->warmup_delay =
-				val >> FLASH_LED_ISC_WARMUP_DELAY_SHIFT;
-	} else if (rc != -EINVAL) {
-		pr_err("Unable to read WARMUP delay, rc=%d\n", rc);
+	rc = qpnp_flash_led_parse_warmup_delay_dt(led, node);
+	if (rc < 0)
 		return rc;
-	}
 
-	short_circuit_det =
-		of_property_read_bool(node, "qcom,short-circuit-det");
-	open_circuit_det = of_property_read_bool(node, "qcom,open-circuit-det");
-	vph_droop_det = of_property_read_bool(node, "qcom,vph-droop-det");
-	led->pdata->current_derate_en_cfg = (vph_droop_det << 2) |
-				(open_circuit_det << 1) | short_circuit_det;
+	qpnp_flash_led_parse_fault_detection_dt(led, node);
 
 	rc = qpnp_flash_led_parse_thermal_config_dt(led, node);
 	if (rc < 0)
@@ -2372,71 +2904,31 @@ static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led,
 		return rc;
 	}
 
-	led->pdata->ibatt_ocp_threshold_ua =
-		FLASH_LED_IBATT_OCP_THRESH_DEFAULT_UA;
-	rc = of_property_read_u32(node, "qcom,ibatt-ocp-threshold-ua", &val);
-	if (!rc) {
-		led->pdata->ibatt_ocp_threshold_ua = val;
-	} else if (rc != -EINVAL) {
-		pr_err("Unable to parse ibatt_ocp threshold, rc=%d\n", rc);
+	rc = qpnp_flash_led_parse_battery_prop_dt(led, node);
+	if (rc < 0)
 		return rc;
-	}
-
-	led->pdata->rpara_uohm = FLASH_LED_RPARA_DEFAULT_UOHM;
-	rc = of_property_read_u32(node, "qcom,rparasitic-uohm", &val);
-	if (!rc) {
-		led->pdata->rpara_uohm = val;
-	} else if (rc != -EINVAL) {
-		pr_err("Unable to parse rparasitic, rc=%d\n", rc);
-		return rc;
-	}
 
 	rc = qpnp_flash_led_parse_lmh_config_dt(led, node);
 	if (rc < 0)
 		return rc;
 
-	led->pdata->chgr_mitigation_sel = FLASH_SW_CHARGER_MITIGATION;
-	rc = of_property_read_u32(node, "qcom,chgr-mitigation-sel", &val);
-	if (!rc) {
-		led->pdata->chgr_mitigation_sel = val;
-	} else if (rc != -EINVAL) {
-		pr_err("Unable to parse chgr_mitigation_sel, rc=%d\n", rc);
+	rc = qpnp_flash_led_parse_chgr_mitigation_dt(led, node);
+	if (rc < 0)
 		return rc;
-	}
 
-	if (led->pdata->chgr_mitigation_sel > FLASH_LED_MITIGATION_SEL_MAX) {
-		pr_err("Invalid chgr_mitigation_sel specified\n");
-		return -EINVAL;
-	}
-
-	led->pdata->iled_thrsh_val = FLASH_LED_CHGR_MITIGATION_THRSH_DEFAULT;
-	rc = of_property_read_u32(node, "qcom,iled-thrsh-ma", &val);
-	if (!rc) {
-		led->pdata->iled_thrsh_val = MITIGATION_THRSH_MA_TO_VAL(val);
-	} else if (rc != -EINVAL) {
-		pr_err("Unable to parse iled_thrsh_val, rc=%d\n", rc);
+	rc = qpnp_flash_led_parse_iled_threshold_dt(led, node);
+	if (rc < 0)
 		return rc;
+
+	led->pdata->bst_pwm_ovrhd_uv = FLASH_BST_PWM_OVRHD_MIN_UV;
+	rc = of_property_read_u32(node, "qcom,bst-pwm-ovrhd-uv", &val);
+	if (!rc) {
+		if (val >= FLASH_BST_PWM_OVRHD_MIN_UV &&
+					val <= FLASH_BST_PWM_OVRHD_MAX_UV)
+			led->pdata->bst_pwm_ovrhd_uv = val;
 	}
 
-	if (led->pdata->iled_thrsh_val > FLASH_LED_CHGR_MITIGATION_THRSH_MAX) {
-		pr_err("Invalid iled_thrsh_val specified\n");
-		return -EINVAL;
-	}
-
-	led->pdata->all_ramp_up_done_irq =
-		of_irq_get_byname(node, "all-ramp-up-done-irq");
-	if (led->pdata->all_ramp_up_done_irq < 0)
-		pr_debug("all-ramp-up-done-irq not used\n");
-
-	led->pdata->all_ramp_down_done_irq =
-		of_irq_get_byname(node, "all-ramp-down-done-irq");
-	if (led->pdata->all_ramp_down_done_irq < 0)
-		pr_debug("all-ramp-down-done-irq not used\n");
-
-	led->pdata->led_fault_irq =
-		of_irq_get_byname(node, "led-fault-irq");
-	if (led->pdata->led_fault_irq < 0)
-		pr_debug("led-fault-irq not used\n");
+	qpnp_flash_led_parse_irqs_dt(led, node);
 
 	return 0;
 }
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index b3cb7fe..cc948d2 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -55,7 +55,6 @@ static const struct mbox_chan_ops qcom_apcs_ipc_ops = {
 
 static int qcom_apcs_ipc_probe(struct platform_device *pdev)
 {
-	struct device_node *np = pdev->dev.of_node;
 	struct qcom_apcs_ipc *apcs;
 	struct regmap *regmap;
 	struct resource *res;
@@ -63,6 +62,11 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
 	void __iomem *base;
 	unsigned long i;
 	int ret;
+	static const struct of_device_id apcs_clk_match_table[] = {
+		{ .compatible = "qcom,msm8916-apcs-kpss-global", },
+		{ .compatible = "qcom,qcs404-apcs-apps-global", },
+		{}
+	};
 
 	apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL);
 	if (!apcs)
@@ -99,7 +103,7 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	if (of_device_is_compatible(np, "qcom,msm8916-apcs-kpss-global")) {
+	if (of_match_device(apcs_clk_match_table, &pdev->dev)) {
 		apcs->clk = platform_device_register_data(&pdev->dev,
 							  "qcom-apcs-msm8916-clk",
 							  -1, NULL, 0);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 3f4211b..45f6846 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -35,7 +35,7 @@
 #include <linux/rcupdate.h>
 #include <linux/sched/clock.h>
 #include <linux/rculist.h>
-
+#include <linux/delay.h>
 #include <trace/events/bcache.h>
 
 /*
@@ -649,7 +649,25 @@ static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
 		up(&b->io_mutex);
 	}
 
+retry:
+	/*
+	 * BTREE_NODE_dirty might be cleared in btree_flush_write() by
+	 * __bch_btree_node_write(). To avoid an extra flush, acquire
+	 * b->write_lock before checking BTREE_NODE_dirty bit.
+	 */
 	mutex_lock(&b->write_lock);
+	/*
+	 * If this btree node is selected by the journal code in
+	 * btree_flush_write(), delay and retry until the flush completes
+	 * and the BTREE_NODE_journal_flush bit is cleared.
+	 */
+	if (btree_node_journal_flush(b)) {
+		pr_debug("bnode %p is flushing by journal, retry", b);
+		mutex_unlock(&b->write_lock);
+		udelay(1);
+		goto retry;
+	}
+
 	if (btree_node_dirty(b))
 		__bch_btree_node_write(b, &cl);
 	mutex_unlock(&b->write_lock);
@@ -772,10 +790,15 @@ void bch_btree_cache_free(struct cache_set *c)
 	while (!list_empty(&c->btree_cache)) {
 		b = list_first_entry(&c->btree_cache, struct btree, list);
 
-		if (btree_node_dirty(b))
+		/*
+		 * This function is called by cache_set_free(); there is no
+		 * I/O on the cache at this point, so it is unnecessary to
+		 * acquire b->write_lock before clearing BTREE_NODE_dirty.
+		 */
+		if (btree_node_dirty(b)) {
 			btree_complete_write(b, btree_current_write(b));
-		clear_bit(BTREE_NODE_dirty, &b->flags);
-
+			clear_bit(BTREE_NODE_dirty, &b->flags);
+		}
 		mca_data_free(b);
 	}
 
@@ -1061,11 +1084,25 @@ static void btree_node_free(struct btree *b)
 
 	BUG_ON(b == b->c->root);
 
+retry:
 	mutex_lock(&b->write_lock);
+	/*
+	 * If the btree node is selected and being flushed in
+	 * btree_flush_write(), delay and retry until the
+	 * BTREE_NODE_journal_flush bit is cleared. Only then is it safe
+	 * to free the node here; otherwise the free would race with the
+	 * journal flush.
+	 */
+	if (btree_node_journal_flush(b)) {
+		mutex_unlock(&b->write_lock);
+		pr_debug("bnode %p journal_flush set, retry", b);
+		udelay(1);
+		goto retry;
+	}
 
-	if (btree_node_dirty(b))
+	if (btree_node_dirty(b)) {
 		btree_complete_write(b, btree_current_write(b));
-	clear_bit(BTREE_NODE_dirty, &b->flags);
+		clear_bit(BTREE_NODE_dirty, &b->flags);
+	}
 
 	mutex_unlock(&b->write_lock);
 
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index a68d6c5..4d0cca1 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -158,11 +158,13 @@ enum btree_flags {
 	BTREE_NODE_io_error,
 	BTREE_NODE_dirty,
 	BTREE_NODE_write_idx,
+	BTREE_NODE_journal_flush,
 };
 
 BTREE_FLAG(io_error);
 BTREE_FLAG(dirty);
 BTREE_FLAG(write_idx);
+BTREE_FLAG(journal_flush);
 
 static inline struct btree_write *btree_current_write(struct btree *b)
 {
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 73f5319..c12cd80 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -105,8 +105,14 @@ struct closure_syncer {
 
 static void closure_sync_fn(struct closure *cl)
 {
-	cl->s->done = 1;
-	wake_up_process(cl->s->task);
+	struct closure_syncer *s = cl->s;
+	struct task_struct *p;
+
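+	/*
+	 * Once done is set, the waiter in __closure_sync() may return and
+	 * s (on its stack) becomes invalid; read the task pointer before
+	 * setting done and rely on RCU to keep the task alive for the
+	 * wakeup.
+	 */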
+	rcu_read_lock();
+	p = READ_ONCE(s->task);
+	s->done = 1;
+	wake_up_process(p);
+	rcu_read_unlock();
 }
 
 void __sched __closure_sync(struct closure *cl)
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index c809724..8867100 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -538,6 +538,7 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
 {
 	struct btree *b = container_of(bk, struct btree, keys);
 	unsigned int i, stale;
+	char buf[80];
 
 	if (!KEY_PTRS(k) ||
 	    bch_extent_invalid(bk, k))
@@ -547,19 +548,19 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
 		if (!ptr_available(b->c, k, i))
 			return true;
 
-	if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
-		return false;
-
 	for (i = 0; i < KEY_PTRS(k); i++) {
 		stale = ptr_stale(b->c, k, i);
 
-		btree_bug_on(stale > 96, b,
+		if (stale && KEY_DIRTY(k)) {
+			bch_extent_to_text(buf, sizeof(buf), k);
+			pr_info("stale dirty pointer, stale %u, key: %s",
+				stale, buf);
+		}
+
+		btree_bug_on(stale > BUCKET_GC_GEN_MAX, b,
 			     "key too stale: %i, need_gc %u",
 			     stale, b->c->need_gc);
 
-		btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
-			     b, "stale dirty pointer");
-
 		if (stale)
 			return true;
 
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ec1e35a..7bb15cd 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -404,6 +404,7 @@ static void btree_flush_write(struct cache_set *c)
 retry:
 	best = NULL;
 
+	mutex_lock(&c->bucket_lock);
 	for_each_cached_btree(b, c, i)
 		if (btree_current_write(b)->journal) {
 			if (!best)
@@ -416,9 +417,14 @@ static void btree_flush_write(struct cache_set *c)
 		}
 
 	b = best;
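+	/*
+	 * Pin the selected node: mca_reap() and btree_node_free() retry
+	 * while BTREE_NODE_journal_flush is set.
+	 */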
+	if (b)
+		set_btree_node_journal_flush(b);
+	mutex_unlock(&c->bucket_lock);
+
 	if (b) {
 		mutex_lock(&b->write_lock);
 		if (!btree_current_write(b)->journal) {
+			clear_bit(BTREE_NODE_journal_flush, &b->flags);
 			mutex_unlock(&b->write_lock);
 			/* We raced */
 			atomic_long_inc(&c->retry_flush_write);
@@ -426,6 +432,7 @@ static void btree_flush_write(struct cache_set *c)
 		}
 
 		__bch_btree_node_write(b, NULL);
+		clear_bit(BTREE_NODE_journal_flush, &b->flags);
 		mutex_unlock(&b->write_lock);
 	}
 }
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index e6c7a84..2321643 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1768,7 +1768,6 @@ static int run_cache_set(struct cache_set *c)
 	set_gc_sectors(c);
 
 	if (CACHE_SYNC(&c->sb)) {
-		LIST_HEAD(journal);
 		struct bkey *k;
 		struct jset *j;
 
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index b1d0ae2..dc385b7 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1602,7 +1602,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	unsigned long freed;
 
 	c = container_of(shrink, struct dm_bufio_client, shrinker);
-	if (!dm_bufio_trylock(c))
+	if (sc->gfp_mask & __GFP_FS)
+		dm_bufio_lock(c);
+	else if (!dm_bufio_trylock(c))
 		return SHRINK_STOP;
 
 	freed  = __scan(c, sc->nr_to_scan, sc->gfp_mask);
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index b29a832..84ff700 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -541,7 +541,7 @@ static void wake_migration_worker(struct cache *cache)
 
 static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
 {
-	return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOWAIT);
+	return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
 }
 
 static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
@@ -553,9 +553,7 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
 {
 	struct dm_cache_migration *mg;
 
-	mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT);
-	if (!mg)
-		return NULL;
+	mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
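+	/*
+	 * mempool_alloc() cannot fail for a sleeping GFP_NOIO allocation,
+	 * so no NULL check is needed (the same holds for the GFP_NOIO
+	 * prison cell allocations above).
+	 */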
 
 	memset(mg, 0, sizeof(*mg));
 
@@ -663,10 +661,6 @@ static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bi
 	struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;
 
 	cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
-	if (!cell_prealloc) {
-		defer_bio(cache, bio);
-		return false;
-	}
 
 	build_key(oblock, end, &key);
 	r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
@@ -1492,11 +1486,6 @@ static int mg_lock_writes(struct dm_cache_migration *mg)
 	struct dm_bio_prison_cell_v2 *prealloc;
 
 	prealloc = alloc_prison_cell(cache);
-	if (!prealloc) {
-		DMERR_LIMIT("%s: alloc_prison_cell failed", cache_device_name(cache));
-		mg_complete(mg, false);
-		return -ENOMEM;
-	}
 
 	/*
 	 * Prevent writes to the block, but allow reads to continue.
@@ -1534,11 +1523,6 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
 	}
 
 	mg = alloc_migration(cache);
-	if (!mg) {
-		policy_complete_background_work(cache->policy, op, false);
-		background_work_end(cache);
-		return -ENOMEM;
-	}
 
 	mg->op = op;
 	mg->overwrite_bio = bio;
@@ -1627,10 +1611,6 @@ static int invalidate_lock(struct dm_cache_migration *mg)
 	struct dm_bio_prison_cell_v2 *prealloc;
 
 	prealloc = alloc_prison_cell(cache);
-	if (!prealloc) {
-		invalidate_complete(mg, false);
-		return -ENOMEM;
-	}
 
 	build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
 	r = dm_cell_lock_v2(cache->prison, &key,
@@ -1668,10 +1648,6 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
 		return -EPERM;
 
 	mg = alloc_migration(cache);
-	if (!mg) {
-		background_work_end(cache);
-		return -ENOMEM;
-	}
 
 	mg->overwrite_bio = bio;
 	mg->invalidate_cblock = cblock;
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 7d480c9..7e426e4 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -130,6 +130,7 @@ struct mapped_device {
 };
 
 int md_in_flight(struct mapped_device *md);
+void disable_discard(struct mapped_device *md);
 void disable_write_same(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);
 
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index a68e0b6..73b321b 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -950,6 +950,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
 {
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 	struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
+	struct mapped_device *md = dm_table_get_md(ti->table);
 
 	/* From now we require underlying device with our integrity profile */
 	if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
@@ -969,7 +970,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
 
 	if (crypt_integrity_aead(cc)) {
 		cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
-		DMINFO("Integrity AEAD, tag size %u, IV size %u.",
+		DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
 		       cc->integrity_tag_size, cc->integrity_iv_size);
 
 		if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
@@ -977,7 +978,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
 			return -EINVAL;
 		}
 	} else if (cc->integrity_iv_size)
-		DMINFO("Additional per-sector space %u bytes for IV.",
+		DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
 		       cc->integrity_iv_size);
 
 	if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 562b621..e71aecc 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1760,7 +1760,22 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
 			queue_work(ic->wait_wq, &dio->work);
 			return;
 		}
+		if (journal_read_pos != NOT_FOUND)
+			dio->range.n_sectors = ic->sectors_per_block;
 		wait_and_add_new_range(ic, &dio->range);
+		/*
+		 * wait_and_add_new_range drops the spinlock, so the journal
+		 * may have been changed arbitrarily. We need to recheck.
+		 * To simplify the code, we restrict I/O size to just one block.
+		 */
+		if (journal_read_pos != NOT_FOUND) {
+			sector_t next_sector;
+			unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
+			if (unlikely(new_pos != journal_read_pos)) {
+				remove_range_unlocked(ic, &dio->range);
+				goto retry;
+			}
+		}
 	}
 	spin_unlock_irq(&ic->endio_wait.lock);
 
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 671c243..3f694d9 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -548,8 +548,10 @@ static int run_io_job(struct kcopyd_job *job)
 	 * no point in continuing.
 	 */
 	if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
-	    job->master_job->write_err)
+	    job->master_job->write_err) {
+		job->write_err = job->master_job->write_err;
 		return -EIO;
+	}
 
 	io_job_start(job->kc->throttle);
 
@@ -601,6 +603,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
 			else
 				job->read_err = 1;
 			push(&kc->complete_jobs, job);
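+			/*
+			 * do_work() has already serviced complete_jobs in
+			 * this pass, so wake the worker or this failed job
+			 * may never be completed.
+			 */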
+			wake(kc);
 			break;
 		}
 
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index baa966e..481e54d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -554,8 +554,23 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 	return DM_MAPIO_REMAPPED;
 }
 
-static void multipath_release_clone(struct request *clone)
+static void multipath_release_clone(struct request *clone,
+				    union map_info *map_context)
 {
+	if (unlikely(map_context)) {
+		/*
+		 * non-NULL map_context means caller is still map
+		 * method; must undo multipath_clone_and_map()
+		 */
+		struct dm_mpath_io *mpio = get_mpio(map_context);
+		struct pgpath *pgpath = mpio->pgpath;
+
+		if (pgpath && pgpath->pg->ps.type->end_io)
+			pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
+						    &pgpath->path,
+						    mpio->nr_bytes);
+	}
+
 	blk_put_request(clone);
 }
 
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index c44925e..b78a8a4 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3199,7 +3199,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 			  */
 			r = rs_prepare_reshape(rs);
 			if (r)
-				return r;
+				goto bad;
 
 			/* Reshaping ain't recovery, so disable recovery */
 			rs_setup_recovery(rs, MaxSector);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 6e547b8..4d36373 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -219,7 +219,7 @@ static void dm_end_request(struct request *clone, blk_status_t error)
 	struct request *rq = tio->orig;
 
 	blk_rq_unprep_clone(clone);
-	tio->ti->type->release_clone_rq(clone);
+	tio->ti->type->release_clone_rq(clone, NULL);
 
 	rq_end_stats(md, rq);
 	if (!rq->q->mq_ops)
@@ -270,7 +270,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
 	rq_end_stats(md, rq);
 	if (tio->clone) {
 		blk_rq_unprep_clone(tio->clone);
-		tio->ti->type->release_clone_rq(tio->clone);
+		tio->ti->type->release_clone_rq(tio->clone, NULL);
 	}
 
 	if (!rq->q->mq_ops)
@@ -295,11 +295,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
 	}
 
 	if (unlikely(error == BLK_STS_TARGET)) {
-		if (req_op(clone) == REQ_OP_WRITE_SAME &&
-		    !clone->q->limits.max_write_same_sectors)
+		if (req_op(clone) == REQ_OP_DISCARD &&
+		    !clone->q->limits.max_discard_sectors)
+			disable_discard(tio->md);
+		else if (req_op(clone) == REQ_OP_WRITE_SAME &&
+			 !clone->q->limits.max_write_same_sectors)
 			disable_write_same(tio->md);
-		if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
-		    !clone->q->limits.max_write_zeroes_sectors)
+		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+			 !clone->q->limits.max_write_zeroes_sectors)
 			disable_write_zeroes(tio->md);
 	}
 
@@ -492,7 +495,7 @@ static int map_request(struct dm_rq_target_io *tio)
 	case DM_MAPIO_REMAPPED:
 		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
 			/* -ENOMEM */
-			ti->type->release_clone_rq(clone);
+			ti->type->release_clone_rq(clone, &tio->info);
 			return DM_MAPIO_REQUEUE;
 		}
 
@@ -502,7 +505,8 @@ static int map_request(struct dm_rq_target_io *tio)
 		ret = dm_dispatch_clone_request(clone, rq);
 		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
 			blk_rq_unprep_clone(clone);
-			tio->ti->type->release_clone_rq(clone);
+			blk_mq_cleanup_rq(clone);
+			tio->ti->type->release_clone_rq(clone, &tio->info);
 			tio->clone = NULL;
 			if (!rq->q->mq_ops)
 				r = DM_MAPIO_DELAY_REQUEUE;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 59d17c4..96343c7 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1350,7 +1350,7 @@ void dm_table_event(struct dm_table *t)
 }
 EXPORT_SYMBOL(dm_table_event);
 
-sector_t dm_table_get_size(struct dm_table *t)
+inline sector_t dm_table_get_size(struct dm_table *t)
 {
 	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
 }
@@ -1375,6 +1375,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
 	unsigned int l, n = 0, k = 0;
 	sector_t *node;
 
+	if (unlikely(sector >= dm_table_get_size(t)))
+		return &t->targets[t->num_targets];
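+		/* one past the last target: a sentinel callers treat as invalid */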
+
 	for (l = 0; l < t->depth; l++) {
 		n = get_child(n, k);
 		node = get_node(t, l, n);
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 314d17c..64dd0b3 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -136,7 +136,8 @@ static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
 	return DM_MAPIO_KILL;
 }
 
-static void io_err_release_clone_rq(struct request *clone)
+static void io_err_release_clone_rq(struct request *clone,
+				    union map_info *map_context)
 {
 }
 
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index ed3cace..6a26afc 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -2001,16 +2001,19 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
 
 int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
 {
-	int r;
+	int r = -EINVAL;
 	struct dm_block *sblock;
 	struct thin_disk_superblock *disk_super;
 
 	down_write(&pmd->root_lock);
+	if (pmd->fail_io)
+		goto out;
+
 	pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
 
 	r = superblock_lock(pmd, &sblock);
 	if (r) {
-		DMERR("couldn't read superblock");
+		DMERR("couldn't lock superblock");
 		goto out;
 	}
 
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 4cdde7a..7e8d7fc 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -401,15 +401,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
 	sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
 	struct bio *bio;
 
+	if (dmz_bdev_is_dying(zmd->dev))
+		return ERR_PTR(-EIO);
+
 	/* Get a new block and a BIO to read it */
 	mblk = dmz_alloc_mblock(zmd, mblk_no);
 	if (!mblk)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	bio = bio_alloc(GFP_NOIO, 1);
 	if (!bio) {
 		dmz_free_mblock(zmd, mblk);
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	spin_lock(&zmd->mblk_lock);
@@ -540,8 +543,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
 	if (!mblk) {
 		/* Cache miss: read the block from disk */
 		mblk = dmz_get_mblock_slow(zmd, mblk_no);
-		if (!mblk)
-			return ERR_PTR(-ENOMEM);
+		if (IS_ERR(mblk))
+			return mblk;
 	}
 
 	/* Wait for on-going read I/O and check for error */
@@ -569,16 +572,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
 /*
  * Issue a metadata block write BIO.
  */
-static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
-			     unsigned int set)
+static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
+			    unsigned int set)
 {
 	sector_t block = zmd->sb[set].block + mblk->no;
 	struct bio *bio;
 
+	if (dmz_bdev_is_dying(zmd->dev))
+		return -EIO;
+
 	bio = bio_alloc(GFP_NOIO, 1);
 	if (!bio) {
 		set_bit(DMZ_META_ERROR, &mblk->state);
-		return;
+		return -ENOMEM;
 	}
 
 	set_bit(DMZ_META_WRITING, &mblk->state);
@@ -590,6 +596,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
 	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
 	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
 	submit_bio(bio);
+
+	return 0;
 }
 
 /*
@@ -601,6 +609,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
 	struct bio *bio;
 	int ret;
 
+	if (dmz_bdev_is_dying(zmd->dev))
+		return -EIO;
+
 	bio = bio_alloc(GFP_NOIO, 1);
 	if (!bio)
 		return -ENOMEM;
@@ -658,22 +669,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
 {
 	struct dmz_mblock *mblk;
 	struct blk_plug plug;
-	int ret = 0;
+	int ret = 0, nr_mblks_submitted = 0;
 
 	/* Issue writes */
 	blk_start_plug(&plug);
-	list_for_each_entry(mblk, write_list, link)
-		dmz_write_mblock(zmd, mblk, set);
+	list_for_each_entry(mblk, write_list, link) {
+		ret = dmz_write_mblock(zmd, mblk, set);
+		if (ret)
+			break;
+		nr_mblks_submitted++;
+	}
 	blk_finish_plug(&plug);
 
 	/* Wait for completion */
 	list_for_each_entry(mblk, write_list, link) {
+		if (!nr_mblks_submitted)
+			break;
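+		/* wait only on blocks whose write BIO was actually submitted */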
 		wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
 			       TASK_UNINTERRUPTIBLE);
 		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
 			clear_bit(DMZ_META_ERROR, &mblk->state);
 			ret = -EIO;
 		}
+		nr_mblks_submitted--;
 	}
 
 	/* Flush drive cache (this will also sync data) */
@@ -735,6 +753,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
 	 */
 	dmz_lock_flush(zmd);
 
+	if (dmz_bdev_is_dying(zmd->dev)) {
+		ret = -EIO;
+		goto out;
+	}
+
 	/* Get dirty blocks */
 	spin_lock(&zmd->mblk_lock);
 	list_splice_init(&zmd->mblk_dirty_list, &write_list);
@@ -1534,7 +1557,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
 	struct dm_zone *zone;
 
 	if (list_empty(&zmd->map_rnd_list))
-		return NULL;
+		return ERR_PTR(-EBUSY);
 
 	list_for_each_entry(zone, &zmd->map_rnd_list, link) {
 		if (dmz_is_buf(zone))
@@ -1545,7 +1568,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
 			return dzone;
 	}
 
-	return NULL;
+	return ERR_PTR(-EBUSY);
 }
 
 /*
@@ -1556,7 +1579,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
 	struct dm_zone *zone;
 
 	if (list_empty(&zmd->map_seq_list))
-		return NULL;
+		return ERR_PTR(-EBUSY);
 
 	list_for_each_entry(zone, &zmd->map_seq_list, link) {
 		if (!zone->bzone)
@@ -1565,7 +1588,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
 			return zone;
 	}
 
-	return NULL;
+	return ERR_PTR(-EBUSY);
 }
 
 /*
@@ -1623,6 +1646,10 @@ struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chu
 		/* Allocate a random zone */
 		dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
 		if (!dzone) {
+			if (dmz_bdev_is_dying(zmd->dev)) {
+				dzone = ERR_PTR(-EIO);
+				goto out;
+			}
 			dmz_wait_for_free_zones(zmd);
 			goto again;
 		}
@@ -1720,6 +1747,10 @@ struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
 	/* Alloate a random zone */
 	bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
 	if (!bzone) {
+		if (dmz_bdev_is_dying(zmd->dev)) {
+			bzone = ERR_PTR(-EIO);
+			goto out;
+		}
 		dmz_wait_for_free_zones(zmd);
 		goto again;
 	}
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index edf4b95..9470b8f 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -37,7 +37,7 @@ enum {
 /*
  * Number of seconds of target BIO inactivity to consider the target idle.
  */
-#define DMZ_IDLE_PERIOD		(10UL * HZ)
+#define DMZ_IDLE_PERIOD			(10UL * HZ)
 
 /*
  * Percentage of unmapped (free) random zones below which reclaim starts
@@ -134,6 +134,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
 		set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
 
 	while (block < end_block) {
+		if (dev->flags & DMZ_BDEV_DYING)
+			return -EIO;
+
 		/* Get a valid region from the source zone */
 		ret = dmz_first_valid_block(zmd, src_zone, &block);
 		if (ret <= 0)
@@ -215,7 +218,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
 
 	dmz_unlock_flush(zmd);
 
-	return 0;
+	return ret;
 }
 
 /*
@@ -259,7 +262,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
 
 	dmz_unlock_flush(zmd);
 
-	return 0;
+	return ret;
 }
 
 /*
@@ -312,7 +315,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
 
 	dmz_unlock_flush(zmd);
 
-	return 0;
+	return ret;
 }
 
 /*
@@ -334,7 +337,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
 /*
  * Find a candidate zone for reclaim and process it.
  */
-static void dmz_reclaim(struct dmz_reclaim *zrc)
+static int dmz_do_reclaim(struct dmz_reclaim *zrc)
 {
 	struct dmz_metadata *zmd = zrc->metadata;
 	struct dm_zone *dzone;
@@ -344,8 +347,8 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
 
 	/* Get a data zone */
 	dzone = dmz_get_zone_for_reclaim(zmd);
-	if (!dzone)
-		return;
+	if (IS_ERR(dzone))
+		return PTR_ERR(dzone);
 
 	start = jiffies;
 
@@ -391,13 +394,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
 out:
 	if (ret) {
 		dmz_unlock_zone_reclaim(dzone);
-		return;
+		return ret;
 	}
 
-	(void) dmz_flush_metadata(zrc->metadata);
+	ret = dmz_flush_metadata(zrc->metadata);
+	if (ret) {
+		dmz_dev_debug(zrc->dev,
+			      "Metadata flush for zone %u failed, err %d\n",
+			      dmz_id(zmd, rzone), ret);
+		return ret;
+	}
 
 	dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
 		      dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
+	return 0;
 }
 
 /*
@@ -442,6 +452,10 @@ static void dmz_reclaim_work(struct work_struct *work)
 	struct dmz_metadata *zmd = zrc->metadata;
 	unsigned int nr_rnd, nr_unmap_rnd;
 	unsigned int p_unmap_rnd;
+	int ret;
+
+	if (dmz_bdev_is_dying(zrc->dev))
+		return;
 
 	if (!dmz_should_reclaim(zrc)) {
 		mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
@@ -471,7 +485,17 @@ static void dmz_reclaim_work(struct work_struct *work)
 		      (dmz_target_idle(zrc) ? "Idle" : "Busy"),
 		      p_unmap_rnd, nr_unmap_rnd, nr_rnd);
 
-	dmz_reclaim(zrc);
+	ret = dmz_do_reclaim(zrc);
+	if (ret) {
+		dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
+		if (ret == -EIO)
+			/*
+			 * LLD might be performing some error handling sequence
+			 * The LLD might be performing an error handling sequence
+			 * on the underlying device. To avoid interfering, do not
+			 * schedule the next reclaim run immediately.
+			return;
+	}
 
 	dmz_schedule_reclaim(zrc);
 }
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 85fb2ba..3dd668f6 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -277,8 +277,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
 
 	/* Get the buffer zone. One will be allocated if needed */
 	bzone = dmz_get_chunk_buffer(zmd, zone);
-	if (!bzone)
-		return -ENOSPC;
+	if (IS_ERR(bzone))
+		return PTR_ERR(bzone);
 
 	if (dmz_is_readonly(bzone))
 		return -EROFS;
@@ -389,6 +389,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
 
 	dmz_lock_metadata(zmd);
 
+	if (dmz->dev->flags & DMZ_BDEV_DYING) {
+		ret = -EIO;
+		goto out;
+	}
+
 	/*
 	 * Get the data zone mapping the chunk. There may be no
 	 * mapping for read and discard. If a mapping is obtained,
@@ -493,6 +498,8 @@ static void dmz_flush_work(struct work_struct *work)
 
 	/* Flush dirty metadata blocks */
 	ret = dmz_flush_metadata(dmz->metadata);
+	if (ret)
+		dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
 
 	/* Process queued flush requests */
 	while (1) {
@@ -513,22 +520,24 @@ static void dmz_flush_work(struct work_struct *work)
  * Get a chunk work and start it to process a new BIO.
  * If the BIO chunk has no work yet, create one.
  */
-static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
+static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
 {
 	unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
 	struct dm_chunk_work *cw;
+	int ret = 0;
 
 	mutex_lock(&dmz->chunk_lock);
 
 	/* Get the BIO chunk work. If one is not active yet, create one */
 	cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
 	if (!cw) {
-		int ret;
 
 		/* Create a new chunk work */
 		cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
-		if (!cw)
+		if (unlikely(!cw)) {
+			ret = -ENOMEM;
 			goto out;
+		}
 
 		INIT_WORK(&cw->work, dmz_chunk_work);
 		atomic_set(&cw->refcount, 0);
@@ -539,7 +548,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
 		ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
 		if (unlikely(ret)) {
 			kfree(cw);
-			cw = NULL;
 			goto out;
 		}
 	}
@@ -547,10 +555,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
 	bio_list_add(&cw->bio_list, bio);
 	dmz_get_chunk_work(cw);
 
+	dmz_reclaim_bio_acc(dmz->reclaim);
 	if (queue_work(dmz->chunk_wq, &cw->work))
 		dmz_get_chunk_work(cw);
 out:
 	mutex_unlock(&dmz->chunk_lock);
+	return ret;
+}
+
+/*
+ * Check the backing device availability. If it's on the way out,
+ * start failing I/O. Reclaim and metadata components also call this
+ * function to cleanly abort operation in the event of such failure.
+ */
+bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
+{
+	struct gendisk *disk;
+
+	if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
+		disk = dmz_dev->bdev->bd_disk;
+		if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
+			dmz_dev_warn(dmz_dev, "Backing device queue dying");
+			dmz_dev->flags |= DMZ_BDEV_DYING;
+		} else if (disk->fops->check_events) {
+			if (disk->fops->check_events(disk, 0) &
+					DISK_EVENT_MEDIA_CHANGE) {
+				dmz_dev_warn(dmz_dev, "Backing device offline");
+				dmz_dev->flags |= DMZ_BDEV_DYING;
+			}
+		}
+	}
+
+	return dmz_dev->flags & DMZ_BDEV_DYING;
 }
 
 /*
@@ -564,6 +600,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
 	sector_t sector = bio->bi_iter.bi_sector;
 	unsigned int nr_sectors = bio_sectors(bio);
 	sector_t chunk_sector;
+	int ret;
+
+	if (dmz_bdev_is_dying(dmz->dev))
+		return DM_MAPIO_KILL;
 
 	dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
 		      bio_op(bio), (unsigned long long)sector, nr_sectors,
@@ -601,8 +641,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
 		dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
 
 	/* Now ready to handle this BIO */
-	dmz_reclaim_bio_acc(dmz->reclaim);
-	dmz_queue_chunk_work(dmz, bio);
+	ret = dmz_queue_chunk_work(dmz, bio);
+	if (ret) {
+		dmz_dev_debug(dmz->dev,
+			      "BIO op %d, can't process chunk %llu, err %i\n",
+			      bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
+			      ret);
+		return DM_MAPIO_REQUEUE;
+	}
 
 	return DM_MAPIO_SUBMITTED;
 }
@@ -856,6 +902,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
 {
 	struct dmz_target *dmz = ti->private;
 
+	if (dmz_bdev_is_dying(dmz->dev))
+		return -ENODEV;
+
 	*bdev = dmz->dev->bdev;
 
 	return 0;
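
dmz_map() above now uses all three bio-based device-mapper verdicts. For reference (general DM semantics, not introduced by this patch): DM_MAPIO_SUBMITTED means the target took ownership of the bio, DM_MAPIO_KILL completes it immediately with an error (appropriate once dmz_bdev_is_dying() reports the backing device gone), and DM_MAPIO_REQUEUE hands it back to the core for a later retry (appropriate for the transient -ENOMEM from dmz_queue_chunk_work()). A hedged sketch of the dispatch, with hypothetical helpers:

	/* Sketch only: how a bio-based target typically picks a verdict. */
	static int example_map(struct dm_target *ti, struct bio *bio)
	{
		if (backing_dev_is_dying(ti))	/* hypothetical check */
			return DM_MAPIO_KILL;	/* fail the bio right away */

		if (queue_bio_work(ti, bio))	/* hypothetical, e.g. -ENOMEM */
			return DM_MAPIO_REQUEUE;	/* transient, retry later */

		return DM_MAPIO_SUBMITTED;	/* bio now owned by the target */
	}
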
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index ed8de49..93a6452 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -56,6 +56,8 @@ struct dmz_dev {
 
 	unsigned int		nr_zones;
 
+	unsigned int		flags;
+
 	sector_t		zone_nr_sectors;
 	unsigned int		zone_nr_sectors_shift;
 
@@ -67,6 +69,9 @@ struct dmz_dev {
 				 (dev)->zone_nr_sectors_shift)
 #define dmz_chunk_block(dev, b)	((b) & ((dev)->zone_nr_blocks - 1))
 
+/* Device flags. */
+#define DMZ_BDEV_DYING		(1 << 0)
+
 /*
  * Zone descriptor.
  */
@@ -245,4 +250,9 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc);
 void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
 void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
 
+/*
+ * Functions defined in dm-zoned-target.c
+ */
+bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
+
 #endif /* DM_ZONED_H */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 42768fe..c9860e3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -910,6 +910,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
 	}
 }
 
+void disable_discard(struct mapped_device *md)
+{
+	struct queue_limits *limits = dm_get_queue_limits(md);
+
+	/* device doesn't really support DISCARD, disable it */
+	limits->max_discard_sectors = 0;
+	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
+}
+
 void disable_write_same(struct mapped_device *md)
 {
 	struct queue_limits *limits = dm_get_queue_limits(md);
@@ -935,11 +944,14 @@ static void clone_endio(struct bio *bio)
 	dm_endio_fn endio = tio->ti->type->end_io;
 
 	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
-		if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-		    !bio->bi_disk->queue->limits.max_write_same_sectors)
+		if (bio_op(bio) == REQ_OP_DISCARD &&
+		    !bio->bi_disk->queue->limits.max_discard_sectors)
+			disable_discard(md);
+		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
+			 !bio->bi_disk->queue->limits.max_write_same_sectors)
 			disable_write_same(md);
-		if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-		    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+			 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
 			disable_write_zeroes(md);
 	}
 
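A note on the clone_endio() rework above: a bio carries exactly one operation, so the three BLK_STS_TARGET checks are mutually exclusive and the else-if chain both documents that and skips dead comparisons. All three branches apply the same recovery pattern: when the lower device rejects an optional operation (DISCARD, WRITE SAME, WRITE ZEROES), the corresponding queue limit is zeroed so the block layer stops generating that operation for this device, rather than failing every future bio of that type.
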
diff --git a/drivers/md/md.c b/drivers/md/md.c
index fb5d702..a8fbaa3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1770,8 +1770,15 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
 				if (!(le32_to_cpu(sb->feature_map) &
 				      MD_FEATURE_RECOVERY_BITMAP))
 					rdev->saved_raid_disk = -1;
-			} else
-				set_bit(In_sync, &rdev->flags);
+			} else {
+				/*
+				 * If the array is FROZEN, then the device can't
+				 * be in_sync with the rest of the array.
+				 */
+				if (!test_bit(MD_RECOVERY_FROZEN,
+					      &mddev->recovery))
+					set_bit(In_sync, &rdev->flags);
+			}
 			rdev->raid_disk = role;
 			break;
 		}
@@ -4116,7 +4123,7 @@ array_state_show(struct mddev *mddev, char *page)
 {
 	enum array_state st = inactive;
 
-	if (mddev->pers)
+	if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags))
 		switch(mddev->ro) {
 		case 1:
 			st = readonly;
@@ -5671,9 +5678,6 @@ int md_run(struct mddev *mddev)
 		md_update_sb(mddev, 0);
 
 	md_new_event(mddev);
-	sysfs_notify_dirent_safe(mddev->sysfs_state);
-	sysfs_notify_dirent_safe(mddev->sysfs_action);
-	sysfs_notify(&mddev->kobj, NULL, "degraded");
 	return 0;
 
 abort:
@@ -5687,6 +5691,7 @@ static int do_md_run(struct mddev *mddev)
 {
 	int err;
 
+	set_bit(MD_NOT_READY, &mddev->flags);
 	err = md_run(mddev);
 	if (err)
 		goto out;
@@ -5707,9 +5712,14 @@ static int do_md_run(struct mddev *mddev)
 
 	set_capacity(mddev->gendisk, mddev->array_sectors);
 	revalidate_disk(mddev->gendisk);
+	clear_bit(MD_NOT_READY, &mddev->flags);
 	mddev->changed = 1;
 	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
+	sysfs_notify_dirent_safe(mddev->sysfs_state);
+	sysfs_notify_dirent_safe(mddev->sysfs_action);
+	sysfs_notify(&mddev->kobj, NULL, "degraded");
 out:
+	clear_bit(MD_NOT_READY, &mddev->flags);
 	return err;
 }
 
@@ -8797,6 +8807,7 @@ void md_check_recovery(struct mddev *mddev)
 
 	if (mddev_trylock(mddev)) {
 		int spares = 0;
+		bool try_set_sync = mddev->safemode != 0;
 
 		if (!mddev->external && mddev->safemode == 1)
 			mddev->safemode = 0;
@@ -8842,7 +8853,7 @@ void md_check_recovery(struct mddev *mddev)
 			}
 		}
 
-		if (!mddev->external && !mddev->in_sync) {
+		if (try_set_sync && !mddev->external && !mddev->in_sync) {
 			spin_lock(&mddev->lock);
 			set_in_sync(mddev);
 			spin_unlock(&mddev->lock);
@@ -8948,7 +8959,8 @@ void md_reap_sync_thread(struct mddev *mddev)
 	/* resync has finished, collect result */
 	md_unregister_thread(&mddev->sync_thread);
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
-	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
+	    mddev->degraded != mddev->raid_disks) {
 		/* success...*/
 		/* activate any spares */
 		if (mddev->pers->spare_active(mddev)) {
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 325cb21..4f89463 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -243,6 +243,9 @@ enum mddev_flags {
 	MD_UPDATING_SB,		/* md_check_recovery is updating the metadata
 				 * without explicitly holding reconfig_mutex.
 				 */
+	MD_NOT_READY,		/* do_md_run() is active, so 'array_state'
+				 * must not report that the array is ready yet
+				 */
 };
 
 enum mddev_sb_flags {
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 58b3197..8aae062 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -628,39 +628,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
 
 	new_parent = shadow_current(s);
 
+	pn = dm_block_data(new_parent);
+	size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
+		sizeof(__le64) : s->info->value_type.size;
+
+	/* create & init the left block */
 	r = new_block(s->info, &left);
 	if (r < 0)
 		return r;
 
+	ln = dm_block_data(left);
+	nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
+
+	ln->header.flags = pn->header.flags;
+	ln->header.nr_entries = cpu_to_le32(nr_left);
+	ln->header.max_entries = pn->header.max_entries;
+	ln->header.value_size = pn->header.value_size;
+	memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
+	memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
+
+	/* create & init the right block */
 	r = new_block(s->info, &right);
 	if (r < 0) {
 		unlock_block(s->info, left);
 		return r;
 	}
 
-	pn = dm_block_data(new_parent);
-	ln = dm_block_data(left);
 	rn = dm_block_data(right);
-
-	nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
 	nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
 
-	ln->header.flags = pn->header.flags;
-	ln->header.nr_entries = cpu_to_le32(nr_left);
-	ln->header.max_entries = pn->header.max_entries;
-	ln->header.value_size = pn->header.value_size;
-
 	rn->header.flags = pn->header.flags;
 	rn->header.nr_entries = cpu_to_le32(nr_right);
 	rn->header.max_entries = pn->header.max_entries;
 	rn->header.value_size = pn->header.value_size;
-
-	memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
 	memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
-
-	size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
-		sizeof(__le64) : s->info->value_type.size;
-	memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
 	memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
 	       nr_right * size);
 
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index aec44924..2532858 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -249,7 +249,7 @@ static int out(struct sm_metadata *smm)
 	}
 
 	if (smm->recursion_count == 1)
-		apply_bops(smm);
+		r = apply_bops(smm);
 
 	smm->recursion_count--;
 
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f4daa56..3cafbfd 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -26,6 +26,9 @@
 #include "raid0.h"
 #include "raid5.h"
 
+static int default_layout = 0;
+module_param(default_layout, int, 0644);
+
 #define UNSUPPORTED_MDDEV_FLAGS		\
 	((1L << MD_HAS_JOURNAL) |	\
 	 (1L << MD_JOURNAL_CLEAN) |	\
@@ -146,6 +149,19 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 	}
 	pr_debug("md/raid0:%s: FINAL %d zones\n",
 		 mdname(mddev), conf->nr_strip_zones);
+
+	if (conf->nr_strip_zones == 1) {
+		conf->layout = RAID0_ORIG_LAYOUT;
+	} else if (default_layout == RAID0_ORIG_LAYOUT ||
+		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
+		conf->layout = default_layout;
+	} else {
+		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
+		       mdname(mddev));
+		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
+		err = -ENOTSUPP;
+		goto abort;
+	}
 	/*
 	 * now since we have the hard sector sizes, we can make sure
 	 * chunk size is a multiple of that sector size
@@ -555,10 +571,12 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
 
 static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
 {
+	struct r0conf *conf = mddev->private;
 	struct strip_zone *zone;
 	struct md_rdev *tmp_dev;
 	sector_t bio_sector;
 	sector_t sector;
+	sector_t orig_sector;
 	unsigned chunk_sects;
 	unsigned sectors;
 
@@ -592,8 +610,21 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
 		bio = split;
 	}
 
+	orig_sector = sector;
 	zone = find_zone(mddev->private, &sector);
-	tmp_dev = map_sector(mddev, zone, sector, &sector);
+	switch (conf->layout) {
+	case RAID0_ORIG_LAYOUT:
+		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
+		break;
+	case RAID0_ALT_MULTIZONE_LAYOUT:
+		tmp_dev = map_sector(mddev, zone, sector, &sector);
+		break;
+	default:
+		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
+		bio_io_error(bio);
+		return true;
+	}
+
 	bio_set_dev(bio, tmp_dev->bdev);
 	bio->bi_iter.bi_sector = sector + zone->dev_start +
 		tmp_dev->data_offset;
diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
index 540e65d..3816e54 100644
--- a/drivers/md/raid0.h
+++ b/drivers/md/raid0.h
@@ -8,11 +8,25 @@ struct strip_zone {
 	int	 nb_dev;	/* # of devices attached to the zone */
 };
 
+/* Linux 3.14 (20d0189b101) made an unintended change to
+ * the RAID0 layout for multi-zone arrays (where devices aren't all
+ * the same size.
+ * RAID0_ORIG_LAYOUT restores the original layout
+ * RAID0_ALT_MULTIZONE_LAYOUT uses the altered layout
+ * The layouts are identical when there is only one zone (all
+ * devices the same size).
+ */
+
+enum r0layout {
+	RAID0_ORIG_LAYOUT = 1,
+	RAID0_ALT_MULTIZONE_LAYOUT = 2,
+};
 struct r0conf {
 	struct strip_zone	*strip_zone;
 	struct md_rdev		**devlist; /* lists of rdevs, pointed to
 					    * by strip_zone->dev */
 	int			nr_strip_zones;
+	enum r0layout		layout;
 };
 
 #endif
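
An illustrative example of why the layout matters (sizes invented for this note): take three members of 1, 2 and 2 chunks. Zone 0 stripes across all three devices (array chunks 0-2); zone 1 stripes across the two larger devices (array chunks 3-4). For array chunk 3, the original layout derives the device index from the absolute chunk number (3 mod 2 = 1, the second device of the zone), while the altered layout derives it from the zone-relative chunk number (0 mod 2 = 0, the first device). The same logical chunk therefore lives on different disks under the two layouts, and an array written under one layout reads back corrupt under the other. That is why create_strip_zones() above refuses multi-zone assembly until the administrator pins a layout, either at boot (raid0.default_layout=1 or =2) or, since the module_param in raid0.c is 0644, via /sys/module/raid0/parameters/default_layout.
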
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index fa47249..6929d110 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -434,19 +434,21 @@ static void raid1_end_write_request(struct bio *bio)
 		    /* We never try FailFast to WriteMostly devices */
 		    !test_bit(WriteMostly, &rdev->flags)) {
 			md_error(r1_bio->mddev, rdev);
-			if (!test_bit(Faulty, &rdev->flags))
-				/* This is the only remaining device,
-				 * We need to retry the write without
-				 * FailFast
-				 */
-				set_bit(R1BIO_WriteError, &r1_bio->state);
-			else {
-				/* Finished with this branch */
-				r1_bio->bios[mirror] = NULL;
-				to_put = bio;
-			}
-		} else
+		}
+
+		/*
+		 * When the device is faulty, it is not necessary to
+		 * handle the write error.
+		 * For failfast, this is the only remaining device,
+		 * so we need to retry the write without FailFast.
+		 */
+		if (!test_bit(Faulty, &rdev->flags))
 			set_bit(R1BIO_WriteError, &r1_bio->state);
+		else {
+			/* Finished with this branch */
+			r1_bio->bios[mirror] = NULL;
+			to_put = bio;
+		}
 	} else {
 		/*
 		 * Set R1BIO_Uptodate in our master bio, so that we
@@ -3103,6 +3105,13 @@ static int raid1_run(struct mddev *mddev)
 		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
 		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
 			mddev->degraded++;
+	/*
+	 * RAID1 needs at least one active disk
+	 */
+	if (conf->raid_disks - mddev->degraded < 1) {
+		ret = -EINVAL;
+		goto abort;
+	}
 
 	if (conf->raid_disks - mddev->degraded == 1)
 		mddev->recovery_cp = MaxSector;
@@ -3136,8 +3145,12 @@ static int raid1_run(struct mddev *mddev)
 	ret =  md_integrity_register(mddev);
 	if (ret) {
 		md_unregister_thread(&mddev->thread);
-		raid1_free(mddev, conf);
+		goto abort;
 	}
+	return 0;
+
+abort:
+	raid1_free(mddev, conf);
 	return ret;
 }
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a147619..4a5aad2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2540,7 +2540,8 @@ static void raid5_end_read_request(struct bio * bi)
 		int set_bad = 0;
 
 		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
-		atomic_inc(&rdev->read_errors);
+		if (bi->bi_status != BLK_STS_PROTECTION)
+			atomic_inc(&rdev->read_errors);
 		if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
 			pr_warn_ratelimited(
 				"md/raid:%s: read error on replacement device (sector %llu on %s).\n",
@@ -2572,7 +2573,9 @@ static void raid5_end_read_request(struct bio * bi)
 		    && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
 			retry = 1;
 		if (retry)
-			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
+			if (sh->qd_idx >= 0 && sh->pd_idx == i)
+				set_bit(R5_ReadError, &sh->dev[i].flags);
+			else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
 				set_bit(R5_ReadError, &sh->dev[i].flags);
 				clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
 			} else
@@ -5721,7 +5724,8 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 				do_flush = false;
 			}
 
-			set_bit(STRIPE_HANDLE, &sh->state);
+			if (!sh->batch_head)
+				set_bit(STRIPE_HANDLE, &sh->state);
 			clear_bit(STRIPE_DELAYED, &sh->state);
 			if ((!sh->batch_head || sh == sh->batch_head) &&
 			    (bi->bi_opf & REQ_SYNC) &&
diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile
index 29a2ab9..ad8677d 100644
--- a/drivers/media/cec/Makefile
+++ b/drivers/media/cec/Makefile
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
-cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o
+cec-objs := cec-core.o cec-adap.o cec-api.o
 
 ifeq ($(CONFIG_CEC_NOTIFIER),y)
   cec-objs += cec-notifier.o
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index a7ea27d..4a15d53 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -62,6 +62,19 @@ static unsigned int cec_log_addr2dev(const struct cec_adapter *adap, u8 log_addr
 	return adap->log_addrs.primary_device_type[i < 0 ? 0 : i];
 }
 
+u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
+			   unsigned int *offset)
+{
+	unsigned int loc = cec_get_edid_spa_location(edid, size);
+
+	if (offset)
+		*offset = loc;
+	if (loc == 0)
+		return CEC_PHYS_ADDR_INVALID;
+	return (edid[loc] << 8) | edid[loc + 1];
+}
+EXPORT_SYMBOL_GPL(cec_get_edid_phys_addr);
+
 /*
  * Queue a new event for this filehandle. If ts == 0, then set it
  * to the current time.
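
For reference, cec_get_edid_phys_addr(), now living in cec-adap.c, returns the CEC physical address a.b.c.d packed one nibble per component from the two source-physical-address bytes in the EDID. A quick worked example with invented bytes:

	/* Illustration only: decoding the two SPA bytes from an EDID. */
	u8 hi = 0x12, lo = 0x30;	/* example bytes at loc and loc + 1 */
	u16 pa = (hi << 8) | lo;	/* 0x1230, i.e. CEC address 1.2.3.0 */
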
diff --git a/drivers/media/cec/cec-edid.c b/drivers/media/cec/cec-edid.c
deleted file mode 100644
index f587e8e..0000000
--- a/drivers/media/cec/cec-edid.c
+++ /dev/null
@@ -1,95 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * cec-edid - HDMI Consumer Electronics Control EDID & CEC helper functions
- *
- * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <media/cec.h>
-
-u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
-			   unsigned int *offset)
-{
-	unsigned int loc = cec_get_edid_spa_location(edid, size);
-
-	if (offset)
-		*offset = loc;
-	if (loc == 0)
-		return CEC_PHYS_ADDR_INVALID;
-	return (edid[loc] << 8) | edid[loc + 1];
-}
-EXPORT_SYMBOL_GPL(cec_get_edid_phys_addr);
-
-void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr)
-{
-	unsigned int loc = cec_get_edid_spa_location(edid, size);
-	u8 sum = 0;
-	unsigned int i;
-
-	if (loc == 0)
-		return;
-	edid[loc] = phys_addr >> 8;
-	edid[loc + 1] = phys_addr & 0xff;
-	loc &= ~0x7f;
-
-	/* update the checksum */
-	for (i = loc; i < loc + 127; i++)
-		sum += edid[i];
-	edid[i] = 256 - sum;
-}
-EXPORT_SYMBOL_GPL(cec_set_edid_phys_addr);
-
-u16 cec_phys_addr_for_input(u16 phys_addr, u8 input)
-{
-	/* Check if input is sane */
-	if (WARN_ON(input == 0 || input > 0xf))
-		return CEC_PHYS_ADDR_INVALID;
-
-	if (phys_addr == 0)
-		return input << 12;
-
-	if ((phys_addr & 0x0fff) == 0)
-		return phys_addr | (input << 8);
-
-	if ((phys_addr & 0x00ff) == 0)
-		return phys_addr | (input << 4);
-
-	if ((phys_addr & 0x000f) == 0)
-		return phys_addr | input;
-
-	/*
-	 * All nibbles are used so no valid physical addresses can be assigned
-	 * to the input.
-	 */
-	return CEC_PHYS_ADDR_INVALID;
-}
-EXPORT_SYMBOL_GPL(cec_phys_addr_for_input);
-
-int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
-{
-	int i;
-
-	if (parent)
-		*parent = phys_addr;
-	if (port)
-		*port = 0;
-	if (phys_addr == CEC_PHYS_ADDR_INVALID)
-		return 0;
-	for (i = 0; i < 16; i += 4)
-		if (phys_addr & (0xf << i))
-			break;
-	if (i == 16)
-		return 0;
-	if (parent)
-		*parent = phys_addr & (0xfff0 << i);
-	if (port)
-		*port = (phys_addr >> i) & 0xf;
-	for (i += 4; i < 16; i += 4)
-		if ((phys_addr & (0xf << i)) == 0)
-			return -EINVAL;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(cec_phys_addr_validate);
diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
index dd2078b..2424680 100644
--- a/drivers/media/cec/cec-notifier.c
+++ b/drivers/media/cec/cec-notifier.c
@@ -123,6 +123,8 @@ void cec_notifier_unregister(struct cec_notifier *n)
 {
 	mutex_lock(&n->lock);
 	n->callback = NULL;
+	n->cec_adap->notifier = NULL;
+	n->cec_adap = NULL;
 	mutex_unlock(&n->lock);
 	cec_notifier_put(n);
 }
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index c4e7ebf..8a61150 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -164,6 +164,9 @@ static void dvb_frontend_free(struct kref *ref)
 
 static void dvb_frontend_put(struct dvb_frontend *fe)
 {
+	/* call detach before dropping the reference count */
+	if (fe->ops.detach)
+		fe->ops.detach(fe);
 	/*
 	 * Check if the frontend was registered, as otherwise
 	 * kref was not initialized yet.
@@ -3035,7 +3038,6 @@ void dvb_frontend_detach(struct dvb_frontend *fe)
 	dvb_frontend_invoke_release(fe, fe->ops.release_sec);
 	dvb_frontend_invoke_release(fe, fe->ops.tuner_ops.release);
 	dvb_frontend_invoke_release(fe, fe->ops.analog_ops.release);
-	dvb_frontend_invoke_release(fe, fe->ops.detach);
 	dvb_frontend_put(fe);
 }
 EXPORT_SYMBOL(dvb_frontend_detach);
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 3c87785..04dc2f4 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -339,8 +339,10 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev,
 	if (npads) {
 		dvbdev->pads = kcalloc(npads, sizeof(*dvbdev->pads),
 				       GFP_KERNEL);
-		if (!dvbdev->pads)
+		if (!dvbdev->pads) {
+			kfree(dvbdev->entity);
 			return -ENOMEM;
+		}
 	}
 
 	switch (type) {
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index 29836c1..ee830c7 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -18,6 +18,7 @@
 
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/idr.h>
 #include <linux/dvb/frontend.h>
 #include <asm/types.h>
 
@@ -43,8 +44,7 @@ struct dvb_pll_priv {
 };
 
 #define DVB_PLL_MAX 64
-
-static unsigned int dvb_pll_devcount;
+static DEFINE_IDA(pll_ida);
 
 static int debug;
 module_param(debug, int, 0644);
@@ -796,6 +796,7 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
 	struct dvb_pll_priv *priv = NULL;
 	int ret;
 	const struct dvb_pll_desc *desc;
+	int nr;
 
 	b1 = kmalloc(1, GFP_KERNEL);
 	if (!b1)
@@ -804,9 +805,14 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
 	b1[0] = 0;
 	msg.buf = b1;
 
-	if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) &&
-	    (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list)))
-		pll_desc_id = id[dvb_pll_devcount];
+	nr = ida_simple_get(&pll_ida, 0, DVB_PLL_MAX, GFP_KERNEL);
+	if (nr < 0) {
+		kfree(b1);
+		return NULL;
+	}
+
+	if (id[nr] > DVB_PLL_UNDEFINED && id[nr] < ARRAY_SIZE(pll_list))
+		pll_desc_id = id[nr];
 
 	BUG_ON(pll_desc_id < 1 || pll_desc_id >= ARRAY_SIZE(pll_list));
 
@@ -817,24 +823,20 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
 			fe->ops.i2c_gate_ctrl(fe, 1);
 
 		ret = i2c_transfer (i2c, &msg, 1);
-		if (ret != 1) {
-			kfree(b1);
-			return NULL;
-		}
+		if (ret != 1)
+			goto out;
 		if (fe->ops.i2c_gate_ctrl)
 			     fe->ops.i2c_gate_ctrl(fe, 0);
 	}
 
 	priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL);
-	if (!priv) {
-		kfree(b1);
-		return NULL;
-	}
+	if (!priv)
+		goto out;
 
 	priv->pll_i2c_address = pll_addr;
 	priv->i2c = i2c;
 	priv->pll_desc = desc;
-	priv->nr = dvb_pll_devcount++;
+	priv->nr = nr;
 
 	memcpy(&fe->ops.tuner_ops, &dvb_pll_tuner_ops,
 	       sizeof(struct dvb_tuner_ops));
@@ -867,6 +869,11 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
 	kfree(b1);
 
 	return fe;
+out:
+	kfree(b1);
+	ida_simple_remove(&pll_ida, nr);
+
+	return NULL;
 }
 EXPORT_SYMBOL(dvb_pll_attach);
 
@@ -903,9 +910,10 @@ dvb_pll_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
 static int dvb_pll_remove(struct i2c_client *client)
 {
-	struct dvb_frontend *fe;
+	struct dvb_frontend *fe = i2c_get_clientdata(client);
+	struct dvb_pll_priv *priv = fe->tuner_priv;
 
-	fe = i2c_get_clientdata(client);
+	ida_simple_remove(&pll_ida, priv->nr);
 	dvb_pll_release(fe);
 	return 0;
 }
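
dvb-pll now draws its per-device number from an IDA rather than an ever-growing counter, so the slot is returned on dvb_pll_remove() and on the attach error path instead of leaking. The general lifecycle, as a hedged sketch with hypothetical names:

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	static int example_acquire(void)
	{
		/* smallest free id in [0, 64), or a negative errno */
		return ida_simple_get(&example_ida, 0, 64, GFP_KERNEL);
	}

	static void example_release(int nr)
	{
		/* id becomes available to the next caller */
		ida_simple_remove(&example_ida, nr);
	}
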
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 63c9ac2..8b1ae1d 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -60,8 +60,9 @@
 	tristate "NXP TDA1997x HDMI receiver"
 	depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
 	depends on SND_SOC
-	select SND_PCM
 	select HDMI
+	select SND_PCM
+	select V4L2_FWNODE
 	---help---
 	  V4L2 subdevice driver for the NXP TDA1997x HDMI receivers.
 
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index f01964c..a4b0a89 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -2297,8 +2297,8 @@ static int adv76xx_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
 		edid->blocks = 2;
 		return -E2BIG;
 	}
-	pa = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, &spa_loc);
-	err = cec_phys_addr_validate(pa, &pa, NULL);
+	pa = v4l2_get_edid_phys_addr(edid->edid, edid->blocks * 128, &spa_loc);
+	err = v4l2_phys_addr_validate(pa, &pa, NULL);
 	if (err)
 		return err;
 
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index bb43a75..58662ba 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -791,8 +791,8 @@ static int edid_write_hdmi_segment(struct v4l2_subdev *sd, u8 port)
 		return 0;
 	}
 
-	pa = cec_get_edid_phys_addr(edid, 256, &spa_loc);
-	err = cec_phys_addr_validate(pa, &pa, NULL);
+	pa = v4l2_get_edid_phys_addr(edid, 256, &spa_loc);
+	err = v4l2_phys_addr_validate(pa, &pa, NULL);
 	if (err)
 		return err;
 
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index d5c0ffc..a3bbef6 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -2787,9 +2787,14 @@ static int ov5640_probe(struct i2c_client *client,
 	/* request optional power down pin */
 	sensor->pwdn_gpio = devm_gpiod_get_optional(dev, "powerdown",
 						    GPIOD_OUT_HIGH);
+	if (IS_ERR(sensor->pwdn_gpio))
+		return PTR_ERR(sensor->pwdn_gpio);
+
 	/* request optional reset pin */
 	sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
 						     GPIOD_OUT_HIGH);
+	if (IS_ERR(sensor->reset_gpio))
+		return PTR_ERR(sensor->reset_gpio);
 
 	v4l2_i2c_subdev_init(&sensor->sd, client, &ov5640_subdev_ops);
 
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index 1722cda..34343bc 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -53,6 +53,8 @@
 #define		OV5645_CHIP_ID_HIGH_BYTE	0x56
 #define OV5645_CHIP_ID_LOW		0x300b
 #define		OV5645_CHIP_ID_LOW_BYTE		0x45
+#define OV5645_IO_MIPI_CTRL00		0x300e
+#define OV5645_PAD_OUTPUT00		0x3019
 #define OV5645_AWB_MANUAL_CONTROL	0x3406
 #define		OV5645_AWB_MANUAL_ENABLE	BIT(0)
 #define OV5645_AEC_PK_MANUAL		0x3503
@@ -63,6 +65,7 @@
 #define		OV5645_ISP_VFLIP		BIT(2)
 #define OV5645_TIMING_TC_REG21		0x3821
 #define		OV5645_SENSOR_MIRROR		BIT(1)
+#define OV5645_MIPI_CTRL00		0x4800
 #define OV5645_PRE_ISP_TEST_SETTING_1	0x503d
 #define		OV5645_TEST_PATTERN_MASK	0x3
 #define		OV5645_SET_TEST_PATTERN(x)	((x) & OV5645_TEST_PATTERN_MASK)
@@ -129,7 +132,6 @@ static const struct reg_value ov5645_global_init_setting[] = {
 	{ 0x3503, 0x07 },
 	{ 0x3002, 0x1c },
 	{ 0x3006, 0xc3 },
-	{ 0x300e, 0x45 },
 	{ 0x3017, 0x00 },
 	{ 0x3018, 0x00 },
 	{ 0x302e, 0x0b },
@@ -358,7 +360,10 @@ static const struct reg_value ov5645_global_init_setting[] = {
 	{ 0x3a1f, 0x14 },
 	{ 0x0601, 0x02 },
 	{ 0x3008, 0x42 },
-	{ 0x3008, 0x02 }
+	{ 0x3008, 0x02 },
+	{ OV5645_IO_MIPI_CTRL00, 0x40 },
+	{ OV5645_MIPI_CTRL00, 0x24 },
+	{ OV5645_PAD_OUTPUT00, 0x70 }
 };
 
 static const struct reg_value ov5645_setting_sxga[] = {
@@ -745,13 +750,9 @@ static int ov5645_s_power(struct v4l2_subdev *sd, int on)
 				goto exit;
 			}
 
-			ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
-					       OV5645_SYSTEM_CTRL0_STOP);
-			if (ret < 0) {
-				ov5645_set_power_off(ov5645);
-				goto exit;
-			}
+			usleep_range(500, 1000);
 		} else {
+			ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x58);
 			ov5645_set_power_off(ov5645);
 		}
 	}
@@ -1057,11 +1058,20 @@ static int ov5645_s_stream(struct v4l2_subdev *subdev, int enable)
 			dev_err(ov5645->dev, "could not sync v4l2 controls\n");
 			return ret;
 		}
+
+		ret = ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x45);
+		if (ret < 0)
+			return ret;
+
 		ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
 				       OV5645_SYSTEM_CTRL0_START);
 		if (ret < 0)
 			return ret;
 	} else {
+		ret = ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x40);
+		if (ret < 0)
+			return ret;
+
 		ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
 				       OV5645_SYSTEM_CTRL0_STOP);
 		if (ret < 0)
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 5bea31c..33a21d5 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -716,6 +716,11 @@ static int ov965x_set_gain(struct ov965x *ov965x, int auto_gain)
 		for (m = 6; m >= 0; m--)
 			if (gain >= (1 << m) * 16)
 				break;
+
+		/* Sanity check: don't adjust the gain with a negative value */
+		if (m < 0)
+			return -EINVAL;
+
 		rgain = (gain - ((1 << m) * 16)) / (1 << m);
 		rgain |= (((1 << m) - 1) << 4);
 
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 26070fb..e4c0a27 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -1789,7 +1789,7 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
 		return -E2BIG;
 	}
 	pa = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
-	err = cec_phys_addr_validate(pa, &pa, NULL);
+	err = v4l2_phys_addr_validate(pa, &pa, NULL);
 	if (err)
 		return err;
 
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 8b450fc..15a5e98 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -828,7 +828,7 @@ static int tvp5150_s_ctrl(struct v4l2_ctrl *ctrl)
 		return 0;
 	case V4L2_CID_HUE:
 		tvp5150_write(sd, TVP5150_HUE_CTL, ctrl->val);
-		break;
+		return 0;
 	case V4L2_CID_TEST_PATTERN:
 		decoder->enable = ctrl->val ? false : true;
 		tvp5150_selmux(sd);
diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
index cf1e526..8a1128c 100644
--- a/drivers/media/pci/saa7134/saa7134-i2c.c
+++ b/drivers/media/pci/saa7134/saa7134-i2c.c
@@ -351,7 +351,11 @@ static const struct i2c_client saa7134_client_template = {
 
 /* ----------------------------------------------------------- */
 
-/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */
+/*
+ * On Medion 7134 reading the SAA7134 chip config EEPROM needs DVB-T
+ * demod i2c gate closed due to an address clash between this EEPROM
+ * and the demod one.
+ */
 static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
 {
 	u8 subaddr = 0x7, dmdregval;
@@ -368,14 +372,14 @@ static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
 
 	ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2);
 	if ((ret == 2) && (dmdregval & 0x2)) {
-		pr_debug("%s: DVB-T demod i2c gate was left closed\n",
+		pr_debug("%s: DVB-T demod i2c gate was left open\n",
 			 dev->name);
 
 		data[0] = subaddr;
 		data[1] = (dmdregval & ~0x2);
 		if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1)
-			pr_err("%s: EEPROM i2c gate open failure\n",
-			  dev->name);
+			pr_err("%s: EEPROM i2c gate close failure\n",
+			       dev->name);
 	}
 }
 
diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
index 6d8e4af..8c56d4c 100644
--- a/drivers/media/pci/saa7146/hexium_gemini.c
+++ b/drivers/media/pci/saa7146/hexium_gemini.c
@@ -304,6 +304,9 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
 	ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER);
 	if (ret < 0) {
 		pr_err("cannot register capture v4l2 device. skipping.\n");
+		saa7146_vv_release(dev);
+		i2c_del_adapter(&hexium->i2c_adapter);
+		kfree(hexium);
 		return ret;
 	}
 
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 5ddb232..0fe9be9 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -819,6 +819,7 @@ static int fimc_is_probe(struct platform_device *pdev)
 		return -ENODEV;
 
 	is->pmu_regs = of_iomap(node, 0);
+	of_node_put(node);
 	if (!is->pmu_regs)
 		return -ENOMEM;
 
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index deb499f..b599353 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -498,6 +498,7 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
 			continue;
 
 		ret = fimc_md_parse_port_node(fmd, port, index);
+		of_node_put(port);
 		if (ret < 0) {
 			of_node_put(node);
 			goto rpm_put;
@@ -531,6 +532,7 @@ static int __of_get_csis_id(struct device_node *np)
 	if (!np)
 		return -EINVAL;
 	of_property_read_u32(np, "reg", &reg);
+	of_node_put(np);
 	return reg - FIMC_INPUT_MIPI_CSI2_0;
 }
 
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index 0273302..83086ee 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -37,7 +37,7 @@
 #define VIU_VERSION		"0.5.1"
 
 /* Allow building this driver with COMPILE_TEST */
-#ifndef CONFIG_PPC
+#if !defined(CONFIG_PPC) && !defined(CONFIG_MICROBLAZE)
 #define out_be32(v, a)	iowrite32be(a, (void __iomem *)v)
 #define in_be32(a)	ioread32be((void __iomem *)a)
 #endif
diff --git a/drivers/media/platform/msm/cvp/cvp_core_hfi.h b/drivers/media/platform/msm/cvp/cvp_core_hfi.h
index 5481ea2..054ccee 100644
--- a/drivers/media/platform/msm/cvp/cvp_core_hfi.h
+++ b/drivers/media/platform/msm/cvp/cvp_core_hfi.h
@@ -240,6 +240,7 @@ struct iris_hfi_device {
 	unsigned long scaled_rate;
 	struct msm_cvp_gov_data bus_vote;
 	bool power_enabled;
+	bool reg_dumped;
 	struct mutex lock;
 	msm_cvp_callback callback;
 	struct cvp_mem_addr iface_q_table;
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi.c b/drivers/media/platform/msm/cvp/cvp_hfi.c
index 24b6296..0fc5c12 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi.c
+++ b/drivers/media/platform/msm/cvp/cvp_hfi.c
@@ -505,7 +505,7 @@ static int __dsp_send_hfi_queue(struct iris_hfi_device *device)
 		device->dsp_iface_q_table.mem_data.size);
 	rc = cvp_dsp_send_cmd_hfi_queue(
 		(phys_addr_t *)device->dsp_iface_q_table.mem_data.dma_handle,
-		device->dsp_iface_q_table.mem_data.size);
+		device->dsp_iface_q_table.mem_data.size, device);
 	if (rc) {
 		dprintk(CVP_ERR, "%s: dsp hfi queue init failed\n", __func__);
 		return rc;
@@ -581,8 +581,10 @@ static int __dsp_shutdown(struct iris_hfi_device *device, u32 flags)
 {
 	int rc;
 
+	cvp_dsp_set_cvp_ssr();
+
 	if (!(device->dsp_flags & DSP_INIT)) {
-		dprintk(CVP_DBG, "%s: dsp not inited\n", __func__);
+		dprintk(CVP_WARN, "%s: dsp not inited\n", __func__);
 		return 0;
 	}
 
@@ -1387,7 +1389,7 @@ static void cvp_dump_csr(struct iris_hfi_device *dev)
 
 	if (!dev)
 		return;
-	if (!dev->power_enabled)
+	if (!dev->power_enabled || dev->reg_dumped)
 		return;
 	reg = __read_register(dev, CVP_WRAPPER_CPU_STATUS);
 	dprintk(CVP_ERR, "CVP_WRAPPER_CPU_STATUS: %x\n", reg);
@@ -1395,16 +1397,19 @@ static void cvp_dump_csr(struct iris_hfi_device *dev)
 	dprintk(CVP_ERR, "CVP_CPU_CS_SCIACMDARG0: %x\n", reg);
 	reg = __read_register(dev, CVP_WRAPPER_CPU_CLOCK_CONFIG);
 	dprintk(CVP_ERR, "CVP_WRAPPER_CPU_CLOCK_CONFIG: %x\n", reg);
+	reg = __read_register(dev, CVP_WRAPPER_CORE_CLOCK_CONFIG);
+	dprintk(CVP_ERR, "CVP_WRAPPER_CORE_CLOCK_CONFIG: %x\n", reg);
 	reg = __read_register(dev, CVP_WRAPPER_INTR_STATUS);
 	dprintk(CVP_ERR, "CVP_WRAPPER_INTR_STATUS: %x\n", reg);
 	reg = __read_register(dev, CVP_CPU_CS_H2ASOFTINT);
 	dprintk(CVP_ERR, "CVP_CPU_CS_H2ASOFTINT: %x\n", reg);
 	reg = __read_register(dev, CVP_CPU_CS_A2HSOFTINT);
 	dprintk(CVP_ERR, "CVP_CPU_CS_A2HSOFTINT: %x\n", reg);
-	reg = __read_register(dev, CVP_CC_MVS0C_GDSCR);
-	dprintk(CVP_ERR, "CVP_CC_MVS0C_GDSCR: %x\n", reg);
 	reg = __read_register(dev, CVP_CC_MVS1C_GDSCR);
 	dprintk(CVP_ERR, "CVP_CC_MVS1C_GDSCR: %x\n", reg);
+	reg = __read_register(dev, CVP_CC_MVS1C_CBCR);
+	dprintk(CVP_ERR, "CVP_CC_MVS1C_CBCR: %x\n", reg);
+	dev->reg_dumped = true;
 }
 
 static int iris_hfi_flush_debug_queue(void *dev)
@@ -1439,6 +1444,9 @@ static int __set_clocks(struct iris_hfi_device *device, u32 freq)
 	iris_hfi_for_each_clock(device, cl) {
 		if (cl->has_scaling) {/* has_scaling */
 			device->clk_freq = freq;
+			if (msm_cvp_clock_voting)
+				freq = msm_cvp_clock_voting;
+
 			rc = clk_set_rate(cl->clk, freq);
 			if (rc) {
 				dprintk(CVP_ERR,
@@ -2231,6 +2239,7 @@ static int iris_hfi_core_init(void *device)
 	}
 
 	__set_state(dev, IRIS_STATE_INIT);
+	dev->reg_dumped = false;
 
 	dprintk(CVP_DBG, "Dev_Virt: %pa, Reg_Virt: %pK\n",
 		&dev->cvp_hal_data->firmware_base,
@@ -2315,7 +2324,7 @@ static int iris_hfi_core_release(void *dev)
 	}
 
 	mutex_lock(&device->lock);
-	dprintk(CVP_DBG, "Core releasing\n");
+	dprintk(CVP_WARN, "Core releasing\n");
 	if (device->res->pm_qos_latency_us &&
 		pm_qos_request_active(&device->qos))
 		pm_qos_remove_request(&device->qos);
@@ -4429,7 +4438,9 @@ static inline int __suspend(struct iris_hfi_device *device)
 
 static void power_off_iris2(struct iris_hfi_device *device)
 {
-	u32 lpi_status, reg_status = 0, count = 0, max_count = 10;
+	u32 lpi_status, reg_status = 0, count = 0, max_count = 1000;
+	u32 pc_ready, wfi_status, sbm_ln0_low;
+	u32 main_sbm_ln0_low, main_sbm_ln1_high;
 
 	if (!device->power_enabled)
 		return;
@@ -4448,18 +4459,26 @@ static void power_off_iris2(struct iris_hfi_device *device)
 			 __read_register(device,
 				CVP_AON_WRAPPER_MVP_NOC_LPI_STATUS);
 		reg_status = lpi_status & BIT(0);
-		dprintk(CVP_DBG,
-			"Noc: lpi_status %x noc_status %x (count %d)\n",
-			lpi_status, reg_status, count);
-
 		/* Wait for noc lpi status to be set */
 		usleep_range(50, 100);
 		count++;
 	}
+	dprintk(CVP_DBG,
+		"Noc: lpi_status %x noc_status %x (count %d)\n",
+		lpi_status, reg_status, count);
 	if (count == max_count) {
+		wfi_status = __read_register(device, CVP_WRAPPER_CPU_STATUS);
+		pc_ready = __read_register(device, CVP_CTRL_STATUS);
+		sbm_ln0_low =
+			__read_register(device, CVP_NOC_SBM_SENSELN0_LOW);
+		main_sbm_ln0_low = __read_register(device,
+				CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN0_LOW);
+		main_sbm_ln1_high = __read_register(device,
+				CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN1_HIGH);
 		dprintk(CVP_WARN,
-			"NOC not in qaccept status %x %x\n",
-			reg_status, lpi_status);
+			"NOC not in qaccept status %x %x %x %x %x %x %x\n",
+			reg_status, lpi_status, wfi_status, pc_ready,
+			sbm_ln0_low, main_sbm_ln0_low, main_sbm_ln1_high);
 	}
 
 	/* HPG 6.1.2 Step 3, debug bridge to low power */
@@ -4471,14 +4490,13 @@ static void power_off_iris2(struct iris_hfi_device *device)
 		lpi_status = __read_register(device,
 				 CVP_WRAPPER_DEBUG_BRIDGE_LPI_STATUS);
 		reg_status = lpi_status & 0x7;
-		dprintk(CVP_DBG,
-			"DBLP Set : lpi_status %d reg_status %d (count %d)\n",
-			lpi_status, reg_status, count);
-
 		/* Wait for debug bridge lpi status to be set */
 		usleep_range(50, 100);
 		count++;
 	}
+	dprintk(CVP_DBG,
+		"DBLP Set : lpi_status %d reg_status %d (count %d)\n",
+		lpi_status, reg_status, count);
 	if (count == max_count) {
 		dprintk(CVP_WARN,
 			"DBLP Set: status %x %x\n", reg_status, lpi_status);
@@ -4492,12 +4510,12 @@ static void power_off_iris2(struct iris_hfi_device *device)
 	while (lpi_status && count < max_count) {
 		lpi_status = __read_register(device,
 				 CVP_WRAPPER_DEBUG_BRIDGE_LPI_STATUS);
-		dprintk(CVP_DBG,
-			"DBLP Release: lpi_status %d(count %d)\n",
-			lpi_status, count);
 		usleep_range(50, 100);
 		count++;
 	}
+	dprintk(CVP_DBG,
+		"DBLP Release: lpi_status %d(count %d)\n",
+		lpi_status, count);
 	if (count == max_count) {
 		dprintk(CVP_WARN,
 			"DBLP Release: lpi_status %x\n", lpi_status);
@@ -4665,7 +4683,6 @@ static void __unload_fw(struct iris_hfi_device *device)
 	if (device->state != IRIS_STATE_DEINIT)
 		flush_workqueue(device->iris_pm_workq);
 
-	__vote_buses(device, NULL, 0);
 	subsystem_put(device->resources.fw.cookie);
 	__interface_queues_release(device);
 	call_iris_op(device, power_off, device);
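
power_off_iris2() above raises max_count to 1000 and moves the per-iteration debug prints out of the polling loops, trading log volume for a longer grace period. The underlying shape is a bounded register poll; a generic hedged sketch with a hypothetical register:

	/* Sketch: poll a status bit with a bounded number of attempts. */
	u32 status = 0, count = 0, max_count = 1000;

	while (!(status & BIT(0)) && count < max_count) {
		status = __read_register(device, EXAMPLE_STATUS_REG);
		usleep_range(50, 100);	/* 50-100 us between reads */
		count++;
	}
	if (count == max_count)
		dprintk(CVP_WARN, "timed out, status %x\n", status);
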
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi_helper.h b/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
index 9638102..b341fdd 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
@@ -223,7 +223,7 @@ struct cvp_hfi_cmd_sys_get_property_packet {
 
 enum HFI_SESSION_TYPE {
 	HFI_SESSION_CV = 1,
-	HFI_SESSION_LRME,
+	HFI_SESSION_DME,
 	HFI_SESSION_ODT,
 	HFI_SESSION_FD
 };
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi_io.h b/drivers/media/platform/msm/cvp/cvp_hfi_io.h
index 5fc4233..bdd44c0 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi_io.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi_io.h
@@ -105,11 +105,12 @@
 #define CVP_WRAPPER_INTR_CLEAR_A2H_BMSK	0x4
 #define CVP_WRAPPER_INTR_CLEAR_A2H_SHFT	0x2
 #define CVP_WRAPPER_CPU_STATUS		(CVP_WRAPPER_TZ_BASE_OFFS + 0x10)
-#define CVP_WRAPPER_CPU_CLOCK_CONFIG	(CVP_WRAPPER_TZ_BASE_OFFS + 0x50)
 #define CVP_WRAPPER_CPU_CGC_DIS	(CVP_WRAPPER_BASE_OFFS + 0x2010)
 
+#define CVP_WRAPPER_CPU_CLOCK_CONFIG	(CVP_WRAPPER_BASE_OFFS + 0x50)
 #define CVP_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL	(CVP_WRAPPER_BASE_OFFS + 0x54)
 #define CVP_WRAPPER_DEBUG_BRIDGE_LPI_STATUS	(CVP_WRAPPER_BASE_OFFS + 0x58)
+#define CVP_WRAPPER_CORE_CLOCK_CONFIG		(CVP_WRAPPER_BASE_OFFS + 0x88)
 
 #define CVP_CTRL_INIT		CVP_CPU_CS_SCIACMD
 
@@ -177,8 +178,13 @@
 #define CVP_NOC_ERR_ERRLOG2_HIGH_OFFS	(CVP_NOC_BASE_OFFS + 0xB4)
 #define CVP_NOC_ERR_ERRLOG3_LOW_OFFS	(CVP_NOC_BASE_OFFS + 0xB8)
 #define CVP_NOC_ERR_ERRLOG3_HIGH_OFFS	(CVP_NOC_BASE_OFFS + 0xBC)
+#define CVP_NOC_SBM_SENSELN0_LOW	(CVP_NOC_BASE_OFFS + 0x300)
 
 #define CVP_NOC_CORE_BASE_OFFS			0x00010000
+#define CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN0_LOW \
+		(CVP_NOC_CORE_BASE_OFFS + 0x1100)
+#define CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN1_HIGH \
+		(CVP_NOC_CORE_BASE_OFFS + 0x110C)
 #define CVP_NOC_CORE_ERR_SWID_LOW_OFFS \
 		(CVP_NOC_CORE_BASE_OFFS + 0x1200)
 #define CVP_NOC_CORE_ERR_SWID_HIGH_OFFS \
@@ -213,4 +219,5 @@
 #define CVP_CC_BASE_OFFS			0x000F0000
 #define CVP_CC_MVS0C_GDSCR			(CVP_CC_BASE_OFFS + 0xBF8)
 #define CVP_CC_MVS1C_GDSCR			(CVP_CC_BASE_OFFS + 0xC98)
+#define CVP_CC_MVS1C_CBCR			(CVP_CC_BASE_OFFS + 0xCD4)
 #endif
diff --git a/drivers/media/platform/msm/cvp/hfi_response_handler.c b/drivers/media/platform/msm/cvp/hfi_response_handler.c
index 00f2d7c..fc4b075 100644
--- a/drivers/media/platform/msm/cvp/hfi_response_handler.c
+++ b/drivers/media/platform/msm/cvp/hfi_response_handler.c
@@ -407,14 +407,6 @@ static int hfi_process_session_cvp_operation_config(u32 device_id,
 	return 0;
 }
 
-static int hfi_process_session_cvp_dfs(u32 device_id,
-	void *pkt,
-	struct msm_cvp_cb_info *info)
-{
-	dprintk(CVP_ERR, "Deprecated DFS handling path\n");
-		return -EINVAL;
-}
-
 static struct msm_cvp_inst *cvp_get_inst_from_id(struct msm_cvp_core *core,
 	unsigned int session_id)
 {
@@ -477,8 +469,7 @@ static int hfi_process_session_cvp_msg(u32 device_id,
 	}
 
 	if (inst->deprecate_bitmask) {
-		if (pkt->packet_type == HFI_MSG_SESSION_CVP_DFS
-			|| pkt->packet_type == HFI_MSG_SESSION_CVP_DME
+		if (pkt->packet_type == HFI_MSG_SESSION_CVP_DME
 			|| pkt->packet_type == HFI_MSG_SESSION_CVP_ICA
 			|| pkt->packet_type == HFI_MSG_SESSION_CVP_FD) {
 			u64 ktid;
@@ -496,7 +487,7 @@ static int hfi_process_session_cvp_msg(u32 device_id,
 					inst->deprecate_bitmask);
 	}
 
-	sess_msg = kmem_cache_alloc(inst->session_queue.msg_cache, GFP_KERNEL);
+	sess_msg = kmem_cache_alloc(cvp_driver->msg_cache, GFP_KERNEL);
 	if (sess_msg == NULL) {
 		dprintk(CVP_ERR, "%s runs out of msg cache memory\n", __func__);
 		return -ENOMEM;
@@ -526,7 +517,7 @@ static int hfi_process_session_cvp_msg(u32 device_id,
 
 error_handle_msg:
 	spin_unlock(&inst->session_queue.lock);
-	kmem_cache_free(inst->session_queue.msg_cache, sess_msg);
+	kmem_cache_free(cvp_driver->msg_cache, sess_msg);
 	return -ENOMEM;
 }
 
@@ -619,26 +610,20 @@ static int _deprecated_hfi_msg_process(u32 device_id,
 	struct msm_cvp_cb_info *info,
 	struct msm_cvp_inst *inst)
 {
-	if (pkt->packet_type == HFI_MSG_SESSION_CVP_DFS)
-		if (test_and_clear_bit(DFS_BIT_OFFSET,
-				&inst->deprecate_bitmask))
-			return hfi_process_session_cvp_dfs(
-					device_id, (void *)pkt, info);
-
 	if (pkt->packet_type == HFI_MSG_SESSION_CVP_DME)
-		if (test_and_clear_bit(DME_BIT_OFFSET,
+		if (test_bit(DME_BIT_OFFSET,
 				&inst->deprecate_bitmask))
 			return hfi_process_session_cvp_dme(
 					device_id, (void *)pkt, info);
 
 	if (pkt->packet_type == HFI_MSG_SESSION_CVP_ICA)
-		if (test_and_clear_bit(ICA_BIT_OFFSET,
+		if (test_bit(ICA_BIT_OFFSET,
 				&inst->deprecate_bitmask))
 			return hfi_process_session_cvp_ica(
 				device_id, (void *)pkt, info);
 
 	if (pkt->packet_type == HFI_MSG_SESSION_CVP_FD)
-		if (test_and_clear_bit(FD_BIT_OFFSET,
+		if (test_bit(FD_BIT_OFFSET,
 				&inst->deprecate_bitmask))
 			return hfi_process_session_cvp_fd(
 				device_id, (void *)pkt, info);
diff --git a/drivers/media/platform/msm/cvp/msm_cvp.c b/drivers/media/platform/msm/cvp/msm_cvp.c
index 8cb781d..d90ca95 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp.c
@@ -10,6 +10,7 @@
 
 struct cvp_power_level {
 	unsigned long core_sum;
+	unsigned long op_core_sum;
 	unsigned long bw_sum;
 };
 
@@ -19,11 +20,19 @@ void print_internal_buffer(u32 tag, const char *str,
 	if (!(tag & msm_cvp_debug) || !inst || !cbuf)
 		return;
 
-	dprintk(tag,
+	if (cbuf->smem.dma_buf) {
+		dprintk(tag,
 		"%s: %x : idx %2d fd %d off %d %s size %d flags %#x iova %#x",
 		str, hash32_ptr(inst->session), cbuf->buf.index, cbuf->buf.fd,
 		cbuf->buf.offset, cbuf->smem.dma_buf->name, cbuf->buf.size,
 		cbuf->buf.flags, cbuf->smem.device_addr);
+	} else {
+		dprintk(tag,
+		"%s: %x : idx %2d fd %d off %d size %d flags %#x iova %#x",
+		str, hash32_ptr(inst->session), cbuf->buf.index, cbuf->buf.fd,
+		cbuf->buf.offset, cbuf->buf.size, cbuf->buf.flags,
+		cbuf->smem.device_addr);
+	}
 }
 
 static enum hal_buffer get_hal_buftype(const char *str, unsigned int type)
@@ -175,7 +184,7 @@ static int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst,
 		return -EINVAL;
 	}
 
-	cbuf = kmem_cache_zalloc(inst->internal_buf_cache, GFP_KERNEL);
+	cbuf = kmem_cache_zalloc(cvp_driver->internal_buf_cache, GFP_KERNEL);
 	if (!cbuf) {
 		return -ENOMEM;
 	}
@@ -219,7 +228,7 @@ static int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst,
 exit:
 	if (cbuf->smem.device_addr)
 		msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
-	kmem_cache_free(inst->internal_buf_cache, cbuf);
+	kmem_cache_free(cvp_driver->internal_buf_cache, cbuf);
 	cbuf = NULL;
 
 	return rc;
@@ -277,7 +286,7 @@ static int msm_cvp_unmap_buf_dsp(struct msm_cvp_inst *inst,
 	list_del(&cbuf->list);
 	mutex_unlock(&inst->cvpdspbufs.lock);
 
-	kmem_cache_free(inst->internal_buf_cache, cbuf);
+	kmem_cache_free(cvp_driver->internal_buf_cache, cbuf);
 	return rc;
 }
 
@@ -309,7 +318,7 @@ static int msm_cvp_map_buf_cpu_d(struct msm_cvp_inst *inst,
 		return -EINVAL;
 	}
 
-	cbuf = kmem_cache_zalloc(inst->internal_buf_cache, GFP_KERNEL);
+	cbuf = kmem_cache_zalloc(cvp_driver->internal_buf_cache, GFP_KERNEL);
 	if (!cbuf)
 		return -ENOMEM;
 
@@ -339,7 +348,7 @@ static int msm_cvp_map_buf_cpu_d(struct msm_cvp_inst *inst,
 exit:
 	if (cbuf->smem.device_addr)
 		msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
-	kmem_cache_free(inst->internal_buf_cache, cbuf);
+	kmem_cache_free(cvp_driver->internal_buf_cache, cbuf);
 	cbuf = NULL;
 
 	return rc;
@@ -411,9 +420,9 @@ static int msm_cvp_map_buf_user_persist(struct msm_cvp_inst *inst,
 	rc = msm_cvp_smem_map_dma_buf(inst, &cbuf->smem);
 	if (rc) {
 		dprintk(CVP_ERR,
-		"%s: %x : fd %d %s size %d",
+		"%s: %x : fd %d size %d",
 		"map persist failed", hash32_ptr(inst->session), cbuf->smem.fd,
-		cbuf->smem.dma_buf->name, cbuf->smem.size);
+		cbuf->smem.size);
 		goto exit;
 	}
 
@@ -468,7 +477,7 @@ static int msm_cvp_map_buf_cpu(struct msm_cvp_inst *inst,
 	if (!rc && *iova != 0)
 		return 0;
 
-	cbuf = kmem_cache_zalloc(inst->internal_buf_cache, GFP_KERNEL);
+	cbuf = kmem_cache_zalloc(cvp_driver->internal_buf_cache, GFP_KERNEL);
 	if (!cbuf)
 		return -ENOMEM;
 
@@ -502,7 +511,7 @@ static int msm_cvp_map_buf_cpu(struct msm_cvp_inst *inst,
 
 	*iova = cbuf->smem.device_addr + cbuf->buf.offset;
 
-	frame_buf = kmem_cache_zalloc(inst->frame_buf_cache, GFP_KERNEL);
+	frame_buf = kmem_cache_zalloc(cvp_driver->frame_buf_cache, GFP_KERNEL);
 	if (!frame_buf) {
 		rc = -ENOMEM;
 		goto exit2;
@@ -524,7 +533,7 @@ static int msm_cvp_map_buf_cpu(struct msm_cvp_inst *inst,
 	list_del(&cbuf->list);
 	mutex_unlock(&inst->cvpcpubufs.lock);
 exit:
-	kmem_cache_free(inst->internal_buf_cache, cbuf);
+	kmem_cache_free(cvp_driver->internal_buf_cache, cbuf);
 	cbuf = NULL;
 
 	return rc;
@@ -550,7 +559,7 @@ static void __unmap_buf(struct msm_cvp_inst *inst,
 			list_del(&cbuf->list);
 			print_internal_buffer(CVP_DBG, "unmap", inst, cbuf);
 			msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
-			kmem_cache_free(inst->internal_buf_cache, cbuf);
+			kmem_cache_free(cvp_driver->internal_buf_cache, cbuf);
 			break;
 		}
 	}
@@ -580,12 +589,12 @@ void msm_cvp_unmap_buf_cpu(struct msm_cvp_inst *inst, u64 ktid)
 						&frame->bufs.list, list) {
 				list_del(&frame_buf->list);
 				__unmap_buf(inst, frame_buf);
-				kmem_cache_free(inst->frame_buf_cache,
+				kmem_cache_free(cvp_driver->frame_buf_cache,
 						frame_buf);
 			}
 			mutex_unlock(&frame->bufs.lock);
 			DEINIT_MSM_CVP_LIST(&frame->bufs);
-			kmem_cache_free(inst->frame_cache, frame);
+			kmem_cache_free(cvp_driver->frame_cache, frame);
 			break;
 		}
 	}
@@ -666,7 +675,8 @@ static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
 			__func__, inst->session_queue.state,
 			inst->session_queue.msg_count);
 
-		if (inst->state >= MSM_CVP_CLOSE_DONE) {
+		if (inst->state >= MSM_CVP_CLOSE_DONE ||
+				sq->state != QUEUE_ACTIVE) {
 			rc = -ECONNRESET;
 			goto exit;
 		}
@@ -685,7 +695,7 @@ static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
 
 		memcpy(out_pkt, &msg->pkt,
 			sizeof(struct cvp_hfi_msg_session_hdr));
-		kmem_cache_free(inst->session_queue.msg_cache, msg);
+		kmem_cache_free(cvp_driver->msg_cache, msg);
 	}
 
 exit:
@@ -720,7 +730,7 @@ static int msm_cvp_map_user_persist(struct msm_cvp_inst *inst,
 		if (inst->session_type == MSM_CVP_USER) {
 			new_buf->dbuf = 0;
 		} else if (inst->session_type == MSM_CVP_KERNEL) {
-			new_buf->fd = -1;
+			new_buf->fd = 0;
 		} else if (inst->session_type >= MSM_CVP_UNKNOWN) {
 			dprintk(CVP_ERR,
 				"%s: unknown session type %d\n",
@@ -768,7 +778,7 @@ static int msm_cvp_map_buf(struct msm_cvp_inst *inst,
 		cmd_hdr->client_data.kdata1 = (u32)ktid;
 		cmd_hdr->client_data.kdata2 = (u32)(ktid >> 32);
 
-		frame = kmem_cache_zalloc(inst->frame_cache, GFP_KERNEL);
+		frame = kmem_cache_zalloc(cvp_driver->frame_cache, GFP_KERNEL);
 		if (!frame)
 			return -ENOMEM;
 
@@ -799,7 +809,7 @@ static int msm_cvp_map_buf(struct msm_cvp_inst *inst,
 			if (inst->session_type == MSM_CVP_USER) {
 				new_buf->dbuf = 0;
 			} else if (inst->session_type == MSM_CVP_KERNEL) {
-				new_buf->fd = -1;
+				new_buf->fd = 0;
 			} else if (inst->session_type >= MSM_CVP_UNKNOWN) {
 				dprintk(CVP_ERR,
 					"%s: unknown session type %d\n",
@@ -823,11 +833,11 @@ static int msm_cvp_map_buf(struct msm_cvp_inst *inst,
 					list_del(&frame_buf->list);
 					__unmap_buf(inst, frame_buf);
 					kmem_cache_free(
-					inst->frame_buf_cache,
+					cvp_driver->frame_buf_cache,
 					frame_buf);
 				}
 				DEINIT_MSM_CVP_LIST(&frame->bufs);
-				kmem_cache_free(inst->frame_cache,
+				kmem_cache_free(cvp_driver->frame_cache,
 						frame);
 				return rc;
 			}
@@ -975,7 +985,7 @@ static int msm_cvp_session_process_hfi(
 static int msm_cvp_thread_fence_run(void *data)
 {
 	int i, rc = 0;
-	unsigned long timeout_ms = 1000;
+	unsigned long timeout_ms = 100;
 	int synx_obj;
 	struct cvp_hfi_device *hdev;
 	struct msm_cvp_fence_thread_data *fence_thread_data;
@@ -1028,21 +1038,21 @@ static int msm_cvp_thread_fence_run(void *data)
 					dprintk(CVP_ERR,
 						"%s: synx_import failed\n",
 						__func__);
-					goto exit;
+					synx_state = SYNX_STATE_SIGNALED_ERROR;
 				}
 				rc = synx_wait(synx_obj, timeout_ms);
 				if (rc) {
 					dprintk(CVP_ERR,
 						"%s: synx_wait failed\n",
 						__func__);
-					goto exit;
+					synx_state = SYNX_STATE_SIGNALED_ERROR;
 				}
 				rc = synx_release(synx_obj);
 				if (rc) {
 					dprintk(CVP_ERR,
 						"%s: synx_release failed\n",
 						__func__);
-					goto exit;
+					synx_state = SYNX_STATE_SIGNALED_ERROR;
 				}
 				if (i == 0) {
 					ica_enabled = 1;
@@ -1055,18 +1065,19 @@ static int msm_cvp_thread_fence_run(void *data)
 			}
 		}
 
-		rc = call_hfi_op(hdev, session_send,
-				(void *)inst->session, in_pkt);
-		if (rc) {
-			dprintk(CVP_ERR,
-				"%s: Failed in call_hfi_op %d, %x\n",
-				__func__, in_pkt->pkt_data[0],
-				in_pkt->pkt_data[1]);
-			synx_state = SYNX_STATE_SIGNALED_ERROR;
-		}
-
 		if (synx_state != SYNX_STATE_SIGNALED_ERROR) {
-			rc = wait_for_sess_signal_receipt(inst,
+			mutex_lock(&inst->fence_lock);
+			rc = call_hfi_op(hdev, session_send,
+					(void *)inst->session, in_pkt);
+			if (rc) {
+				dprintk(CVP_ERR,
+					"%s: Failed in call_hfi_op %d, %x\n",
+					__func__, in_pkt->pkt_data[0],
+					in_pkt->pkt_data[1]);
+				synx_state = SYNX_STATE_SIGNALED_ERROR;
+			}
+
+			rc = wait_for_sess_signal_receipt_fence(inst,
 					HAL_SESSION_DME_FRAME_CMD_DONE);
 			if (rc) {
 				dprintk(CVP_ERR,
@@ -1074,6 +1085,7 @@ static int msm_cvp_thread_fence_run(void *data)
 				__func__, rc);
 				synx_state = SYNX_STATE_SIGNALED_ERROR;
 			}
+			mutex_unlock(&inst->fence_lock);
 		}
 
 		if (ica_enabled) {
@@ -1081,26 +1093,20 @@ static int msm_cvp_thread_fence_run(void *data)
 			if (rc) {
 				dprintk(CVP_ERR, "%s: synx_import failed\n",
 					__func__);
-				goto exit;
+				synx_state = SYNX_STATE_SIGNALED_ERROR;
 			}
 			rc = synx_signal(synx_obj, synx_state);
 			if (rc) {
 				dprintk(CVP_ERR, "%s: synx_signal failed\n",
 					__func__);
-				goto exit;
+				synx_state = SYNX_STATE_SIGNALED_ERROR;
 			}
-			if (synx_get_status(synx_obj) !=
-				SYNX_STATE_SIGNALED_SUCCESS) {
-				dprintk(CVP_ERR,
-					"%s: synx_get_status failed\n",
-					__func__);
-				goto exit;
-			}
+
 			rc = synx_release(synx_obj);
 			if (rc) {
 				dprintk(CVP_ERR, "%s: synx_release failed\n",
 					__func__);
-				goto exit;
+				synx_state = SYNX_STATE_SIGNALED_ERROR;
 			}
 		}
 
@@ -1109,18 +1115,18 @@ static int msm_cvp_thread_fence_run(void *data)
 				&synx_obj);
 		if (rc) {
 			dprintk(CVP_ERR, "%s: synx_import failed\n", __func__);
-			goto exit;
+			synx_state = SYNX_STATE_SIGNALED_ERROR;
 		}
 		rc = synx_signal(synx_obj, synx_state);
 		if (rc) {
 			dprintk(CVP_ERR, "%s: synx_signal failed\n", __func__);
-			goto exit;
+			synx_state = SYNX_STATE_SIGNALED_ERROR;
 		}
 		rc = synx_release(synx_obj);
 		if (rc) {
 			dprintk(CVP_ERR, "%s: synx_release failed\n",
 				__func__);
-			goto exit;
+			synx_state = SYNX_STATE_SIGNALED_ERROR;
 		}
 		break;
 	}
@@ -1160,6 +1166,7 @@ static int msm_cvp_thread_fence_run(void *data)
 			}
 		}
 
+		mutex_lock(&inst->fence_lock);
 		rc = call_hfi_op(hdev, session_send,
 				(void *)inst->session, in_pkt);
 		if (rc) {
@@ -1171,15 +1178,16 @@ static int msm_cvp_thread_fence_run(void *data)
 		}
 
 		if (synx_state != SYNX_STATE_SIGNALED_ERROR) {
-			rc = wait_for_sess_signal_receipt(inst,
+			rc = wait_for_sess_signal_receipt_fence(inst,
 					HAL_SESSION_ICA_FRAME_CMD_DONE);
-			if (rc)	{
+			if (rc) {
 				dprintk(CVP_ERR,
 				"%s: wait for signal failed, rc %d\n",
 				__func__, rc);
 				synx_state = SYNX_STATE_SIGNALED_ERROR;
 			}
 		}
+		mutex_unlock(&inst->fence_lock);
 
 		rc = synx_import(fence[2], fence[3], &synx_obj);
 		if (rc) {
@@ -1231,6 +1239,7 @@ static int msm_cvp_thread_fence_run(void *data)
 			}
 		}
 
+		mutex_lock(&inst->fence_lock);
 		rc = call_hfi_op(hdev, session_send,
 				(void *)inst->session, in_pkt);
 		if (rc) {
@@ -1242,15 +1251,16 @@ static int msm_cvp_thread_fence_run(void *data)
 		}
 
 		if (synx_state != SYNX_STATE_SIGNALED_ERROR) {
-			rc = wait_for_sess_signal_receipt(inst,
+			rc = wait_for_sess_signal_receipt_fence(inst,
 					HAL_SESSION_FD_FRAME_CMD_DONE);
-			if (rc)	{
+			if (rc) {
 				dprintk(CVP_ERR,
 				"%s: wait for signal failed, rc %d\n",
 				__func__, rc);
 				synx_state = SYNX_STATE_SIGNALED_ERROR;
 			}
 		}
+		mutex_unlock(&inst->fence_lock);
 
 		for (i = start_out; i <  start_out + out_fence_num; i++) {
 			if (fence[(i<<1)]) {
@@ -1289,7 +1299,7 @@ static int msm_cvp_thread_fence_run(void *data)
 	}
 
 exit:
-	kmem_cache_free(inst->fence_data_cache, fence_thread_data);
+	kmem_cache_free(cvp_driver->fence_data_cache, fence_thread_data);
 	inst->cur_cmd_type = 0;
 	cvp_put_inst(inst);
 	do_exit(rc);
@@ -1321,7 +1331,7 @@ static int msm_cvp_session_process_hfi_fence(
 		return -ECONNRESET;
 
 	inst->cur_cmd_type = CVP_KMD_SEND_FENCE_CMD_PKT;
-	fence_thread_data = kmem_cache_alloc(inst->fence_data_cache,
+	fence_thread_data = kmem_cache_alloc(cvp_driver->fence_data_cache,
 			GFP_KERNEL);
 	if (!fence_thread_data) {
 		dprintk(CVP_ERR, "%s: fence_thread_data alloc failed\n",
@@ -1354,7 +1364,7 @@ static int msm_cvp_session_process_hfi_fence(
 
 	rc = msm_cvp_map_buf(inst, in_pkt, offset, buf_num);
 	if (rc)
-		goto exit;
+		goto free_and_exit;
 
 	thread_num = thread_num + 1;
 	fence_thread_data->inst = inst;
@@ -1367,14 +1377,15 @@ static int msm_cvp_session_process_hfi_fence(
 	thread = kthread_run(msm_cvp_thread_fence_run,
 			fence_thread_data, thread_fence_name);
 	if (!thread) {
-		kmem_cache_free(inst->fence_data_cache, fence_thread_data);
 		dprintk(CVP_ERR, "%s fail to create kthread\n", __func__);
 		rc = -ECHILD;
-		goto exit;
+		goto free_and_exit;
 	}
 
 	return 0;
 
+free_and_exit:
+	kmem_cache_free(cvp_driver->fence_data_cache, fence_thread_data);
 exit:
 	inst->cur_cmd_type = 0;
 	cvp_put_inst(s);
@@ -1425,6 +1436,108 @@ static inline int max_3(unsigned int a, unsigned int b, unsigned int c)
 	return (a >= b) ? ((a >= c) ? a : c) : ((b >= c) ? b : c);
 }
 
+static bool is_subblock_profile_existed(struct msm_cvp_inst *inst)
+{
+	return (inst->prop.od_cycles ||
+			inst->prop.mpu_cycles ||
+			inst->prop.fdu_cycles ||
+			inst->prop.ica_cycles);
+}
+
+static void aggregate_power_update(struct msm_cvp_core *core,
+	struct cvp_power_level *nrt_pwr,
+	struct cvp_power_level *rt_pwr,
+	unsigned int max_clk_rate)
+{
+	struct msm_cvp_inst *inst;
+	int i;
+	unsigned long fdu_sum[2] = {0}, od_sum[2] = {0}, mpu_sum[2] = {0};
+	unsigned long ica_sum[2] = {0}, fw_sum[2] = {0};
+	unsigned long op_fdu_max[2] = {0}, op_od_max[2] = {0};
+	unsigned long op_mpu_max[2] = {0}, op_ica_max[2] = {0};
+	unsigned long op_fw_max[2] = {0}, bw_sum[2] = {0}, op_bw_max[2] = {0};
+
+	list_for_each_entry(inst, &core->instances, list) {
+		if (inst->state == MSM_CVP_CORE_INVALID ||
+			inst->state == MSM_CVP_CORE_UNINIT ||
+			!is_subblock_profile_existed(inst))
+			continue;
+		if (inst->prop.priority <= CVP_RT_PRIO_THRESHOLD) {
+			/* Non-realtime session use index 0 */
+			i = 0;
+		} else {
+			i = 1;
+		}
+		dprintk(CVP_PROF, "pwrUpdate %pK fdu %u od %u mpu %u ica %u\n",
+			inst, inst->prop.fdu_cycles,
+			inst->prop.od_cycles,
+			inst->prop.mpu_cycles,
+			inst->prop.ica_cycles);
+
+		dprintk(CVP_PROF, "pwrUpdate fw %u fdu_o %u od_o %u mpu_o %u\n",
+			inst->prop.fw_cycles,
+			inst->prop.fdu_op_cycles,
+			inst->prop.od_op_cycles,
+			inst->prop.mpu_op_cycles);
+
+		dprintk(CVP_PROF, "pwrUpdate ica_o %u fw_o %u bw %u bw_o %u\n",
+			inst->prop.ica_op_cycles,
+			inst->prop.fw_op_cycles,
+			inst->prop.ddr_bw,
+			inst->prop.ddr_op_bw);
+
+		fdu_sum[i] += inst->prop.fdu_cycles;
+		od_sum[i] += inst->prop.od_cycles;
+		mpu_sum[i] += inst->prop.mpu_cycles;
+		ica_sum[i] += inst->prop.ica_cycles;
+		fw_sum[i] += inst->prop.fw_cycles;
+		op_fdu_max[i] =
+			(op_fdu_max[i] >= inst->prop.fdu_op_cycles) ?
+			op_fdu_max[i] : inst->prop.fdu_op_cycles;
+		op_od_max[i] =
+			(op_od_max[i] >= inst->prop.od_op_cycles) ?
+			op_od_max[i] : inst->prop.od_op_cycles;
+		op_mpu_max[i] =
+			(op_mpu_max[i] >= inst->prop.mpu_op_cycles) ?
+			op_mpu_max[i] : inst->prop.mpu_op_cycles;
+		op_ica_max[i] =
+			(op_ica_max[i] >= inst->prop.ica_op_cycles) ?
+			op_ica_max[i] : inst->prop.ica_op_cycles;
+		op_fw_max[i] =
+			(op_fw_max[i] >= inst->prop.fw_op_cycles) ?
+			op_fw_max[i] : inst->prop.fw_op_cycles;
+		bw_sum[i] += inst->prop.ddr_bw;
+		op_bw_max[i] =
+			(op_bw_max[i] >= inst->prop.ddr_op_bw) ?
+			op_bw_max[i] : inst->prop.ddr_op_bw;
+	}
+
+	for (i = 0; i < 2; i++) {
+		fdu_sum[i] = max_3(fdu_sum[i], od_sum[i], mpu_sum[i]);
+		fdu_sum[i] = max_3(fdu_sum[i], ica_sum[i], fw_sum[i]);
+
+		op_fdu_max[i] = max_3(op_fdu_max[i], op_od_max[i],
+			op_mpu_max[i]);
+		op_fdu_max[i] = max_3(op_fdu_max[i],
+			op_ica_max[i], op_fw_max[i]);
+		op_fdu_max[i] =
+			(op_fdu_max[i] > max_clk_rate) ?
+			max_clk_rate : op_fdu_max[i];
+		bw_sum[i] = (bw_sum[i] >= op_bw_max[i]) ?
+			bw_sum[i] : op_bw_max[i];
+	}
+
+	nrt_pwr->core_sum += fdu_sum[0];
+	nrt_pwr->op_core_sum = (nrt_pwr->op_core_sum >= op_fdu_max[0]) ?
+			nrt_pwr->op_core_sum : op_fdu_max[0];
+	nrt_pwr->bw_sum += bw_sum[0];
+	rt_pwr->core_sum += fdu_sum[1];
+	rt_pwr->op_core_sum = (rt_pwr->op_core_sum >= op_fdu_max[1]) ?
+			rt_pwr->op_core_sum : op_fdu_max[1];
+	rt_pwr->bw_sum += bw_sum[1];
+}
+
 static void aggregate_power_request(struct msm_cvp_core *core,
 	struct cvp_power_level *nrt_pwr,
 	struct cvp_power_level *rt_pwr,
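
The reduction in aggregate_power_update() collapses the five sub-block votes into one core vote per priority bucket: fdu_sum[i] ends up as max(fdu_sum, od_sum, mpu_sum, ica_sum, fw_sum), op_fdu_max[i] as the max of the five op maxima clamped to max_clk_rate, and bw_sum[i] as max(bw_sum, op_bw_max). For instance, with illustrative block sums of 200/150/300/100/50 the aggregated core vote is 300, and an op maximum of 900 against a max_clk_rate of 800 is clamped to 800.
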
@@ -1438,7 +1551,8 @@ static void aggregate_power_request(struct msm_cvp_core *core,
 
 	list_for_each_entry(inst, &core->instances, list) {
 		if (inst->state == MSM_CVP_CORE_INVALID ||
-			inst->state == MSM_CVP_CORE_UNINIT)
+			inst->state == MSM_CVP_CORE_UNINIT ||
+			is_subblock_profile_existed(inst))
 			continue;
 		if (inst->prop.priority <= CVP_RT_PRIO_THRESHOLD) {
 			/* Non-realtime session use index 0 */
@@ -1446,6 +1560,15 @@ static void aggregate_power_request(struct msm_cvp_core *core,
 		} else {
 			i = 1;
 		}
+		dprintk(CVP_PROF, "pwrReq sess %pK core %u ctl %u fw %u\n",
+			inst, inst->power.clock_cycles_a,
+			inst->power.clock_cycles_b,
+			inst->power.reserved[0]);
+		dprintk(CVP_PROF, "pwrReq op_core %u op_ctl %u op_fw %u\n",
+			inst->power.reserved[1],
+			inst->power.reserved[2],
+			inst->power.reserved[3]);
+
 		core_sum[i] += inst->power.clock_cycles_a;
 		ctlr_sum[i] += inst->power.clock_cycles_b;
 		fw_sum[i] += inst->power.reserved[0];
@@ -1471,17 +1594,18 @@ static void aggregate_power_request(struct msm_cvp_core *core,
 		op_core_max[i] =
 			(op_core_max[i] > max_clk_rate) ?
 			max_clk_rate : op_core_max[i];
-		core_sum[i] =
-			(core_sum[i] >= op_core_max[i]) ?
-			core_sum[i] : op_core_max[i];
 		bw_sum[i] = (bw_sum[i] >= op_bw_max[i]) ?
 			bw_sum[i] : op_bw_max[i];
 	}
 
-	nrt_pwr->core_sum = core_sum[0];
-	nrt_pwr->bw_sum = bw_sum[0];
-	rt_pwr->core_sum = core_sum[1];
-	rt_pwr->bw_sum = bw_sum[1];
+	nrt_pwr->core_sum += core_sum[0];
+	nrt_pwr->op_core_sum = (nrt_pwr->op_core_sum >= op_core_max[0]) ?
+			nrt_pwr->op_core_sum : op_core_max[0];
+	nrt_pwr->bw_sum += bw_sum[0];
+	rt_pwr->core_sum += core_sum[1];
+	rt_pwr->op_core_sum = (rt_pwr->op_core_sum >= op_core_max[1]) ?
+			rt_pwr->op_core_sum : op_core_max[1];
+	rt_pwr->bw_sum += bw_sum[1];
 }
 
 /**
@@ -1504,9 +1628,9 @@ static int adjust_bw_freqs(void)
 	struct clock_info *cl;
 	struct allowed_clock_rates_table *tbl = NULL;
 	unsigned int tbl_size;
-	unsigned int cvp_min_rate, cvp_max_rate, max_bw;
-	struct cvp_power_level rt_pwr, nrt_pwr;
-	unsigned long tmp, core_sum, bw_sum;
+	unsigned int cvp_min_rate, cvp_max_rate, max_bw, min_bw;
+	struct cvp_power_level rt_pwr = {0}, nrt_pwr = {0};
+	unsigned long tmp, core_sum, op_core_sum, bw_sum;
 	int i, rc = 0;
 
 	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
@@ -1520,8 +1644,16 @@ static int adjust_bw_freqs(void)
 	cvp_max_rate = tbl[tbl_size - 1].clock_rate;
 	bus = &core->resources.bus_set.bus_tbl[1];
 	max_bw = bus->range[1];
+	min_bw = max_bw/10;
 
 	aggregate_power_request(core, &nrt_pwr, &rt_pwr, cvp_max_rate);
+	dprintk(CVP_DBG, "PwrReq nrt %u %u rt %u %u\n",
+		nrt_pwr.core_sum, nrt_pwr.op_core_sum,
+		rt_pwr.core_sum, rt_pwr.op_core_sum);
+	aggregate_power_update(core, &nrt_pwr, &rt_pwr, cvp_max_rate);
+	dprintk(CVP_DBG, "PwrUpdate nrt %u %u rt %u %u\n",
+		nrt_pwr.core_sum, nrt_pwr.op_core_sum,
+		rt_pwr.core_sum, rt_pwr.op_core_sum);
 
 	if (rt_pwr.core_sum > cvp_max_rate) {
 		dprintk(CVP_WARN, "%s clk vote out of range %lld\n",
@@ -1530,6 +1662,11 @@ static int adjust_bw_freqs(void)
 	}
 
 	core_sum = rt_pwr.core_sum + nrt_pwr.core_sum;
+	op_core_sum = (rt_pwr.op_core_sum >= nrt_pwr.op_core_sum) ?
+		rt_pwr.op_core_sum : nrt_pwr.op_core_sum;
+
+	core_sum = (core_sum >= op_core_sum) ?
+		core_sum : op_core_sum;
 
 	if (core_sum > cvp_max_rate) {
 		core_sum = cvp_max_rate;
@@ -1543,10 +1680,10 @@ static int adjust_bw_freqs(void)
 	}
 
 	bw_sum = rt_pwr.bw_sum + nrt_pwr.bw_sum;
-	if (bw_sum > max_bw)
-		bw_sum = max_bw;
+	bw_sum = (bw_sum > max_bw) ? max_bw : bw_sum;
+	bw_sum = (bw_sum < min_bw) ? min_bw : bw_sum;
 
-	dprintk(CVP_DBG, "%s %lld %lld\n", __func__,
+	dprintk(CVP_PROF, "%s %lu %lu\n", __func__,
 		core_sum, bw_sum);
 	if (!cl->has_scaling) {
 		dprintk(CVP_ERR, "Cannot scale CVP clock\n");
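
With min_bw defined as max_bw/10, the aggregate bandwidth vote is now clamped into [min_bw, max_bw] rather than only capped from above: taking an illustrative max_bw of 8000000 from the bus table, min_bw is 800000, so a combined rt+nrt vote of 300000 is raised to 800000 while a vote of 9000000 is still capped at 8000000.
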
@@ -1614,7 +1751,6 @@ static int msm_cvp_request_power(struct msm_cvp_inst *inst,
 	inst->power.reserved[0] = div_by_1dot5(inst->power.reserved[0]);
 	inst->power.reserved[2] = div_by_1dot5(inst->power.reserved[2]);
 	inst->power.reserved[3] = div_by_1dot5(inst->power.reserved[3]);
-	inst->power.reserved[4] = div_by_1dot5(inst->power.reserved[4]);
 
 	/* Convert bps to KBps */
 	inst->power.ddr_bw = inst->power.ddr_bw >> 10;
@@ -1632,6 +1768,33 @@ static int msm_cvp_request_power(struct msm_cvp_inst *inst,
 	return rc;
 }
 
+static int msm_cvp_update_power(struct msm_cvp_inst *inst)
+{
+	int rc = 0;
+	struct msm_cvp_core *core;
+	struct msm_cvp_inst *s;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	s = cvp_get_inst_validate(inst->core, inst);
+	if (!s)
+		return -ECONNRESET;
+
+	inst->cur_cmd_type = CVP_KMD_UPDATE_POWER;
+	core = inst->core;
+
+	mutex_lock(&core->lock);
+	rc = adjust_bw_freqs();
+	mutex_unlock(&core->lock);
+	inst->cur_cmd_type = 0;
+	cvp_put_inst(s);
+
+	return rc;
+}
+
 static int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
 		struct cvp_kmd_buffer *buf)
 {
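
CVP_KMD_UPDATE_POWER carries no payload in either direction (its convert_from_user/convert_to_user cases further below are empty); msm_cvp_update_power() simply re-runs adjust_bw_freqs() under the core lock, re-aggregating the per-session CVP_KMD_PROP_PWR_* values stored earlier through CVP_KMD_SET_SYS_PROPERTY. A minimal userspace sketch of that sequence, assuming the usual cvp_kmd_arg ioctl transport (the ioctl request macro and device node are assumptions, not part of this patch):

	struct cvp_kmd_arg arg = {0};

	/* 1) publish this session's power properties */
	arg.type = CVP_KMD_SET_SYS_PROPERTY;
	/* ... fill arg.data.sys_properties with CVP_KMD_PROP_PWR_* items ... */
	ioctl(fd, CVP_IOCTL_CMD, &arg);		/* CVP_IOCTL_CMD is hypothetical */

	/* 2) trigger re-aggregation of clock and bandwidth votes */
	arg.type = CVP_KMD_UPDATE_POWER;	/* no payload */
	ioctl(fd, CVP_IOCTL_CMD, &arg);
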
@@ -1866,6 +2029,12 @@ static int msm_cvp_set_sysprop(struct msm_cvp_inst *inst,
 		return -EINVAL;
 	}
 
+	if (props->prop_num >= MAX_KMD_PROP_NUM) {
+		dprintk(CVP_ERR, "Too many properties %d to set\n",
+			props->prop_num);
+		return -E2BIG;
+	}
+
 	prop_array = &arg->data.sys_properties.prop_data;
 	session_prop = &inst->prop;
 
@@ -1886,6 +2055,52 @@ static int msm_cvp_set_sysprop(struct msm_cvp_inst *inst,
 		case CVP_KMD_PROP_SESSION_DSPMASK:
 			session_prop->dsp_mask = prop_array[i].data;
 			break;
+		case CVP_KMD_PROP_PWR_FDU:
+			session_prop->fdu_cycles = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_PWR_ICA:
+			session_prop->ica_cycles =
+				div_by_1dot5(prop_array[i].data);
+			break;
+		case CVP_KMD_PROP_PWR_OD:
+			session_prop->od_cycles = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_PWR_MPU:
+			session_prop->mpu_cycles = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_PWR_FW:
+			session_prop->fw_cycles =
+				div_by_1dot5(prop_array[i].data);
+			break;
+		case CVP_KMD_PROP_PWR_DDR:
+			session_prop->ddr_bw = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_PWR_SYSCACHE:
+			session_prop->ddr_cache = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_PWR_FDU_OP:
+			session_prop->fdu_op_cycles = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_PWR_ICA_OP:
+			session_prop->ica_op_cycles =
+				div_by_1dot5(prop_array[i].data);
+			break;
+		case CVP_KMD_PROP_PWR_OD_OP:
+			session_prop->od_op_cycles = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_PWR_MPU_OP:
+			session_prop->mpu_op_cycles = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_PWR_FW_OP:
+			session_prop->fw_op_cycles =
+				div_by_1dot5(prop_array[i].data);
+			break;
+		case CVP_KMD_PROP_PWR_DDR_OP:
+			session_prop->ddr_op_bw = prop_array[i].data;
+			break;
+		case CVP_KMD_PROP_PWR_SYSCACHE_OP:
+			session_prop->ddr_op_cache = prop_array[i].data;
+			break;
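
Note that the ICA and FW votes (both base and op) pass through div_by_1dot5() before being stored, while the FDU/OD/MPU/DDR votes are stored verbatim; assuming the helper computes x * 2 / 3 (its body is outside this patch), a 300-unit CVP_KMD_PROP_PWR_ICA vote is recorded as 200.
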
 		default:
 			dprintk(CVP_ERR,
 				"unrecognized sys property to set %d\n",
@@ -1936,6 +2151,11 @@ int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct cvp_kmd_arg *arg)
 		rc = msm_cvp_request_power(inst, power);
 		break;
 	}
+	case CVP_KMD_UPDATE_POWER:
+	{
+		rc = msm_cvp_update_power(inst);
+		break;
+	}
 	case CVP_KMD_REGISTER_BUFFER:
 	{
 		struct cvp_kmd_buffer *buf =
@@ -2047,10 +2267,8 @@ int msm_cvp_session_deinit(struct msm_cvp_inst *inst)
 		inst, hash32_ptr(inst->session));
 
 	session = (struct cvp_hal_session *)inst->session;
-	if (!session) {
-		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
-		return -EINVAL;
-	}
+	if (!session)
+		return rc;
 
 	rc = msm_cvp_comm_try_state(inst, MSM_CVP_CLOSE_DONE);
 	if (rc)
@@ -2063,7 +2281,7 @@ int msm_cvp_session_deinit(struct msm_cvp_inst *inst)
 									cbuf);
 		msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
 		list_del(&cbuf->list);
-		kmem_cache_free(inst->internal_buf_cache, cbuf);
+		kmem_cache_free(cvp_driver->internal_buf_cache, cbuf);
 	}
 	mutex_unlock(&inst->cvpcpubufs.lock);
 
@@ -2083,7 +2301,7 @@ int msm_cvp_session_deinit(struct msm_cvp_inst *inst)
 
 		msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
 		list_del(&cbuf->list);
-		kmem_cache_free(inst->internal_buf_cache, cbuf);
+		kmem_cache_free(cvp_driver->internal_buf_cache, cbuf);
 	}
 	mutex_unlock(&inst->cvpdspbufs.lock);
 
@@ -2103,11 +2321,12 @@ int msm_cvp_session_deinit(struct msm_cvp_inst *inst)
 				buf->dbuf->name);
 
 			list_del(&frame_buf->list);
-			kmem_cache_free(inst->frame_buf_cache, frame_buf);
+			kmem_cache_free(cvp_driver->frame_buf_cache,
+					frame_buf);
 		}
 		mutex_unlock(&frame->bufs.lock);
 		DEINIT_MSM_CVP_LIST(&frame->bufs);
-		kmem_cache_free(inst->frame_cache, frame);
+		kmem_cache_free(cvp_driver->frame_cache, frame);
 	}
 	mutex_unlock(&inst->frames.lock);
 
@@ -2133,6 +2352,9 @@ int msm_cvp_session_init(struct msm_cvp_inst *inst)
 	inst->clk_data.sys_cache_bw = 1000;
 
 	inst->prop.type = HFI_SESSION_CV;
+	if (inst->session_type == MSM_CVP_KERNEL)
+		inst->prop.type = HFI_SESSION_DME;
+
 	inst->prop.kernel_mask = 0xFFFFFFFF;
 	inst->prop.priority = 0;
 	inst->prop.is_secure = 0;
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_common.c b/drivers/media/platform/msm/cvp/msm_cvp_common.c
index 2fb9850..f5fa252 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_common.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_common.c
@@ -406,6 +406,54 @@ int wait_for_sess_signal_receipt(struct msm_cvp_inst *inst,
 	return rc;
 }
 
+int wait_for_sess_signal_receipt_fence(struct msm_cvp_inst *inst,
+	enum hal_command_response cmd)
+{
+	int rc = 0;
+	struct cvp_hfi_device *hdev;
+	int retry = FENCE_WAIT_SIGNAL_RETRY_TIMES;
+
+	if (!IS_HAL_SESSION_CMD(cmd)) {
+		dprintk(CVP_ERR, "Invalid inst cmd response: %d\n", cmd);
+		return -EINVAL;
+	}
+	hdev = (struct cvp_hfi_device *)(inst->core->device);
+
+	while (retry) {
+		rc = wait_for_completion_timeout(
+			&inst->completions[SESSION_MSG_INDEX(cmd)],
+			msecs_to_jiffies(FENCE_WAIT_SIGNAL_TIMEOUT));
+		if (!rc) {
+			enum cvp_event_t event;
+			unsigned long flags = 0;
+
+			spin_lock_irqsave(&inst->event_handler.lock, flags);
+			event = inst->event_handler.event;
+			spin_unlock_irqrestore(
+				&inst->event_handler.lock, flags);
+			if (event == CVP_SSR_EVENT) {
+				dprintk(CVP_WARN, "%s: SSR triggered\n",
+					__func__);
+				return -ECONNRESET;
+			}
+			--retry;
+		} else {
+			rc = 0;
+			break;
+		}
+	}
+
+	if (!retry) {
+		dprintk(CVP_WARN, "Wait interrupted or timed out: %d\n",
+				SESSION_MSG_INDEX(cmd));
+		call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
+		dump_hfi_queue(hdev->hfi_device_data);
+		rc = -EIO;
+	}
+
+	return rc;
+}
+
 static int wait_for_state(struct msm_cvp_inst *inst,
 	enum instance_state flipped_state,
 	enum instance_state desired_state,
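
Unlike wait_for_sess_signal_receipt(), the fence variant waits in FENCE_WAIT_SIGNAL_TIMEOUT (100 ms) slices for at most FENCE_WAIT_SIGNAL_RETRY_TIMES (20) attempts, a worst-case bound of 20 x 100 ms = 2 s, and samples the event handler between slices so a fence thread returns -ECONNRESET as soon as CVP_SSR_EVENT is flagged instead of sleeping through one long timeout.
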
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_common.h b/drivers/media/platform/msm/cvp/msm_cvp_common.h
index 99dd3fd..2322a98 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_common.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_common.h
@@ -41,6 +41,8 @@ void print_cvp_buffer(u32 tag, const char *str,
 		struct msm_cvp_internal_buffer *cbuf);
 int wait_for_sess_signal_receipt(struct msm_cvp_inst *inst,
 	enum hal_command_response cmd);
+int wait_for_sess_signal_receipt_fence(struct msm_cvp_inst *inst,
+	enum hal_command_response cmd);
 int cvp_comm_set_arp_buffers(struct msm_cvp_inst *inst);
 int cvp_comm_release_persist_buffers(struct msm_cvp_inst *inst);
 void print_client_buffer(u32 tag, const char *str,
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_core.c b/drivers/media/platform/msm/cvp/msm_cvp_core.c
index cde1eaa..a4a86de 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_core.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_core.c
@@ -16,6 +16,7 @@
 #include "msm_cvp_clocks.h"
 #include <linux/dma-buf.h>
 #include <uapi/media/msm_media_info.h>
+#include <synx_api.h>
 
 #define MAX_EVENTS 30
 #define NUM_CYCLES16X16_HCD_FRAME 95
@@ -226,11 +227,6 @@ static int _init_session_queue(struct msm_cvp_inst *inst)
 	INIT_LIST_HEAD(&inst->session_queue.msgs);
 	inst->session_queue.msg_count = 0;
 	init_waitqueue_head(&inst->session_queue.wq);
-	inst->session_queue.msg_cache = KMEM_CACHE(cvp_session_msg, 0);
-	if (!inst->session_queue.msg_cache) {
-		dprintk(CVP_ERR, "Failed to allocate msg quque\n");
-		return -ENOMEM;
-	}
 	inst->session_queue.state = QUEUE_ACTIVE;
 	return 0;
 }
@@ -243,15 +239,13 @@ static void _deinit_session_queue(struct msm_cvp_inst *inst)
 	spin_lock(&inst->session_queue.lock);
 	list_for_each_entry_safe(msg, tmpmsg, &inst->session_queue.msgs, node) {
 		list_del_init(&msg->node);
-		kmem_cache_free(inst->session_queue.msg_cache, msg);
+		kmem_cache_free(cvp_driver->msg_cache, msg);
 	}
 	inst->session_queue.msg_count = 0;
 	inst->session_queue.state = QUEUE_STOP;
 	spin_unlock(&inst->session_queue.lock);
 
 	wake_up_all(&inst->session_queue.wq);
-
-	kmem_cache_destroy(inst->session_queue.msg_cache);
 }
 
 void *msm_cvp_open(int core_id, int session_type)
@@ -297,6 +291,7 @@ void *msm_cvp_open(int core_id, int session_type)
 	pr_info(CVP_DBG_TAG "Opening cvp instance: %pK\n", "info", inst);
 	mutex_init(&inst->sync_lock);
 	mutex_init(&inst->lock);
+	mutex_init(&inst->fence_lock);
 	spin_lock_init(&inst->event_handler.lock);
 
 	INIT_MSM_CVP_LIST(&inst->persistbufs);
@@ -308,6 +303,8 @@ void *msm_cvp_open(int core_id, int session_type)
 
 	kref_init(&inst->kref);
 
+	synx_initialize(NULL);
+
 	inst->session_type = session_type;
 	inst->state = MSM_CVP_CORE_UNINIT_DONE;
 	inst->core = core;
@@ -318,10 +315,6 @@ void *msm_cvp_open(int core_id, int session_type)
 	inst->clk_data.bitrate = 0;
 	inst->clk_data.core_id = 0;
 	inst->deprecate_bitmask = 0;
-	inst->fence_data_cache = KMEM_CACHE(msm_cvp_fence_thread_data, 0);
-	inst->frame_cache = KMEM_CACHE(msm_cvp_frame, 0);
-	inst->frame_buf_cache = KMEM_CACHE(msm_cvp_frame_buf, 0);
-	inst->internal_buf_cache = KMEM_CACHE(msm_cvp_internal_buffer, 0);
 
 	for (i = SESSION_MSG_INDEX(SESSION_MSG_START);
 		i <= SESSION_MSG_INDEX(SESSION_MSG_END); i++) {
@@ -356,16 +349,14 @@ void *msm_cvp_open(int core_id, int session_type)
 	mutex_unlock(&core->lock);
 	mutex_destroy(&inst->sync_lock);
 	mutex_destroy(&inst->lock);
+	mutex_destroy(&inst->fence_lock);
 
 	DEINIT_MSM_CVP_LIST(&inst->persistbufs);
 	DEINIT_MSM_CVP_LIST(&inst->cvpcpubufs);
 	DEINIT_MSM_CVP_LIST(&inst->cvpdspbufs);
 	DEINIT_MSM_CVP_LIST(&inst->frames);
 
-	kmem_cache_destroy(inst->fence_data_cache);
-	kmem_cache_destroy(inst->frame_cache);
-	kmem_cache_destroy(inst->frame_buf_cache);
-	kmem_cache_destroy(inst->internal_buf_cache);
+	synx_uninitialize();
 
 	kfree(inst);
 	inst = NULL;
@@ -402,18 +393,16 @@ int msm_cvp_destroy(struct msm_cvp_inst *inst)
 	list_del(&inst->list);
 	mutex_unlock(&core->lock);
 
+	synx_uninitialize();
+
 	DEINIT_MSM_CVP_LIST(&inst->persistbufs);
 	DEINIT_MSM_CVP_LIST(&inst->cvpcpubufs);
 	DEINIT_MSM_CVP_LIST(&inst->cvpdspbufs);
 	DEINIT_MSM_CVP_LIST(&inst->frames);
 
-	kmem_cache_destroy(inst->fence_data_cache);
-	kmem_cache_destroy(inst->frame_cache);
-	kmem_cache_destroy(inst->frame_buf_cache);
-	kmem_cache_destroy(inst->internal_buf_cache);
-
 	mutex_destroy(&inst->sync_lock);
 	mutex_destroy(&inst->lock);
+	mutex_destroy(&inst->fence_lock);
 
 	msm_cvp_debugfs_deinit_inst(inst);
 	_deinit_session_queue(inst);
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_debug.c b/drivers/media/platform/msm/cvp/msm_cvp_debug.c
index 063ef64..9d61d2ac 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_debug.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_debug.c
@@ -6,7 +6,8 @@
 #define CREATE_TRACE_POINTS
 #define MAX_SSR_STRING_LEN 10
 #include "msm_cvp_debug.h"
-#include "cvp_hfi_api.h"
+#include "msm_cvp_common.h"
+#include "cvp_core_hfi.h"
 
 int msm_cvp_debug = CVP_ERR | CVP_WARN | CVP_FW;
 EXPORT_SYMBOL(msm_cvp_debug);
@@ -190,8 +191,6 @@ struct dentry *msm_cvp_debugfs_init_drv(void)
 	__debugfs_create(u32, "debug_output", &msm_cvp_debug_out) &&
 	__debugfs_create(bool, "disable_thermal_mitigation",
 			&msm_cvp_thermal_mitigation_disabled) &&
-	__debugfs_create(u32, "core_clock_voting",
-			&msm_cvp_clock_voting) &&
 	__debugfs_create(bool, "disable_cvp_syscache",
 			&msm_cvp_syscache_disable);
 
@@ -209,6 +208,62 @@ struct dentry *msm_cvp_debugfs_init_drv(void)
 	return NULL;
 }
 
+static int _clk_rate_set(void *data, u64 val)
+{
+	struct msm_cvp_core *core;
+	struct cvp_hfi_device *dev;
+	struct allowed_clock_rates_table *tbl = NULL;
+	unsigned int tbl_size, i;
+
+	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
+	dev = core->device;
+	tbl = core->resources.allowed_clks_tbl;
+	tbl_size = core->resources.allowed_clks_tbl_size;
+
+	if (val == 0) {
+		struct iris_hfi_device *hdev = dev->hfi_device_data;
+
+		msm_cvp_clock_voting = 0;
+		call_hfi_op(dev, scale_clocks, hdev, hdev->clk_freq);
+		return 0;
+	}
+
+	for (i = 0; i < tbl_size; i++)
+		if (val <= tbl[i].clock_rate)
+			break;
+
+	if (i == tbl_size)
+		msm_cvp_clock_voting = tbl[tbl_size-1].clock_rate;
+	else
+		msm_cvp_clock_voting = tbl[i].clock_rate;
+
+	dprintk(CVP_WARN, "Override cvp_clk_rate with %d\n",
+			msm_cvp_clock_voting);
+
+	call_hfi_op(dev, scale_clocks, dev->hfi_device_data,
+		msm_cvp_clock_voting);
+
+	return 0;
+}
+
+static int _clk_rate_get(void *data, u64 *val)
+{
+	struct msm_cvp_core *core;
+	struct iris_hfi_device *hdev;
+
+	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
+	hdev = core->device->hfi_device_data;
+	if (msm_cvp_clock_voting)
+		*val = msm_cvp_clock_voting;
+	else
+		*val = hdev->clk_freq;
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, _clk_rate_get, _clk_rate_set, "%llu\n");
+
 struct dentry *msm_cvp_debugfs_init_core(struct msm_cvp_core *core,
 		struct dentry *parent)
 {
@@ -235,6 +290,12 @@ struct dentry *msm_cvp_debugfs_init_core(struct msm_cvp_core *core,
 		dprintk(CVP_ERR, "debugfs_create_file: fail\n");
 		goto failed_create_dir;
 	}
+	if (!debugfs_create_file("clock_rate", 0644, dir,
+			NULL, &clk_rate_fops)) {
+		dprintk(CVP_ERR, "debugfs_create_file: clock_rate fail\n");
+		goto failed_create_dir;
+	}
+
 failed_create_dir:
 	return dir;
 }
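
Reading the new clock_rate node returns msm_cvp_clock_voting while an override is active and the firmware-voted hdev->clk_freq otherwise. Writing 0 clears the override and rescales to the current clk_freq; any other value is rounded up to the nearest allowed-clocks table entry, or clamped to the table's top entry. Against a hypothetical table of {200, 300, 400, 500} MHz, writing 250000000 votes 300 MHz and writing 600000000 votes 500 MHz.
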
@@ -370,7 +431,7 @@ struct dentry *msm_cvp_debugfs_init_inst(struct msm_cvp_inst *inst,
 	info = debugfs_create_file("info", 0444, dir,
 			idata, &inst_info_fops);
 	if (!info) {
-		dprintk(CVP_ERR, "debugfs_create_file: fail\n");
+		dprintk(CVP_ERR, "debugfs_create_file: info fail\n");
 		goto failed_create_file;
 	}
 
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_dsp.c b/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
index 8218282..1eab89a 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
@@ -63,6 +63,8 @@ struct cvp_dsp_apps {
 	struct completion dereg_buffer_work;
 	struct completion shutdown_work;
 	struct completion cmdqueue_send_work;
+	struct work_struct ssr_work;
+	struct iris_hfi_device *device;
 };
 
 
@@ -88,6 +90,40 @@ static int cvp_dsp_send_cmd(void *msg, uint32_t len)
 	return err;
 }
 
+void msm_cvp_cdsp_ssr_handler(struct work_struct *work)
+{
+	struct cvp_dsp_apps *me;
+	uint64_t msg_ptr;
+	uint32_t msg_ptr_len;
+	int err;
+
+	me = container_of(work, struct cvp_dsp_apps, ssr_work);
+	if (!me) {
+		dprintk(CVP_ERR, "%s: Invalid params\n", __func__);
+		return;
+	}
+
+	msg_ptr = cmd_msg.msg_ptr;
+	msg_ptr_len =  cmd_msg.msg_ptr_len;
+
+	err = cvp_dsp_send_cmd_hfi_queue((phys_addr_t *)msg_ptr,
+					msg_ptr_len,
+					(void *)NULL);
+	if (err) {
+		dprintk(CVP_ERR,
+			"%s: Failed to send HFI Queue address. err=%d\n",
+			__func__, err);
+		return;
+	}
+
+	if (me->device) {
+		mutex_lock(&me->device->lock);
+		me->device->dsp_flags |= DSP_INIT;
+		mutex_unlock(&me->device->lock);
+	}
+	dprintk(CVP_DBG, "%s: dsp recovered from SSR successfully\n", __func__);
+}
+
 static int cvp_dsp_rpmsg_probe(struct rpmsg_device *rpdev)
 {
 	int err = 0;
@@ -124,14 +160,7 @@ static int cvp_dsp_rpmsg_probe(struct rpmsg_device *rpdev)
 				__func__, err);
 			return err;
 		}
-		err = cvp_dsp_send_cmd_hfi_queue(
-			(phys_addr_t *)msg_ptr, msg_ptr_len);
-		if (err) {
-			dprintk(CVP_ERR,
-				"%s: Failed to send HFI Queue address. err=%d\n",
-				__func__, err);
-			goto bail;
-		}
+		schedule_work(&me->ssr_work);
 		mutex_lock(&me->smd_mutex);
 		cdsp_state = me->cdsp_state;
 		mutex_unlock(&me->smd_mutex);
@@ -148,9 +177,15 @@ static void cvp_dsp_rpmsg_remove(struct rpmsg_device *rpdev)
 {
 	struct cvp_dsp_apps *me = &gfa_cv;
 
+	cancel_work_sync(&me->ssr_work);
 	mutex_lock(&me->smd_mutex);
 	me->chan = NULL;
 	me->cdsp_state = STATUS_SSR;
+	if (me->device) {
+		mutex_lock(&me->device->lock);
+		me->device->dsp_flags &= ~DSP_INIT;
+		mutex_unlock(&me->device->lock);
+	}
 	mutex_unlock(&me->smd_mutex);
 	dprintk(CVP_INFO,
 		"%s: CDSP SSR triggered\n", __func__);
@@ -193,7 +228,8 @@ static int cvp_dsp_rpmsg_callback(struct rpmsg_device *rpdev,
 }
 
 int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
-	uint32_t size_in_bytes)
+				uint32_t size_in_bytes,
+				struct iris_hfi_device *device)
 {
 	int err, timeout;
 	struct msm_cvp_core *core;
@@ -219,6 +255,7 @@ int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
 	mutex_lock(&me->smd_mutex);
 	cmd_msg.msg_ptr = (uint64_t)phys_addr;
 	cmd_msg.msg_ptr_len = (size_in_bytes);
+	me->device = device;
 	mutex_unlock(&me->smd_mutex);
 
 	dprintk(CVP_DBG,
@@ -313,6 +350,15 @@ int cvp_dsp_resume(uint32_t session_flag)
 	return err;
 }
 
+void cvp_dsp_set_cvp_ssr(void)
+{
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	mutex_lock(&me->smd_mutex);
+	me->cvp_shutdown = STATUS_SSR;
+	mutex_unlock(&me->smd_mutex);
+}
+
 int cvp_dsp_shutdown(uint32_t session_flag)
 {
 	struct msm_cvp_core *core;
@@ -340,7 +386,6 @@ int cvp_dsp_shutdown(uint32_t session_flag)
 	}
 
 	mutex_lock(&me->smd_mutex);
-	me->cvp_shutdown = STATUS_SSR;
 	local_cmd_msg.msg_ptr = cmd_msg.msg_ptr;
 	local_cmd_msg.msg_ptr_len = cmd_msg.msg_ptr_len;
 	mutex_unlock(&me->smd_mutex);
@@ -489,6 +534,7 @@ static int __init cvp_dsp_device_init(void)
 	init_completion(&me->cmdqueue_send_work);
 	me->cvp_shutdown = STATUS_INIT;
 	me->cdsp_state = STATUS_INIT;
+	INIT_WORK(&me->ssr_work, msm_cvp_cdsp_ssr_handler);
 	err = register_rpmsg_driver(&cvp_dsp_rpmsg_client);
 	if (err) {
 		dprintk(CVP_ERR,
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_dsp.h b/drivers/media/platform/msm/cvp/msm_cvp_dsp.h
index 0fb3567..6cc14b2 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_dsp.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_dsp.h
@@ -8,6 +8,7 @@
 
 #include <linux/types.h>
 #include "msm_cvp_debug.h"
+#include "cvp_core_hfi.h"
 
 #define CVP_APPS_DSP_GLINK_GUID "cvp-glink-apps-dsp"
 #define CVP_APPS_DSP_SMD_GUID "cvp-smd-apps-dsp"
@@ -23,7 +24,7 @@
  * Size in bytes of command message queue
  */
 int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
-	uint32_t size_in_bytes);
+	uint32_t size_in_bytes, struct iris_hfi_device *device);
 
 /*
  * API for CVP driver to suspend CVP session during
@@ -53,6 +54,13 @@ int cvp_dsp_resume(uint32_t session_flag);
 int cvp_dsp_shutdown(uint32_t session_flag);
 
 /*
+ * API for the CVP driver to set the CVP shutdown status
+ * when a CVP subsystem error (SSR) occurs.
+ */
+void cvp_dsp_set_cvp_ssr(void);
+
+/*
  * API to register iova buffer address with CDSP
  *
  * @session_id:     cvp session id
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_internal.h b/drivers/media/platform/msm/cvp/msm_cvp_internal.h
index 139b322..9a5d136 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_internal.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_internal.h
@@ -29,6 +29,8 @@
 #define MAX_NAME_LENGTH 64
 #define MAX_DEBUGFS_NAME 50
 #define MAX_DSP_INIT_ATTEMPTS 16
+#define FENCE_WAIT_SIGNAL_TIMEOUT 100
+#define FENCE_WAIT_SIGNAL_RETRY_TIMES 20
 
 #define SYS_MSG_START HAL_SYS_INIT_DONE
 #define SYS_MSG_END HAL_SYS_ERROR
@@ -161,6 +163,11 @@ struct msm_cvp_drv {
 	struct dentry *debugfs_root;
 	int thermal_level;
 	u32 sku_version;
+	struct kmem_cache *msg_cache;
+	struct kmem_cache *fence_data_cache;
+	struct kmem_cache *frame_cache;
+	struct kmem_cache *frame_buf_cache;
+	struct kmem_cache *internal_buf_cache;
 };
 
 enum profiling_points {
@@ -250,7 +257,6 @@ struct cvp_session_queue {
 	unsigned int msg_count;
 	struct list_head msgs;
 	wait_queue_head_t wq;
-	struct kmem_cache *msg_cache;
 };
 
 struct cvp_session_prop {
@@ -259,6 +265,20 @@ struct cvp_session_prop {
 	u32 priority;
 	u32 is_secure;
 	u32 dsp_mask;
+	u32 fdu_cycles;
+	u32 od_cycles;
+	u32 mpu_cycles;
+	u32 ica_cycles;
+	u32 fw_cycles;
+	u32 fdu_op_cycles;
+	u32 od_op_cycles;
+	u32 mpu_op_cycles;
+	u32 ica_op_cycles;
+	u32 fw_op_cycles;
+	u32 ddr_bw;
+	u32 ddr_op_bw;
+	u32 ddr_cache;
+	u32 ddr_op_cache;
 };
 
 enum cvp_event_t {
@@ -329,11 +349,8 @@ struct msm_cvp_inst {
 	unsigned long deprecate_bitmask;
 	struct cvp_kmd_request_power power;
 	struct cvp_session_prop prop;
-	struct kmem_cache *fence_data_cache;
 	u32 cur_cmd_type;
-	struct kmem_cache *frame_cache;
-	struct kmem_cache *frame_buf_cache;
-	struct kmem_cache *internal_buf_cache;
+	struct mutex fence_lock;
 };
 
 struct msm_cvp_fence_thread_data {
diff --git a/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c b/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c
index 45831db..8c37858 100644
--- a/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c
+++ b/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c
@@ -570,13 +570,27 @@ static int __init msm_cvp_init(void)
 		debugfs_remove_recursive(cvp_driver->debugfs_root);
 		kfree(cvp_driver);
 		cvp_driver = NULL;
+		return rc;
 	}
 
+	cvp_driver->msg_cache = KMEM_CACHE(cvp_session_msg, 0);
+	cvp_driver->fence_data_cache = KMEM_CACHE(msm_cvp_fence_thread_data, 0);
+	cvp_driver->frame_cache = KMEM_CACHE(msm_cvp_frame, 0);
+	cvp_driver->frame_buf_cache = KMEM_CACHE(msm_cvp_frame_buf, 0);
+	cvp_driver->internal_buf_cache = KMEM_CACHE(msm_cvp_internal_buffer, 0);
+
 	return rc;
 }
 
 static void __exit msm_cvp_exit(void)
 {
+	kmem_cache_destroy(cvp_driver->msg_cache);
+	kmem_cache_destroy(cvp_driver->fence_data_cache);
+	kmem_cache_destroy(cvp_driver->frame_cache);
+	kmem_cache_destroy(cvp_driver->frame_buf_cache);
+	kmem_cache_destroy(cvp_driver->internal_buf_cache);
+
 	platform_driver_unregister(&msm_cvp_driver);
 	debugfs_remove_recursive(cvp_driver->debugfs_root);
 	mutex_destroy(&cvp_driver->lock);
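
With the five kmem caches promoted from per-instance fields to driver scope, they are created once in msm_cvp_init() and destroyed once in msm_cvp_exit() instead of per msm_cvp_open()/msm_cvp_destroy() pair, so every allocation must pair with a free against the same cvp_driver cache. A minimal sketch of the pairing this change enforces:

	struct cvp_session_msg *msg =
		kmem_cache_zalloc(cvp_driver->msg_cache, GFP_KERNEL);
	if (msg) {
		/* ... enqueue, consume ... */
		kmem_cache_free(cvp_driver->msg_cache, msg);
	}
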
diff --git a/drivers/media/platform/msm/cvp/msm_v4l2_private.c b/drivers/media/platform/msm/cvp/msm_v4l2_private.c
index 4deafae..b2ad749 100644
--- a/drivers/media/platform/msm/cvp/msm_v4l2_private.c
+++ b/drivers/media/platform/msm/cvp/msm_v4l2_private.c
@@ -423,6 +423,8 @@ static int convert_from_user(struct cvp_kmd_arg *kp,
 		}
 		break;
 	}
+	case CVP_KMD_UPDATE_POWER:
+		break;
 	default:
 		dprintk(CVP_ERR, "%s: unknown cmd type 0x%x\n",
 			__func__, kp->type);
@@ -594,6 +596,8 @@ static int convert_to_user(struct cvp_kmd_arg *kp, unsigned long arg)
 	}
 	case CVP_KMD_SET_SYS_PROPERTY:
 		break;
+	case CVP_KMD_UPDATE_POWER:
+		break;
 	default:
 		dprintk(CVP_ERR, "%s: unknown cmd type 0x%x\n",
 			__func__, kp->type);
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c
index 3226e9b..d08e998 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c
@@ -77,7 +77,7 @@ static int mpq_sw_dmx_write_to_decoder(struct dvb_demux_feed *feed,
 }
 
 static int mpq_sw_dmx_set_source(struct dmx_demux *demux,
-		const dmx_source_t *src)
+		const enum dmx_source_t *src)
 {
 	int ret = -EINVAL;
 
diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h
index bafab3f..ff9a7bd 100644
--- a/drivers/media/platform/msm/npu/npu_common.h
+++ b/drivers/media/platform/msm/npu/npu_common.h
@@ -45,8 +45,11 @@
 #define NPU_MAX_DT_NAME_LEN	    21
 #define NPU_MAX_PWRLEVELS		8
 #define NPU_MAX_STATS_BUF_SIZE 16384
+#define NPU_MAX_PATCH_NUM		160
 #define NPU_MAX_BW_DEVS			4
 
+#define PERF_MODE_DEFAULT 0
+
 enum npu_power_level {
 	NPU_PWRLEVEL_MINSVS = 0,
 	NPU_PWRLEVEL_LOWSVS,
@@ -163,6 +166,8 @@ struct npu_reg {
  * @uc_pwrlevel - power level from user driver setting
  * @perf_mode_override - perf mode from sysfs to override perf mode
  *                       settings from user driver
+ * @dcvs_mode - dcvs mode from sysfs to override the dcvs mode
+ *              setting from the user driver
  * @devbw - bw device
  */
 struct npu_pwrctrl {
@@ -182,6 +187,8 @@ struct npu_pwrctrl {
 	uint32_t cdsprm_pwrlevel;
 	uint32_t fmax_pwrlevel;
 	uint32_t perf_mode_override;
+	uint32_t dcvs_mode;
+	uint32_t cur_dcvs_activity;
 };
 
 /**
@@ -245,7 +252,7 @@ struct npu_device {
 	struct npu_io_data core_io;
 	struct npu_io_data tcm_io;
 	struct npu_io_data cc_io;
-	struct npu_io_data qdsp_io;
+	struct npu_io_data tcsr_io;
 	struct npu_io_data apss_shared_io;
 	struct npu_io_data qfprom_io;
 
@@ -329,5 +336,6 @@ void disable_fw(struct npu_device *npu_dev);
 int load_fw(struct npu_device *npu_dev);
 int unload_fw(struct npu_device *npu_dev);
 int npu_set_bw(struct npu_device *npu_dev, int new_ib, int new_ab);
+int npu_process_kevent(struct npu_client *client, struct npu_kevent *kevt);
 
 #endif /* _NPU_COMMON_H */
diff --git a/drivers/media/platform/msm/npu/npu_debugfs.c b/drivers/media/platform/msm/npu/npu_debugfs.c
index 8653f38..51866c8 100644
--- a/drivers/media/platform/msm/npu/npu_debugfs.c
+++ b/drivers/media/platform/msm/npu/npu_debugfs.c
@@ -25,8 +25,7 @@
  */
 static int npu_debug_open(struct inode *inode, struct file *file);
 static int npu_debug_release(struct inode *inode, struct file *file);
-static ssize_t npu_debug_reg_write(struct file *file,
-		const char __user *user_buf, size_t count, loff_t *ppos);
+static int npu_debug_reg_release(struct inode *inode, struct file *file);
 static ssize_t npu_debug_reg_read(struct file *file,
 		char __user *user_buf, size_t count, loff_t *ppos);
 static ssize_t npu_debug_off_write(struct file *file,
@@ -42,13 +41,12 @@ static ssize_t npu_debug_ctrl_write(struct file *file,
  * Variables
  * -------------------------------------------------------------------------
  */
-struct npu_device *g_npu_dev;
+static struct npu_device *g_npu_dev;
 
 static const struct file_operations npu_reg_fops = {
 	.open = npu_debug_open,
-	.release = npu_debug_release,
+	.release = npu_debug_reg_release,
 	.read = npu_debug_reg_read,
-	.write = npu_debug_reg_write,
 };
 
 static const struct file_operations npu_off_fops = {
@@ -86,6 +84,11 @@ static int npu_debug_open(struct inode *inode, struct file *file)
 
 static int npu_debug_release(struct inode *inode, struct file *file)
 {
+	return 0;
+}
+
+static int npu_debug_reg_release(struct inode *inode, struct file *file)
+{
 	struct npu_device *npu_dev = file->private_data;
 	struct npu_debugfs_ctx *debugfs;
 
@@ -101,41 +104,6 @@ static int npu_debug_release(struct inode *inode, struct file *file)
  * Function Implementations - Reg Read/Write
  * -------------------------------------------------------------------------
  */
-static ssize_t npu_debug_reg_write(struct file *file,
-		const char __user *user_buf, size_t count, loff_t *ppos)
-{
-	size_t off;
-	uint32_t data, cnt;
-	struct npu_device *npu_dev = file->private_data;
-	char buf[24];
-
-	if (count >= sizeof(buf))
-		return -EINVAL;
-
-	if (copy_from_user(buf, user_buf, count))
-		return -EFAULT;
-
-	buf[count] = 0;	/* end of string */
-
-	cnt = sscanf(buf, "%zx %x", &off, &data);
-	NPU_DBG("%s 0x%zx, 0x%08x\n", buf, off, data);
-
-	return count;
-	if (cnt < 2)
-		return -EINVAL;
-
-	if (npu_enable_core_power(npu_dev))
-		return -EPERM;
-
-	REGW(npu_dev, off, data);
-
-	npu_disable_core_power(npu_dev);
-
-	NPU_DBG("write: addr=%zx data=%x\n", off, data);
-
-	return count;
-}
-
 static ssize_t npu_debug_reg_read(struct file *file,
 			char __user *user_buf, size_t count, loff_t *ppos)
 {
@@ -255,6 +223,7 @@ static ssize_t npu_debug_off_read(struct file *file,
 
 	len = scnprintf(buf, sizeof(buf), "offset=0x%08x cnt=%d\n",
 		debugfs->reg_off, debugfs->reg_cnt);
+	len = min(len, count);
 
 	if (copy_to_user(user_buf, buf, len)) {
 		NPU_ERR("failed to copy to user\n");
@@ -284,49 +253,21 @@ static ssize_t npu_debug_log_read(struct file *file,
 	mutex_lock(&debugfs->log_lock);
 
 	if (debugfs->log_num_bytes_buffered != 0) {
-		if ((debugfs->log_read_index +
-			debugfs->log_num_bytes_buffered) >
-			debugfs->log_buf_size) {
-			/* Wrap around case */
-			uint32_t remaining_to_end = debugfs->log_buf_size -
-				debugfs->log_read_index;
-			uint8_t *src_addr = debugfs->log_buf +
-				debugfs->log_read_index;
-			uint8_t *dst_addr = user_buf;
-
-			if (copy_to_user(dst_addr, src_addr,
-				remaining_to_end)) {
-				NPU_ERR("failed to copy to user\n");
-				mutex_unlock(&debugfs->log_lock);
-				return -EFAULT;
-			}
-			src_addr = debugfs->log_buf;
-			dst_addr = user_buf + remaining_to_end;
-			if (copy_to_user(dst_addr, src_addr,
-				debugfs->log_num_bytes_buffered -
-				remaining_to_end)) {
-				NPU_ERR("failed to copy to user\n");
-				mutex_unlock(&debugfs->log_lock);
-				return -EFAULT;
-			}
-			debugfs->log_read_index =
-				debugfs->log_num_bytes_buffered -
-				remaining_to_end;
-		} else {
-			if (copy_to_user(user_buf, (debugfs->log_buf +
-				debugfs->log_read_index),
-				debugfs->log_num_bytes_buffered)) {
-				NPU_ERR("failed to copy to user\n");
-				mutex_unlock(&debugfs->log_lock);
-				return -EFAULT;
-			}
-			debugfs->log_read_index +=
-				debugfs->log_num_bytes_buffered;
-			if (debugfs->log_read_index == debugfs->log_buf_size)
-				debugfs->log_read_index = 0;
+		len = min(debugfs->log_num_bytes_buffered,
+			debugfs->log_buf_size - debugfs->log_read_index);
+		len = min(count, len);
+		if (copy_to_user(user_buf, (debugfs->log_buf +
+			debugfs->log_read_index), len)) {
+			NPU_ERR("failed to copy to user\n");
+			mutex_unlock(&debugfs->log_lock);
+			return -EFAULT;
 		}
-		len = debugfs->log_num_bytes_buffered;
-		debugfs->log_num_bytes_buffered = 0;
+		debugfs->log_read_index += len;
+		if (debugfs->log_read_index == debugfs->log_buf_size)
+			debugfs->log_read_index = 0;
+
+		debugfs->log_num_bytes_buffered -= len;
+		*ppos += len;
 	}
 
 	/* mutex log unlock */
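
The rewritten read path returns at most one contiguous chunk per call: len = min(count, min(bytes_buffered, bytes to the end of the ring)), after which log_read_index advances (wrapping to 0 at log_buf_size), log_num_bytes_buffered shrinks, and *ppos is bumped. A wrapped buffer therefore drains over two reads: with log_buf_size = 4096, log_read_index = 4000 and 300 bytes buffered, the first read returns 96 bytes and the next returns the remaining 204 from index 0.
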
diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c
index 42f13e9..dedb341 100644
--- a/drivers/media/platform/msm/npu/npu_dev.c
+++ b/drivers/media/platform/msm/npu/npu_dev.c
@@ -30,7 +30,6 @@
 #define DDR_MAPPED_START_ADDR   0x80000000
 #define DDR_MAPPED_SIZE         0x60000000
 
-#define PERF_MODE_DEFAULT 0
 #define MBOX_OP_TIMEOUTMS 1000
 
 /* -------------------------------------------------------------------------
@@ -58,6 +57,12 @@ static ssize_t perf_mode_override_show(struct device *dev,
 static ssize_t perf_mode_override_store(struct device *dev,
 					  struct device_attribute *attr,
 					  const char *buf, size_t count);
+static ssize_t dcvs_mode_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf);
+static ssize_t dcvs_mode_store(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t count);
 static ssize_t boot_store(struct device *dev,
 					  struct device_attribute *attr,
 					  const char *buf, size_t count);
@@ -85,6 +90,11 @@ static int npu_exec_network_v2(struct npu_client *client,
 	unsigned long arg);
 static int npu_receive_event(struct npu_client *client,
 	unsigned long arg);
+static int npu_set_fw_state(struct npu_client *client, uint32_t enable);
+static int npu_set_property(struct npu_client *client,
+	unsigned long arg);
+static int npu_get_property(struct npu_client *client,
+	unsigned long arg);
 static long npu_ioctl(struct file *file, unsigned int cmd,
 					unsigned long arg);
 static unsigned int npu_poll(struct file *filp, struct poll_table_struct *p);
@@ -155,11 +165,13 @@ static DEVICE_ATTR_RO(caps);
 static DEVICE_ATTR_RW(pwr);
 static DEVICE_ATTR_RW(perf_mode_override);
 static DEVICE_ATTR_WO(boot);
+static DEVICE_ATTR_RW(dcvs_mode);
 
 static struct attribute *npu_fs_attrs[] = {
 	&dev_attr_caps.attr,
 	&dev_attr_pwr.attr,
 	&dev_attr_perf_mode_override.attr,
+	&dev_attr_dcvs_mode.attr,
 	&dev_attr_boot.attr,
 	NULL
 };
@@ -278,8 +290,8 @@ static ssize_t perf_mode_override_store(struct device *dev,
 					  struct device_attribute *attr,
 					  const char *buf, size_t count)
 {
+	struct npu_client client;
 	struct npu_device *npu_dev = dev_get_drvdata(dev);
-	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 	uint32_t val;
 	int rc;
 
@@ -290,15 +302,56 @@ static ssize_t perf_mode_override_store(struct device *dev,
 	}
 
 	val = min(val, npu_dev->pwrctrl.num_pwrlevels);
-	mutex_lock(&host_ctx->lock);
-	npu_dev->pwrctrl.perf_mode_override = val;
-	NPU_INFO("setting uc_pwrlevel_override to %d\n", val);
-	npu_set_power_level(npu_dev, true);
-	mutex_unlock(&host_ctx->lock);
+	NPU_INFO("setting perf mode to %d\n", val);
+	client.npu_dev = npu_dev;
+	npu_host_set_perf_mode(&client, 0, val);
 
 	return count;
 }
 
+static ssize_t dcvs_mode_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct npu_device *npu_dev = dev_get_drvdata(dev);
+	struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", pwr->dcvs_mode);
+}
+
+static ssize_t dcvs_mode_store(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t count)
+{
+	struct npu_device *npu_dev = dev_get_drvdata(dev);
+	struct msm_npu_property prop;
+	uint32_t val;
+	int ret = 0;
+
+	ret = kstrtou32(buf, 10, &val);
+	if (ret) {
+		NPU_ERR("Invalid input for dcvs mode setting\n");
+		return -EINVAL;
+	}
+
+	val = min(val, (uint32_t)(npu_dev->pwrctrl.num_pwrlevels - 1));
+	NPU_DBG("sysfs: setting dcvs_mode to %d\n", val);
+
+	prop.prop_id = MSM_NPU_PROP_ID_DCVS_MODE;
+	prop.num_of_params = 1;
+	prop.network_hdl = 0;
+	prop.prop_param[0] = val;
+
+	ret = npu_host_set_fw_property(npu_dev, &prop);
+	if (ret) {
+		NPU_ERR("npu_host_set_fw_property failed %d\n", ret);
+		return ret;
+	}
+
+	npu_dev->pwrctrl.dcvs_mode = val;
+
+	return count;
+}
+
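The dcvs_mode store clamps the written value to num_pwrlevels - 1, forwards it to firmware as a single-parameter MSM_NPU_PROP_ID_DCVS_MODE property with network_hdl 0, and caches it in pwrctrl.dcvs_mode only when the firmware write succeeds, so a failed write leaves the sysfs read-back unchanged. Writing 2, for example, sends prop_param[0] = 2.
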
 /* -------------------------------------------------------------------------
  * SysFS - npu_boot
  * -------------------------------------------------------------------------
@@ -341,20 +394,20 @@ int npu_enable_core_power(struct npu_device *npu_dev)
 	mutex_lock(&npu_dev->dev_lock);
 	NPU_DBG("Enable core power %d\n", pwr->pwr_vote_num);
 	if (!pwr->pwr_vote_num) {
-		ret = npu_enable_regulators(npu_dev);
+		ret = npu_set_bw(npu_dev, 100, 100);
 		if (ret)
 			goto fail;
 
-		ret = npu_set_bw(npu_dev, 100, 100);
+		ret = npu_enable_regulators(npu_dev);
 		if (ret) {
-			npu_disable_regulators(npu_dev);
+			npu_set_bw(npu_dev, 0, 0);
 			goto fail;
 		}
 
 		ret = npu_enable_core_clocks(npu_dev);
 		if (ret) {
-			npu_set_bw(npu_dev, 0, 0);
 			npu_disable_regulators(npu_dev);
+			npu_set_bw(npu_dev, 0, 0);
 			goto fail;
 		}
 		npu_resume_devbw(npu_dev);
@@ -381,11 +434,12 @@ void npu_disable_core_power(struct npu_device *npu_dev)
 	if (!pwr->pwr_vote_num) {
 		npu_suspend_devbw(npu_dev);
 		npu_disable_core_clocks(npu_dev);
-		npu_set_bw(npu_dev, 0, 0);
 		npu_disable_regulators(npu_dev);
+		npu_set_bw(npu_dev, 0, 0);
 		pwr->active_pwrlevel = pwr->default_pwrlevel;
 		pwr->uc_pwrlevel = pwr->max_pwrlevel;
 		pwr->cdsprm_pwrlevel = pwr->max_pwrlevel;
+		pwr->cur_dcvs_activity = pwr->num_pwrlevels;
 		NPU_DBG("setting back to power level=%d\n",
 			pwr->active_pwrlevel);
 	}
@@ -446,14 +500,6 @@ static uint32_t npu_calc_power_level(struct npu_device *npu_dev)
 	uint32_t uc_pwr_level = npu_dev->pwrctrl.uc_pwrlevel;
 
 	/*
-	 * if perf_mode_override is not 0, use it to override
-	 * uc_pwrlevel
-	 */
-	if (npu_dev->pwrctrl.perf_mode_override > 0)
-		uc_pwr_level = npu_power_level_from_index(npu_dev,
-			npu_dev->pwrctrl.perf_mode_override - 1);
-
-	/*
 	 * pick the lowese power level between thermal power and usecase power
 	 * settings
 	 */
@@ -559,11 +605,8 @@ int npu_set_uc_power_level(struct npu_device *npu_dev,
 	struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
 	uint32_t uc_pwrlevel_to_set;
 
-	if (perf_mode == PERF_MODE_DEFAULT)
-		uc_pwrlevel_to_set = pwr->default_pwrlevel;
-	else
-		uc_pwrlevel_to_set = npu_power_level_from_index(npu_dev,
-			perf_mode - 1);
+	uc_pwrlevel_to_set = npu_power_level_from_index(npu_dev,
+		perf_mode - 1);
 
 	if (uc_pwrlevel_to_set > pwr->max_pwrlevel)
 		uc_pwrlevel_to_set = pwr->max_pwrlevel;
@@ -1142,9 +1185,10 @@ static int npu_load_network_v2(struct npu_client *client,
 		return -EFAULT;
 	}
 
-	if (req.patch_info_num > MSM_NPU_MAX_PATCH_LAYER_NUM) {
+	if ((req.patch_info_num > NPU_MAX_PATCH_NUM) ||
+		(req.patch_info_num == 0)) {
 		NPU_ERR("Invalid patch info num %d[max:%d]\n",
-			req.patch_info_num, MSM_NPU_MAX_PATCH_LAYER_NUM);
+			req.patch_info_num, NPU_MAX_PATCH_NUM);
 		return -EINVAL;
 	}
 
@@ -1229,9 +1273,10 @@ static int npu_exec_network_v2(struct npu_client *client,
 		return -EFAULT;
 	}
 
-	if (req.patch_buf_info_num > MSM_NPU_MAX_PATCH_LAYER_NUM) {
+	if ((req.patch_buf_info_num > NPU_MAX_PATCH_NUM) ||
+		(req.patch_buf_info_num == 0)) {
 		NPU_ERR("Invalid patch buf info num %d[max:%d]\n",
-			req.patch_buf_info_num, MSM_NPU_MAX_PATCH_LAYER_NUM);
+			req.patch_buf_info_num, NPU_MAX_PATCH_NUM);
 		return -EINVAL;
 	}
 
@@ -1274,28 +1319,6 @@ static int npu_exec_network_v2(struct npu_client *client,
 	return ret;
 }
 
-static int npu_process_kevent(struct npu_kevent *kevt)
-{
-	int ret = 0;
-
-	switch (kevt->evt.type) {
-	case MSM_NPU_EVENT_TYPE_EXEC_V2_DONE:
-		ret = copy_to_user((void __user *)kevt->reserved[1],
-			(void *)&kevt->reserved[0],
-			kevt->evt.u.exec_v2_done.stats_buf_size);
-		if (ret) {
-			NPU_ERR("fail to copy to user\n");
-			kevt->evt.u.exec_v2_done.stats_buf_size = 0;
-			ret = -EFAULT;
-		}
-		break;
-	default:
-		break;
-	}
-
-	return ret;
-}
-
 static int npu_receive_event(struct npu_client *client,
 	unsigned long arg)
 {
@@ -1311,7 +1334,7 @@ static int npu_receive_event(struct npu_client *client,
 		kevt = list_first_entry(&client->evt_list,
 			struct npu_kevent, list);
 		list_del(&kevt->list);
-		npu_process_kevent(kevt);
+		npu_process_kevent(client, kevt);
 		ret = copy_to_user(argp, &kevt->evt,
 			sizeof(struct msm_npu_event));
 		if (ret) {
@@ -1325,6 +1348,43 @@ static int npu_receive_event(struct npu_client *client,
 	return ret;
 }
 
+static int npu_set_fw_state(struct npu_client *client, uint32_t enable)
+{
+	struct npu_device *npu_dev = client->npu_dev;
+	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+	int rc = 0;
+
+	if (host_ctx->network_num > 0) {
+		NPU_ERR("Need to unload network first\n");
+		mutex_unlock(&npu_dev->dev_lock);
+		return -EINVAL;
+	}
+
+	if (enable) {
+		NPU_DBG("enable fw\n");
+		rc = enable_fw(npu_dev);
+		if (rc) {
+			NPU_ERR("enable fw failed\n");
+		} else {
+			host_ctx->npu_init_cnt++;
+			NPU_DBG("npu_init_cnt %d\n",
+				host_ctx->npu_init_cnt);
+			/* set npu to lowest power level */
+			if (npu_set_uc_power_level(npu_dev, 1))
+				NPU_WARN("Failed to set uc power level\n");
+		}
+	} else if (host_ctx->npu_init_cnt > 0) {
+		NPU_DBG("disable fw\n");
+		disable_fw(npu_dev);
+		host_ctx->npu_init_cnt--;
+		NPU_DBG("npu_init_cnt %d\n", host_ctx->npu_init_cnt);
+	} else {
+		NPU_ERR("can't disable fw %d\n", host_ctx->npu_init_cnt);
+	}
+
+	return rc;
+}
+
 static int npu_set_property(struct npu_client *client,
 	unsigned long arg)
 {
@@ -1339,9 +1399,19 @@ static int npu_set_property(struct npu_client *client,
 	}
 
 	switch (prop.prop_id) {
+	case MSM_NPU_PROP_ID_FW_STATE:
+		ret = npu_set_fw_state(client,
+			(uint32_t)prop.prop_param[0]);
+		break;
+	case MSM_NPU_PROP_ID_PERF_MODE:
+		ret = npu_host_set_perf_mode(client,
+			(uint32_t)prop.network_hdl,
+			(uint32_t)prop.prop_param[0]);
+		break;
 	default:
-		NPU_ERR("Not supported property %d\n", prop.prop_id);
-		ret = -EINVAL;
+		ret = npu_host_set_fw_property(client->npu_dev, &prop);
+		if (ret)
+			NPU_ERR("npu_host_set_fw_property failed\n");
 		break;
 	}
 
@@ -1367,6 +1437,10 @@ static int npu_get_property(struct npu_client *client,
 	case MSM_NPU_PROP_ID_FW_STATE:
 		prop.prop_param[0] = host_ctx->fw_state;
 		break;
+	case MSM_NPU_PROP_ID_PERF_MODE:
+		prop.prop_param[0] = npu_host_get_perf_mode(client,
+			(uint32_t)prop.network_hdl);
+		break;
 	case MSM_NPU_PROP_ID_PERF_MODE_MAX:
 		prop.prop_param[0] = npu_dev->pwrctrl.num_pwrlevels;
 		break;
@@ -1376,14 +1450,33 @@ static int npu_get_property(struct npu_client *client,
 	case MSM_NPU_PROP_ID_HARDWARE_VERSION:
 		prop.prop_param[0] = npu_dev->hw_version;
 		break;
+	case MSM_NPU_PROP_ID_IPC_QUEUE_INFO:
+		ret = npu_host_get_ipc_queue_size(npu_dev,
+			prop.prop_param[0]);
+		if (ret < 0) {
+			NPU_ERR("Can't get ipc queue %d size",
+				prop.prop_param[0]);
+			return ret;
+		}
+
+		prop.prop_param[1] = ret;
+		break;
+	case MSM_NPU_PROP_ID_DRV_FEATURE:
+		prop.prop_param[0] = MSM_NPU_FEATURE_MULTI_EXECUTE |
+			MSM_NPU_FEATURE_ASYNC_EXECUTE;
+		break;
 	default:
-		NPU_ERR("Not supported property %d\n", prop.prop_id);
-		return -EINVAL;
+		ret = npu_host_get_fw_property(client->npu_dev, &prop);
+		if (ret) {
+			NPU_ERR("npu_host_get_fw_property failed\n");
+			return ret;
+		}
+		break;
 	}
 
 	ret = copy_to_user(argp, &prop, sizeof(prop));
 	if (ret) {
-		pr_err("fail to copy to user\n");
+		NPU_ERR("failed to copy to user\n");
 		return -EFAULT;
 	}
 
@@ -1743,6 +1836,7 @@ static int npu_of_parse_pwrlevels(struct npu_device *npu_dev,
 	pwr->uc_pwrlevel = pwr->max_pwrlevel;
 	pwr->perf_mode_override = 0;
 	pwr->cdsprm_pwrlevel = pwr->max_pwrlevel;
+	pwr->cur_dcvs_activity = pwr->num_pwrlevels;
 
 	return 0;
 }
@@ -2113,6 +2207,7 @@ static int npu_probe(struct platform_device *pdev)
 		return -EFAULT;
 
 	npu_dev->pdev = pdev;
+	mutex_init(&npu_dev->dev_lock);
 
 	platform_set_drvdata(pdev, npu_dev);
 	res = platform_get_resource_byname(pdev,
@@ -2173,23 +2268,23 @@ static int npu_probe(struct platform_device *pdev)
 		res->start, npu_dev->cc_io.base);
 
 	res = platform_get_resource_byname(pdev,
-		IORESOURCE_MEM, "qdsp");
+		IORESOURCE_MEM, "tcsr");
 	if (!res) {
-		NPU_ERR("unable to get qdsp resource\n");
+		NPU_ERR("unable to get tcsr resource\n");
 		rc = -ENODEV;
 		goto error_get_dev_num;
 	}
-	npu_dev->qdsp_io.size = resource_size(res);
-	npu_dev->qdsp_io.phy_addr = res->start;
-	npu_dev->qdsp_io.base = devm_ioremap(&pdev->dev, res->start,
-					npu_dev->qdsp_io.size);
-	if (unlikely(!npu_dev->qdsp_io.base)) {
-		NPU_ERR("unable to map qdsp\n");
+	npu_dev->tcsr_io.size = resource_size(res);
+	npu_dev->tcsr_io.phy_addr = res->start;
+	npu_dev->tcsr_io.base = devm_ioremap(&pdev->dev, res->start,
+					npu_dev->tcsr_io.size);
+	if (unlikely(!npu_dev->tcsr_io.base)) {
+		NPU_ERR("unable to map tcsr\n");
 		rc = -ENOMEM;
 		goto error_get_dev_num;
 	}
-	NPU_DBG("qdsp phy address=0x%llx virt=%pK\n",
-		res->start, npu_dev->qdsp_io.base);
+	NPU_DBG("tcsr phy address=0x%llx virt=%pK\n",
+		res->start, npu_dev->tcsr_io.base);
 
 	res = platform_get_resource_byname(pdev,
 		IORESOURCE_MEM, "apss_shared");
@@ -2302,8 +2397,6 @@ static int npu_probe(struct platform_device *pdev)
 	if (rc)
 		goto error_driver_init;
 
-	mutex_init(&npu_dev->dev_lock);
-
 	rc = npu_host_init(npu_dev);
 	if (rc) {
 		NPU_ERR("unable to init host\n");
diff --git a/drivers/media/platform/msm/npu/npu_host_ipc.c b/drivers/media/platform/msm/npu/npu_host_ipc.c
index 82a819d..573f272 100644
--- a/drivers/media/platform/msm/npu/npu_host_ipc.c
+++ b/drivers/media/platform/msm/npu/npu_host_ipc.c
@@ -38,7 +38,7 @@ static const struct npu_queue_tuple npu_q_setup[6] = {
 	{ 4096, IPC_QUEUE_APPS_EXEC         | TX_HDR_TYPE | RX_HDR_TYPE },
 	{ 4096, IPC_QUEUE_DSP_EXEC          | TX_HDR_TYPE | RX_HDR_TYPE },
 	{ 4096, IPC_QUEUE_APPS_RSP          | TX_HDR_TYPE | RX_HDR_TYPE },
-	{ 1024, IPC_QUEUE_DSP_RSP           | TX_HDR_TYPE | RX_HDR_TYPE },
+	{ 4096, IPC_QUEUE_DSP_RSP           | TX_HDR_TYPE | RX_HDR_TYPE },
 	{ 1024, IPC_QUEUE_LOG               | TX_HDR_TYPE | RX_HDR_TYPE },
 };
 
@@ -236,6 +236,13 @@ static int ipc_queue_read(struct npu_device *npu_dev,
 		status = -EPERM;
 		goto exit;
 	}
+
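+	/* a corrupt header must not let a packet overrun the read buffer */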
+	if (packet_size > NPU_IPC_BUF_LENGTH) {
+		NPU_ERR("Invalid packet size %d\n", packet_size);
+		status = -EINVAL;
+		goto exit;
+	}
+
 	new_read_idx = queue.qhdr_read_idx + packet_size;
 
 	if (new_read_idx < (queue.qhdr_q_size)) {
@@ -404,3 +411,13 @@ int npu_host_ipc_post_init(struct npu_device *npu_dev)
 {
 	return 0;
 }
+
+int npu_host_get_ipc_queue_size(struct npu_device *npu_dev, uint32_t q_idx)
+{
+	if (q_idx >= ARRAY_SIZE(npu_q_setup)) {
+		NPU_ERR("Invalid ipc queue index %d\n", q_idx);
+		return -EINVAL;
+	}
+
+	return npu_q_setup[q_idx].size;
+}
diff --git a/drivers/media/platform/msm/npu/npu_host_ipc.h b/drivers/media/platform/msm/npu/npu_host_ipc.h
index 991d769..2a336c2 100644
--- a/drivers/media/platform/msm/npu/npu_host_ipc.h
+++ b/drivers/media/platform/msm/npu/npu_host_ipc.h
@@ -33,6 +33,10 @@
 #define NPU_IPC_CMD_EXECUTE_V2          0x0000000A
 /* npu_ipc_cmd_notify_pwr_packet_t */
 #define NPU_IPC_CMD_NOTIFY_PWR          0x0000000B
+/* ipc_cmd_set_property_packet */
+#define NPU_IPC_CMD_SET_PROPERTY        0x0000000C
+/* ipc_cmd_get_property_packet */
+#define NPU_IPC_CMD_GET_PROPERTY        0x0000000D
 
 /* Messages sent **from** NPU */
 /* IPC Message Response -- uint32_t */
@@ -52,6 +56,15 @@
 #define NPU_IPC_MSG_EXECUTE_V2_DONE     0x00010006
 /* struct ipc_msg_notify_pwr_pkt */
 #define NPU_IPC_MSG_NOTIFY_PWR_DONE     0x00010007
+/* ipc_msg_set_property_packet */
+#define NPU_IPC_MSG_SET_PROPERTY_DONE   0x00010008
+/* ipc_msg_get_property_packet */
+#define NPU_IPC_MSG_GET_PROPERTY_DONE   0x00010009
+/* ipc_msg_general_notify_pkt */
+#define NPU_IPC_MSG_GENERAL_NOTIFY      0x00010010
+
+/* IPC Notify Message Type -- uint32_t */
+#define NPU_NOTIFY_DCVS_MODE            0x00002000
 
 /* Logging message size */
 /* Number 32-bit elements for the maximum log message size */
@@ -100,6 +113,9 @@
 /* Debug stats */
 #define NUM_LAYER_STATS_PER_EXE_MSG_MAX 110
 
+/* DCVS */
+#define NPU_DCVS_ACTIVITY_MAX_PERF 0x100
+
 /* -------------------------------------------------------------------------
  * Data Structures
  * -------------------------------------------------------------------------
@@ -269,6 +285,40 @@ struct ipc_cmd_loopback_pkt {
 };
 
 /*
+ * Generic property definition
+ */
+struct ipc_cmd_prop_pkt {
+	struct ipc_cmd_header_pkt header;
+	uint32_t prop_id;
+	uint32_t num_params;
+	uint32_t network_hdl;
+	uint32_t prop_param[0];
+};
+
+/*
+ * Generic property response packet definition
+ */
+struct ipc_msg_prop_pkt {
+	struct ipc_msg_header_pkt header;
+	uint32_t prop_id;
+	uint32_t num_params;
+	uint32_t network_hdl;
+	uint32_t prop_param[0];
+};
+
+/*
+ * Generic notify message packet definition
+ */
+struct ipc_msg_general_notify_pkt {
+	struct ipc_msg_header_pkt header;
+	uint32_t notify_id;
+	uint32_t num_params;
+	uint32_t network_hdl;
+	uint32_t notify_param[0];
+};
+
+/*
  * LOAD response packet definition
  */
 struct ipc_msg_load_pkt {
diff --git a/drivers/media/platform/msm/npu/npu_hw.h b/drivers/media/platform/msm/npu/npu_hw.h
index 8fc82d1..07178b6 100644
--- a/drivers/media/platform/msm/npu/npu_hw.h
+++ b/drivers/media/platform/msm/npu/npu_hw.h
@@ -72,4 +72,7 @@
 #define NPU_CC_NPU_MASTERn_WDOG_BITE_IRQ_OWNER(n) (0x0006010+4*(n))
 #define NPU_CC_NPU_MASTERn_WDOG_BITE_IRQ_STATUS(n) (0x00009030+0x1000*(n))
 
+#define TCSR_NPU_CPC_PWR_ON (0x0003700C)
+#define NPU_CPC_PWR_ON (1 << 0)
+
 #endif /* NPU_HW_H */
diff --git a/drivers/media/platform/msm/npu/npu_hw_access.c b/drivers/media/platform/msm/npu/npu_hw_access.c
index 20f99eb..cef5a96 100644
--- a/drivers/media/platform/msm/npu/npu_hw_access.c
+++ b/drivers/media/platform/msm/npu/npu_hw_access.c
@@ -20,73 +20,93 @@
  * Functions - Register
  * -------------------------------------------------------------------------
  */
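+/*
+ * Common MMIO accessors: reject a NULL mapping, a non-word-aligned
+ * offset, or an offset beyond the mapped region so a stray offset can
+ * never touch registers outside the intended IO window.
+ */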
+static uint32_t npu_reg_read(void __iomem *base, size_t size, uint32_t off)
+{
+	if (!base) {
+		NPU_ERR("NULL base address\n");
+		return 0;
+	}
+
+	if ((off % 4) != 0) {
+		NPU_ERR("offset %x is not aligned\n", off);
+		return 0;
+	}
+
+	if (off >= size) {
+		NPU_ERR("offset exceeds io region %x:%x\n", off, size);
+		return 0;
+	}
+
+	return readl_relaxed(base + off);
+}
+
+static void npu_reg_write(void __iomem *base, size_t size, uint32_t off,
+	uint32_t val)
+{
+	if (!base) {
+		NPU_ERR("NULL base address\n");
+		return;
+	}
+
+	if ((off % 4) != 0) {
+		NPU_ERR("offset %x is not aligned\n", off);
+		return;
+	}
+
+	if (off >= size) {
+		NPU_ERR("offset exceeds io region %x:%x\n", off, size);
+		return;
+	}
+
+	writel_relaxed(val, base + off);
+	__iowmb();
+}
+
 uint32_t npu_core_reg_read(struct npu_device *npu_dev, uint32_t off)
 {
-	uint32_t ret = 0;
-
-	ret = readl(npu_dev->core_io.base + off);
-	return ret;
+	return npu_reg_read(npu_dev->core_io.base, npu_dev->core_io.size, off);
 }
 
 void npu_core_reg_write(struct npu_device *npu_dev, uint32_t off, uint32_t val)
 {
-	writel_relaxed(val, npu_dev->core_io.base + off);
-	__iowmb();
+	npu_reg_write(npu_dev->core_io.base, npu_dev->core_io.size,
+		off, val);
 }
 
-uint32_t npu_qdsp_reg_read(struct npu_device *npu_dev, uint32_t off)
+uint32_t npu_tcsr_reg_read(struct npu_device *npu_dev, uint32_t off)
 {
-	uint32_t ret = 0;
-
-	ret = readl(npu_dev->qdsp_io.base + off);
-	return ret;
-}
-
-void npu_qdsp_reg_write(struct npu_device *npu_dev, uint32_t off, uint32_t val)
-{
-	writel_relaxed(val, npu_dev->qdsp_io.base + off);
-	__iowmb();
+	return npu_reg_read(npu_dev->tcsr_io.base, npu_dev->tcsr_io.size, off);
 }
 
 uint32_t npu_apss_shared_reg_read(struct npu_device *npu_dev, uint32_t off)
 {
-	uint32_t ret = 0;
-
-	ret = readl(npu_dev->apss_shared_io.base + off);
-	return ret;
+	return npu_reg_read(npu_dev->apss_shared_io.base,
+		npu_dev->apss_shared_io.size, off);
 }
 
 void npu_apss_shared_reg_write(struct npu_device *npu_dev, uint32_t off,
 	uint32_t val)
 {
-	writel_relaxed(val, npu_dev->apss_shared_io.base + off);
-	__iowmb();
+	npu_reg_write(npu_dev->apss_shared_io.base,
+		npu_dev->apss_shared_io.size, off, val);
 }
 
 uint32_t npu_cc_reg_read(struct npu_device *npu_dev, uint32_t off)
 {
-	uint32_t ret = 0;
-
-	ret = readl_relaxed(npu_dev->cc_io.base + off);
-
-	return ret;
+	return npu_reg_read(npu_dev->cc_io.base, npu_dev->cc_io.size, off);
 }
 
 void npu_cc_reg_write(struct npu_device *npu_dev, uint32_t off,
 	uint32_t val)
 {
-	writel_relaxed(val, npu_dev->cc_io.base + off);
-	__iowmb();
+	npu_reg_write(npu_dev->cc_io.base, npu_dev->cc_io.size,
+		off, val);
 }
 
 uint32_t npu_qfprom_reg_read(struct npu_device *npu_dev, uint32_t off)
 {
-	uint32_t ret = 0;
-
-	if (npu_dev->qfprom_io.base)
-		ret = readl(npu_dev->qfprom_io.base + off);
-
-	return ret;
+	return npu_reg_read(npu_dev->qfprom_io.base,
+		npu_dev->qfprom_io.size, off);
 }
 
 /* -------------------------------------------------------------------------
@@ -102,6 +122,13 @@ void npu_mem_write(struct npu_device *npu_dev, void *dst, void *src,
 	uint32_t i = 0;
 	uint32_t num = 0;
 
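+	/* overflow-safe check: offset and length must both fit inside TCM */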
+	if (dst_off >= npu_dev->tcm_io.size ||
+		(npu_dev->tcm_io.size - dst_off) < size) {
+		NPU_ERR("memory write exceeds io region %x:%x:%x\n",
+			dst_off, size, npu_dev->tcm_io.size);
+		return;
+	}
+
 	num = size/4;
 	for (i = 0; i < num; i++) {
 		writel_relaxed(src_ptr32[i], npu_dev->tcm_io.base + dst_off);
@@ -128,6 +155,13 @@ int32_t npu_mem_read(struct npu_device *npu_dev, void *src, void *dst,
 	uint32_t i = 0;
 	uint32_t num = 0;
 
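+	/* overflow-safe check: offset and length must both fit inside TCM */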
+	if (src_off >= npu_dev->tcm_io.size ||
+		(npu_dev->tcm_io.size - src_off) < size) {
+		NPU_ERR("memory read exceeds io region %x:%x:%x\n",
+			src_off, size, npu_dev->tcm_io.size);
+		return 0;
+	}
+
 	num = size/4;
 	for (i = 0; i < num; i++) {
 		out32[i] = readl_relaxed(npu_dev->tcm_io.base + src_off);
diff --git a/drivers/media/platform/msm/npu/npu_hw_access.h b/drivers/media/platform/msm/npu/npu_hw_access.h
index c6e4955..4e988b8 100644
--- a/drivers/media/platform/msm/npu/npu_hw_access.h
+++ b/drivers/media/platform/msm/npu/npu_hw_access.h
@@ -50,8 +50,7 @@ typedef irqreturn_t (*intr_hdlr_fn)(int32_t irq, void *ptr);
  */
 uint32_t npu_core_reg_read(struct npu_device *npu_dev, uint32_t off);
 void npu_core_reg_write(struct npu_device *npu_dev, uint32_t off, uint32_t val);
-uint32_t npu_qdsp_reg_read(struct npu_device *npu_dev, uint32_t off);
-void npu_qdsp_reg_write(struct npu_device *npu_dev, uint32_t off, uint32_t val);
+uint32_t npu_tcsr_reg_read(struct npu_device *npu_dev, uint32_t off);
 uint32_t npu_apss_shared_reg_read(struct npu_device *npu_dev, uint32_t off);
 void npu_apss_shared_reg_write(struct npu_device *npu_dev, uint32_t off,
 	uint32_t val);
diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c
index 7a0ad42..7e2aff9 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.c
+++ b/drivers/media/platform/msm/npu/npu_mgr.c
@@ -25,7 +25,7 @@
 #define LOG_MSG_TOTAL_SIZE_INDEX 0
 #define LOG_MSG_MSG_ID_INDEX     1
 
-#define NPU_FW_TIMEOUT_POLL_INTERVAL_MS  20
+#define NPU_FW_TIMEOUT_POLL_INTERVAL_MS  10
 #define NPU_FW_TIMEOUT_MS                5000
 
 /* -------------------------------------------------------------------------
@@ -39,7 +39,8 @@ static void npu_disable_fw_work(struct work_struct *work);
 static void npu_update_pwr_work(struct work_struct *work);
 static void turn_off_fw_logging(struct npu_device *npu_dev);
 static int wait_for_status_ready(struct npu_device *npu_dev,
-	uint32_t status_reg, uint32_t status_bits);
+	uint32_t status_reg, uint32_t status_bits, bool poll);
+static int wait_npu_cpc_power_off(struct npu_device *npu_dev);
 static struct npu_network *alloc_network(struct npu_host_ctx *ctx,
 	struct npu_client *client);
 static struct npu_network *get_network_by_hdl(struct npu_host_ctx *ctx,
@@ -56,20 +57,67 @@ static void host_session_msg_hdlr(struct npu_device *npu_dev);
 static void host_session_log_hdlr(struct npu_device *npu_dev);
 static int host_error_hdlr(struct npu_device *npu_dev, bool force);
 static int npu_send_network_cmd(struct npu_device *npu_dev,
-	struct npu_network *network, void *cmd_ptr, bool async, bool force);
+	struct npu_network *network, void *cmd_ptr,
+	struct npu_network_cmd *cmd);
 static int npu_send_misc_cmd(struct npu_device *npu_dev, uint32_t q_idx,
-	void *cmd_ptr);
+	void *cmd_ptr, struct npu_misc_cmd *cmd);
 static int npu_queue_event(struct npu_client *client, struct npu_kevent *evt);
 static int npu_notify_aop(struct npu_device *npu_dev, bool on);
 static int npu_notify_fw_pwr_state(struct npu_device *npu_dev,
 	uint32_t pwr_level, bool post);
 static int load_fw_nolock(struct npu_device *npu_dev, bool enable);
 static void disable_fw_nolock(struct npu_device *npu_dev);
+static int update_dcvs_activity(struct npu_device *npu_dev, uint32_t activity);
+static void npu_queue_network_cmd(struct npu_network *network,
+	struct npu_network_cmd *cmd);
+static void npu_dequeue_network_cmd(struct npu_network *network,
+	struct npu_network_cmd *cmd);
+static struct npu_network_cmd *npu_find_network_cmd(struct npu_network *network,
+	uint32_t trans_id);
+static struct npu_network_cmd *npu_alloc_network_cmd(struct npu_host_ctx *ctx,
+	uint32_t stats_buf_size);
+static void npu_free_network_cmd(struct npu_host_ctx *ctx,
+	struct npu_network_cmd *cmd);
+static struct npu_misc_cmd *npu_alloc_misc_cmd(struct npu_host_ctx *ctx);
+static void npu_free_misc_cmd(struct npu_host_ctx *ctx,
+	struct npu_misc_cmd *cmd);
+static void npu_queue_misc_cmd(struct npu_host_ctx *ctx,
+	struct npu_misc_cmd *cmd);
+static void npu_dequeue_misc_cmd(struct npu_host_ctx *ctx,
+	struct npu_misc_cmd *cmd);
+static struct npu_misc_cmd *npu_find_misc_cmd(struct npu_host_ctx *ctx,
+	uint32_t trans_id);
 
 /* -------------------------------------------------------------------------
  * Function Definitions - Init / Deinit
  * -------------------------------------------------------------------------
  */
+
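+/*
+ * Poll the TCSR status register until the NPU CPC power domain reports
+ * off, so shutdown is not declared done while the core is still powered.
+ */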
+static int wait_npu_cpc_power_off(struct npu_device *npu_dev)
+{
+	uint32_t reg_val = NPU_CPC_PWR_ON;
+	uint32_t wait_cnt = 0, max_wait_ms;
+
+	max_wait_ms = NPU_FW_TIMEOUT_MS;
+
+	do {
+		reg_val = npu_tcsr_reg_read(npu_dev, TCSR_NPU_CPC_PWR_ON);
+		if (!(reg_val & NPU_CPC_PWR_ON)) {
+			NPU_DBG("npu cpc powers off\n");
+			break;
+		}
+
+		wait_cnt += NPU_FW_TIMEOUT_POLL_INTERVAL_MS;
+		if (wait_cnt > max_wait_ms) {
+			NPU_ERR("timeout wait for cpc power off\n");
+			return -EPERM;
+		}
+		msleep(NPU_FW_TIMEOUT_POLL_INTERVAL_MS);
+	} while (1);
+
+	return 0;
+}
+
 static int load_fw_nolock(struct npu_device *npu_dev, bool enable)
 {
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
@@ -100,7 +148,7 @@ static int load_fw_nolock(struct npu_device *npu_dev, bool enable)
 
 	/* Keep reading ctrl status until NPU is ready */
 	if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
-		FW_CTRL_STATUS_MAIN_THREAD_READY_VAL)) {
+		FW_CTRL_STATUS_MAIN_THREAD_READY_VAL, false)) {
 		ret = -EPERM;
 		goto load_fw_fail;
 	}
@@ -138,7 +186,7 @@ static int load_fw_nolock(struct npu_device *npu_dev, bool enable)
 		NPU_ERR("Wait for fw shutdown timedout\n");
 		ret = -ETIMEDOUT;
 	} else {
-		ret = 0;
+		ret = wait_npu_cpc_power_off(npu_dev);
 	}
 
 load_fw_fail:
@@ -284,7 +332,7 @@ static int enable_fw_nolock(struct npu_device *npu_dev)
 		goto notify_fw_pwr_fail;
 	}
 
-	ret = wait_for_completion_interruptible_timeout(
+	ret = wait_for_completion_timeout(
 		&host_ctx->fw_bringup_done, NW_CMD_TIMEOUT);
 	if (!ret) {
 		NPU_ERR("Wait for fw bringup timedout\n");
@@ -333,6 +381,7 @@ int enable_fw(struct npu_device *npu_dev)
 static void disable_fw_nolock(struct npu_device *npu_dev)
 {
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+	int ret = 0;
 
 	if (!host_ctx->fw_ref_cnt) {
 		NPU_WARN("fw_ref_cnt is 0\n");
@@ -358,10 +407,14 @@ static void disable_fw_nolock(struct npu_device *npu_dev)
 		msleep(500);
 	}
 
-	if (!host_ctx->auto_pil_disable
-		&& !wait_for_completion_interruptible_timeout(
-		&host_ctx->fw_shutdown_done, NW_CMD_TIMEOUT))
-		NPU_ERR("Wait for fw shutdown timedout\n");
+	if (!host_ctx->auto_pil_disable) {
+		ret = wait_for_completion_timeout(
+			&host_ctx->fw_shutdown_done, NW_CMD_TIMEOUT);
+		if (!ret)
+			NPU_ERR("Wait for fw shutdown timedout\n");
+		else
+			ret = wait_npu_cpc_power_off(npu_dev);
+	}
 
 	npu_disable_irq(npu_dev);
 	npu_disable_sys_cache(npu_dev);
@@ -396,6 +449,7 @@ static int npu_notify_fw_pwr_state(struct npu_device *npu_dev,
 	struct ipc_cmd_notify_pwr_pkt pwr_notify_pkt;
 	int ret = 0;
 	uint32_t reg_val;
+	struct npu_misc_cmd *misc_cmd = NULL;
 
 	/* Clear PWR_NOTIFY bits before sending cmd */
 	reg_val = REGR(npu_dev, REG_NPU_FW_CTRL_STATUS);
@@ -420,14 +474,23 @@ static int npu_notify_fw_pwr_state(struct npu_device *npu_dev,
 	pwr_notify_pkt.notify_type = post ?
 		NPU_POWER_POST_NOTIFY : NPU_POWER_PRE_NOTIFY;
 
+	misc_cmd = npu_alloc_misc_cmd(host_ctx);
+	if (!misc_cmd) {
+		NPU_ERR("Can't allocate misc_cmd\n");
+		return -ENOMEM;
+	}
+
+	misc_cmd->cmd_type = NPU_IPC_CMD_NOTIFY_PWR;
+	misc_cmd->trans_id = pwr_notify_pkt.header.trans_id;
+
 	ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_CMD_HIGH_PRIORITY,
-		&pwr_notify_pkt);
+		&pwr_notify_pkt, misc_cmd);
 
 	if (ret) {
 		NPU_ERR("NPU_IPC_CMD_NOTIFY_PWR sent failed: %d\n", ret);
 	} else {
 		ret = wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
-			FW_CTRL_STATUS_PWR_NOTIFY_DONE_VAL);
+			FW_CTRL_STATUS_PWR_NOTIFY_DONE_VAL, true);
 		if (!ret) {
 			reg_val = REGR(npu_dev, REG_NPU_FW_CTRL_STATUS);
 			if (reg_val & FW_CTRL_STATUS_PWR_NOTIFY_ERR_VAL) {
@@ -435,11 +498,10 @@ static int npu_notify_fw_pwr_state(struct npu_device *npu_dev,
 				ret = -EPERM;
 			}
 		}
-
-		/* allow to send another misc cmd if timedout */
-		host_ctx->misc_cmd_pending = false;
 	}
 
+	npu_free_misc_cmd(host_ctx, misc_cmd);
+
 	return ret;
 }
 
@@ -570,11 +632,10 @@ int npu_host_update_power(struct npu_device *npu_dev)
 
 int npu_host_init(struct npu_device *npu_dev)
 {
-	int sts = 0;
+	int ret = 0;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 
 	memset(host_ctx, 0, sizeof(*host_ctx));
-	init_completion(&host_ctx->misc_cmd_done);
 	init_completion(&host_ctx->fw_deinit_done);
 	init_completion(&host_ctx->fw_bringup_done);
 	init_completion(&host_ctx->fw_shutdown_done);
@@ -589,13 +650,17 @@ int npu_host_init(struct npu_device *npu_dev)
 		&host_ctx->nb);
 	if (IS_ERR(host_ctx->notif_hdle)) {
 		NPU_ERR("register event notification failed\n");
-		sts = PTR_ERR(host_ctx->notif_hdle);
-		return sts;
+		ret = PTR_ERR(host_ctx->notif_hdle);
+		host_ctx->notif_hdle = NULL;
+		goto fail;
 	}
 
-	host_ctx->wq = create_workqueue("npu_irq_hdl");
-	if (!host_ctx->wq) {
-		sts = -EPERM;
+	host_ctx->wq = create_workqueue("npu_general_wq");
+	host_ctx->wq_pri =
+		alloc_workqueue("npu_ipc_wq", WQ_HIGHPRI | WQ_UNBOUND, 0);
+	if (!host_ctx->wq || !host_ctx->wq_pri) {
+		ret = -EPERM;
+		goto fail;
 	} else {
 		INIT_WORK(&host_ctx->ipc_irq_work, npu_ipc_irq_work);
 		INIT_WORK(&host_ctx->wdg_err_irq_work, npu_wdg_err_irq_work);
@@ -606,16 +671,70 @@ int npu_host_init(struct npu_device *npu_dev)
 			npu_disable_fw_work);
 	}
 
+	host_ctx->network_cmd_cache = kmem_cache_create("network_cmd_cache",
+		sizeof(struct npu_network_cmd), 0, 0, NULL);
+	if (!host_ctx->network_cmd_cache) {
+		NPU_ERR("Failed to create network_cmd_cache\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	host_ctx->misc_cmd_cache = kmem_cache_create("misc_cmd_cache",
+		sizeof(struct npu_misc_cmd), 0, 0, NULL);
+	if (!host_ctx->misc_cmd_cache) {
+		NPU_ERR("Failed to create misc_cmd_cache\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
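+	/* stats bufs are copied to userspace; whitelist the whole object */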
+	host_ctx->stats_buf_cache = kmem_cache_create_usercopy(
+		"stats_buf_cache", NPU_MAX_STATS_BUF_SIZE, 0, 0,
+		0, NPU_MAX_STATS_BUF_SIZE, NULL);
+	if (!host_ctx->stats_buf_cache) {
+		NPU_ERR("Failed to create stats_buf_cache\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	host_ctx->ipc_msg_buf = kzalloc(NPU_IPC_BUF_LENGTH, GFP_KERNEL);
+	if (!host_ctx->ipc_msg_buf) {
+		NPU_ERR("Failed to allocate ipc buffer\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	INIT_LIST_HEAD(&host_ctx->misc_cmd_list);
 	host_ctx->auto_pil_disable = false;
 
-	return sts;
+	return 0;
+
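+/* unwind partial init; kfree() and kmem_cache_destroy() accept NULL */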
+fail:
+	kfree(host_ctx->ipc_msg_buf);
+	kmem_cache_destroy(host_ctx->stats_buf_cache);
+	kmem_cache_destroy(host_ctx->network_cmd_cache);
+	kmem_cache_destroy(host_ctx->misc_cmd_cache);
+	if (host_ctx->wq)
+		destroy_workqueue(host_ctx->wq);
+	if (host_ctx->wq_pri)
+		destroy_workqueue(host_ctx->wq_pri);
+	if (host_ctx->notif_hdle)
+		subsys_notif_unregister_notifier(host_ctx->notif_hdle,
+			&host_ctx->nb);
+	mutex_destroy(&host_ctx->lock);
+	return ret;
 }
 
 void npu_host_deinit(struct npu_device *npu_dev)
 {
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 
+	kfree(host_ctx->ipc_msg_buf);
+	kmem_cache_destroy(host_ctx->stats_buf_cache);
+	kmem_cache_destroy(host_ctx->network_cmd_cache);
+	kmem_cache_destroy(host_ctx->misc_cmd_cache);
 	destroy_workqueue(host_ctx->wq);
+	destroy_workqueue(host_ctx->wq_pri);
+	subsys_notif_unregister_notifier(host_ctx->notif_hdle, &host_ctx->nb);
 	mutex_destroy(&host_ctx->lock);
 }
 
@@ -632,7 +751,7 @@ irqreturn_t npu_ipc_intr_hdlr(int irq, void *ptr)
 
 	/* Check that the event thread currently is running */
 	if (host_ctx->wq)
-		queue_work(host_ctx->wq, &host_ctx->ipc_irq_work);
+		queue_work(host_ctx->wq_pri, &host_ctx->ipc_irq_work);
 
 	return IRQ_HANDLED;
 }
@@ -681,7 +800,7 @@ irqreturn_t npu_err_intr_hdlr(int irq, void *ptr)
 	NPU_ERR("err_irq_sts %x\n", host_ctx->err_irq_sts);
 
 	if (host_ctx->wq)
-		queue_work(host_ctx->wq, &host_ctx->wdg_err_irq_work);
+		queue_work(host_ctx->wq_pri, &host_ctx->wdg_err_irq_work);
 
 	return IRQ_HANDLED;
 }
@@ -696,7 +815,7 @@ irqreturn_t npu_wdg_intr_hdlr(int irq, void *ptr)
 	NPU_ERR("wdg_irq_sts %x\n", host_ctx->wdg_irq_sts);
 
 	if (host_ctx->wq)
-		queue_work(host_ctx->wq, &host_ctx->wdg_err_irq_work);
+		queue_work(host_ctx->wq_pri, &host_ctx->wdg_err_irq_work);
 
 	return IRQ_HANDLED;
 }
@@ -710,6 +829,8 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 	struct npu_network *network = NULL;
 	struct npu_kevent kevt;
+	struct npu_network_cmd *cmd;
+	struct npu_misc_cmd *misc_cmd;
 	bool fw_alive = true;
 	int i, ret = 0;
 
@@ -743,10 +864,6 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
 	}
 
 	NPU_INFO("npu subsystem is restarting\n");
-
-	/* clear FW_CTRL_STATUS register before restart */
-	REGW(npu_dev, REG_NPU_FW_CTRL_STATUS, 0x0);
-
 	reinit_completion(&host_ctx->npu_power_up_done);
 	ret = subsystem_restart_dev(host_ctx->subsystem_handle);
 	if (ret) {
@@ -766,7 +883,7 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
 
 	/* Keep reading ctrl status until NPU is ready */
 	if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
-		FW_CTRL_STATUS_MAIN_THREAD_READY_VAL)) {
+		FW_CTRL_STATUS_MAIN_THREAD_READY_VAL, false)) {
 		NPU_ERR("wait for fw status ready timedout\n");
 		ret = -EPERM;
 		goto fw_start_done;
@@ -796,23 +913,37 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
 	/* flush all pending npu cmds */
 	for (i = 0; i < MAX_LOADED_NETWORK; i++) {
 		network = &host_ctx->networks[i];
-		if (network->is_valid && network->cmd_pending &&
-			network->fw_error) {
-			if (network->cmd_async) {
-				NPU_DBG("async cmd, queue ssr event\n");
-				kevt.evt.type = MSM_NPU_EVENT_TYPE_SSR;
-				kevt.evt.u.ssr.network_hdl =
-					network->network_hdl;
-				if (npu_queue_event(network->client, &kevt))
-					NPU_ERR("queue npu event failed\n");
-			} else {
-				NPU_DBG("complete network %llx\n",
-					network->id);
-				complete(&network->cmd_done);
+		if (!network->is_valid || !network->fw_error)
+			continue;
+
+		if (network->is_async) {
+			NPU_DBG("async cmd, queue ssr event\n");
+			kevt.evt.type = MSM_NPU_EVENT_TYPE_SSR;
+			kevt.evt.u.ssr.network_hdl =
+				network->network_hdl;
+			if (npu_queue_event(network->client, &kevt))
+				NPU_ERR("queue npu event failed\n");
+
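+			/* drop queued cmds; the single SSR event covers them */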
+			while (!list_empty(&network->cmd_list)) {
+				cmd = list_first_entry(&network->cmd_list,
+					struct npu_network_cmd, list);
+				npu_dequeue_network_cmd(network, cmd);
+				npu_free_network_cmd(host_ctx, cmd);
+			}
+		} else {
+			list_for_each_entry(cmd, &network->cmd_list, list) {
+				NPU_DBG("complete network %llx trans_id %d\n",
+					network->id, cmd->trans_id);
+				complete(&cmd->cmd_done);
 			}
 		}
 	}
-	complete_all(&host_ctx->misc_cmd_done);
+
+	list_for_each_entry(misc_cmd, &host_ctx->misc_cmd_list, list) {
+		NPU_DBG("complete misc cmd trans_id %d\n",
+			misc_cmd->trans_id);
+		complete(&misc_cmd->cmd_done);
+	}
 	mutex_unlock(&host_ctx->lock);
 
 	return ret;
@@ -926,6 +1057,7 @@ static void turn_off_fw_logging(struct npu_device *npu_dev)
 {
 	struct ipc_cmd_log_state_pkt log_packet;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+	struct npu_misc_cmd *misc_cmd = NULL;
 	int ret = 0;
 
 	mutex_lock(&host_ctx->lock);
@@ -936,21 +1068,30 @@ static void turn_off_fw_logging(struct npu_device *npu_dev)
 	log_packet.header.flags = 0xF;
 	log_packet.log_state.module_msk = 0;
 	log_packet.log_state.level_msk = 0;
+
+	misc_cmd = npu_alloc_misc_cmd(host_ctx);
+	if (!misc_cmd) {
+		NPU_ERR("Can't allocate misc_cmd\n");
+		return;
+	}
+
+	misc_cmd->cmd_type = NPU_IPC_CMD_CONFIG_LOG;
+	misc_cmd->trans_id = log_packet.header.trans_id;
+
 	ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_CMD_HIGH_PRIORITY,
-		&log_packet);
+		&log_packet, misc_cmd);
 
 	NPU_DBG("NPU_IPC_CMD_CONFIG_LOG sent status: %d\n", ret);
 
 	if (ret)
 		NPU_ERR("npu_host_ipc_send_cmd failed\n");
-	else
-		host_ctx->misc_cmd_pending = false;
 
+	npu_free_misc_cmd(host_ctx, misc_cmd);
 	mutex_unlock(&host_ctx->lock);
 }
 
 static int wait_for_status_ready(struct npu_device *npu_dev,
-	uint32_t status_reg, uint32_t status_bits)
+	uint32_t status_reg, uint32_t status_bits, bool poll)
 {
 	uint32_t ctrl_sts = 0;
 	uint32_t wait_cnt = 0, max_wait_ms;
@@ -958,19 +1099,34 @@ static int wait_for_status_ready(struct npu_device *npu_dev,
 
 	max_wait_ms = (host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
 		NW_DEBUG_TIMEOUT_MS : NPU_FW_TIMEOUT_MS;
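+	/*
+	 * poll mode busy-waits in 100us steps for low-latency handshakes;
+	 * otherwise sleep between reads of the status register.
+	 */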
+	if (poll)
+		wait_cnt = max_wait_ms * 10;
+	else
+		wait_cnt = max_wait_ms / NPU_FW_TIMEOUT_POLL_INTERVAL_MS;
 
 	/* keep reading status register until bits are set */
-	while ((ctrl_sts & status_bits) != status_bits) {
+	do {
 		ctrl_sts = REGR(npu_dev, status_reg);
-		msleep(NPU_FW_TIMEOUT_POLL_INTERVAL_MS);
-		wait_cnt += NPU_FW_TIMEOUT_POLL_INTERVAL_MS;
-		if (wait_cnt >= max_wait_ms) {
+		if ((ctrl_sts & status_bits) == status_bits) {
+			NPU_DBG("status %x[reg %x] ready received\n",
+				status_bits, status_reg);
+			break;
+		}
+
+		if (!wait_cnt) {
 			NPU_ERR("timeout wait for status %x[%x] in reg %x\n",
 				status_bits, ctrl_sts, status_reg);
 			return -EPERM;
 		}
-	}
-	NPU_DBG("status %x[reg %x] ready received\n", status_bits, status_reg);
+
+		if (poll)
+			udelay(100);
+		else
+			msleep(NPU_FW_TIMEOUT_POLL_INTERVAL_MS);
+
+		wait_cnt--;
+	} while (1);
+
 	return 0;
 }
 
@@ -1046,15 +1202,9 @@ static struct npu_network *alloc_network(struct npu_host_ctx *ctx,
 
 	memset(network, 0, sizeof(struct npu_network));
 	network->id = i + 1;
-	init_completion(&network->cmd_done);
 	network->is_valid = true;
 	network->client = client;
-	network->stats_buf = kzalloc(NPU_MAX_STATS_BUF_SIZE,
-		GFP_KERNEL);
-	if (!network->stats_buf) {
-		memset(network, 0, sizeof(struct npu_network));
-		return NULL;
-	}
+	INIT_LIST_HEAD(&network->cmd_list);
 
 	ctx->network_num++;
 	return network;
@@ -1117,14 +1267,23 @@ static void free_network(struct npu_host_ctx *ctx, struct npu_client *client,
 	int64_t id)
 {
 	struct npu_network *network = NULL;
+	struct npu_network_cmd *cmd;
 
 	WARN_ON(!mutex_is_locked(&ctx->lock));
 
 	network = get_network_by_id(ctx, client, id);
 	if (network) {
 		network_put(network);
+		while (!list_empty(&network->cmd_list)) {
+			cmd = list_first_entry(&network->cmd_list,
+				struct npu_network_cmd, list);
+			NPU_WARN("Free cmd %x type %x\n", cmd->cmd_id,
+				cmd->cmd_type);
+			npu_dequeue_network_cmd(network, cmd);
+			npu_free_network_cmd(ctx, cmd);
+		}
+
 		if (atomic_read(&network->ref_cnt) == 0) {
-			kfree(network->stats_buf);
 			memset(network, 0, sizeof(struct npu_network));
 			ctx->network_num--;
 		} else {
@@ -1138,6 +1297,42 @@ static void free_network(struct npu_host_ctx *ctx, struct npu_client *client,
  * Function Definitions - IPC
  * -------------------------------------------------------------------------
  */
+static struct npu_network_cmd *npu_alloc_network_cmd(struct npu_host_ctx *ctx,
+	uint32_t stats_buf_size)
+{
+	struct npu_network_cmd *cmd = NULL;
+
+	cmd = kmem_cache_zalloc(ctx->network_cmd_cache, GFP_KERNEL);
+	if (!cmd) {
+		NPU_ERR("Can't allocate network cmd\n");
+		return NULL;
+	}
+
+	init_completion(&cmd->cmd_done);
+
+	if (stats_buf_size == 0)
+		return cmd;
+
+	cmd->stats_buf = kmem_cache_zalloc(ctx->stats_buf_cache,
+		GFP_KERNEL);
+	if (!cmd->stats_buf) {
+		NPU_ERR("Can't allocate stats buf\n");
+		kmem_cache_free(ctx->network_cmd_cache, cmd);
+		return NULL;
+	}
+	cmd->stats_buf_size = stats_buf_size;
+
+	return cmd;
+}
+
+static void npu_free_network_cmd(struct npu_host_ctx *ctx,
+	struct npu_network_cmd *cmd)
+{
+	if (cmd->stats_buf)
+		kmem_cache_free(ctx->stats_buf_cache, cmd->stats_buf);
+	kmem_cache_free(ctx->network_cmd_cache, cmd);
+}
+
 static int npu_queue_event(struct npu_client *client, struct npu_kevent *evt)
 {
 	struct npu_kevent *kevt = kmalloc(sizeof(*kevt), GFP_KERNEL);
@@ -1155,11 +1350,147 @@ static int npu_queue_event(struct npu_client *client, struct npu_kevent *evt)
 	return 0;
 }
 
+static void npu_queue_network_cmd(struct npu_network *network,
+	struct npu_network_cmd *cmd)
+{
+	INIT_LIST_HEAD(&cmd->list);
+	list_add_tail(&cmd->list, &network->cmd_list);
+}
+
+static void npu_dequeue_network_cmd(struct npu_network *network,
+	struct npu_network_cmd *cmd)
+{
+	list_del(&cmd->list);
+}
+
+static struct npu_network_cmd *npu_find_network_cmd(struct npu_network *network,
+	uint32_t trans_id)
+{
+	struct npu_network_cmd *cmd;
+
+	list_for_each_entry(cmd, &network->cmd_list, list) {
+		if (cmd->trans_id == trans_id) {
+			NPU_DBG("find cmd for trans_id %d\n", trans_id);
+			return cmd;
+		}
+	}
+
+	NPU_ERR("can't find cmd for trans_id %d\n", trans_id);
+	return NULL;
+}
+
+static struct npu_misc_cmd *npu_alloc_misc_cmd(struct npu_host_ctx *ctx)
+{
+	struct npu_misc_cmd *cmd = NULL;
+
+	cmd = kmem_cache_zalloc(ctx->misc_cmd_cache, GFP_KERNEL);
+	if (!cmd) {
+		NPU_ERR("Can't allocate misc cmd\n");
+		return NULL;
+	}
+
+	init_completion(&cmd->cmd_done);
+
+	return cmd;
+}
+
+static void npu_free_misc_cmd(struct npu_host_ctx *ctx,
+	struct npu_misc_cmd *cmd)
+{
+	kmem_cache_free(ctx->misc_cmd_cache, cmd);
+}
+
+static void npu_queue_misc_cmd(struct npu_host_ctx *ctx,
+	struct npu_misc_cmd *cmd)
+{
+	INIT_LIST_HEAD(&cmd->list);
+	list_add_tail(&cmd->list, &ctx->misc_cmd_list);
+}
+
+static void npu_dequeue_misc_cmd(struct npu_host_ctx *ctx,
+	struct npu_misc_cmd *cmd)
+{
+	list_del(&cmd->list);
+}
+
+static struct npu_misc_cmd *npu_find_misc_cmd(struct npu_host_ctx *ctx,
+	uint32_t trans_id)
+{
+	struct npu_misc_cmd *cmd;
+
+	list_for_each_entry(cmd, &ctx->misc_cmd_list, list) {
+		if (cmd->trans_id == trans_id) {
+			NPU_DBG("find misc cmd for trans_id %d\n", trans_id);
+			return cmd;
+		}
+	}
+
+	NPU_ERR("can't find misc cmd for trans_id %d\n", trans_id);
+	return NULL;
+}
+
+int npu_process_kevent(struct npu_client *client, struct npu_kevent *kevt)
+{
+	struct npu_device *npu_dev = client->npu_dev;
+	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+	int ret = 0;
+
+	mutex_lock(&host_ctx->lock);
+
+	switch (kevt->evt.type) {
+	case MSM_NPU_EVENT_TYPE_EXEC_V2_DONE:
+	{
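+		/*
+		 * async exec done: copy the saved stats to the user buffer
+		 * at event-delivery time, then free the command.
+		 */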
+		struct npu_network_cmd *cmd = NULL;
+		struct npu_network *network;
+
+		network = get_network_by_hdl(host_ctx,
+			client, kevt->reserved[0]);
+		if (!network) {
+			NPU_ERR("Can't find network %x\n", kevt->reserved[0]);
+			ret = -EINVAL;
+			break;
+		}
+
+		cmd = npu_find_network_cmd(network, kevt->reserved[1]);
+		if (!cmd) {
+			NPU_ERR("can't find exec cmd with trans_id:%d\n",
+				kevt->reserved[1]);
+			network_put(network);
+			ret = -EINVAL;
+			break;
+		}
+
+		kevt->evt.reserved[0] = cmd->cmd_id;
+		ret = copy_to_user((void __user *)cmd->stats_buf_u,
+			(void *)cmd->stats_buf,
+			kevt->evt.u.exec_v2_done.stats_buf_size);
+		if (ret) {
+			NPU_ERR("fail to copy to user\n");
+			kevt->evt.u.exec_v2_done.stats_buf_size = 0;
+			ret = -EFAULT;
+		}
+
+		npu_dequeue_network_cmd(network, cmd);
+		npu_free_network_cmd(host_ctx, cmd);
+		network_put(network);
+		break;
+	}
+	default:
+		break;
+	}
+	mutex_unlock(&host_ctx->lock);
+
+	return ret;
+}
+
 static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
 {
 	uint32_t msg_id;
 	struct npu_network *network = NULL;
 	struct npu_kevent kevt;
+	struct npu_device *npu_dev = host_ctx->npu_dev;
+	struct npu_network_cmd *network_cmd = NULL;
+	struct npu_misc_cmd *misc_cmd = NULL;
 
 	msg_id = msg[1];
 	switch (msg_id) {
@@ -1180,19 +1511,19 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
 			break;
 		}
 
-		if (network->trans_id != exe_rsp_pkt->header.trans_id) {
-			NPU_ERR("execute_pkt trans_id is not match %d:%d\n",
-				network->trans_id,
+		network_cmd = npu_find_network_cmd(network,
+			exe_rsp_pkt->header.trans_id);
+		if (!network_cmd) {
+			NPU_ERR("can't find exec cmd with trans_id:%d\n",
 				exe_rsp_pkt->header.trans_id);
 			network_put(network);
 			break;
 		}
 
-		network->cmd_pending = false;
-		network->cmd_ret_status = exe_rsp_pkt->header.status;
+		network_cmd->ret_status = exe_rsp_pkt->header.status;
 
-		if (!network->cmd_async) {
-			complete(&network->cmd_done);
+		if (!network_cmd->async) {
+			complete(&network_cmd->cmd_done);
 		} else {
 			NPU_DBG("async cmd, queue event\n");
 			kevt.evt.type = MSM_NPU_EVENT_TYPE_EXEC_DONE;
@@ -1225,10 +1556,12 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
 			break;
 		}
 
-		if (network->trans_id != exe_rsp_pkt->header.trans_id) {
-			NPU_ERR("execute_pkt_v2 trans_id is not match %d:%d\n",
-				network->trans_id,
-				exe_rsp_pkt->header.trans_id);
+		network_cmd = npu_find_network_cmd(network,
+			exe_rsp_pkt->header.trans_id);
+		if (!network_cmd) {
+			NPU_ERR("can't find exec cmd with trans_id:%d:%d\n",
+				exe_rsp_pkt->header.trans_id,
+				exe_rsp_pkt->network_hdl);
 			network_put(network);
 			break;
 		}
@@ -1237,17 +1570,16 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
 		stats_size = exe_rsp_pkt->header.size - sizeof(*exe_rsp_pkt);
 		NPU_DBG("stats_size %d:%d\n", exe_rsp_pkt->header.size,
 			stats_size);
-		stats_size = stats_size < network->stats_buf_size ?
-			stats_size : network->stats_buf_size;
+		stats_size = stats_size < network_cmd->stats_buf_size ?
+			stats_size : network_cmd->stats_buf_size;
 		if (stats_size)
-			memcpy(network->stats_buf, exe_rsp_pkt->stats_data,
+			memcpy(network_cmd->stats_buf, exe_rsp_pkt->stats_data,
 				stats_size);
 
-		network->stats_buf_size = stats_size;
-		network->cmd_pending = false;
-		network->cmd_ret_status = exe_rsp_pkt->header.status;
+		network_cmd->stats_buf_size = stats_size;
+		network_cmd->ret_status = exe_rsp_pkt->header.status;
 
-		if (network->cmd_async) {
+		if (network_cmd->async) {
 			NPU_DBG("async cmd, queue event\n");
 			kevt.evt.type = MSM_NPU_EVENT_TYPE_EXEC_V2_DONE;
 			kevt.evt.u.exec_v2_done.network_hdl =
@@ -1255,12 +1587,12 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
 			kevt.evt.u.exec_v2_done.exec_result =
 				exe_rsp_pkt->header.status;
 			kevt.evt.u.exec_v2_done.stats_buf_size = stats_size;
-			kevt.reserved[0] = (uint64_t)network->stats_buf;
-			kevt.reserved[1] = (uint64_t)network->stats_buf_u;
+			kevt.reserved[0] = (uint64_t)network->network_hdl;
+			kevt.reserved[1] = (uint64_t)network_cmd->trans_id;
 			if (npu_queue_event(network->client, &kevt))
 				NPU_ERR("queue npu event failed\n");
 		} else {
-			complete(&network->cmd_done);
+			complete(&network_cmd->cmd_done);
 		}
 		network_put(network);
 		break;
@@ -1287,19 +1619,19 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
 			break;
 		}
 
-		if (network->trans_id != load_rsp_pkt->header.trans_id) {
-			NPU_ERR("load_rsp_pkt trans_id is not match %d:%d\n",
-				network->trans_id,
+		network_cmd = npu_find_network_cmd(network,
+			load_rsp_pkt->header.trans_id);
+		if (!network_cmd) {
+			NPU_ERR("can't find load cmd with trans_id:%d\n",
 				load_rsp_pkt->header.trans_id);
 			network_put(network);
 			break;
 		}
 
 		network->network_hdl = load_rsp_pkt->network_hdl;
-		network->cmd_pending = false;
-		network->cmd_ret_status = load_rsp_pkt->header.status;
+		network_cmd->ret_status = load_rsp_pkt->header.status;
 
-		complete(&network->cmd_done);
+		complete(&network_cmd->cmd_done);
 		network_put(network);
 		break;
 	}
@@ -1320,18 +1652,18 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
 			break;
 		}
 
-		if (network->trans_id != unload_rsp_pkt->header.trans_id) {
-			NPU_ERR("unload_rsp_pkt trans_id is not match %d:%d\n",
-				network->trans_id,
+		network_cmd = npu_find_network_cmd(network,
+			unload_rsp_pkt->header.trans_id);
+		if (!network_cmd) {
+			NPU_ERR("can't find unload cmd with trans_id:%d\n",
 				unload_rsp_pkt->header.trans_id);
 			network_put(network);
 			break;
 		}
 
-		network->cmd_pending = false;
-		network->cmd_ret_status = unload_rsp_pkt->header.status;
+		network_cmd->ret_status = unload_rsp_pkt->header.status;
 
-		complete(&network->cmd_done);
+		complete(&network_cmd->cmd_done);
 		network_put(network);
 		break;
 	}
@@ -1342,9 +1674,101 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
 
 		NPU_DBG("NPU_IPC_MSG_LOOPBACK_DONE loopbackParams: 0x%x\n",
 			lb_rsp_pkt->loopbackParams);
-		host_ctx->misc_cmd_result = lb_rsp_pkt->header.status;
-		host_ctx->misc_cmd_pending = false;
-		complete_all(&host_ctx->misc_cmd_done);
+
+		misc_cmd = npu_find_misc_cmd(host_ctx,
+			lb_rsp_pkt->header.trans_id);
+		if (!misc_cmd) {
+			NPU_ERR("can't find loopback cmd with trans_id:%d\n",
+				lb_rsp_pkt->header.trans_id);
+			break;
+		}
+
+		misc_cmd->ret_status = lb_rsp_pkt->header.status;
+		complete_all(&misc_cmd->cmd_done);
+		break;
+	}
+	case NPU_IPC_MSG_SET_PROPERTY_DONE:
+	{
+		struct ipc_msg_prop_pkt *prop_rsp_pkt =
+			(struct ipc_msg_prop_pkt *)msg;
+		uint32_t *param = (uint32_t *)((uint8_t *)prop_rsp_pkt +
+			sizeof(struct ipc_msg_prop_pkt));
+		NPU_DBG("NPU_IPC_MSG_SET_PROPERTY_DONE %d:0x%x:%d\n",
+			prop_rsp_pkt->network_hdl,
+			prop_rsp_pkt->prop_id,
+			param[0]);
+
+		misc_cmd = npu_find_misc_cmd(host_ctx,
+			prop_rsp_pkt->header.trans_id);
+		if (!misc_cmd) {
+			NPU_ERR("can't find set_prop cmd with trans_id:%d\n",
+				prop_rsp_pkt->header.trans_id);
+			break;
+		}
+
+		misc_cmd->ret_status = prop_rsp_pkt->header.status;
+		complete(&misc_cmd->cmd_done);
+		break;
+	}
+	case NPU_IPC_MSG_GET_PROPERTY_DONE:
+	{
+		struct ipc_msg_prop_pkt *prop_rsp_pkt =
+			(struct ipc_msg_prop_pkt *)msg;
+		uint32_t prop_size = 0;
+		uint32_t *prop_data = (uint32_t *)((uint8_t *)prop_rsp_pkt +
+			sizeof(struct ipc_msg_header_pkt));
+
+		NPU_DBG("NPU_IPC_MSG_GET_PROPERTY_DONE %d:0x%x:%d:%d\n",
+			prop_rsp_pkt->network_hdl,
+			prop_rsp_pkt->prop_id,
+			prop_rsp_pkt->num_params,
+			prop_rsp_pkt->prop_param[0]);
+
+		misc_cmd = npu_find_misc_cmd(host_ctx,
+			prop_rsp_pkt->header.trans_id);
+		if (!misc_cmd) {
+			NPU_ERR("can't find get_prop cmd with trans_id:%d\n",
+				prop_rsp_pkt->header.trans_id);
+			break;
+		}
+
+		misc_cmd->ret_status = prop_rsp_pkt->header.status;
+
+		if (prop_rsp_pkt->num_params > 0) {
+			/* Copy prop data to kernel buffer */
+			prop_size = prop_rsp_pkt->header.size -
+				sizeof(struct ipc_msg_header_pkt);
+			if (prop_size > sizeof(struct msm_npu_property)) {
+				NPU_WARN("Invalid prop size %d\n", prop_size);
+				prop_size = sizeof(struct msm_npu_property);
+			}
+			memcpy(&misc_cmd->u.prop, prop_data, prop_size);
+		}
+
+		complete_all(&misc_cmd->cmd_done);
+		break;
+	}
+	case NPU_IPC_MSG_GENERAL_NOTIFY:
+	{
+		struct ipc_msg_general_notify_pkt *notify_msg_pkt =
+			(struct ipc_msg_general_notify_pkt *)msg;
+
+		NPU_DBG("NPU_IPC_MSG_GENERAL_NOTIFY %d:0x%x:%d\n",
+			notify_msg_pkt->network_hdl,
+			notify_msg_pkt->notify_id,
+			notify_msg_pkt->notify_param[0]);
+
+		switch (notify_msg_pkt->notify_id) {
+		case NPU_NOTIFY_DCVS_MODE:
+			NPU_DBG("NPU_IPC_MSG_GENERAL_NOTIFY DCVS_MODE %d\n",
+				notify_msg_pkt->notify_param[0]);
+			update_dcvs_activity(npu_dev,
+				notify_msg_pkt->notify_param[0]);
+			break;
+		default:
+			NPU_ERR("Nothing to do\n");
+			break;
+		}
 		break;
 	}
 	default:
@@ -1356,27 +1780,22 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
 
 static void host_session_msg_hdlr(struct npu_device *npu_dev)
 {
-	uint32_t *msg;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 
-	msg = kzalloc(sizeof(uint32_t) * NPU_IPC_BUF_LENGTH, GFP_KERNEL);
-	if (!msg)
-		return;
-
 	mutex_lock(&host_ctx->lock);
 	if (host_ctx->fw_state != FW_ENABLED) {
 		NPU_WARN("handle npu session msg when FW is disabled\n");
 		goto skip_read_msg;
 	}
 
-	while (npu_host_ipc_read_msg(npu_dev, IPC_QUEUE_APPS_RSP, msg) == 0) {
+	while (npu_host_ipc_read_msg(npu_dev, IPC_QUEUE_APPS_RSP,
+		host_ctx->ipc_msg_buf) == 0) {
 		NPU_DBG("received from msg queue\n");
-		app_msg_proc(host_ctx, msg);
+		app_msg_proc(host_ctx, host_ctx->ipc_msg_buf);
 	}
 
 skip_read_msg:
 	mutex_unlock(&host_ctx->lock);
-	kfree(msg);
 }
 
 static void log_msg_proc(struct npu_device *npu_dev, uint32_t *msg)
@@ -1402,28 +1821,22 @@ static void log_msg_proc(struct npu_device *npu_dev, uint32_t *msg)
 
 static void host_session_log_hdlr(struct npu_device *npu_dev)
 {
-	uint32_t *msg;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 
-	msg = kzalloc(sizeof(uint32_t) * NPU_IPC_BUF_LENGTH, GFP_KERNEL);
-
-	if (!msg)
-		return;
-
 	mutex_lock(&host_ctx->lock);
 	if (host_ctx->fw_state != FW_ENABLED) {
 		NPU_WARN("handle npu session msg when FW is disabled\n");
 		goto skip_read_msg;
 	}
 
-	while (npu_host_ipc_read_msg(npu_dev, IPC_QUEUE_LOG, msg) == 0) {
+	while (npu_host_ipc_read_msg(npu_dev, IPC_QUEUE_LOG,
+		host_ctx->ipc_msg_buf) == 0) {
 		NPU_DBG("received from log queue\n");
-		log_msg_proc(npu_dev, msg);
+		log_msg_proc(npu_dev, host_ctx->ipc_msg_buf);
 	}
 
 skip_read_msg:
 	mutex_unlock(&host_ctx->lock);
-	kfree(msg);
 }
 
 /* -------------------------------------------------------------------------
@@ -1464,7 +1877,7 @@ int32_t npu_host_unmap_buf(struct npu_client *client,
 	 * fw is disabled
 	 */
 	if (host_ctx->fw_error && (host_ctx->fw_state == FW_ENABLED) &&
-		!wait_for_completion_interruptible_timeout(
+		!wait_for_completion_timeout(
 		&host_ctx->fw_deinit_done, NW_CMD_TIMEOUT))
 		NPU_WARN("npu: wait for fw_deinit_done time out\n");
 
@@ -1477,7 +1890,8 @@ int32_t npu_host_unmap_buf(struct npu_client *client,
 }
 
 static int npu_send_network_cmd(struct npu_device *npu_dev,
-	struct npu_network *network, void *cmd_ptr, bool async, bool force)
+	struct npu_network *network, void *cmd_ptr,
+	struct npu_network_cmd *cmd)
 {
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 	int ret = 0;
@@ -1488,28 +1902,22 @@ static int npu_send_network_cmd(struct npu_device *npu_dev,
 		(host_ctx->fw_state != FW_ENABLED)) {
 		NPU_ERR("fw is in error state or disabled\n");
 		ret = -EIO;
-	} else if (network->cmd_pending && !force) {
-		NPU_ERR("Another cmd is pending\n");
-		ret = -EBUSY;
 	} else {
-		network->cmd_async = async;
-		network->cmd_ret_status = 0;
-		network->cmd_pending = true;
-		network->trans_id = atomic_read(&host_ctx->ipc_trans_id);
+		if (cmd)
+			reinit_completion(&cmd->cmd_done);
 		NPU_DBG("Send cmd %d network id %llx trans id %d\n",
 			((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type,
-			network->id, network->trans_id);
+			network->id,
+			((struct ipc_cmd_header_pkt *)cmd_ptr)->trans_id);
 		ret = npu_host_ipc_send_cmd(npu_dev,
 			IPC_QUEUE_APPS_EXEC, cmd_ptr);
-		if (ret)
-			network->cmd_pending = false;
 	}
 
 	return ret;
 }
 
 static int npu_send_misc_cmd(struct npu_device *npu_dev, uint32_t q_idx,
-	void *cmd_ptr)
+	void *cmd_ptr, struct npu_misc_cmd *cmd)
 {
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 	int ret = 0;
@@ -1519,17 +1927,10 @@ static int npu_send_misc_cmd(struct npu_device *npu_dev, uint32_t q_idx,
 	if (host_ctx->fw_error || (host_ctx->fw_state != FW_ENABLED)) {
 		NPU_ERR("fw is in error state or disabled\n");
 		ret = -EIO;
-	} else if (host_ctx->misc_cmd_pending) {
-		NPU_ERR("Another misc cmd is pending\n");
-		ret = -EBUSY;
 	} else {
-		NPU_DBG("Send cmd %d\n",
-			((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type);
-		host_ctx->misc_cmd_pending = true;
-		reinit_completion(&host_ctx->misc_cmd_done);
+		NPU_DBG("Send cmd %d\n", cmd->cmd_type);
+		reinit_completion(&cmd->cmd_done);
 		ret = npu_host_ipc_send_cmd(npu_dev, q_idx, cmd_ptr);
-		if (ret)
-			host_ctx->misc_cmd_pending = false;
 	}
 
 	return ret;
@@ -1567,10 +1968,11 @@ static uint32_t find_networks_perf_mode(struct npu_host_ctx *host_ctx)
 		max_perf_mode = 1;
 	} else {
 		/* find the max level among all the networks */
-		for (i = 0; i < host_ctx->network_num; i++) {
-			if ((network->perf_mode != 0) &&
-				(network->perf_mode > max_perf_mode))
-				max_perf_mode = network->perf_mode;
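+		/*
+		 * walk every slot, not just network_num entries: unloads
+		 * can leave holes in the networks array.
+		 */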
+		for (i = 0; i < MAX_LOADED_NETWORK; i++) {
+			if ((network->id != 0) &&
+				(network->cur_perf_mode != 0) &&
+				(network->cur_perf_mode > max_perf_mode))
+				max_perf_mode = network->cur_perf_mode;
 			network++;
 		}
 	}
@@ -1587,6 +1989,12 @@ static int set_perf_mode(struct npu_device *npu_dev)
 
 	networks_perf_mode = find_networks_perf_mode(host_ctx);
 
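+	/* an explicit override wins; fw dcvs activity then caps the level */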
+	if (npu_dev->pwrctrl.perf_mode_override)
+		networks_perf_mode = npu_dev->pwrctrl.perf_mode_override;
+
+	if (npu_dev->pwrctrl.cur_dcvs_activity != NPU_DCVS_ACTIVITY_MAX_PERF)
+		networks_perf_mode = min_t(uint32_t, networks_perf_mode,
+			npu_dev->pwrctrl.cur_dcvs_activity);
 	ret = npu_set_uc_power_level(npu_dev, networks_perf_mode);
 	if (ret)
 		NPU_ERR("set uc power level %d failed\n", networks_perf_mode);
@@ -1594,16 +2002,223 @@ static int set_perf_mode(struct npu_device *npu_dev)
 	return ret;
 }
 
+static int update_dcvs_activity(struct npu_device *npu_dev, uint32_t activity)
+{
+	npu_dev->pwrctrl.cur_dcvs_activity = activity;
+	NPU_DBG("update dcvs activity to %d\n", activity);
+
+	return set_perf_mode(npu_dev);
+}
+
+int32_t npu_host_set_fw_property(struct npu_device *npu_dev,
+	struct msm_npu_property *property)
+{
+	int ret = 0, i;
+	uint32_t prop_param, prop_id;
+	struct ipc_cmd_prop_pkt *prop_packet = NULL;
+	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+	uint32_t num_of_params, pkt_size;
+	struct npu_misc_cmd *misc_cmd = NULL;
+
+	prop_id = property->prop_id;
+	num_of_params = min_t(uint32_t, property->num_of_params,
+		(uint32_t)PROP_PARAM_MAX_SIZE);
+	pkt_size = sizeof(*prop_packet) + num_of_params * sizeof(uint32_t);
+	prop_packet = kzalloc(pkt_size, GFP_KERNEL);
+
+	if (!prop_packet)
+		return -ENOMEM;
+
+	switch (prop_id) {
+	case MSM_NPU_PROP_ID_DCVS_MODE:
+		prop_param = min_t(uint32_t, property->prop_param[0],
+			(uint32_t)(npu_dev->pwrctrl.num_pwrlevels - 1));
+		property->prop_param[0] = prop_param;
+		NPU_DBG("setting dcvs_mode to %d[%d:%d]\n", prop_param,
+			property->prop_param[0],
+			(uint32_t)(npu_dev->pwrctrl.num_pwrlevels - 1));
+
+		if (property->network_hdl == 0) {
+			npu_dev->pwrctrl.dcvs_mode = prop_param;
+			NPU_DBG("Set global dcvs mode %d\n", prop_param);
+		}
+		break;
+	default:
+		NPU_ERR("unsupported property %d\n", property->prop_id);
+		ret = -EINVAL;
+		kfree(prop_packet);
+		return ret;
+	}
+
+	prop_packet->header.cmd_type = NPU_IPC_CMD_SET_PROPERTY;
+	prop_packet->header.size = pkt_size;
+	prop_packet->header.trans_id =
+		atomic_add_return(1, &host_ctx->ipc_trans_id);
+	prop_packet->header.flags = 0;
+
+	prop_packet->prop_id = prop_id;
+	prop_packet->num_params = num_of_params;
+	prop_packet->network_hdl = property->network_hdl;
+	for (i = 0; i < num_of_params; i++)
+		prop_packet->prop_param[i] = property->prop_param[i];
+
+	mutex_lock(&host_ctx->lock);
+	misc_cmd = npu_alloc_misc_cmd(host_ctx);
+	if (!misc_cmd) {
+		NPU_ERR("Can't allocate misc_cmd\n");
+		ret = -ENOMEM;
+		goto set_prop_exit;
+	}
+
+	misc_cmd->cmd_type = NPU_IPC_CMD_SET_PROPERTY;
+	misc_cmd->trans_id = prop_packet->header.trans_id;
+	npu_queue_misc_cmd(host_ctx, misc_cmd);
+
+	ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC,
+		prop_packet, misc_cmd);
+	NPU_DBG("NPU_IPC_CMD_SET_PROPERTY sent status: %d\n", ret);
+
+	if (ret) {
+		NPU_ERR("NPU_IPC_CMD_SET_PROPERTY failed\n");
+		goto free_misc_cmd;
+	}
+	mutex_unlock(&host_ctx->lock);
+
+	ret = wait_for_completion_interruptible_timeout(
+		&misc_cmd->cmd_done,
+		(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
+		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
+
+	mutex_lock(&host_ctx->lock);
+	if (!ret) {
+		NPU_ERR("NPU_IPC_CMD_SET_PROPERTY time out\n");
+		ret = -ETIMEDOUT;
+		goto free_misc_cmd;
+	} else if (ret < 0) {
+		NPU_ERR("Wait for set_property done interrupted by signal\n");
+		goto free_misc_cmd;
+	}
+
+	ret = misc_cmd->ret_status;
+	if (ret)
+		NPU_ERR("set fw property failed %d\n", ret);
+
+free_misc_cmd:
+	npu_dequeue_misc_cmd(host_ctx, misc_cmd);
+	npu_free_misc_cmd(host_ctx, misc_cmd);
+set_prop_exit:
+	mutex_unlock(&host_ctx->lock);
+	kfree(prop_packet);
+	return ret;
+}
+
+int32_t npu_host_get_fw_property(struct npu_device *npu_dev,
+	struct msm_npu_property *property)
+{
+	int ret = 0, i;
+	struct ipc_cmd_prop_pkt *prop_packet = NULL;
+	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+	struct msm_npu_property *prop_from_fw;
+	uint32_t num_of_params, pkt_size;
+	struct npu_misc_cmd *misc_cmd = NULL;
+
+	if (property->prop_id < MSM_NPU_FW_PROP_ID_START) {
+		NPU_ERR("Not supproted fw property id %x\n",
+			property->prop_id);
+		return -EINVAL;
+	}
+
+	num_of_params = min_t(uint32_t, property->num_of_params,
+		(uint32_t)PROP_PARAM_MAX_SIZE);
+	pkt_size = sizeof(*prop_packet) + num_of_params * sizeof(uint32_t);
+	prop_packet = kzalloc(pkt_size, GFP_KERNEL);
+
+	if (!prop_packet)
+		return -ENOMEM;
+
+	prop_packet->header.cmd_type = NPU_IPC_CMD_GET_PROPERTY;
+	prop_packet->header.size = pkt_size;
+	prop_packet->header.trans_id =
+		atomic_add_return(1, &host_ctx->ipc_trans_id);
+	prop_packet->header.flags = 0;
+
+	prop_packet->prop_id = property->prop_id;
+	prop_packet->num_params = num_of_params;
+	prop_packet->network_hdl = property->network_hdl;
+	for (i = 0; i < num_of_params; i++)
+		prop_packet->prop_param[i] = property->prop_param[i];
+
+	mutex_lock(&host_ctx->lock);
+	misc_cmd = npu_alloc_misc_cmd(host_ctx);
+	if (!misc_cmd) {
+		NPU_ERR("Can't allocate misc_cmd\n");
+		ret = -ENOMEM;
+		goto get_prop_exit;
+	}
+
+	misc_cmd->cmd_type = NPU_IPC_CMD_GET_PROPERTY;
+	misc_cmd->trans_id = prop_packet->header.trans_id;
+	npu_queue_misc_cmd(host_ctx, misc_cmd);
+
+	ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC,
+		prop_packet, misc_cmd);
+	NPU_DBG("NPU_IPC_CMD_GET_PROPERTY sent status: %d\n", ret);
+
+	if (ret) {
+		NPU_ERR("NPU_IPC_CMD_GET_PROPERTY failed\n");
+		goto free_misc_cmd;
+	}
+	mutex_unlock(&host_ctx->lock);
+
+	ret = wait_for_completion_interruptible_timeout(
+		&misc_cmd->cmd_done,
+		(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
+		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
+
+	mutex_lock(&host_ctx->lock);
+	if (!ret) {
+		pr_err_ratelimited("npu: NPU_IPC_CMD_GET_PROPERTY time out\n");
+		ret = -ETIMEDOUT;
+		goto free_misc_cmd;
+	} else if (ret < 0) {
+		NPU_ERR("Wait for get_property done interrupted by signal\n");
+		goto free_misc_cmd;
+	}
+
+	ret = misc_cmd->ret_status;
+	if (!ret) {
+		/* Return prop data retrieved from fw to user */
+		prop_from_fw = &misc_cmd->u.prop;
+		if (property->prop_id == prop_from_fw->prop_id &&
+			property->network_hdl == prop_from_fw->network_hdl) {
+			property->num_of_params = num_of_params;
+			for (i = 0; i < num_of_params; i++)
+				property->prop_param[i] =
+					prop_from_fw->prop_param[i];
+		}
+	} else {
+		NPU_ERR("get fw property failed %d\n", ret);
+	}
+
+free_misc_cmd:
+	npu_dequeue_misc_cmd(host_ctx, misc_cmd);
+	npu_free_misc_cmd(host_ctx, misc_cmd);
+get_prop_exit:
+	mutex_unlock(&host_ctx->lock);
+	kfree(prop_packet);
+	return ret;
+}
+
 int32_t npu_host_load_network_v2(struct npu_client *client,
 			struct msm_npu_load_network_ioctl_v2 *load_ioctl,
 			struct msm_npu_patch_info_v2 *patch_info)
 {
 	int ret = 0, i;
 	struct npu_device *npu_dev = client->npu_dev;
+	struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
 	struct npu_network *network;
 	struct ipc_cmd_load_pkt_v2 *load_packet = NULL;
 	struct ipc_cmd_unload_pkt unload_packet;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+	struct npu_network_cmd *load_cmd = NULL;
 	uint32_t num_patch_params, pkt_size;
 
 	ret = enable_fw(npu_dev);
@@ -1637,7 +2252,9 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 	network->phy_add = load_ioctl->buf_phys_addr;
 	network->first_block_size = load_ioctl->first_block_size;
 	network->priority = load_ioctl->priority;
-	network->perf_mode = load_ioctl->perf_mode;
+	network->cur_perf_mode = network->init_perf_mode =
+		(load_ioctl->perf_mode == PERF_MODE_DEFAULT) ?
+		pwr->num_pwrlevels : load_ioctl->perf_mode;
 	network->num_layers = load_ioctl->num_layers;
 
 	/* verify mapped physical address */
@@ -1666,18 +2283,30 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 	load_packet->buf_pkt.num_layers = network->num_layers;
 	load_packet->num_patch_params = num_patch_params;
 
+	load_cmd = npu_alloc_network_cmd(host_ctx, 0);
+	if (!load_cmd) {
+		NPU_ERR("Can't allocate load_cmd\n");
+		ret = -ENOMEM;
+		goto error_free_network;
+	}
+
+	load_cmd->cmd_id = 0;
+	load_cmd->cmd_type = NPU_IPC_CMD_LOAD_V2;
+	load_cmd->trans_id = load_packet->header.trans_id;
+	load_cmd->async = false;
+	npu_queue_network_cmd(network, load_cmd);
+
 	/* NPU_IPC_CMD_LOAD_V2 will go onto IPC_QUEUE_APPS_EXEC */
-	reinit_completion(&network->cmd_done);
-	ret = npu_send_network_cmd(npu_dev, network, load_packet, false, false);
+	ret = npu_send_network_cmd(npu_dev, network, load_packet, load_cmd);
 	if (ret) {
 		NPU_ERR("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret);
-		goto error_free_network;
+		goto free_load_cmd;
 	}
 
 	mutex_unlock(&host_ctx->lock);
 
 	ret = wait_for_completion_timeout(
-		&network->cmd_done,
+		&load_cmd->cmd_done,
 		(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
 		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
 
@@ -1686,24 +2315,28 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 	if (network->fw_error) {
 		ret = -EIO;
 		NPU_ERR("fw is in error state during load_v2 network\n");
-		goto error_free_network;
+		goto free_load_cmd;
 	}
 
 	if (!ret) {
 		NPU_ERR("npu: NPU_IPC_CMD_LOAD time out %lld:%d\n",
-			network->id, network->trans_id);
+			network->id, load_cmd->trans_id);
 		npu_dump_debug_info(npu_dev);
 		ret = -ETIMEDOUT;
 		goto error_load_network;
 	}
 
-	ret = network->cmd_ret_status;
-	if (ret)
-		goto error_free_network;
+	ret = load_cmd->ret_status;
+	if (ret) {
+		NPU_ERR("load network failed status %d\n", ret);
+		goto free_load_cmd;
+	}
 
 	load_ioctl->network_hdl = network->network_hdl;
 	network->is_active = true;
 	kfree(load_packet);
+	npu_dequeue_network_cmd(network, load_cmd);
+	npu_free_network_cmd(host_ctx, load_cmd);
 	network_put(network);
 	mutex_unlock(&host_ctx->lock);
 
@@ -1718,9 +2351,12 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 		atomic_add_return(1, &host_ctx->ipc_trans_id);
 	unload_packet.header.flags = 0;
 	unload_packet.network_hdl = (uint32_t)network->network_hdl;
-	npu_send_network_cmd(npu_dev, network, &unload_packet, false, true);
+	npu_send_network_cmd(npu_dev, network, &unload_packet, NULL);
 	/* wait 200 ms to make sure fw has processed this command */
 	msleep(200);
+free_load_cmd:
+	npu_dequeue_network_cmd(network, load_cmd);
+	npu_free_network_cmd(host_ctx, load_cmd);
 error_free_network:
 	kfree(load_packet);
 	network_put(network);
@@ -1739,6 +2375,7 @@ int32_t npu_host_unload_network(struct npu_client *client,
 	struct ipc_cmd_unload_pkt unload_packet;
 	struct npu_network *network;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+	struct npu_network_cmd *unload_cmd = NULL;
 
 	/* get the corresponding network for ipc trans id purpose */
 	mutex_lock(&host_ctx->lock);
@@ -1770,10 +2407,22 @@ int32_t npu_host_unload_network(struct npu_client *client,
 	unload_packet.header.flags = 0;
 	unload_packet.network_hdl = (uint32_t)network->network_hdl;
 
+	unload_cmd = npu_alloc_network_cmd(host_ctx, 0);
+	if (!unload_cmd) {
+		NPU_ERR("Can't allocate unload_cmd\n");
+		ret = -ENOMEM;
+		goto free_network;
+	}
+
+	unload_cmd->cmd_id = 0;
+	unload_cmd->cmd_type = NPU_IPC_CMD_UNLOAD;
+	unload_cmd->trans_id = unload_packet.header.trans_id;
+	unload_cmd->async = false;
+	npu_queue_network_cmd(network, unload_cmd);
+
 	/* NPU_IPC_CMD_UNLOAD will go onto IPC_QUEUE_APPS_EXEC */
-	reinit_completion(&network->cmd_done);
-	ret = npu_send_network_cmd(npu_dev, network, &unload_packet, false,
-		false);
+	ret = npu_send_network_cmd(npu_dev, network, &unload_packet,
+		unload_cmd);
 
 	if (ret) {
 		NPU_ERR("NPU_IPC_CMD_UNLOAD sent failed: %d\n", ret);
@@ -1783,17 +2432,19 @@ int32_t npu_host_unload_network(struct npu_client *client,
 		 */
 		if (ret == -EBUSY) {
 			NPU_ERR("Network is running, retry later\n");
+			npu_dequeue_network_cmd(network, unload_cmd);
+			npu_free_network_cmd(host_ctx, unload_cmd);
 			network_put(network);
 			mutex_unlock(&host_ctx->lock);
 			return ret;
 		}
-		goto free_network;
+		goto free_unload_cmd;
 	}
 
 	mutex_unlock(&host_ctx->lock);
 
 	ret = wait_for_completion_timeout(
-		&network->cmd_done,
+		&unload_cmd->cmd_done,
 		(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
 		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
 
@@ -1807,16 +2458,18 @@ int32_t npu_host_unload_network(struct npu_client *client,
 
 	if (!ret) {
 		NPU_ERR("npu: NPU_IPC_CMD_UNLOAD time out %llx:%d\n",
-			network->id, network->trans_id);
+			network->id, unload_cmd->trans_id);
 		npu_dump_debug_info(npu_dev);
-		network->cmd_pending = false;
 		ret = -ETIMEDOUT;
-		goto free_network;
+		goto free_unload_cmd;
 	}
 
-	ret = network->cmd_ret_status;
+	ret = unload_cmd->ret_status;
 	NPU_DBG("unload network status %d\n", ret);
 
+free_unload_cmd:
+	npu_dequeue_network_cmd(network, unload_cmd);
+	npu_free_network_cmd(host_ctx, unload_cmd);
 free_network:
 	/*
 	 * free the network on the kernel if the corresponding ACO
@@ -1824,9 +2477,10 @@ int32_t npu_host_unload_network(struct npu_client *client,
 	 */
 	network_put(network);
 	free_network(host_ctx, client, network->id);
-	/* update perf mode */
-	if (set_perf_mode(npu_dev))
-		NPU_WARN("set_perf_mode failed\n");
+
+	/* recalculate uc_power_level after unload network */
+	if (npu_dev->pwrctrl.cur_dcvs_activity)
+		set_perf_mode(npu_dev);
 
 	mutex_unlock(&host_ctx->lock);
 
@@ -1841,6 +2495,7 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 {
 	struct npu_device *npu_dev = client->npu_dev;
 	struct ipc_cmd_execute_pkt_v2 *exec_packet;
+	struct npu_network_cmd *exec_cmd = NULL;
 	int32_t ret;
 	struct npu_network *network;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
@@ -1869,6 +2524,14 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 		goto exec_v2_done;
 	}
 
+	if (network->is_async && !async_ioctl) {
+		NPU_ERR("network is in async mode\n");
+		ret = -EINVAL;
+		goto exec_v2_done;
+	}
+
+	network->is_async = async_ioctl;
+
 	NPU_DBG("execute_v2 network %lld\n", network->id);
 	num_patch_params = exec_ioctl->patch_buf_info_num;
 	pkt_size = num_patch_params * sizeof(struct npu_patch_params_v2) +
@@ -1907,20 +2570,28 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 	exec_packet->network_hdl = network->network_hdl;
 	exec_packet->num_patch_params = num_patch_params;
 
-	network->stats_buf_u = (void __user *)exec_ioctl->stats_buf_addr;
-	network->stats_buf_size = exec_ioctl->stats_buf_size;
+	exec_cmd = npu_alloc_network_cmd(host_ctx, exec_ioctl->stats_buf_size);
+	if (!exec_cmd) {
+		NPU_ERR("Can't allocate exec_cmd\n");
+		ret = -ENOMEM;
+		goto free_exec_packet;
+	}
+
+	exec_cmd->stats_buf_u = (void __user *)exec_ioctl->stats_buf_addr;
+	exec_cmd->cmd_id = exec_ioctl->async;
+	exec_cmd->cmd_type = NPU_IPC_CMD_EXECUTE_V2;
+	exec_cmd->trans_id = exec_packet->header.trans_id;
+	exec_cmd->async = async_ioctl;
+	npu_queue_network_cmd(network, exec_cmd);
 
 	NPU_DBG("Execute_v2 flags %x stats_buf_size %d\n",
 		exec_packet->header.flags, exec_ioctl->stats_buf_size);
 
-	/* Send it on the high priority queue */
-	reinit_completion(&network->cmd_done);
-	ret = npu_send_network_cmd(npu_dev, network, exec_packet, async_ioctl,
-		false);
+	ret = npu_send_network_cmd(npu_dev, network, exec_packet, exec_cmd);
 
 	if (ret) {
 		NPU_ERR("NPU_IPC_CMD_EXECUTE_V2 sent failed: %d\n", ret);
-		goto free_exec_packet;
+		goto free_exec_cmd;
 	}
 
 	if (async_ioctl) {
@@ -1931,7 +2602,7 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 	mutex_unlock(&host_ctx->lock);
 
 	ret = wait_for_completion_timeout(
-		&network->cmd_done,
+		&exec_cmd->cmd_done,
 		(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
 		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
 
@@ -1939,32 +2610,35 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 	if (network->fw_error) {
 		ret = -EIO;
 		NPU_ERR("fw is in error state during execute_v2 network\n");
-		goto free_exec_packet;
+		goto free_exec_cmd;
 	}
 
 	if (!ret) {
 		NPU_ERR("npu: %llx:%d NPU_IPC_CMD_EXECUTE_V2 time out\n",
-			network->id, network->trans_id);
+			network->id, exec_cmd->trans_id);
 		npu_dump_debug_info(npu_dev);
-		network->cmd_pending = false;
 		ret = -ETIMEDOUT;
-		goto free_exec_packet;
+		goto free_exec_cmd;
 	}
 
-	ret = network->cmd_ret_status;
-	if (!ret) {
-		exec_ioctl->stats_buf_size = network->stats_buf_size;
-		if (copy_to_user(
-			(void __user *)exec_ioctl->stats_buf_addr,
-			network->stats_buf,
-			exec_ioctl->stats_buf_size)) {
-			NPU_ERR("copy stats to user failed\n");
-			exec_ioctl->stats_buf_size = 0;
-		}
-	} else {
+	ret = exec_cmd->ret_status;
+	if (ret) {
 		NPU_ERR("execution failed %d\n", ret);
+		goto free_exec_cmd;
 	}
 
+	exec_ioctl->stats_buf_size = exec_cmd->stats_buf_size;
+	if (copy_to_user(
+		(void __user *)exec_ioctl->stats_buf_addr,
+		exec_cmd->stats_buf,
+		exec_ioctl->stats_buf_size)) {
+		NPU_ERR("copy stats to user failed\n");
+		exec_ioctl->stats_buf_size = 0;
+	}
+
+free_exec_cmd:
+	npu_dequeue_network_cmd(network, exec_cmd);
+	npu_free_network_cmd(host_ctx, exec_cmd);
 free_exec_packet:
 	kfree(exec_packet);
 exec_v2_done:
@@ -1987,6 +2661,7 @@ int32_t npu_host_loopback_test(struct npu_device *npu_dev)
 {
 	struct ipc_cmd_loopback_pkt loopback_packet;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+	struct npu_misc_cmd *misc_cmd = NULL;
 	int32_t ret;
 
 	ret = enable_fw(npu_dev);
@@ -2002,17 +2677,29 @@ int32_t npu_host_loopback_test(struct npu_device *npu_dev)
 	loopback_packet.header.flags = 0;
 	loopback_packet.loopbackParams = 15;
 
-	ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC, &loopback_packet);
+	misc_cmd = npu_alloc_misc_cmd(host_ctx);
+	if (!misc_cmd) {
+		NPU_ERR("Can't allocate misc_cmd\n");
+		ret = -ENOMEM;
+		goto loopback_exit;
+	}
+
+	misc_cmd->cmd_type = NPU_IPC_CMD_LOOPBACK;
+	misc_cmd->trans_id = loopback_packet.header.trans_id;
+	npu_queue_misc_cmd(host_ctx, misc_cmd);
+
+	ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC, &loopback_packet,
+		misc_cmd);
 
 	if (ret) {
 		NPU_ERR("NPU_IPC_CMD_LOOPBACK sent failed: %d\n", ret);
-		goto loopback_exit;
+		goto free_misc_cmd;
 	}
 
 	mutex_unlock(&host_ctx->lock);
 
 	ret = wait_for_completion_interruptible_timeout(
-		&host_ctx->misc_cmd_done,
+		&misc_cmd->cmd_done,
 		(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
 		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
 
@@ -2025,9 +2712,12 @@ int32_t npu_host_loopback_test(struct npu_device *npu_dev)
 	} else if (ret < 0) {
 		NPU_ERR("Wait for loopback done interrupted by signal\n");
 	} else {
-		ret = host_ctx->misc_cmd_result;
+		ret = misc_cmd->ret_status;
 	}
 
+free_misc_cmd:
+	npu_dequeue_misc_cmd(host_ctx, misc_cmd);
+	npu_free_misc_cmd(host_ctx, misc_cmd);
 loopback_exit:
 	mutex_unlock(&host_ctx->lock);
 	disable_fw(npu_dev);
@@ -2065,3 +2755,85 @@ void npu_host_cleanup_networks(struct npu_client *client)
 		npu_host_unmap_buf(client, &unmap_req);
 	}
 }
+
+/*
+ * Set the network or global perf_mode.
+ * If network_hdl is 0, set the global perf_mode_override;
+ * otherwise set the network's perf_mode. If perf_mode is 0,
+ * restore the network's perf_mode to its initial perf_mode
+ * from load_network.
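+ *
+ * Example (hypothetical values): npu_host_set_perf_mode(client, 0, 3)
+ * forces the global override to level 3, while
+ * npu_host_set_perf_mode(client, hdl, 0) restores a loaded network to
+ * its initial perf_mode.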
+ */
+int32_t npu_host_set_perf_mode(struct npu_client *client, uint32_t network_hdl,
+	uint32_t perf_mode)
+{
+	int ret = 0;
+	struct npu_device *npu_dev = client->npu_dev;
+	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+	struct npu_network *network = NULL;
+
+	mutex_lock(&host_ctx->lock);
+
+	if (network_hdl == 0) {
+		NPU_DBG("change perf_mode_override to %d\n", perf_mode);
+		npu_dev->pwrctrl.perf_mode_override = perf_mode;
+	} else {
+		network = get_network_by_hdl(host_ctx, client, network_hdl);
+		if (!network) {
+			NPU_ERR("invalid network handle %x\n", network_hdl);
+			mutex_unlock(&host_ctx->lock);
+			return -EINVAL;
+		}
+
+		if (perf_mode == 0) {
+			network->cur_perf_mode = network->init_perf_mode;
+			NPU_DBG("change network %d perf_mode back to %d\n",
+				network_hdl, network->cur_perf_mode);
+		} else {
+			network->cur_perf_mode = perf_mode;
+			NPU_DBG("change network %d perf_mode to %d\n",
+				network_hdl, network->cur_perf_mode);
+		}
+	}
+
+	ret = set_perf_mode(npu_dev);
+	if (ret)
+		NPU_ERR("set_perf_mode failed\n");
+
+	if (network)
+		network_put(network);
+	mutex_unlock(&host_ctx->lock);
+
+	return ret;
+}
+
+/*
+ * Get the currently set network or global perf_mode.
+ * If network_hdl is 0, return the global perf_mode_override;
+ * otherwise return the network's perf_mode.
+ */
+int32_t npu_host_get_perf_mode(struct npu_client *client, uint32_t network_hdl)
+{
+	int param_val = 0;
+	struct npu_device *npu_dev = client->npu_dev;
+	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+	struct npu_network *network = NULL;
+
+	mutex_lock(&host_ctx->lock);
+
+	if (network_hdl == 0) {
+		param_val = npu_dev->pwrctrl.perf_mode_override;
+	} else {
+		network = get_network_by_hdl(host_ctx, client, network_hdl);
+		if (!network) {
+			NPU_ERR("invalid network handle %x\n", network_hdl);
+			mutex_unlock(&host_ctx->lock);
+			return -EINVAL;
+		}
+		param_val = network->cur_perf_mode;
+		network_put(network);
+	}
+
+	mutex_unlock(&host_ctx->lock);
+
+	return param_val;
+}
diff --git a/drivers/media/platform/msm/npu/npu_mgr.h b/drivers/media/platform/msm/npu/npu_mgr.h
index 6c14720..0beb948 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.h
+++ b/drivers/media/platform/msm/npu/npu_mgr.h
@@ -26,7 +26,7 @@
 #define NPU_MBOX_IDLE_TIMEOUT msecs_to_jiffies(NPU_MBOX_IDLE_TIMEOUT_MS)
 #define FIRMWARE_VERSION 0x00001000
 #define MAX_LOADED_NETWORK 32
-#define NPU_IPC_BUF_LENGTH 512
+#define NPU_IPC_BUF_LENGTH 4096
 
 #define FW_DBG_MODE_PAUSE        (1 << 0)
 #define FW_DBG_MODE_INC_TIMEOUT  (1 << 1)
@@ -36,6 +36,33 @@
  * Data Structures
  * -------------------------------------------------------------------------
  */
+
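+/*
+ * One in-flight IPC command issued against a network. Each command
+ * carries its own completion, transaction id and return status
+ * (replacing the single per-network cmd_done/cmd_ret_status fields)
+ * and is linked on the owning network's cmd_list, so several commands
+ * can be outstanding at once.
+ */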
+struct npu_network_cmd {
+	struct list_head list;
+	uint32_t cmd_type;
+	uint32_t cmd_id;
+	uint32_t trans_id;
+	bool async;
+	struct completion cmd_done;
+	/* stats buf info */
+	uint32_t stats_buf_size;
+	void __user *stats_buf_u;
+	void *stats_buf;
+	int ret_status;
+};
+
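+/*
+ * A non-network (misc) IPC command, e.g. loopback or a property
+ * get/set, tracked on host_ctx->misc_cmd_list.
+ */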
+struct npu_misc_cmd {
+	struct list_head list;
+	uint32_t cmd_type;
+	uint32_t trans_id;
+	union {
+		struct msm_npu_property prop;
+		uint32_t data[32];
+	} u;
+	struct completion cmd_done;
+	int ret_status;
+};
+
 struct npu_network {
 	uint64_t id;
 	int buf_hdl;
@@ -44,21 +71,16 @@ struct npu_network {
 	uint32_t first_block_size;
 	uint32_t network_hdl;
 	uint32_t priority;
-	uint32_t perf_mode;
+	uint32_t cur_perf_mode;
+	uint32_t init_perf_mode;
 	uint32_t num_layers;
-	void *stats_buf;
-	void __user *stats_buf_u;
-	uint32_t stats_buf_size;
-	uint32_t trans_id;
 	atomic_t ref_cnt;
 	bool is_valid;
 	bool is_active;
 	bool fw_error;
-	bool cmd_pending;
-	bool cmd_async;
-	int cmd_ret_status;
-	struct completion cmd_done;
+	bool is_async;
 	struct npu_client *client;
+	struct list_head cmd_list;
 };
 
 enum fw_state {
@@ -73,6 +95,7 @@ struct npu_host_ctx {
 	void *subsystem_handle;
 	enum fw_state fw_state;
 	int32_t fw_ref_cnt;
+	int32_t npu_init_cnt;
 	int32_t power_vote_num;
 	struct work_struct ipc_irq_work;
 	struct work_struct wdg_err_irq_work;
@@ -81,13 +104,16 @@ struct npu_host_ctx {
 	struct work_struct update_pwr_work;
 	struct delayed_work disable_fw_work;
 	struct workqueue_struct *wq;
-	struct completion misc_cmd_done;
+	struct workqueue_struct *wq_pri;
 	struct completion fw_deinit_done;
 	struct completion fw_bringup_done;
 	struct completion fw_shutdown_done;
 	struct completion npu_power_up_done;
 	int32_t network_num;
 	struct npu_network networks[MAX_LOADED_NETWORK];
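+	/* slab caches backing npu_network_cmd, npu_misc_cmd and stats bufs */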
+	struct kmem_cache *network_cmd_cache;
+	struct kmem_cache *misc_cmd_cache;
+	struct kmem_cache *stats_buf_cache;
 	bool sys_cache_disable;
 	bool auto_pil_disable;
 	uint32_t fw_dbg_mode;
@@ -99,12 +125,12 @@ struct npu_host_ctx {
 	uint32_t wdg_irq_sts;
 	bool fw_error;
 	bool cancel_work;
-	bool misc_cmd_pending;
-	uint32_t misc_cmd_result;
 	struct notifier_block nb;
 	void *notif_hdle;
 	spinlock_t bridge_mbox_lock;
 	bool bridge_mbox_pwr_on;
+	void *ipc_msg_buf;
+	struct list_head misc_cmd_list;
 };
 
 struct npu_device;
@@ -124,6 +150,7 @@ int npu_host_ipc_send_cmd(struct npu_device *npu_dev, uint32_t queueIndex,
 	void *pCmd);
 int npu_host_ipc_read_msg(struct npu_device *npu_dev, uint32_t queueIndex,
 	uint32_t *pMsg);
+int npu_host_get_ipc_queue_size(struct npu_device *npu_dev, uint32_t q_idx);
 
 int32_t npu_host_get_info(struct npu_device *npu_dev,
 	struct msm_npu_get_info_ioctl *get_info_ioctl);
@@ -140,11 +167,17 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 	struct msm_npu_exec_network_ioctl_v2 *exec_ioctl,
 	struct msm_npu_patch_buf_info *patch_buf_info);
 int32_t npu_host_loopback_test(struct npu_device *npu_dev);
+int32_t npu_host_set_fw_property(struct npu_device *npu_dev,
+			struct msm_npu_property *property);
+int32_t npu_host_get_fw_property(struct npu_device *npu_dev,
+			struct msm_npu_property *property);
 void npu_host_cleanup_networks(struct npu_client *client);
 int npu_host_notify_fw_pwr_state(struct npu_device *npu_dev,
 	uint32_t pwr_level, bool post);
 int npu_host_update_power(struct npu_device *npu_dev);
-
+int32_t npu_host_set_perf_mode(struct npu_client *client, uint32_t network_hdl,
+	uint32_t perf_mode);
+int32_t npu_host_get_perf_mode(struct npu_client *client, uint32_t network_hdl);
 void npu_dump_debug_info(struct npu_device *npu_dev);
 void npu_dump_ipc_packet(struct npu_device *npu_dev, void *cmd_ptr);
 
diff --git a/drivers/media/platform/msm/synx/synx.c b/drivers/media/platform/msm/synx/synx.c
index 0c97eb8..7809130 100644
--- a/drivers/media/platform/msm/synx/synx.c
+++ b/drivers/media/platform/msm/synx/synx.c
@@ -252,6 +252,13 @@ int synx_signal_core(struct synx_table_row *row, u32 status)
 
 	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
 
+	if (!row->index) {
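+		/* index 0 marks a row that has already been cleaned up */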
+		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		pr_err("object already cleaned up at %d\n",
+			row->index);
+		return -EINVAL;
+	}
+
 	if (synx_status_locked(row) != SYNX_STATE_ACTIVE) {
 		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
 		pr_err("object already signaled synx at %d\n",
@@ -444,8 +451,13 @@ static int synx_release_core(struct synx_table_row *row)
 	 * be careful while accessing the metadata
 	 */
 	fence = row->fence;
+	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
 	idx = row->index;
-	spin_lock_bh(&synx_dev->row_spinlocks[idx]);
+	if (!idx) {
+		spin_unlock_bh(&synx_dev->row_spinlocks[idx]);
+		pr_err("object already cleaned up at %d\n", idx);
+		return -EINVAL;
+	}
 	/*
 	 * we need to clear the metadata for merged synx obj upon synx_release
 	 * itself as it does not invoke the synx_fence_release function.
@@ -490,6 +502,15 @@ int synx_wait(s32 synx_obj, u64 timeout_ms)
 		return -EINVAL;
 	}
 
+	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	if (!row->index) {
+		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		pr_err("object already cleaned up at %d\n",
+			row->index);
+		return -EINVAL;
+	}
+	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+
 	timeleft = dma_fence_wait_timeout(row->fence, (bool) 0,
 					msecs_to_jiffies(timeout_ms));
 	if (timeleft <= 0) {
@@ -669,6 +690,13 @@ int synx_import(s32 synx_obj, u32 import_key, s32 *new_synx_obj)
 	}
 
 	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	if (!row->index) {
+		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		pr_err("object already cleaned up at %d\n",
+			row->index);
+		kfree(obj_node);
+		return -EINVAL;
+	}
 	obj_node->synx_obj = id;
 	list_add(&obj_node->list, &row->synx_obj_list);
 	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
@@ -890,7 +918,8 @@ static int synx_handle_wait(struct synx_private_ioctl_arg *k_ioctl)
 }
 
 static int synx_handle_register_user_payload(
-	struct synx_private_ioctl_arg *k_ioctl)
+	struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_client *client)
 {
 	s32 synx_obj;
 	u32 state = SYNX_STATE_INVALID;
@@ -898,7 +927,6 @@ static int synx_handle_register_user_payload(
 	struct synx_cb_data *user_payload_kernel;
 	struct synx_cb_data *user_payload_iter, *temp;
 	struct synx_table_row *row = NULL;
-	struct synx_client *client = NULL;
 
 	pr_debug("Enter %s\n", __func__);
 
@@ -917,12 +945,8 @@ static int synx_handle_register_user_payload(
 		return -EINVAL;
 	}
 
-	mutex_lock(&synx_dev->table_lock);
-	client = get_current_client();
-	mutex_unlock(&synx_dev->table_lock);
-
 	if (!client) {
-		pr_err("couldn't find client for process %d\n", current->tgid);
+		pr_err("invalid client for process %d\n", current->pid);
 		return -EINVAL;
 	}
 
@@ -972,11 +996,11 @@ static int synx_handle_register_user_payload(
 }
 
 static int synx_handle_deregister_user_payload(
-	struct synx_private_ioctl_arg *k_ioctl)
+	struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_client *client)
 {
 	s32 synx_obj;
 	u32 state = SYNX_STATE_INVALID;
-	struct synx_client *client = NULL;
 	struct synx_userpayload_info userpayload_info;
 	struct synx_cb_data *user_payload_kernel, *temp;
 	struct synx_table_row *row = NULL;
@@ -999,12 +1023,8 @@ static int synx_handle_deregister_user_payload(
 		return -EINVAL;
 	}
 
-	mutex_lock(&synx_dev->table_lock);
-	client = get_current_client();
-	mutex_unlock(&synx_dev->table_lock);
-
 	if (!client) {
-		pr_err("couldn't find client for process %d\n", current->tgid);
+		pr_err("invalid client for process %d\n", current->pid);
 		return -EINVAL;
 	}
 
@@ -1124,11 +1144,13 @@ static long synx_ioctl(struct file *filep,
 {
 	s32 rc = 0;
 	struct synx_device *synx_dev = NULL;
+	struct synx_client *client;
 	struct synx_private_ioctl_arg k_ioctl;
 
 	pr_debug("Enter %s\n", __func__);
 
 	synx_dev = get_synx_device(filep);
+	client = filep->private_data;
 
 	if (cmd != SYNX_PRIVATE_IOCTL_CMD) {
 		pr_err("invalid ioctl cmd\n");
@@ -1154,11 +1176,11 @@ static long synx_ioctl(struct file *filep,
 		break;
 	case SYNX_REGISTER_PAYLOAD:
 		rc = synx_handle_register_user_payload(
-			&k_ioctl);
+			&k_ioctl, client);
 		break;
 	case SYNX_DEREGISTER_PAYLOAD:
 		rc = synx_handle_deregister_user_payload(
-			&k_ioctl);
+			&k_ioctl, client);
 		break;
 	case SYNX_SIGNAL:
 		rc = synx_handle_signal(&k_ioctl);
@@ -1266,7 +1288,7 @@ static int synx_open(struct inode *inode, struct file *filep)
 	struct synx_device *synx_dev = NULL;
 	struct synx_client *client = NULL;
 
-	pr_debug("Enter %s from pid: %d\n", __func__, current->tgid);
+	pr_debug("Enter %s from pid: %d\n", __func__, current->pid);
 
 	synx_dev = container_of(inode->i_cdev, struct synx_device, cdev);
 
@@ -1275,7 +1297,6 @@ static int synx_open(struct inode *inode, struct file *filep)
 		return -ENOMEM;
 
 	client->device = synx_dev;
-	client->pid = current->tgid;
 	init_waitqueue_head(&client->wq);
 	INIT_LIST_HEAD(&client->eventq);
 	spin_lock_init(&client->eventq_lock);
@@ -1292,20 +1313,12 @@ static int synx_open(struct inode *inode, struct file *filep)
 	return 0;
 }
 
-static int synx_close(struct inode *inode, struct file *filep)
+static void synx_table_cleanup(void)
 {
 	int rc = 0;
 	int i;
-	struct synx_device *synx_dev = NULL;
-	struct synx_client *client, *tmp_client;
 	struct synx_import_data *data, *tmp_data;
 
-	pr_debug("Enter %s\n", __func__);
-
-	synx_dev = get_synx_device(filep);
-
-	mutex_lock(&synx_dev->table_lock);
-
 	synx_dev->open_cnt--;
 	if (!synx_dev->open_cnt) {
 		for (i = 1; i < SYNX_MAX_OBJS; i++) {
@@ -1376,18 +1389,22 @@ static int synx_close(struct inode *inode, struct file *filep)
 			kfree(data);
 		}
 	}
+}
 
-	list_for_each_entry_safe(client, tmp_client,
-		&synx_dev->client_list, list) {
-		if (current->tgid == client->pid) {
-			pr_debug("deleting client for process %d\n",
-				client->pid);
-			list_del_init(&client->list);
-			kfree(client);
-			break;
-		}
-	}
+static int synx_close(struct inode *inode, struct file *filep)
+{
+	struct synx_device *synx_dev = NULL;
+	struct synx_client *client;
 
+	pr_debug("Enter %s from pid: %d\n", __func__, current->pid);
+
+	synx_dev = get_synx_device(filep);
+	client = filep->private_data;
+
+	mutex_lock(&synx_dev->table_lock);
+	synx_table_cleanup();
+	list_del_init(&client->list);
+	kfree(client);
 	mutex_unlock(&synx_dev->table_lock);
 
 	pr_debug("Exit %s\n", __func__);
@@ -1407,6 +1424,32 @@ static const struct file_operations synx_fops = {
 #endif
 };
 
+int synx_initialize(struct synx_initialization_params *params)
+{
+	pr_debug("Enter %s from pid: %d\n", __func__, current->pid);
+
+	mutex_lock(&synx_dev->table_lock);
+	synx_dev->open_cnt++;
+	mutex_unlock(&synx_dev->table_lock);
+
+	if (params)
+		pr_debug("synx client session initialized for %s\n",
+			params->name);
+	return 0;
+}
+
+int synx_uninitialize(void)
+{
+	pr_debug("Enter %s from pid: %d\n",
+		__func__, current->pid);
+
+	mutex_lock(&synx_dev->table_lock);
+	synx_table_cleanup();
+	mutex_unlock(&synx_dev->table_lock);
+
+	return 0;
+}
+
 int synx_register_ops(const struct synx_register_params *params)
 {
 	s32 rc;
@@ -1421,7 +1464,7 @@ int synx_register_ops(const struct synx_register_params *params)
 		return -EINVAL;
 	}
 
-	mutex_lock(&synx_dev->table_lock);
+	mutex_lock(&synx_dev->vtbl_lock);
 	client_ops = &synx_dev->bind_vtbl[params->type];
 	if (!client_ops->valid) {
 		client_ops->valid = true;
@@ -1438,7 +1481,7 @@ int synx_register_ops(const struct synx_register_params *params)
 			client_ops->name);
 		rc = -EINVAL;
 	}
-	mutex_unlock(&synx_dev->table_lock);
+	mutex_unlock(&synx_dev->vtbl_lock);
 
 	return rc;
 }
@@ -1453,12 +1496,12 @@ int synx_deregister_ops(const struct synx_register_params *params)
 		return -EINVAL;
 	}
 
-	mutex_lock(&synx_dev->table_lock);
+	mutex_lock(&synx_dev->vtbl_lock);
 	client_ops = &synx_dev->bind_vtbl[params->type];
 	memset(client_ops, 0, sizeof(*client_ops));
 	pr_info("deregistered bind ops for %s\n",
 		params->name);
-	mutex_unlock(&synx_dev->table_lock);
+	mutex_unlock(&synx_dev->vtbl_lock);
 
 	return 0;
 }
@@ -1475,6 +1518,7 @@ static int __init synx_init(void)
 		return -ENOMEM;
 
 	mutex_init(&synx_dev->table_lock);
+	mutex_init(&synx_dev->vtbl_lock);
 
 	for (idx = 0; idx < SYNX_MAX_OBJS; idx++)
 		spin_lock_init(&synx_dev->row_spinlocks[idx]);
diff --git a/drivers/media/platform/msm/synx/synx_api.h b/drivers/media/platform/msm/synx/synx_api.h
index 978a561..a892558 100644
--- a/drivers/media/platform/msm/synx/synx_api.h
+++ b/drivers/media/platform/msm/synx/synx_api.h
@@ -44,6 +44,16 @@ struct synx_register_params {
 	u32 type;
 };
 
+/**
+ * struct synx_initialization_params - Session params (optional)
+ *
+ * @name : Client session name
+ *         Only the first 64 bytes are accepted; the rest are ignored
+ */
+struct synx_initialization_params {
+	const char *name;
+};
+
 /* Kernel APIs */
 
 /* @brief: Register operations for external synchronization
@@ -71,6 +81,25 @@ int synx_register_ops(const struct synx_register_params *params);
 int synx_deregister_ops(const struct synx_register_params *params);
 
 /**
+ * @brief: Initializes a new client session
+ *
+ * @param params : Pointer to session init params
+ *
+ * @return Status of operation. Zero in case of success.
+ * -EINVAL will be returned if params is not valid.
+ * -ENOMEM will be returned if the kernel can't allocate space for
+ * a new client.
+ */
+int synx_initialize(struct synx_initialization_params *params);
+
+/**
+ * @brief: Destroys the client session
+ *
+ * @return Status of operation. Zero in case of success.
+ */
+int synx_uninitialize(void);
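+
+/*
+ * Typical pairing (hypothetical caller):
+ *
+ *   struct synx_initialization_params params = { .name = "my-client" };
+ *   synx_initialize(&params);
+ *   ...session work...
+ *   synx_uninitialize();
+ */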
+
+/**
  * @brief: Creates a synx object
  *
  *  The newly created synx obj is assigned to synx_obj.
diff --git a/drivers/media/platform/msm/synx/synx_private.h b/drivers/media/platform/msm/synx/synx_private.h
index 5a7889d0..d1ece71 100644
--- a/drivers/media/platform/msm/synx/synx_private.h
+++ b/drivers/media/platform/msm/synx/synx_private.h
@@ -198,6 +198,7 @@ struct synx_import_data {
  * synx_ids       : Global unique ids
  * idr_lock       : Spin lock for id allocation
  * dma_context    : dma context id
+ * vtbl_lock      : Mutex used to lock the bind table
  * bind_vtbl      : Table with registered bind ops for external sync (bind)
  * client_list    : All the synx clients
  * debugfs_root   : Root directory for debugfs
@@ -218,6 +219,7 @@ struct synx_device {
 	struct idr synx_ids;
 	spinlock_t idr_lock;
 	u64 dma_context;
+	struct mutex vtbl_lock;
 	struct synx_registered_ops bind_vtbl[SYNX_MAX_BIND_TYPES];
 	struct list_head client_list;
 	struct dentry *debugfs_root;
@@ -231,7 +233,6 @@ struct synx_device {
  * specific details
  *
  * @device      : Pointer to synx device structure
- * @pid         : Process id
  * @eventq_lock : Spinlock for the event queue
  * @wq          : Queue for the polling process
  * @eventq      : All the user callback payloads
@@ -239,7 +240,6 @@ struct synx_device {
  */
 struct synx_client {
 	struct synx_device *device;
-	int pid;
 	spinlock_t eventq_lock;
 	wait_queue_head_t wq;
 	struct list_head eventq;
diff --git a/drivers/media/platform/msm/synx/synx_util.c b/drivers/media/platform/msm/synx/synx_util.c
index 09de7f0..22d95b4 100644
--- a/drivers/media/platform/msm/synx/synx_util.c
+++ b/drivers/media/platform/msm/synx/synx_util.c
@@ -584,17 +584,6 @@ s32 synx_create_handle(void *pObj)
 	return id;
 }
 
-struct synx_client *get_current_client(void)
-{
-	struct synx_client *client = NULL;
-
-	list_for_each_entry(client, &synx_dev->client_list, list) {
-		if (current->tgid == client->pid)
-			break;
-	}
-	return client;
-}
-
 int synx_generate_secure_key(struct synx_table_row *row)
 {
 	if (!row)
@@ -750,14 +739,14 @@ struct bind_operations *synx_get_bind_ops(u32 type)
 	if (!is_valid_type(type))
 		return NULL;
 
-	mutex_lock(&synx_dev->table_lock);
+	mutex_lock(&synx_dev->vtbl_lock);
 	client_ops = &synx_dev->bind_vtbl[type];
 	if (!client_ops->valid) {
-		mutex_unlock(&synx_dev->table_lock);
+		mutex_unlock(&synx_dev->vtbl_lock);
 		return NULL;
 	}
 	pr_debug("found bind ops for %s\n", client_ops->name);
-	mutex_unlock(&synx_dev->table_lock);
+	mutex_unlock(&synx_dev->vtbl_lock);
 
 	return &client_ops->ops;
 }
diff --git a/drivers/media/platform/msm/synx/synx_util.h b/drivers/media/platform/msm/synx/synx_util.h
index 20982c7..97859ad 100644
--- a/drivers/media/platform/msm/synx/synx_util.h
+++ b/drivers/media/platform/msm/synx/synx_util.h
@@ -160,13 +160,6 @@ u32 synx_status(struct synx_table_row *row);
 u32 synx_status_locked(struct synx_table_row *row);
 
 /**
- * @brief: Function to return the current client (active process)
- *
- * @return The current client
- */
-struct synx_client *get_current_client(void);
-
-/**
  * @brief: Function to look up a synx handle
  *         It also verifies the authenticity of the request through
  *         the key provided.
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
index bbb24fb..3deb054 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
@@ -118,7 +118,9 @@ static int mtk_mdp_probe(struct platform_device *pdev)
 	mutex_init(&mdp->vpulock);
 
 	/* Old dts had the components as child nodes */
-	if (of_get_next_child(dev->of_node, NULL)) {
+	node = of_get_next_child(dev->of_node, NULL);
+	if (node) {
+		of_node_put(node);
 		parent = dev->of_node;
 		dev_warn(dev, "device tree is out of date\n");
 	} else {
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index 29e3f5d..11ec048 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -253,8 +253,7 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
 	 */
 
 	pixsize = vout->bpp * vout->vrfb_bpp;
-	dst_icg = ((MAX_PIXELS_PER_LINE * pixsize) -
-		  (vout->pix.width * vout->bpp)) + 1;
+	dst_icg = MAX_PIXELS_PER_LINE * pixsize - vout->pix.width * vout->bpp;
 
 	xt->src_start = vout->buf_phy_addr[vb->i];
 	xt->dst_start = vout->vrfb_context[vb->i].paddr[0];
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 432bc7fb..addd03b 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -722,6 +722,10 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe,
 					s_stream, mode);
 			pipe->do_propagation = true;
 		}
+
+		/* Stop at the first external sub-device. */
+		if (subdev->dev != isp->dev)
+			break;
 	}
 
 	return 0;
@@ -836,6 +840,10 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
 						      &subdev->entity);
 			failure = -ETIMEDOUT;
 		}
+
+		/* Stop at the first external sub-device. */
+		if (subdev->dev != isp->dev)
+			break;
 	}
 
 	return failure;
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 77b73e27..412438d 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -2605,6 +2605,7 @@ int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc,
 	int ret;
 
 	/* Register the subdev and video node. */
+	ccdc->subdev.dev = vdev->mdev->dev;
 	ret = v4l2_device_register_subdev(vdev, &ccdc->subdev);
 	if (ret < 0)
 		goto error;
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
index e062939..47b0d3f 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -1034,6 +1034,7 @@ int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2,
 	int ret;
 
 	/* Register the subdev and video nodes. */
+	ccp2->subdev.dev = vdev->mdev->dev;
 	ret = v4l2_device_register_subdev(vdev, &ccp2->subdev);
 	if (ret < 0)
 		goto error;
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index a4d3d03..e45292a 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -1201,6 +1201,7 @@ int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2,
 	int ret;
 
 	/* Register the subdev and video nodes. */
+	csi2->subdev.dev = vdev->mdev->dev;
 	ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
 	if (ret < 0)
 		goto error;
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index 3195f7c..591c6de 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -2228,6 +2228,7 @@ int omap3isp_preview_register_entities(struct isp_prev_device *prev,
 	int ret;
 
 	/* Register the subdev and video nodes. */
+	prev->subdev.dev = vdev->mdev->dev;
 	ret = v4l2_device_register_subdev(vdev, &prev->subdev);
 	if (ret < 0)
 		goto error;
diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
index 0b6a875..2035e3c 100644
--- a/drivers/media/platform/omap3isp/ispresizer.c
+++ b/drivers/media/platform/omap3isp/ispresizer.c
@@ -1684,6 +1684,7 @@ int omap3isp_resizer_register_entities(struct isp_res_device *res,
 	int ret;
 
 	/* Register the subdev and video nodes. */
+	res->subdev.dev = vdev->mdev->dev;
 	ret = v4l2_device_register_subdev(vdev, &res->subdev);
 	if (ret < 0)
 		goto error;
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index 47353fe..bfa2d05 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -1029,6 +1029,8 @@ void omap3isp_stat_unregister_entities(struct ispstat *stat)
 int omap3isp_stat_register_entities(struct ispstat *stat,
 				    struct v4l2_device *vdev)
 {
+	stat->subdev.dev = vdev->mdev->dev;
+
 	return v4l2_device_register_subdev(vdev, &stat->subdev);
 }
 
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index 0d14670..5a30f1d 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -2306,7 +2306,7 @@ static int fdp1_probe(struct platform_device *pdev)
 		fdp1->fcp = rcar_fcp_get(fcp_node);
 		of_node_put(fcp_node);
 		if (IS_ERR(fdp1->fcp)) {
-			dev_err(&pdev->dev, "FCP not found (%ld)\n",
+			dev_dbg(&pdev->dev, "FCP not found (%ld)\n",
 				PTR_ERR(fdp1->fcp));
 			return PTR_ERR(fdp1->fcp);
 		}
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index d386822..1d9c028 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -1681,7 +1681,7 @@ static int dcmi_probe(struct platform_device *pdev)
 	if (irq <= 0) {
 		if (irq != -EPROBE_DEFER)
 			dev_err(&pdev->dev, "Could not get irq\n");
-		return irq;
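+		/* irq == 0 would look like probe success; map it to -ENXIO */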
+		return irq ? irq : -ENXIO;
 	}
 
 	dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index 462099a..7b8cf66 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -3,7 +3,8 @@
  *
  * This is a virtual device driver for testing mem-to-mem videobuf framework.
  * It simulates a device that uses memory buffers for both source and
- * destination, processes the data and issues an "irq" (simulated by a timer).
+ * destination, processes the data and issues an "irq" (simulated by a delayed
+ * workqueue).
  * The device is capable of multi-instance, multi-buffer-per-transaction
  * operation (via the mem2mem framework).
  *
@@ -19,7 +20,6 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/fs.h>
-#include <linux/timer.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 
@@ -148,7 +148,7 @@ struct vim2m_dev {
 	struct mutex		dev_mutex;
 	spinlock_t		irqlock;
 
-	struct timer_list	timer;
+	struct delayed_work	work_run;
 
 	struct v4l2_m2m_dev	*m2m_dev;
 };
@@ -336,12 +336,6 @@ static int device_process(struct vim2m_ctx *ctx,
 	return 0;
 }
 
-static void schedule_irq(struct vim2m_dev *dev, int msec_timeout)
-{
-	dprintk(dev, "Scheduling a simulated irq\n");
-	mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msec_timeout));
-}
-
 /*
  * mem2mem callbacks
  */
@@ -387,13 +381,14 @@ static void device_run(void *priv)
 
 	device_process(ctx, src_buf, dst_buf);
 
-	/* Run a timer, which simulates a hardware irq  */
-	schedule_irq(dev, ctx->transtime);
+	/* Run delayed work, which simulates a hardware irq  */
+	schedule_delayed_work(&dev->work_run, msecs_to_jiffies(ctx->transtime));
 }
 
-static void device_isr(struct timer_list *t)
+static void device_work(struct work_struct *w)
 {
-	struct vim2m_dev *vim2m_dev = from_timer(vim2m_dev, t, timer);
+	struct vim2m_dev *vim2m_dev =
+		container_of(w, struct vim2m_dev, work_run.work);
 	struct vim2m_ctx *curr_ctx;
 	struct vb2_v4l2_buffer *src_vb, *dst_vb;
 	unsigned long flags;
@@ -802,9 +797,13 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
 static void vim2m_stop_streaming(struct vb2_queue *q)
 {
 	struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
+	struct vim2m_dev *dev = ctx->dev;
 	struct vb2_v4l2_buffer *vbuf;
 	unsigned long flags;
 
+	if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx)
+		cancel_delayed_work_sync(&dev->work_run);
+
 	for (;;) {
 		if (V4L2_TYPE_IS_OUTPUT(q->type))
 			vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
@@ -1015,6 +1014,7 @@ static int vim2m_probe(struct platform_device *pdev)
 	vfd = &dev->vfd;
 	vfd->lock = &dev->dev_mutex;
 	vfd->v4l2_dev = &dev->v4l2_dev;
+	INIT_DELAYED_WORK(&dev->work_run, device_work);
 
 	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
 	if (ret) {
@@ -1026,7 +1026,6 @@ static int vim2m_probe(struct platform_device *pdev)
 	v4l2_info(&dev->v4l2_dev,
 			"Device registered as /dev/video%d\n", vfd->num);
 
-	timer_setup(&dev->timer, device_isr, 0);
 	platform_set_drvdata(pdev, dev);
 
 	dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
@@ -1083,7 +1082,6 @@ static int vim2m_remove(struct platform_device *pdev)
 	media_device_cleanup(&dev->mdev);
 #endif
 	v4l2_m2m_release(dev->m2m_dev);
-	del_timer_sync(&dev->timer);
 	video_unregister_device(&dev->vfd);
 	v4l2_device_unregister(&dev->v4l2_dev);
 
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 3b09ffce..2e273f4 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -1724,7 +1724,7 @@ int vidioc_s_edid(struct file *file, void *_fh,
 		return -E2BIG;
 	}
 	phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
-	ret = cec_phys_addr_validate(phys_addr, &phys_addr, NULL);
+	ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL);
 	if (ret)
 		return ret;
 
@@ -1740,7 +1740,7 @@ int vidioc_s_edid(struct file *file, void *_fh,
 
 	for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
 		cec_s_phys_addr(dev->cec_tx_adap[i],
-				cec_phys_addr_for_input(phys_addr, i + 1),
+				v4l2_phys_addr_for_input(phys_addr, i + 1),
 				false);
 	return 0;
 }
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index 2079861..e108e9b 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -863,7 +863,7 @@ int vidioc_g_edid(struct file *file, void *_fh,
 	if (edid->blocks > dev->edid_blocks - edid->start_block)
 		edid->blocks = dev->edid_blocks - edid->start_block;
 	if (adap)
-		cec_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr);
+		v4l2_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr);
 	memcpy(edid->edid, dev->edid + edid->start_block * 128, edid->blocks * 128);
 	return 0;
 }
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
index 26289ad..a5634ca 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/vsp1/vsp1_dl.c
@@ -557,8 +557,10 @@ static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
 
 	/* Get a default body for our list. */
 	dl->body0 = vsp1_dl_body_get(dlm->pool);
-	if (!dl->body0)
+	if (!dl->body0) {
+		kfree(dl);
 		return NULL;
+	}
 
 	header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);
 
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 313a95f..19e381d 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -743,7 +743,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
 	/* start radio */
 	retval = si470x_start_usb(radio);
 	if (retval < 0)
-		goto err_all;
+		goto err_buf;
 
 	/* set initial frequency */
 	si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */
@@ -758,6 +758,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
 
 	return 0;
 err_all:
+	usb_kill_urb(radio->int_in_urb);
+err_buf:
 	kfree(radio->buffer);
 err_ctrl:
 	v4l2_ctrl_handler_free(&radio->hdl);
@@ -831,6 +833,7 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf)
 	mutex_lock(&radio->lock);
 	v4l2_device_disconnect(&radio->v4l2_dev);
 	video_unregister_device(&radio->videodev);
+	usb_kill_urb(radio->int_in_urb);
 	usb_set_intfdata(intf, NULL);
 	mutex_unlock(&radio->lock);
 	v4l2_device_put(&radio->v4l2_dev);
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index 7daac8b..6f3030b 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -424,6 +424,10 @@ static int iguanair_probe(struct usb_interface *intf,
 	int ret, pipein, pipeout;
 	struct usb_host_interface *idesc;
 
+	idesc = intf->altsetting;
+	if (idesc->desc.bNumEndpoints < 2)
+		return -ENODEV;
+
 	ir = kzalloc(sizeof(*ir), GFP_KERNEL);
 	rc = rc_allocate_device(RC_DRIVER_IR_RAW);
 	if (!ir || !rc) {
@@ -438,18 +442,13 @@ static int iguanair_probe(struct usb_interface *intf,
 	ir->urb_in = usb_alloc_urb(0, GFP_KERNEL);
 	ir->urb_out = usb_alloc_urb(0, GFP_KERNEL);
 
-	if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out) {
+	if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out ||
+	    !usb_endpoint_is_int_in(&idesc->endpoint[0].desc) ||
+	    !usb_endpoint_is_int_out(&idesc->endpoint[1].desc)) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	idesc = intf->altsetting;
-
-	if (idesc->desc.bNumEndpoints < 2) {
-		ret = -ENODEV;
-		goto out;
-	}
-
 	ir->rc = rc;
 	ir->dev = &intf->dev;
 	ir->udev = udev;
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 1041c056..f23a220 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1835,12 +1835,17 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
 		break;
 	/* iMON VFD, MCE IR */
 	case 0x46:
-	case 0x7e:
 	case 0x9e:
 		dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR");
 		detected_display_type = IMON_DISPLAY_TYPE_VFD;
 		allowed_protos = RC_PROTO_BIT_RC6_MCE;
 		break;
+	/* iMON VFD, iMON or MCE IR */
+	case 0x7e:
+		dev_info(ictx->dev, "0xffdc iMON VFD, iMON or MCE IR");
+		detected_display_type = IMON_DISPLAY_TYPE_VFD;
+		allowed_protos |= RC_PROTO_BIT_RC6_MCE;
+		break;
 	/* iMON LCD, MCE IR */
 	case 0x9f:
 		dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR");
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 4c0c800..f1dfb84 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -42,21 +42,22 @@
 #include <linux/pm_wakeup.h>
 #include <media/rc-core.h>
 
-#define DRIVER_VERSION	"1.94"
+#define DRIVER_VERSION	"1.95"
 #define DRIVER_AUTHOR	"Jarod Wilson <jarod@redhat.com>"
 #define DRIVER_DESC	"Windows Media Center Ed. eHome Infrared Transceiver " \
 			"device driver"
 #define DRIVER_NAME	"mceusb"
 
+#define USB_TX_TIMEOUT		1000 /* in milliseconds */
 #define USB_CTRL_MSG_SZ		2  /* Size of usb ctrl msg on gen1 hw */
 #define MCE_G1_INIT_MSGS	40 /* Init messages on gen1 hw to throw out */
 
 /* MCE constants */
-#define MCE_CMDBUF_SIZE		384  /* MCE Command buffer length */
+#define MCE_IRBUF_SIZE		128  /* TX IR buffer length */
 #define MCE_TIME_UNIT		50   /* Approx 50us resolution */
-#define MCE_CODE_LENGTH		5    /* Normal length of packet (with header) */
-#define MCE_PACKET_SIZE		4    /* Normal length of packet (without header) */
-#define MCE_IRDATA_HEADER	0x84 /* Actual header format is 0x80 + num_bytes */
+#define MCE_PACKET_SIZE		31   /* Max length of packet (with header) */
+#define MCE_IRDATA_HEADER	(0x80 + MCE_PACKET_SIZE - 1)
+				     /* Actual format is 0x80 + num_bytes */
 #define MCE_IRDATA_TRAILER	0x80 /* End of IR data */
 #define MCE_MAX_CHANNELS	2    /* Two transmitters, hardware dependent? */
 #define MCE_DEFAULT_TX_MASK	0x03 /* Vals: TX1=0x01, TX2=0x02, ALL=0x03 */
@@ -609,9 +610,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
 	if (len <= skip)
 		return;
 
-	dev_dbg(dev, "%cx data: %*ph (length=%d)",
-		(out ? 't' : 'r'),
-		min(len, buf_len - offset), buf + offset, len);
+	dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)",
+		(out ? 't' : 'r'), offset,
+		min(len, buf_len - offset), buf + offset, len, buf_len);
 
 	inout = out ? "Request" : "Got";
 
@@ -733,6 +734,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
 		case MCE_RSP_CMD_ILLEGAL:
 			dev_dbg(dev, "Illegal PORT_IR command");
 			break;
+		case MCE_RSP_TX_TIMEOUT:
+			dev_dbg(dev, "IR TX timeout (TX buffer underrun)");
+			break;
 		default:
 			dev_dbg(dev, "Unknown command 0x%02x 0x%02x",
 				 cmd, subcmd);
@@ -747,13 +751,14 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
 		dev_dbg(dev, "End of raw IR data");
 	else if ((cmd != MCE_CMD_PORT_IR) &&
 		 ((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA))
-		dev_dbg(dev, "Raw IR data, %d pulse/space samples", ir->rem);
+		dev_dbg(dev, "Raw IR data, %d pulse/space samples",
+			cmd & MCE_PACKET_LENGTH_MASK);
 #endif
 }
 
 /*
  * Schedule work that can't be done in interrupt handlers
- * (mceusb_dev_recv() and mce_async_callback()) nor tasklets.
+ * (mceusb_dev_recv() and mce_write_callback()) nor tasklets.
  * Invokes mceusb_deferred_kevent() for recovering from
  * error events specified by the kevent bit field.
  */
@@ -766,23 +771,80 @@ static void mceusb_defer_kevent(struct mceusb_dev *ir, int kevent)
 		dev_dbg(ir->dev, "kevent %d scheduled", kevent);
 }
 
-static void mce_async_callback(struct urb *urb)
+static void mce_write_callback(struct urb *urb)
 {
-	struct mceusb_dev *ir;
-	int len;
-
 	if (!urb)
 		return;
 
-	ir = urb->context;
+	complete(urb->context);
+}
+
+/*
+ * Write (TX/send) data to MCE device USB endpoint out.
+ * Used for IR blaster TX and MCE device commands.
+ *
+ * Return: The number of bytes written (> 0) or errno (< 0).
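+ *
+ * Unlike the asynchronous mce_request_packet() this replaces, the
+ * call blocks until the URB completes or USB_TX_TIMEOUT expires.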
+ */
+static int mce_write(struct mceusb_dev *ir, u8 *data, int size)
+{
+	int ret;
+	struct urb *urb;
+	struct device *dev = ir->dev;
+	unsigned char *buf_out;
+	struct completion tx_done;
+	unsigned long expire;
+	unsigned long ret_wait;
+
+	mceusb_dev_printdata(ir, data, size, 0, size, true);
+
+	urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (unlikely(!urb)) {
+		dev_err(dev, "Error: mce write couldn't allocate urb");
+		return -ENOMEM;
+	}
+
+	buf_out = kmalloc(size, GFP_KERNEL);
+	if (!buf_out) {
+		usb_free_urb(urb);
+		return -ENOMEM;
+	}
+
+	init_completion(&tx_done);
+
+	/* outbound data */
+	if (usb_endpoint_xfer_int(ir->usb_ep_out))
+		usb_fill_int_urb(urb, ir->usbdev, ir->pipe_out,
+				 buf_out, size, mce_write_callback, &tx_done,
+				 ir->usb_ep_out->bInterval);
+	else
+		usb_fill_bulk_urb(urb, ir->usbdev, ir->pipe_out,
+				  buf_out, size, mce_write_callback, &tx_done);
+	memcpy(buf_out, data, size);
+
+	ret = usb_submit_urb(urb, GFP_KERNEL);
+	if (ret) {
+		dev_err(dev, "Error: mce write submit urb error = %d", ret);
+		kfree(buf_out);
+		usb_free_urb(urb);
+		return ret;
+	}
+
+	expire = msecs_to_jiffies(USB_TX_TIMEOUT);
+	ret_wait = wait_for_completion_timeout(&tx_done, expire);
+	if (!ret_wait) {
+		dev_err(dev, "Error: mce write timed out (expire = %lu (%dms))",
+			expire, USB_TX_TIMEOUT);
+		usb_kill_urb(urb);
+		ret = (urb->status == -ENOENT ? -ETIMEDOUT : urb->status);
+	} else {
+		ret = urb->status;
+	}
+	if (ret >= 0)
+		ret = urb->actual_length;	/* bytes written */
 
 	switch (urb->status) {
 	/* success */
 	case 0:
-		len = urb->actual_length;
-
-		mceusb_dev_printdata(ir, urb->transfer_buffer, len,
-				     0, len, true);
 		break;
 
 	case -ECONNRESET:
@@ -792,140 +854,135 @@ static void mce_async_callback(struct urb *urb)
 		break;
 
 	case -EPIPE:
-		dev_err(ir->dev, "Error: request urb status = %d (TX HALT)",
+		dev_err(ir->dev, "Error: mce write urb status = %d (TX HALT)",
 			urb->status);
 		mceusb_defer_kevent(ir, EVENT_TX_HALT);
 		break;
 
 	default:
-		dev_err(ir->dev, "Error: request urb status = %d", urb->status);
+		dev_err(ir->dev, "Error: mce write urb status = %d",
+			urb->status);
 		break;
 	}
 
-	/* the transfer buffer and urb were allocated in mce_request_packet */
-	kfree(urb->transfer_buffer);
+	dev_dbg(dev, "tx done status = %d (wait = %lu, expire = %lu (%dms), urb->actual_length = %d, urb->status = %d)",
+		ret, ret_wait, expire, USB_TX_TIMEOUT,
+		urb->actual_length, urb->status);
+
+	kfree(buf_out);
 	usb_free_urb(urb);
+
+	return ret;
 }
 
-/* request outgoing (send) usb packet - used to initialize remote */
-static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
-								int size)
-{
-	int res;
-	struct urb *async_urb;
-	struct device *dev = ir->dev;
-	unsigned char *async_buf;
-
-	async_urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (unlikely(!async_urb)) {
-		dev_err(dev, "Error, couldn't allocate urb!");
-		return;
-	}
-
-	async_buf = kmalloc(size, GFP_KERNEL);
-	if (!async_buf) {
-		usb_free_urb(async_urb);
-		return;
-	}
-
-	/* outbound data */
-	if (usb_endpoint_xfer_int(ir->usb_ep_out))
-		usb_fill_int_urb(async_urb, ir->usbdev, ir->pipe_out,
-				 async_buf, size, mce_async_callback, ir,
-				 ir->usb_ep_out->bInterval);
-	else
-		usb_fill_bulk_urb(async_urb, ir->usbdev, ir->pipe_out,
-				  async_buf, size, mce_async_callback, ir);
-
-	memcpy(async_buf, data, size);
-
-	dev_dbg(dev, "send request called (size=%#x)", size);
-
-	res = usb_submit_urb(async_urb, GFP_ATOMIC);
-	if (res) {
-		dev_err(dev, "send request FAILED! (res=%d)", res);
-		kfree(async_buf);
-		usb_free_urb(async_urb);
-		return;
-	}
-	dev_dbg(dev, "send request complete (res=%d)", res);
-}
-
-static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
+static void mce_command_out(struct mceusb_dev *ir, u8 *data, int size)
 {
 	int rsize = sizeof(DEVICE_RESUME);
 
 	if (ir->need_reset) {
 		ir->need_reset = false;
-		mce_request_packet(ir, DEVICE_RESUME, rsize);
+		mce_write(ir, DEVICE_RESUME, rsize);
 		msleep(10);
 	}
 
-	mce_request_packet(ir, data, size);
+	mce_write(ir, data, size);
 	msleep(10);
 }
 
-/* Send data out the IR blaster port(s) */
+/*
+ * Transmit IR out the MCE device IR blaster port(s).
+ *
+ * Convert IR pulse/space sequence from LIRC to MCE format.
+ * Break up a long IR sequence into multiple parts (MCE IR data packets).
+ *
+ * u32 txbuf[] consists of IR pulse, space, ..., and pulse times in usec.
+ * Pulses and spaces are implicit by their position.
+ * The first IR sample, txbuf[0], is always a pulse.
+ *
+ * u8 irbuf[] consists of multiple IR data packets for the MCE device.
+ * A packet is 1 u8 MCE_IRDATA_HEADER and up to 30 u8 IR samples.
+ * An IR sample is a 1-bit pulse/space flag plus a 7-bit time
+ * in MCE time units (50usec).
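+ *
+ * Worked example: a 600us pulse is 600/50 = 12 time units -> sample
+ * 0x80|0x0c = 0x8c (pulse bit set); a 7000us space is 140 units,
+ * emitted as two samples 0x7f then 0x0d (pulse bit clear).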
+ *
+ * Return: The number of IR samples sent (> 0) or errno (< 0).
+ */
 static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
 {
 	struct mceusb_dev *ir = dev->priv;
-	int i, length, ret = 0;
-	int cmdcount = 0;
-	unsigned char cmdbuf[MCE_CMDBUF_SIZE];
-
-	/* MCE tx init header */
-	cmdbuf[cmdcount++] = MCE_CMD_PORT_IR;
-	cmdbuf[cmdcount++] = MCE_CMD_SETIRTXPORTS;
-	cmdbuf[cmdcount++] = ir->tx_mask;
+	u8 cmdbuf[3] = { MCE_CMD_PORT_IR, MCE_CMD_SETIRTXPORTS, 0x00 };
+	u8 irbuf[MCE_IRBUF_SIZE];
+	int ircount = 0;
+	unsigned int irsample;
+	int i, length, ret;
 
 	/* Send the set TX ports command */
-	mce_async_out(ir, cmdbuf, cmdcount);
-	cmdcount = 0;
+	cmdbuf[2] = ir->tx_mask;
+	mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
 
-	/* Generate mce packet data */
-	for (i = 0; (i < count) && (cmdcount < MCE_CMDBUF_SIZE); i++) {
-		txbuf[i] = txbuf[i] / MCE_TIME_UNIT;
+	/* Generate mce IR data packet */
+	for (i = 0; i < count; i++) {
+		irsample = txbuf[i] / MCE_TIME_UNIT;
 
-		do { /* loop to support long pulses/spaces > 127*50us=6.35ms */
-
-			/* Insert mce packet header every 4th entry */
-			if ((cmdcount < MCE_CMDBUF_SIZE) &&
-			    (cmdcount % MCE_CODE_LENGTH) == 0)
-				cmdbuf[cmdcount++] = MCE_IRDATA_HEADER;
-
-			/* Insert mce packet data */
-			if (cmdcount < MCE_CMDBUF_SIZE)
-				cmdbuf[cmdcount++] =
-					(txbuf[i] < MCE_PULSE_BIT ?
-					 txbuf[i] : MCE_MAX_PULSE_LENGTH) |
-					 (i & 1 ? 0x00 : MCE_PULSE_BIT);
-			else {
-				ret = -EINVAL;
-				goto out;
+		/* loop to support long pulses/spaces > 6350us (127*50us) */
+		while (irsample > 0) {
+			/* Insert IR header every 30th entry */
+			if (ircount % MCE_PACKET_SIZE == 0) {
+				/* Room for IR header and one IR sample? */
+				if (ircount >= MCE_IRBUF_SIZE - 1) {
+					/* Send near full buffer */
+					ret = mce_write(ir, irbuf, ircount);
+					if (ret < 0)
+						return ret;
+					ircount = 0;
+				}
+				irbuf[ircount++] = MCE_IRDATA_HEADER;
 			}
 
-		} while ((txbuf[i] > MCE_MAX_PULSE_LENGTH) &&
-			 (txbuf[i] -= MCE_MAX_PULSE_LENGTH));
-	}
+			/* Insert IR sample */
+			if (irsample <= MCE_MAX_PULSE_LENGTH) {
+				irbuf[ircount] = irsample;
+				irsample = 0;
+			} else {
+				irbuf[ircount] = MCE_MAX_PULSE_LENGTH;
+				irsample -= MCE_MAX_PULSE_LENGTH;
+			}
+			/*
+			 * Even i = IR pulse
+			 * Odd  i = IR space
+			 */
+			irbuf[ircount] |= (i & 1 ? 0 : MCE_PULSE_BIT);
+			ircount++;
 
-	/* Check if we have room for the empty packet at the end */
-	if (cmdcount >= MCE_CMDBUF_SIZE) {
-		ret = -EINVAL;
-		goto out;
-	}
+			/* IR buffer full? */
+			if (ircount >= MCE_IRBUF_SIZE) {
+				/* Fix packet length in last header */
+				length = ircount % MCE_PACKET_SIZE;
+				if (length > 0)
+					irbuf[ircount - length] -=
+						MCE_PACKET_SIZE - length;
+				/* Send full buffer */
+				ret = mce_write(ir, irbuf, ircount);
+				if (ret < 0)
+					return ret;
+				ircount = 0;
+			}
+		}
+	} /* after for loop, 0 <= ircount < MCE_IRBUF_SIZE */
 
 	/* Fix packet length in last header */
-	length = cmdcount % MCE_CODE_LENGTH;
-	cmdbuf[cmdcount - length] -= MCE_CODE_LENGTH - length;
+	length = ircount % MCE_PACKET_SIZE;
+	if (length > 0)
+		irbuf[ircount - length] -= MCE_PACKET_SIZE - length;
 
-	/* All mce commands end with an empty packet (0x80) */
-	cmdbuf[cmdcount++] = MCE_IRDATA_TRAILER;
+	/* Append IR trailer (0x80) to final partial (or empty) IR buffer */
+	irbuf[ircount++] = MCE_IRDATA_TRAILER;
 
-	/* Transmit the command to the mce device */
-	mce_async_out(ir, cmdbuf, cmdcount);
+	/* Send final buffer */
+	ret = mce_write(ir, irbuf, ircount);
+	if (ret < 0)
+		return ret;
 
-out:
-	return ret ? ret : count;
+	return count;
 }
 
 /* Sets active IR outputs -- mce devices typically have two */
@@ -965,7 +1022,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
 			cmdbuf[2] = MCE_CMD_SIG_END;
 			cmdbuf[3] = MCE_IRDATA_TRAILER;
 			dev_dbg(ir->dev, "disabling carrier modulation");
-			mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+			mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
 			return 0;
 		}
 
@@ -979,7 +1036,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
 								carrier);
 
 				/* Transmit new carrier to mce device */
-				mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+				mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
 				return 0;
 			}
 		}
@@ -1002,10 +1059,10 @@ static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout)
 	cmdbuf[2] = units >> 8;
 	cmdbuf[3] = units;
 
-	mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+	mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
 
 	/* get receiver timeout value */
-	mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
+	mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
 
 	return 0;
 }
@@ -1030,7 +1087,7 @@ static int mceusb_set_rx_wideband(struct rc_dev *dev, int enable)
 		ir->wideband_rx_enabled = false;
 		cmdbuf[2] = 1;	/* port 1 is long range receiver */
 	}
-	mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+	mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
 	/* response from device sets ir->learning_active */
 
 	return 0;
@@ -1053,7 +1110,7 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
 		ir->carrier_report_enabled = true;
 		if (!ir->learning_active) {
 			cmdbuf[2] = 2;	/* port 2 is short range receiver */
-			mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+			mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
 		}
 	} else {
 		ir->carrier_report_enabled = false;
@@ -1064,7 +1121,7 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
 		 */
 		if (ir->learning_active && !ir->wideband_rx_enabled) {
 			cmdbuf[2] = 1;	/* port 1 is long range receiver */
-			mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+			mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
 		}
 	}
 
@@ -1143,6 +1200,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
 		}
 		break;
 	case MCE_RSP_CMD_ILLEGAL:
+	case MCE_RSP_TX_TIMEOUT:
 		ir->need_reset = true;
 		break;
 	default:
@@ -1280,7 +1338,7 @@ static void mceusb_get_emulator_version(struct mceusb_dev *ir)
 {
 	/* If we get no reply or an illegal command reply, its ver 1, says MS */
 	ir->emver = 1;
-	mce_async_out(ir, GET_EMVER, sizeof(GET_EMVER));
+	mce_command_out(ir, GET_EMVER, sizeof(GET_EMVER));
 }
 
 static void mceusb_gen1_init(struct mceusb_dev *ir)
@@ -1326,10 +1384,10 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
 	dev_dbg(dev, "set handshake  - retC = %d", ret);
 
 	/* device resume */
-	mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
+	mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
 
 	/* get hw/sw revision? */
-	mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
+	mce_command_out(ir, GET_REVISION, sizeof(GET_REVISION));
 
 	kfree(data);
 }
@@ -1337,13 +1395,13 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
 static void mceusb_gen2_init(struct mceusb_dev *ir)
 {
 	/* device resume */
-	mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
+	mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
 
 	/* get wake version (protocol, key, address) */
-	mce_async_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION));
+	mce_command_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION));
 
 	/* unknown what this one actually returns... */
-	mce_async_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
+	mce_command_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
 }
 
 static void mceusb_get_parameters(struct mceusb_dev *ir)
@@ -1357,24 +1415,24 @@ static void mceusb_get_parameters(struct mceusb_dev *ir)
 	ir->num_rxports = 2;
 
 	/* get number of tx and rx ports */
-	mce_async_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS));
+	mce_command_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS));
 
 	/* get the carrier and frequency */
-	mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
+	mce_command_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
 
 	if (ir->num_txports && !ir->flags.no_tx)
 		/* get the transmitter bitmask */
-		mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
+		mce_command_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
 
 	/* get receiver timeout value */
-	mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
+	mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
 
 	/* get receiver sensor setting */
-	mce_async_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
+	mce_command_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
 
 	for (i = 0; i < ir->num_txports; i++) {
 		cmdbuf[2] = i;
-		mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+		mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
 	}
 }
 
@@ -1383,7 +1441,7 @@ static void mceusb_flash_led(struct mceusb_dev *ir)
 	if (ir->emver < 2)
 		return;
 
-	mce_async_out(ir, FLASH_LED, sizeof(FLASH_LED));
+	mce_command_out(ir, FLASH_LED, sizeof(FLASH_LED));
 }
 
 /*
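The hunks above route every mceusb device command through mce_command_out() instead of the fire-and-forget mce_async_out(), and treat MCE_RSP_TX_TIMEOUT as a reset condition. A minimal sketch of the send-and-wait shape this implies, assuming a hypothetical my_tx_submit() helper and a completion signalled from the response handler (not the driver's actual API):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct my_dev {
	struct completion cmd_done;	/* completed by the response handler */
};

int my_tx_submit(struct my_dev *dev, const u8 *buf, int len);	/* assumed */

static int my_command_out(struct my_dev *dev, const u8 *buf, int len)
{
	int ret;

	reinit_completion(&dev->cmd_done);
	ret = my_tx_submit(dev, buf, len);	/* asynchronous transmit */
	if (ret)
		return ret;
	/* serialize: wait for the reply before the next command goes out */
	if (!wait_for_completion_timeout(&dev->cmd_done,
					 msecs_to_jiffies(100)))
		return -ETIMEDOUT;
	return 0;
}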
diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c
index e42efd9..d37b85d 100644
--- a/drivers/media/rc/mtk-cir.c
+++ b/drivers/media/rc/mtk-cir.c
@@ -44,6 +44,11 @@
 /* Fields containing pulse width data */
 #define MTK_WIDTH_MASK		  (GENMASK(7, 0))
 
+/* IR threshold */
+#define MTK_IRTHD		 0x14
+#define MTK_DG_CNT_MASK		 (GENMASK(12, 8))
+#define MTK_DG_CNT(x)		 ((x) << 8)
+
 /* Bit to enable interrupt */
 #define MTK_IRINT_EN		  BIT(0)
 
@@ -409,6 +414,9 @@ static int mtk_ir_probe(struct platform_device *pdev)
 	mtk_w32_mask(ir, val, ir->data->fields[MTK_HW_PERIOD].mask,
 		     ir->data->fields[MTK_HW_PERIOD].reg);
 
+	/* Set de-glitch counter */
+	mtk_w32_mask(ir, MTK_DG_CNT(1), MTK_DG_CNT_MASK, MTK_IRTHD);
+
 	/* Enable IR and PWM */
 	val = mtk_r32(ir, MTK_CONFIG_HIGH_REG);
 	val |= MTK_OK_COUNT(ir->data->ok_count) |  MTK_PWM_EN | MTK_IR_EN;
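MTK_DG_CNT(1) is installed into bits 12:8 of the MTK_IRTHD register with a masked write. A generic sketch of the read-modify-write that a helper like mtk_w32_mask() is assumed to perform:

#include <linux/io.h>
#include <linux/types.h>

static void rmw32(void __iomem *base, u32 val, u32 mask, unsigned int reg)
{
	u32 tmp = readl(base + reg);

	tmp &= ~mask;		/* clear the field, here GENMASK(12, 8) */
	tmp |= val & mask;	/* install the new value, here 1 << 8 */
	writel(tmp, base + reg);
}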
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index f5b0459..4c191fc 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -685,6 +685,10 @@ static int submit_urbs(struct camera_data *cam)
 		if (!urb) {
 			for (j = 0; j < i; j++)
 				usb_free_urb(cam->sbuf[j].urb);
+			for (j = 0; j < NUM_SBUF; j++) {
+				kfree(cam->sbuf[j].data);
+				cam->sbuf[j].data = NULL;
+			}
 			return -ENOMEM;
 		}
 
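The added loop completes the unwind: when the i-th URB allocation fails, the earlier URBs are freed and every data buffer is released and cleared so a later teardown path cannot free it twice. The general shape, with an illustrative struct standing in for struct cpia2_sbuf:

#include <linux/slab.h>
#include <linux/usb.h>

struct my_sbuf {
	struct urb *urb;
	void *data;
};

static int alloc_urbs(struct my_sbuf *sbuf, int n)
{
	int i, j;

	for (i = 0; i < n; i++) {
		sbuf[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!sbuf[i].urb) {
			for (j = 0; j < i; j++)
				usb_free_urb(sbuf[j].urb);
			for (j = 0; j < n; j++) {
				kfree(sbuf[j].data);	/* kfree(NULL) is a no-op */
				sbuf[j].data = NULL;	/* block a later double free */
			}
			return -ENOMEM;
		}
	}
	return 0;
}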
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 091389f..c8d7950 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -2442,9 +2442,13 @@ static int dib9090_tuner_attach(struct dvb_usb_adapter *adap)
 		8, 0x0486,
 	};
 
+	if (!IS_ENABLED(CONFIG_DVB_DIB9000))
+		return -ENODEV;
 	if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &dib9090_dib0090_config) == NULL)
 		return -ENODEV;
 	i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
+	if (!i2c)
+		return -ENODEV;
 	if (dib01x0_pmu_update(i2c, data_dib190, 10) != 0)
 		return -ENODEV;
 	dib0700_set_i2c_speed(adap->dev, 1500);
@@ -2520,10 +2524,14 @@ static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap)
 		0, 0x00ef,
 		8, 0x0406,
 	};
+	if (!IS_ENABLED(CONFIG_DVB_DIB9000))
+		return -ENODEV;
 	i2c = dib9000_get_tuner_interface(adap->fe_adap[0].fe);
 	if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &nim9090md_dib0090_config[0]) == NULL)
 		return -ENODEV;
 	i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
+	if (!i2c)
+		return -ENODEV;
 	if (dib01x0_pmu_update(i2c, data_dib190, 10) < 0)
 		return -ENODEV;
 
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index 0af7438..ae793da 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -913,14 +913,6 @@ static int pctv452e_frontend_attach(struct dvb_usb_adapter *a)
 						&a->dev->i2c_adap);
 	if (!a->fe_adap[0].fe)
 		return -ENODEV;
-
-	/*
-	 * dvb_frontend will call dvb_detach for both stb0899_detach
-	 * and stb0899_release but we only do dvb_attach(stb0899_attach).
-	 * Increment the module refcount instead.
-	 */
-	symbol_get(stb0899_attach);
-
 	if ((dvb_attach(lnbp22_attach, a->fe_adap[0].fe,
 					&a->dev->i2c_adap)) == NULL)
 		err("Cannot attach lnbp22\n");
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 18d0f8f..8d8e9f5 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -607,10 +607,9 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
 {
 	struct technisat_usb2_state *state = d->priv;
-	u8 *buf = state->buf;
-	u8 *b;
-	int ret;
 	struct ir_raw_event ev;
+	u8 *buf = state->buf;
+	int i, ret;
 
 	buf[0] = GET_IR_DATA_VENDOR_REQUEST;
 	buf[1] = 0x08;
@@ -646,26 +645,25 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
 		return 0; /* no key pressed */
 
 	/* decoding */
-	b = buf+1;
 
 #if 0
 	deb_rc("RC: %d ", ret);
-	debug_dump(b, ret, deb_rc);
+	debug_dump(buf + 1, ret, deb_rc);
 #endif
 
 	ev.pulse = 0;
-	while (1) {
-		ev.pulse = !ev.pulse;
-		ev.duration = (*b * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / 1000;
-		ir_raw_event_store(d->rc_dev, &ev);
-
-		b++;
-		if (*b == 0xff) {
+	for (i = 1; i < ARRAY_SIZE(state->buf); i++) {
+		if (buf[i] == 0xff) {
 			ev.pulse = 0;
 			ev.duration = 888888*2;
 			ir_raw_event_store(d->rc_dev, &ev);
 			break;
 		}
+
+		ev.pulse = !ev.pulse;
+		ev.duration = (buf[i] * FIRMWARE_CLOCK_DIVISOR *
+			       FIRMWARE_CLOCK_TICK) / 1000;
+		ir_raw_event_store(d->rc_dev, &ev);
 	}
 
 	ir_raw_event_handle(d->rc_dev);
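The rewrite replaces an unbounded pointer walk, which relied on finding a 0xff terminator, with a loop bounded by ARRAY_SIZE(state->buf), so a missing terminator can no longer read past the buffer. Reduced to its essentials (consume_byte() is an illustrative placeholder):

#include <linux/types.h>

void consume_byte(u8 b);	/* illustrative placeholder */

static void parse_events(const u8 *buf, size_t buflen)
{
	size_t i;

	for (i = 1; i < buflen; i++) {	/* bounded by the buffer size... */
		if (buf[i] == 0xff)	/* ...not only by the terminator */
			break;
		consume_byte(buf[i]);
	}
}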
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 87b887b..3f59a98 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -4020,7 +4020,6 @@ static void em28xx_usb_disconnect(struct usb_interface *intf)
 		dev->dev_next->disconnected = 1;
 		dev_info(&dev->intf->dev, "Disconnecting %s\n",
 			 dev->dev_next->name);
-		flush_request_modules(dev->dev_next);
 	}
 
 	dev->disconnected = 1;
diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c
index 989ae99..89b9293 100644
--- a/drivers/media/usb/gspca/konica.c
+++ b/drivers/media/usb/gspca/konica.c
@@ -123,6 +123,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 value, u16 index)
 	if (ret < 0) {
 		pr_err("reg_r err %d\n", ret);
 		gspca_dev->usb_err = ret;
+		/*
+		 * Make sure the buffer is zeroed to avoid uninitialized
+		 * values.
+		 */
+		memset(gspca_dev->usb_buf, 0, 2);
 	}
 }
 
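The same fix recurs across the gspca sub-drivers below: callers read gspca_dev->usb_buf unconditionally after reg_r(), so a failed control transfer must leave deterministic data behind rather than stale or uninitialized bytes. A condensed sketch of the pattern (do_control_read() is an assumed stand-in for the usb_control_msg() wrapper; struct gspca_dev comes from the gspca core):

#include <linux/string.h>

static void reg_r_safe(struct gspca_dev *gspca_dev, u16 value, int len)
{
	int ret = do_control_read(gspca_dev, value, len);	/* assumed */

	if (ret < 0) {
		gspca_dev->usb_err = ret;
		/* callers consume usb_buf regardless, so zero it */
		memset(gspca_dev->usb_buf, 0, len);
	}
}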
diff --git a/drivers/media/usb/gspca/nw80x.c b/drivers/media/usb/gspca/nw80x.c
index bedc04a..bde4441 100644
--- a/drivers/media/usb/gspca/nw80x.c
+++ b/drivers/media/usb/gspca/nw80x.c
@@ -1581,6 +1581,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
 	if (ret < 0) {
 		pr_err("reg_r err %d\n", ret);
 		gspca_dev->usb_err = ret;
+		/*
+		 * Make sure the buffer is zeroed to avoid uninitialized
+		 * values.
+		 */
+		memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
 		return;
 	}
 	if (len == 1)
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index 10fcbe9..cb41e61 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -2083,6 +2083,11 @@ static int reg_r(struct sd *sd, u16 index)
 	} else {
 		gspca_err(gspca_dev, "reg_r %02x failed %d\n", index, ret);
 		sd->gspca_dev.usb_err = ret;
+		/*
+		 * Make sure the result is zeroed to avoid uninitialized
+		 * values.
+		 */
+		gspca_dev->usb_buf[0] = 0;
 	}
 
 	return ret;
@@ -2111,6 +2116,11 @@ static int reg_r8(struct sd *sd,
 	} else {
 		gspca_err(gspca_dev, "reg_r8 %02x failed %d\n", index, ret);
 		sd->gspca_dev.usb_err = ret;
+		/*
+		 * Make sure the buffer is zeroed to avoid uninitialized
+		 * values.
+		 */
+		memset(gspca_dev->usb_buf, 0, 8);
 	}
 
 	return ret;
diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
index d06dc07..9e3326b 100644
--- a/drivers/media/usb/gspca/ov534.c
+++ b/drivers/media/usb/gspca/ov534.c
@@ -642,6 +642,11 @@ static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
 	if (ret < 0) {
 		pr_err("read failed %d\n", ret);
 		gspca_dev->usb_err = ret;
+		/*
+		 * Make sure the result is zeroed to avoid uninitialized
+		 * values.
+		 */
+		gspca_dev->usb_buf[0] = 0;
 	}
 	return gspca_dev->usb_buf[0];
 }
diff --git a/drivers/media/usb/gspca/ov534_9.c b/drivers/media/usb/gspca/ov534_9.c
index 3d1364d..4d4ae22 100644
--- a/drivers/media/usb/gspca/ov534_9.c
+++ b/drivers/media/usb/gspca/ov534_9.c
@@ -1154,6 +1154,7 @@ static u8 reg_r(struct gspca_dev *gspca_dev, u16 reg)
 	if (ret < 0) {
 		pr_err("reg_r err %d\n", ret);
 		gspca_dev->usb_err = ret;
+		return 0;
 	}
 	return gspca_dev->usb_buf[0];
 }
diff --git a/drivers/media/usb/gspca/se401.c b/drivers/media/usb/gspca/se401.c
index 477da06..40b8771 100644
--- a/drivers/media/usb/gspca/se401.c
+++ b/drivers/media/usb/gspca/se401.c
@@ -111,6 +111,11 @@ static void se401_read_req(struct gspca_dev *gspca_dev, u16 req, int silent)
 			pr_err("read req failed req %#04x error %d\n",
 			       req, err);
 		gspca_dev->usb_err = err;
+		/*
+		 * Make sure the buffer is zeroed to avoid uninitialized
+		 * values.
+		 */
+		memset(gspca_dev->usb_buf, 0, READ_REQ_SIZE);
 	}
 }
 
diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
index cfa2a04..efca54e 100644
--- a/drivers/media/usb/gspca/sn9c20x.c
+++ b/drivers/media/usb/gspca/sn9c20x.c
@@ -133,6 +133,13 @@ static const struct dmi_system_id flip_dmi_table[] = {
 		}
 	},
 	{
+		.ident = "MSI MS-1039",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MS-1039"),
+		}
+	},
+	{
 		.ident = "MSI MS-1632",
 		.matches = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
@@ -918,6 +925,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 reg, u16 length)
 	if (unlikely(result < 0 || result != length)) {
 		pr_err("Read register %02x failed %d\n", reg, result);
 		gspca_dev->usb_err = result;
+		/*
+		 * Make sure the buffer is zeroed to avoid uninitialized
+		 * values.
+		 */
+		memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
 	}
 }
 
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
index 5f3f297..22de65d 100644
--- a/drivers/media/usb/gspca/sonixb.c
+++ b/drivers/media/usb/gspca/sonixb.c
@@ -462,6 +462,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
 		dev_err(gspca_dev->v4l2_dev.dev,
 			"Error reading register %02x: %d\n", value, res);
 		gspca_dev->usb_err = res;
+		/*
+		 * Make sure the result is zeroed to avoid uninitialized
+		 * values.
+		 */
+		gspca_dev->usb_buf[0] = 0;
 	}
 }
 
diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c
index df8d848..fa108ce 100644
--- a/drivers/media/usb/gspca/sonixj.c
+++ b/drivers/media/usb/gspca/sonixj.c
@@ -1171,6 +1171,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
 	if (ret < 0) {
 		pr_err("reg_r err %d\n", ret);
 		gspca_dev->usb_err = ret;
+		/*
+		 * Make sure the buffer is zeroed to avoid uninitialized
+		 * values.
+		 */
+		memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
 	}
 }
 
diff --git a/drivers/media/usb/gspca/spca1528.c b/drivers/media/usb/gspca/spca1528.c
index d25924e..a20eb85 100644
--- a/drivers/media/usb/gspca/spca1528.c
+++ b/drivers/media/usb/gspca/spca1528.c
@@ -80,6 +80,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
 	if (ret < 0) {
 		pr_err("reg_r err %d\n", ret);
 		gspca_dev->usb_err = ret;
+		/*
+		 * Make sure the buffer is zeroed to avoid uninitialized
+		 * values.
+		 */
+		memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
 	}
 }
 
diff --git a/drivers/media/usb/gspca/sq930x.c b/drivers/media/usb/gspca/sq930x.c
index d7cbcf2..3521f5f 100644
--- a/drivers/media/usb/gspca/sq930x.c
+++ b/drivers/media/usb/gspca/sq930x.c
@@ -434,6 +434,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
 	if (ret < 0) {
 		pr_err("reg_r %04x failed %d\n", value, ret);
 		gspca_dev->usb_err = ret;
+		/*
+		 * Make sure the buffer is zeroed to avoid uninitialized
+		 * values.
+		 */
+		memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
 	}
 }
 
diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
index 437a336..26eae69 100644
--- a/drivers/media/usb/gspca/sunplus.c
+++ b/drivers/media/usb/gspca/sunplus.c
@@ -264,6 +264,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
 	if (ret < 0) {
 		pr_err("reg_r err %d\n", ret);
 		gspca_dev->usb_err = ret;
+		/*
+		 * Make sure the buffer is zeroed to avoid uninitialized
+		 * values.
+		 */
+		memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
 	}
 }
 
diff --git a/drivers/media/usb/gspca/vc032x.c b/drivers/media/usb/gspca/vc032x.c
index 52d0716..6e32264 100644
--- a/drivers/media/usb/gspca/vc032x.c
+++ b/drivers/media/usb/gspca/vc032x.c
@@ -2915,6 +2915,11 @@ static void reg_r_i(struct gspca_dev *gspca_dev,
 	if (ret < 0) {
 		pr_err("reg_r err %d\n", ret);
 		gspca_dev->usb_err = ret;
+		/*
+		 * Make sure the buffer is zeroed to avoid uninitialized
+		 * values.
+		 */
+		memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
 	}
 }
 static void reg_r(struct gspca_dev *gspca_dev,
diff --git a/drivers/media/usb/gspca/w996Xcf.c b/drivers/media/usb/gspca/w996Xcf.c
index abfab3de..ef0a839 100644
--- a/drivers/media/usb/gspca/w996Xcf.c
+++ b/drivers/media/usb/gspca/w996Xcf.c
@@ -143,6 +143,11 @@ static int w9968cf_read_sb(struct sd *sd)
 	} else {
 		pr_err("Read SB reg [01] failed\n");
 		sd->gspca_dev.usb_err = ret;
+		/*
+		 * Make sure the buffer is zeroed to avoid uninitialized
+		 * values.
+		 */
+		memset(sd->gspca_dev.usb_buf, 0, 2);
 	}
 
 	udelay(W9968CF_I2C_BUS_DELAY);
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 29ac7fc..3316a17 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -141,6 +141,7 @@ static int device_authorization(struct hdpvr_device *dev)
 
 	dev->fw_ver = dev->usbc_buf[1];
 
+	dev->usbc_buf[46] = '\0';
 	v4l2_info(&dev->v4l2_dev, "firmware version 0x%x dated %s\n",
 			  dev->fw_ver, &dev->usbc_buf[2]);
 
@@ -275,6 +276,7 @@ static int hdpvr_probe(struct usb_interface *interface,
 #endif
 	size_t buffer_size;
 	int i;
+	int dev_num;
 	int retval = -ENOMEM;
 
 	/* allocate memory for our device state and initialize it */
@@ -372,8 +374,17 @@ static int hdpvr_probe(struct usb_interface *interface,
 	}
 #endif
 
+	dev_num = atomic_inc_return(&dev_nr);
+	if (dev_num >= HDPVR_MAX) {
+		v4l2_err(&dev->v4l2_dev,
+			 "max device number reached, device register failed\n");
+		atomic_dec(&dev_nr);
+		retval = -ENODEV;
+		goto reg_fail;
+	}
+
 	retval = hdpvr_register_videodev(dev, &interface->dev,
-				    video_nr[atomic_inc_return(&dev_nr)]);
+				    video_nr[dev_num]);
 	if (retval < 0) {
 		v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
 		goto reg_fail;
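The probe fix stops atomic_inc_return(&dev_nr) from indexing past video_nr[HDPVR_MAX]: the reserved slot is validated against the table size and handed back on failure. The pattern in isolation:

#include <linux/atomic.h>
#include <linux/errno.h>

static int claim_slot(atomic_t *nr, int max)
{
	int slot = atomic_inc_return(nr);

	if (slot >= max) {
		atomic_dec(nr);	/* hand the reservation back */
		return -ENODEV;
	}
	return slot;		/* now a safe index into video_nr[] */
}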
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 5accb52..6e3f234 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -641,8 +641,7 @@ static int v4l_stk_release(struct file *fp)
 		dev->owner = NULL;
 	}
 
-	if (is_present(dev))
-		usb_autopm_put_interface(dev->interface);
+	usb_autopm_put_interface(dev->interface);
 	mutex_unlock(&dev->lock);
 	return v4l2_fh_release(fp);
 }
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 3a4e545..3db2fd7 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -105,6 +105,7 @@ static void tm6000_urb_received(struct urb *urb)
 			printk(KERN_ERR "tm6000:  error %s\n", __func__);
 			kfree(urb->transfer_buffer);
 			usb_free_urb(urb);
+			dev->dvb->bulk_urb = NULL;
 		}
 	}
 }
@@ -135,6 +136,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
 	dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
 	if (!dvb->bulk_urb->transfer_buffer) {
 		usb_free_urb(dvb->bulk_urb);
+		dvb->bulk_urb = NULL;
 		return -ENOMEM;
 	}
 
@@ -161,6 +163,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
 
 		kfree(dvb->bulk_urb->transfer_buffer);
 		usb_free_urb(dvb->bulk_urb);
+		dvb->bulk_urb = NULL;
 		return ret;
 	}
 
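All three hunks apply the same rule: clear dvb->bulk_urb after freeing it so the disconnect path cannot free it a second time. An illustrative helper that makes the teardown idempotent (a sketch, not the driver's code; struct tm6000_dvb is the driver's own type):

#include <linux/slab.h>
#include <linux/usb.h>

static void drop_bulk_urb(struct tm6000_dvb *dvb)
{
	if (!dvb->bulk_urb)
		return;
	kfree(dvb->bulk_urb->transfer_buffer);
	usb_free_urb(dvb->bulk_urb);
	dvb->bulk_urb = NULL;	/* safe to call again after this */
}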
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index 44ca66c..f34efa7 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -329,7 +329,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
 
 	dprintk("%s\n", __func__);
 
-	b = kmalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL);
+	b = kzalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL);
 	if (!b)
 		return -ENOMEM;
 
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index c7c600c..a24b40d 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -15,6 +15,7 @@
 #include <media/v4l2-dv-timings.h>
 #include <linux/math64.h>
 #include <linux/hdmi.h>
+#include <media/cec.h>
 
 MODULE_AUTHOR("Hans Verkuil");
 MODULE_DESCRIPTION("V4L2 DV Timings Helper Functions");
@@ -942,3 +943,153 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
 	return c;
 }
 EXPORT_SYMBOL_GPL(v4l2_hdmi_rx_colorimetry);
+
+/**
+ * v4l2_get_edid_phys_addr() - find and return the physical address
+ *
+ * @edid:	pointer to the EDID data
+ * @size:	size in bytes of the EDID data
+ * @offset:	If not %NULL then the location of the physical address
+ *		bytes in the EDID will be returned here. This is set to 0
+ *		if there is no physical address found.
+ *
+ * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none.
+ */
+u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size,
+			    unsigned int *offset)
+{
+	unsigned int loc = cec_get_edid_spa_location(edid, size);
+
+	if (offset)
+		*offset = loc;
+	if (loc == 0)
+		return CEC_PHYS_ADDR_INVALID;
+	return (edid[loc] << 8) | edid[loc + 1];
+}
+EXPORT_SYMBOL_GPL(v4l2_get_edid_phys_addr);
+
+/**
+ * v4l2_set_edid_phys_addr() - find and set the physical address
+ *
+ * @edid:	pointer to the EDID data
+ * @size:	size in bytes of the EDID data
+ * @phys_addr:	the new physical address
+ *
+ * This function finds the location of the physical address in the EDID
+ * and fills in the given physical address and updates the checksum
+ * at the end of the EDID block. It does nothing if the EDID doesn't
+ * contain a physical address.
+ */
+void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr)
+{
+	unsigned int loc = cec_get_edid_spa_location(edid, size);
+	u8 sum = 0;
+	unsigned int i;
+
+	if (loc == 0)
+		return;
+	edid[loc] = phys_addr >> 8;
+	edid[loc + 1] = phys_addr & 0xff;
+	loc &= ~0x7f;
+
+	/* update the checksum */
+	for (i = loc; i < loc + 127; i++)
+		sum += edid[i];
+	edid[i] = 256 - sum;
+}
+EXPORT_SYMBOL_GPL(v4l2_set_edid_phys_addr);
+
+/**
+ * v4l2_phys_addr_for_input() - calculate the PA for an input
+ *
+ * @phys_addr:	the physical address of the parent
+ * @input:	the number of the input port, must be between 1 and 15
+ *
+ * This function calculates a new physical address based on the input
+ * port number. For example:
+ *
+ * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0
+ *
+ * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0
+ *
+ * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5
+ *
+ * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth.
+ *
+ * Return: the new physical address or CEC_PHYS_ADDR_INVALID.
+ */
+u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input)
+{
+	/* Check if input is sane */
+	if (WARN_ON(input == 0 || input > 0xf))
+		return CEC_PHYS_ADDR_INVALID;
+
+	if (phys_addr == 0)
+		return input << 12;
+
+	if ((phys_addr & 0x0fff) == 0)
+		return phys_addr | (input << 8);
+
+	if ((phys_addr & 0x00ff) == 0)
+		return phys_addr | (input << 4);
+
+	if ((phys_addr & 0x000f) == 0)
+		return phys_addr | input;
+
+	/*
+	 * All nibbles are used so no valid physical addresses can be assigned
+	 * to the input.
+	 */
+	return CEC_PHYS_ADDR_INVALID;
+}
+EXPORT_SYMBOL_GPL(v4l2_phys_addr_for_input);
+
+/**
+ * v4l2_phys_addr_validate() - validate a physical address from an EDID
+ *
+ * @phys_addr:	the physical address to validate
+ * @parent:	if not %NULL, then this is filled with the parent's PA.
+ * @port:	if not %NULL, then this is filled with the input port.
+ *
+ * This validates a physical address as read from an EDID. If the
+ * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end),
+ * then it will return -EINVAL.
+ *
+ * The parent PA is passed into %parent and the input port is passed into
+ * %port. For example:
+ *
+ * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0.
+ *
+ * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1.
+ *
+ * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2.
+ *
+ * PA = f.f.f.f: has parent f.f.f.f and input port 0.
+ *
+ * Return: 0 if the PA is valid, -EINVAL if not.
+ */
+int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
+{
+	int i;
+
+	if (parent)
+		*parent = phys_addr;
+	if (port)
+		*port = 0;
+	if (phys_addr == CEC_PHYS_ADDR_INVALID)
+		return 0;
+	for (i = 0; i < 16; i += 4)
+		if (phys_addr & (0xf << i))
+			break;
+	if (i == 16)
+		return 0;
+	if (parent)
+		*parent = phys_addr & (0xfff0 << i);
+	if (port)
+		*port = (phys_addr >> i) & 0xf;
+	for (i += 4; i < 16; i += 4)
+		if ((phys_addr & (0xf << i)) == 0)
+			return -EINVAL;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_phys_addr_validate);
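A worked example of the nibble arithmetic in v4l2_phys_addr_for_input(): a CEC physical address packs four topology levels into a u16, one nibble per level, and each call fills the highest remaining zero nibble (CEC_PHYS_ADDR_INVALID is 0xffff, i.e. f.f.f.f):

#include <linux/types.h>
#include <media/cec.h>
#include <media/v4l2-dv-timings.h>

static void pa_demo(void)
{
	u16 pa = 0x3000;			/* 3.0.0.0 */

	pa = v4l2_phys_addr_for_input(pa, 2);	/* 0x3200 = 3.2.0.0 */
	pa = v4l2_phys_addr_for_input(pa, 1);	/* 0x3210 = 3.2.1.0 */
	pa = v4l2_phys_addr_for_input(pa, 5);	/* 0x3215 = 3.2.1.5 */
	/* every nibble is used, so one level deeper cannot be addressed */
	pa = v4l2_phys_addr_for_input(pa, 5);	/* CEC_PHYS_ADDR_INVALID */
}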
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index bcdca9f..29f5021 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -949,7 +949,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
 	if (!cnt) {
 		rc = -ENODEV;
 		pci_dev_busy = 1;
-		goto err_out;
+		goto err_out_int;
 	}
 
 	jm = kzalloc(sizeof(struct jmb38x_ms)
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 968ef14..d49f86e 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -510,10 +510,10 @@
 	bool "Support for Crystal Cove PMIC"
 	depends on ACPI && HAS_IOMEM && I2C=y && GPIOLIB && COMMON_CLK
 	depends on X86 || COMPILE_TEST
+	depends on I2C_DESIGNWARE_PLATFORM=y
 	select MFD_CORE
 	select REGMAP_I2C
 	select REGMAP_IRQ
-	select I2C_DESIGNWARE_PLATFORM
 	help
 	  Select this option to enable support for Crystal Cove PMIC
 	  on some Intel SoC systems. The PMIC provides ADC, GPIO,
@@ -539,10 +539,10 @@
 	bool "Support for Intel Cherry Trail Whiskey Cove PMIC"
 	depends on ACPI && HAS_IOMEM && I2C=y && COMMON_CLK
 	depends on X86 || COMPILE_TEST
+	depends on I2C_DESIGNWARE_PLATFORM=y
 	select MFD_CORE
 	select REGMAP_I2C
 	select REGMAP_IRQ
-	select I2C_DESIGNWARE_PLATFORM
 	help
 	  Select this option to enable support for the Intel Cherry Trail
 	  Whiskey Cove PMIC found on some Intel Cherry Trail systems.
@@ -1415,9 +1415,9 @@
 config MFD_TPS68470
 	bool "TI TPS68470 Power Management / LED chips"
 	depends on ACPI && I2C=y
+	depends on I2C_DESIGNWARE_PLATFORM=y
 	select MFD_CORE
 	select REGMAP_I2C
-	select I2C_DESIGNWARE_PLATFORM
 	help
 	  If you say yes here you get support for the TPS68470 series of
 	  Power Management / LED chips.
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 0e5282f..c37c8bb8 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -39,6 +39,8 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
 	info->mem = &pdev->resource[0];
 	info->irq = pdev->irq;
 
+	pdev->d3cold_delay = 0;
+
 	/* Probably it is enough to set this for iDMA capable devices only */
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index a0998876..f2b5ac5 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -585,6 +585,17 @@
 
 	  If in doubt, say N.
 
+config WIGIG_SENSING_SPI
+	tristate "Sensing over 60GHz using wil6210 / SPI bus"
+	depends on SPI && WIL6210
+	default n
+	help
+	  This module adds support for various sensing use cases, such as
+	  gesture detection, face recognition and proximity detection. The
+	  sensor operates over the 60 GHz frequency band, using a low-power
+	  SPI interface. The driver outputs CIRs (Channel Impulse Responses),
+	  which must be processed in user space to obtain meaningful results.
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 9fb4ed6..11270e4 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -71,3 +71,4 @@
 obj-$(CONFIG_OKL4_USER_VIPC)    += okl4-vipc.o
 obj-$(CONFIG_OKL4_GUEST)        += okl4-panic.o
 obj-$(CONFIG_OKL4_LINK_SHBUF)    += okl4-link-shbuf.o
+obj-$(CONFIG_WIGIG_SENSING_SPI)	+= wigig_sensing.o
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index a6f41f9..198e030 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -214,13 +214,21 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
 {
 	int ret;
 
+	/* No need to enable the client if nothing is needed from it */
+	if (!cldev->bus->fw_f_fw_ver_supported &&
+	    !cldev->bus->hbm_f_os_supported)
+		return;
+
 	ret = mei_cldev_enable(cldev);
 	if (ret)
 		return;
 
-	ret = mei_fwver(cldev);
-	if (ret < 0)
-		dev_err(&cldev->dev, "FW version command failed %d\n", ret);
+	if (cldev->bus->fw_f_fw_ver_supported) {
+		ret = mei_fwver(cldev);
+		if (ret < 0)
+			dev_err(&cldev->dev, "FW version command failed %d\n",
+				ret);
+	}
 
 	if (cldev->bus->hbm_f_os_supported) {
 		ret = mei_osver(cldev);
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 225373e..f85aa3f 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -139,8 +139,13 @@
 #define MEI_DEV_ID_CNP_H      0xA360  /* Cannon Point H */
 #define MEI_DEV_ID_CNP_H_4    0xA364  /* Cannon Point H 4 (iTouch) */
 
+#define MEI_DEV_ID_CMP_LP     0x02e0  /* Comet Point LP */
+#define MEI_DEV_ID_CMP_LP_3   0x02e4  /* Comet Point LP 3 (iTouch) */
+
 #define MEI_DEV_ID_ICP_LP     0x34E0  /* Ice Lake Point LP */
 
+#define MEI_DEV_ID_TGP_LP     0xA0E0  /* Tiger Lake Point LP */
+
 #define MEI_DEV_ID_MCC        0x4B70  /* Mule Creek Canyon (EHL) */
 #define MEI_DEV_ID_MCC_4      0x4B75  /* Mule Creek Canyon 4 (EHL) */
 
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 0759c3a..60c8c84 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1368,6 +1368,8 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
 #define MEI_CFG_FW_SPS                           \
 	.quirk_probe = mei_me_fw_type_sps
 
+#define MEI_CFG_FW_VER_SUPP                     \
+	.fw_ver_supported = 1
 
 #define MEI_CFG_ICH_HFS                      \
 	.fw_status.count = 0
@@ -1405,31 +1407,41 @@ static const struct mei_cfg mei_me_ich10_cfg = {
 	MEI_CFG_ICH10_HFS,
 };
 
-/* PCH devices */
-static const struct mei_cfg mei_me_pch_cfg = {
+/* PCH6 devices */
+static const struct mei_cfg mei_me_pch6_cfg = {
 	MEI_CFG_PCH_HFS,
 };
 
+/* PCH7 devices */
+static const struct mei_cfg mei_me_pch7_cfg = {
+	MEI_CFG_PCH_HFS,
+	MEI_CFG_FW_VER_SUPP,
+};
+
 /* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
 static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
 	MEI_CFG_PCH_HFS,
+	MEI_CFG_FW_VER_SUPP,
 	MEI_CFG_FW_NM,
 };
 
 /* PCH8 Lynx Point and newer devices */
 static const struct mei_cfg mei_me_pch8_cfg = {
 	MEI_CFG_PCH8_HFS,
+	MEI_CFG_FW_VER_SUPP,
 };
 
 /* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
 static const struct mei_cfg mei_me_pch8_sps_cfg = {
 	MEI_CFG_PCH8_HFS,
+	MEI_CFG_FW_VER_SUPP,
 	MEI_CFG_FW_SPS,
 };
 
 /* Cannon Lake and newer devices */
 static const struct mei_cfg mei_me_pch12_cfg = {
 	MEI_CFG_PCH8_HFS,
+	MEI_CFG_FW_VER_SUPP,
 	MEI_CFG_DMA_128,
 };
 
@@ -1441,7 +1453,8 @@ static const struct mei_cfg *const mei_cfg_list[] = {
 	[MEI_ME_UNDEF_CFG] = NULL,
 	[MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
 	[MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
-	[MEI_ME_PCH_CFG] = &mei_me_pch_cfg,
+	[MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg,
+	[MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
 	[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
 	[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
 	[MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
@@ -1480,6 +1493,8 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
 
 	mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
 	hw->cfg = cfg;
+	dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
+
 	return dev;
 }
 
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index bbcc5fc..7759713 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -32,11 +32,13 @@
  * @fw_status: FW status
  * @quirk_probe: device exclusion quirk
  * @dma_size: device DMA buffers size
+ * @fw_ver_supported: whether the FW version is retrievable from FW
  */
 struct mei_cfg {
 	const struct mei_fw_status fw_status;
 	bool (*quirk_probe)(struct pci_dev *pdev);
 	size_t dma_size[DMA_DSCR_NUM];
+	u32 fw_ver_supported:1;
 };
 
 
@@ -74,7 +76,8 @@ struct mei_me_hw {
  * @MEI_ME_UNDEF_CFG:      Lower sentinel.
  * @MEI_ME_ICH_CFG:        I/O Controller Hub legacy devices.
  * @MEI_ME_ICH10_CFG:      I/O Controller Hub platforms Gen10
- * @MEI_ME_PCH_CFG:        Platform Controller Hub platforms (Up to Gen8).
+ * @MEI_ME_PCH6_CFG:       Platform Controller Hub platforms (Gen6).
+ * @MEI_ME_PCH7_CFG:       Platform Controller Hub platforms (Gen7).
  * @MEI_ME_PCH_CPT_PBG_CFG:Platform Controller Hub workstations
  *                         with quirk for Node Manager exclusion.
  * @MEI_ME_PCH8_CFG:       Platform Controller Hub Gen8 and newer
@@ -89,7 +92,8 @@ enum mei_cfg_idx {
 	MEI_ME_UNDEF_CFG,
 	MEI_ME_ICH_CFG,
 	MEI_ME_ICH10_CFG,
-	MEI_ME_PCH_CFG,
+	MEI_ME_PCH6_CFG,
+	MEI_ME_PCH7_CFG,
 	MEI_ME_PCH_CPT_PBG_CFG,
 	MEI_ME_PCH8_CFG,
 	MEI_ME_PCH8_SPS_CFG,
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 377397e..fc7a5e3 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -422,6 +422,8 @@ struct mei_fw_version {
  *
  * @fw_ver : FW versions
  *
+ * @fw_f_fw_ver_supported : fw feature: fw version is supported
+ *
  * @me_clients_rwsem: rw lock over me_clients list
  * @me_clients  : list of FW clients
  * @me_clients_map : FW clients bit map
@@ -500,6 +502,8 @@ struct mei_device {
 
 	struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS];
 
+	unsigned int fw_f_fw_ver_supported:1;
+
 	struct rw_semaphore me_clients_rwsem;
 	struct list_head me_clients;
 	DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index a66ebce..28cdd87 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -70,13 +70,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)},
 
-	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH_CFG)},
-	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH6_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH6_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)},
-	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH_CFG)},
-	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH_CFG)},
-	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
@@ -105,8 +105,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
 
+	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
+
 	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
 
+	{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},
+
 	{MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
 
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 1b55096..7c623c1 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -150,10 +150,16 @@ enum qseecom_listener_unregister_kthread_state {
 	LSNR_UNREG_KT_WAKEUP,
 };
 
+enum qseecom_unload_app_kthread_state {
+	UNLOAD_APP_KT_SLEEP = 0,
+	UNLOAD_APP_KT_WAKEUP,
+};
+
 static DEFINE_MUTEX(qsee_bw_mutex);
 static DEFINE_MUTEX(app_access_lock);
 static DEFINE_MUTEX(clk_access_lock);
 static DEFINE_MUTEX(listener_access_lock);
+static DEFINE_MUTEX(unload_app_pending_list_lock);
 
 
 struct sglist_info {
@@ -310,12 +316,23 @@ struct qseecom_control {
 	bool smcinvoke_support;
 	uint64_t qseecom_bridge_handle;
 	uint64_t ta_bridge_handle;
+	uint64_t user_contig_bridge_handle;
 
 	struct list_head  unregister_lsnr_pending_list_head;
 	wait_queue_head_t register_lsnr_pending_wq;
 	struct task_struct *unregister_lsnr_kthread_task;
 	wait_queue_head_t unregister_lsnr_kthread_wq;
 	atomic_t unregister_lsnr_kthread_state;
+
+	struct list_head  unload_app_pending_list_head;
+	struct task_struct *unload_app_kthread_task;
+	wait_queue_head_t unload_app_kthread_wq;
+	atomic_t unload_app_kthread_state;
+};
+
+struct qseecom_unload_app_pending_list {
+	struct list_head		list;
+	struct qseecom_dev_handle	*data;
 };
 
 struct qseecom_sec_buf_fd_info {
@@ -345,6 +362,7 @@ struct qseecom_client_handle {
 	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
 	bool from_smcinvoke;
 	struct qtee_shm shm; /* kernel client's shm for req/rsp buf */
+	bool unload_pending;
 };
 
 struct qseecom_listener_handle {
@@ -423,6 +441,8 @@ static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
 						void __user *argp);
 static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
 						void __user *argp);
+static int __qseecom_unload_app(struct qseecom_dev_handle *data,
+				uint32_t app_id);
 
 static int get_qseecom_keymaster_status(char *str)
 {
@@ -1330,17 +1350,19 @@ static int qseecom_dmabuf_map(int ion_fd, struct sg_table **sgt,
 				ion_fd, ret);
 		goto err_detach;
 	}
-	*sgt = new_sgt;
-	*attach = new_attach;
-	*dmabuf = new_dma_buf;
 
 	ret = qseecom_create_bridge_for_secbuf(ion_fd, new_dma_buf, new_sgt);
 	if (ret) {
 		pr_err("failed to create bridge for fd %d\n", ion_fd);
-		goto err_detach;
+		goto err_unmap_attachment;
 	}
+	*sgt = new_sgt;
+	*attach = new_attach;
+	*dmabuf = new_dma_buf;
 	return ret;
 
+err_unmap_attachment:
+	dma_buf_unmap_attachment(new_attach, new_sgt, DMA_BIDIRECTIONAL);
 err_detach:
 	dma_buf_detach(new_dma_buf, new_attach);
 err_put:
@@ -1494,8 +1516,6 @@ static int qseecom_register_listener(struct qseecom_dev_handle *data,
 			rcvd_lstnr.sb_size))
 		return -EFAULT;
 
-	data->listener.id = rcvd_lstnr.listener_id;
-
 	ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
 	if (ptr_svc) {
 		if (!ptr_svc->unregister_pending) {
@@ -1553,6 +1573,7 @@ static int qseecom_register_listener(struct qseecom_dev_handle *data,
 	new_entry->listener_in_use = false;
 	list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
 
+	data->listener.id = rcvd_lstnr.listener_id;
 	pr_debug("Service %d is registered\n", rcvd_lstnr.listener_id);
 	return ret;
 }
@@ -1617,6 +1638,11 @@ static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
 	struct qseecom_registered_listener_list *ptr_svc = NULL;
 	struct qseecom_unregister_pending_list *entry = NULL;
 
+	if (data->released) {
+		pr_err("cannot unregister released lsnr %d\n", data->listener.id);
+		return -EINVAL;
+	}
+
 	ptr_svc = __qseecom_find_svc(data->listener.id);
 	if (!ptr_svc) {
 		pr_err("Unregiser invalid listener ID %d\n", data->listener.id);
@@ -2850,8 +2876,11 @@ static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
 		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
 			ret = __qseecom_process_incomplete_cmd(data, &resp);
 			if (ret) {
-				pr_err("process_incomplete_cmd failed err: %d\n",
-					ret);
+				/* TZ has created app_id, need to unload it */
+				pr_err("incomp_cmd err %d, %d, unload %d %s\n",
+					ret, resp.result, resp.data,
+					load_img_req.img_name);
+				__qseecom_unload_app(data, resp.data);
 				ret = -EFAULT;
 				goto loadapp_err;
 			}
@@ -2952,24 +2981,66 @@ static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
 	return ret;
 }
 
+static int __qseecom_unload_app(struct qseecom_dev_handle *data,
+				uint32_t app_id)
+{
+	struct qseecom_unload_app_ireq req;
+	struct qseecom_command_scm_resp resp;
+	int ret = 0;
+
+	/* Populate the request structure for the SCM call to unload the app */
+	req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
+	req.app_id = app_id;
+
+	/* SCM_CALL to unload the app */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+			sizeof(struct qseecom_unload_app_ireq),
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to unload app (id = %d) failed\n", app_id);
+		return -EFAULT;
+	}
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		pr_warn("App (%d) is unloaded\n", app_id);
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret)
+			pr_err("unload app %d fail proc incom cmd: %d,%d,%d\n",
+				app_id, ret, resp.result, resp.data);
+		else
+			pr_warn("App (%d) is unloaded\n", app_id);
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("app (%d) unload_failed!!\n", app_id);
+		ret = -EFAULT;
+		break;
+	default:
+		pr_err("unload app %d get unknown resp.result %d\n",
+				app_id, resp.result);
+		ret = -EFAULT;
+		break;
+	}
+	return ret;
+}
+
 static int qseecom_unload_app(struct qseecom_dev_handle *data,
 				bool app_crash)
 {
 	unsigned long flags;
-	unsigned long flags1;
 	int ret = 0;
-	struct qseecom_command_scm_resp resp;
 	struct qseecom_registered_app_list *ptr_app = NULL;
-	bool unload = false;
 	bool found_app = false;
-	bool found_dead_app = false;
-	bool scm_called = false;
 
 	if (!data) {
 		pr_err("Invalid/uninitialized device handle\n");
 		return -EINVAL;
 	}
 
+	pr_debug("unload app %d(%s), app_crash flag %d\n", data->client.app_id,
+			data->client.app_name, app_crash);
+
 	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
 		pr_debug("Do not unload keymaster app from tz\n");
 		goto unload_exit;
@@ -2978,126 +3049,42 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data,
 	__qseecom_cleanup_app(data);
 	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
 
-	if (data->client.app_id > 0) {
+	/* ignore app_id 0: it occurs when closing the fd after a failed load */
+	if (!data->client.app_id)
+		goto unload_exit;
+
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+								list) {
+		if ((ptr_app->app_id == data->client.app_id) &&
+			(!strcmp(ptr_app->app_name, data->client.app_name))) {
+			pr_debug("unload app %d (%s), ref_cnt %d\n",
+				ptr_app->app_id, ptr_app->app_name,
+				ptr_app->ref_cnt);
+			ptr_app->ref_cnt--;
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+							flags);
+	if (!found_app) {
+		pr_err("Cannot find app with id = %d (%s)\n",
+			data->client.app_id, data->client.app_name);
+		ret = -EINVAL;
+		goto unload_exit;
+	}
+
+	if (!ptr_app->ref_cnt) {
+		ret = __qseecom_unload_app(data, data->client.app_id);
 		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
-		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
-									list) {
-			if (ptr_app->app_id == data->client.app_id) {
-				if (!strcmp((void *)ptr_app->app_name,
-					(void *)data->client.app_name)) {
-					found_app = true;
-					if (ptr_app->app_blocked ||
-							ptr_app->check_block)
-						app_crash = false;
-					if (app_crash || ptr_app->ref_cnt == 1)
-						unload = true;
-					break;
-				}
-				found_dead_app = true;
-				break;
-			}
-		}
+		list_del(&ptr_app->list);
 		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
-								flags);
-		if (!found_app && !found_dead_app) {
-			pr_err("Cannot find app with id = %d (%s)\n",
-				data->client.app_id,
-				(char *)data->client.app_name);
-			ret = -EINVAL;
-			goto unload_exit;
-		}
+					flags);
+		kzfree(ptr_app);
 	}
 
-	if (found_dead_app)
-		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
-			(char *)data->client.app_name);
-
-	if (unload) {
-		struct qseecom_unload_app_ireq req;
-		/* Populate the structure for sending scm call to load image */
-		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
-		req.app_id = data->client.app_id;
-
-		/* SCM_CALL to unload the app */
-		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
-				sizeof(struct qseecom_unload_app_ireq),
-				&resp, sizeof(resp));
-		scm_called = true;
-		if (ret) {
-			pr_err("scm_call to unload app (id = %d) failed\n",
-								req.app_id);
-			ret = -EFAULT;
-			goto scm_exit;
-		} else {
-			pr_warn("App id %d now unloaded\n", req.app_id);
-		}
-		if (resp.result == QSEOS_RESULT_FAILURE) {
-			pr_err("app (%d) unload_failed!!\n",
-					data->client.app_id);
-			ret = -EFAULT;
-			goto scm_exit;
-		}
-		if (resp.result == QSEOS_RESULT_SUCCESS)
-			pr_debug("App (%d) is unloaded!!\n",
-					data->client.app_id);
-		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
-			ret = __qseecom_process_incomplete_cmd(data, &resp);
-			if (ret) {
-				pr_err("process_incomplete_cmd fail err: %d\n",
-									ret);
-				goto scm_exit;
-			}
-		}
-	}
-
-scm_exit:
-	if (scm_called) {
-		/* double check if this app_entry still exists */
-		bool doublecheck = false;
-
-		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
-		list_for_each_entry(ptr_app,
-			&qseecom.registered_app_list_head, list) {
-			if ((ptr_app->app_id == data->client.app_id) &&
-				(!strcmp((void *)ptr_app->app_name,
-				(void *)data->client.app_name))) {
-				doublecheck = true;
-				break;
-			}
-		}
-		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
-								flags1);
-		if (!doublecheck) {
-			pr_warn("app %d(%s) entry is already removed\n",
-				data->client.app_id,
-				(char *)data->client.app_name);
-			found_app = false;
-		}
-	}
 unload_exit:
-	if (found_app) {
-		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
-		if (app_crash) {
-			ptr_app->ref_cnt = 0;
-			pr_debug("app_crash: ref_count = 0\n");
-		} else {
-			if (ptr_app->ref_cnt == 1) {
-				ptr_app->ref_cnt = 0;
-				pr_debug("ref_count set to 0\n");
-			} else {
-				ptr_app->ref_cnt--;
-				pr_debug("Can't unload app(%d) inuse\n",
-					ptr_app->app_id);
-			}
-		}
-		if (unload) {
-			list_del(&ptr_app->list);
-			kzfree(ptr_app);
-		}
-		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
-								flags1);
-	}
-
 	if (data->client.dmabuf)
 		qseecom_vaddr_unmap(data->client.sb_virt, data->client.sgt,
 			data->client.attach, data->client.dmabuf);
@@ -3105,6 +3092,101 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data,
 	return ret;
 }
 
+
+static int qseecom_prepare_unload_app(struct qseecom_dev_handle *data)
+{
+	struct qseecom_unload_app_pending_list *entry = NULL;
+
+	pr_debug("prepare to unload app(%d)(%s), pending %d\n",
+		data->client.app_id, data->client.app_name,
+		data->client.unload_pending);
+	if (data->client.unload_pending)
+		return 0;
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+	entry->data = data;
+	list_add_tail(&entry->list,
+		&qseecom.unload_app_pending_list_head);
+	data->client.unload_pending = true;
+	pr_debug("unload ta %d pending\n", data->client.app_id);
+	return 0;
+}
+
+static void __wakeup_unload_app_kthread(void)
+{
+	atomic_set(&qseecom.unload_app_kthread_state,
+				UNLOAD_APP_KT_WAKEUP);
+	wake_up_interruptible(&qseecom.unload_app_kthread_wq);
+}
+
+static bool __qseecom_find_pending_unload_app(uint32_t app_id, char *app_name)
+{
+	struct qseecom_unload_app_pending_list *entry = NULL;
+	bool found = false;
+
+	mutex_lock(&unload_app_pending_list_lock);
+	list_for_each_entry(entry, &qseecom.unload_app_pending_list_head,
+					list) {
+		if ((entry->data->client.app_id == app_id) &&
+			(!strcmp(entry->data->client.app_name, app_name))) {
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&unload_app_pending_list_lock);
+	return found;
+}
+
+static void __qseecom_processing_pending_unload_app(void)
+{
+	struct qseecom_unload_app_pending_list *entry = NULL;
+	struct list_head *pos;
+	int ret = 0;
+
+	mutex_lock(&unload_app_pending_list_lock);
+	while (!list_empty(&qseecom.unload_app_pending_list_head)) {
+		pos = qseecom.unload_app_pending_list_head.next;
+		entry = list_entry(pos,
+			struct qseecom_unload_app_pending_list, list);
+		if (entry && entry->data) {
+			pr_debug("process pending unload app %d (%s)\n",
+				entry->data->client.app_id,
+				entry->data->client.app_name);
+			mutex_unlock(&unload_app_pending_list_lock);
+			mutex_lock(&app_access_lock);
+			ret = qseecom_unload_app(entry->data, true);
+			if (ret)
+				pr_err("unload app %d pending failed %d\n",
+					entry->data->client.app_id, ret);
+			mutex_unlock(&app_access_lock);
+			mutex_lock(&unload_app_pending_list_lock);
+			__qseecom_free_tzbuf(&entry->data->sglistinfo_shm);
+			kzfree(entry->data);
+		}
+		list_del(pos);
+		kzfree(entry);
+	}
+	mutex_unlock(&unload_app_pending_list_lock);
+}
+
+static int __qseecom_unload_app_kthread_func(void *data)
+{
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(
+			qseecom.unload_app_kthread_wq,
+			atomic_read(&qseecom.unload_app_kthread_state)
+				== UNLOAD_APP_KT_WAKEUP);
+		pr_debug("kthread to unload app is called, state %d\n",
+			atomic_read(&qseecom.unload_app_kthread_state));
+		__qseecom_processing_pending_unload_app();
+		atomic_set(&qseecom.unload_app_kthread_state,
+				UNLOAD_APP_KT_SLEEP);
+	}
+	pr_warn("kthread to unload app stopped\n");
+	return 0;
+}
+
 static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
 						unsigned long virt)
 {
@@ -3551,6 +3633,13 @@ static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
 		return -ENOENT;
 	}
 
+	if (__qseecom_find_pending_unload_app(data->client.app_id,
+						data->client.app_name)) {
+		pr_err("app %d (%s) unload is pending\n",
+			data->client.app_id, data->client.app_name);
+		return -ENOENT;
+	}
+
 	if (qseecom.qsee_version < QSEE_VERSION_40) {
 		send_data_req.app_id = data->client.app_id;
 		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
@@ -4631,10 +4720,14 @@ static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
 		break;
 	case QSEOS_RESULT_INCOMPLETE:
 		ret = __qseecom_process_incomplete_cmd(data, &resp);
-		if (ret)
-			pr_err("process_incomplete_cmd FAILED\n");
-		else
+		if (ret) {
+			pr_err("incomp_cmd err %d, %d, unload %d %s\n",
+				ret, resp.result, resp.data, appname);
+			__qseecom_unload_app(data, resp.data);
+			ret = -EFAULT;
+		} else {
 			*app_id = resp.data;
+		}
 		break;
 	case QSEOS_RESULT_FAILURE:
 		pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
@@ -4823,6 +4916,7 @@ int qseecom_start_app(struct qseecom_handle **handle,
 	uint32_t app_id = 0;
 
 	__wakeup_unregister_listener_kthread();
+	__wakeup_unload_app_kthread();
 
 	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
 		pr_err("Not allowed to be called in %d state\n",
@@ -4960,6 +5054,7 @@ int qseecom_start_app(struct qseecom_handle **handle,
 	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
 
 	mutex_unlock(&app_access_lock);
+	__wakeup_unload_app_kthread();
 	return 0;
 
 err:
@@ -4970,6 +5065,7 @@ int qseecom_start_app(struct qseecom_handle **handle,
 	kfree(*handle);
 	*handle = NULL;
 	mutex_unlock(&app_access_lock);
+	__wakeup_unload_app_kthread();
 	return ret;
 }
 EXPORT_SYMBOL(qseecom_start_app);
@@ -4984,6 +5080,7 @@ int qseecom_shutdown_app(struct qseecom_handle **handle)
 	bool found_handle = false;
 
 	__wakeup_unregister_listener_kthread();
+	__wakeup_unload_app_kthread();
 
 	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
 		pr_err("Not allowed to be called in %d state\n",
@@ -5024,7 +5121,7 @@ int qseecom_shutdown_app(struct qseecom_handle **handle)
 		kzfree(kclient);
 		*handle = NULL;
 	}
-
+	__wakeup_unload_app_kthread();
 	return ret;
 }
 EXPORT_SYMBOL(qseecom_shutdown_app);
@@ -5038,6 +5135,7 @@ int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
 	bool perf_enabled = false;
 
 	__wakeup_unregister_listener_kthread();
+	__wakeup_unload_app_kthread();
 
 	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
 		pr_err("Not allowed to be called in %d state\n",
@@ -7021,6 +7119,12 @@ static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
 			(char *)data->client.app_name);
 		return -ENOENT;
 	}
+	if (__qseecom_find_pending_unload_app(data->client.app_id,
+						data->client.app_name)) {
+		pr_err("app %d (%s) unload is pending\n",
+			data->client.app_id, data->client.app_name);
+		return -ENOENT;
+	}
 
 	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
 						(uintptr_t)req->req_ptr);
@@ -7220,6 +7324,12 @@ static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
 			(char *)data->client.app_name);
 		return -ENOENT;
 	}
+	if (__qseecom_find_pending_unload_app(data->client.app_id,
+						data->client.app_name)) {
+		pr_err("app %d (%s) unload is pending\n",
+			data->client.app_id, data->client.app_name);
+		return -ENOENT;
+	}
 
 	/* validate offsets */
 	for (i = 0; i < MAX_ION_FD; i++) {
@@ -7369,6 +7479,7 @@ static long qseecom_ioctl(struct file *file,
 		cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
 		cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
 		__wakeup_unregister_listener_kthread();
+	__wakeup_unload_app_kthread();
 
 	switch (cmd) {
 	case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
@@ -7609,6 +7720,7 @@ static long qseecom_ioctl(struct file *file,
 		mutex_unlock(&app_access_lock);
 		if (ret)
 			pr_err("failed load_app request: %d\n", ret);
+		__wakeup_unload_app_kthread();
 		break;
 	}
 	case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
@@ -7627,6 +7739,7 @@ static long qseecom_ioctl(struct file *file,
 		mutex_unlock(&app_access_lock);
 		if (ret)
 			pr_err("failed unload_app request: %d\n", ret);
+		__wakeup_unload_app_kthread();
 		break;
 	}
 	case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
@@ -8102,28 +8215,57 @@ static int qseecom_open(struct inode *inode, struct file *file)
 	return ret;
 }
 
+static void __qseecom_release_disable_clk(struct qseecom_dev_handle *data)
+{
+	if (qseecom.no_clock_support)
+		return;
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		if (data->mode != INACTIVE) {
+			qseecom_unregister_bus_bandwidth_needs(data);
+			if (qseecom.cumulative_mode == INACTIVE)
+				__qseecom_set_msm_bus_request(INACTIVE);
+		}
+		mutex_unlock(&qsee_bw_mutex);
+	} else {
+		if (data->fast_load_enabled)
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		if (data->perf_enabled)
+			qsee_disable_clock_vote(data, CLK_DFAB);
+	}
+}
+
 static int qseecom_release(struct inode *inode, struct file *file)
 {
 	struct qseecom_dev_handle *data = file->private_data;
 	int ret = 0;
 	bool free_private_data = true;
 
+	__qseecom_release_disable_clk(data);
 	if (!data->released) {
 		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
 			data->type, data->mode, data);
 		switch (data->type) {
 		case QSEECOM_LISTENER_SERVICE:
 			pr_debug("release lsnr svc %d\n", data->listener.id);
-			free_private_data = false;
 			mutex_lock(&listener_access_lock);
 			ret = qseecom_unregister_listener(data);
+			if (!ret)
+				free_private_data = false;
 			data->listener.release_called = true;
 			mutex_unlock(&listener_access_lock);
+			__wakeup_unregister_listener_kthread();
 			break;
 		case QSEECOM_CLIENT_APP:
-			mutex_lock(&app_access_lock);
-			ret = qseecom_unload_app(data, true);
-			mutex_unlock(&app_access_lock);
+			pr_debug("release app %d (%s)\n",
+				data->client.app_id, data->client.app_name);
+			if (data->client.app_id) {
+				free_private_data = false;
+				mutex_lock(&unload_app_pending_list_lock);
+				ret = qseecom_prepare_unload_app(data);
+				mutex_unlock(&unload_app_pending_list_lock);
+				__wakeup_unload_app_kthread();
+			}
 			break;
 		case QSEECOM_SECURE_SERVICE:
 		case QSEECOM_GENERIC:
@@ -8141,24 +8283,6 @@ static int qseecom_release(struct inode *inode, struct file *file)
 		}
 	}
 
-	if (qseecom.support_bus_scaling) {
-		mutex_lock(&qsee_bw_mutex);
-		if (data->mode != INACTIVE) {
-			qseecom_unregister_bus_bandwidth_needs(data);
-			if (qseecom.cumulative_mode == INACTIVE) {
-				ret = __qseecom_set_msm_bus_request(INACTIVE);
-				if (ret)
-					pr_err("Fail to scale down bus\n");
-			}
-		}
-		mutex_unlock(&qsee_bw_mutex);
-	} else {
-		if (data->fast_load_enabled)
-			qsee_disable_clock_vote(data, CLK_SFPB);
-		if (data->perf_enabled)
-			qsee_disable_clock_vote(data, CLK_DFAB);
-	}
-
 	if (free_private_data) {
 		__qseecom_free_tzbuf(&data->sglistinfo_shm);
 		kfree(data);
@@ -9163,6 +9287,8 @@ static int qseecom_init_control(void)
 	init_waitqueue_head(&qseecom.send_resp_wq);
 	init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
 	init_waitqueue_head(&qseecom.unregister_lsnr_kthread_wq);
+	INIT_LIST_HEAD(&qseecom.unload_app_pending_list_head);
+	init_waitqueue_head(&qseecom.unload_app_kthread_wq);
 	qseecom.send_resp_flag = 0;
 	qseecom.qseos_version = QSEOS_VERSION_14;
 	qseecom.commonlib_loaded = false;
@@ -9222,7 +9348,7 @@ static int qseecom_parse_dt(struct platform_device *pdev)
 	return 0;
 }
 
-static int qseecom_create_kthread_unregister_lsnr(void)
+static int qseecom_create_kthreads(void)
 {
 	int rc = 0;
 
@@ -9236,6 +9362,19 @@ static int qseecom_create_kthread_unregister_lsnr(void)
 	}
 	atomic_set(&qseecom.unregister_lsnr_kthread_state,
 					LSNR_UNREG_KT_SLEEP);
+
+	/* create a kthread to process pending TA unload tasks */
+	qseecom.unload_app_kthread_task = kthread_run(
+			__qseecom_unload_app_kthread_func,
+			NULL, "qseecom-unload-ta");
+	if (IS_ERR(qseecom.unload_app_kthread_task)) {
+		rc = PTR_ERR(qseecom.unload_app_kthread_task);
+		pr_err("failed to create kthread to unload ta, rc = %x\n", rc);
+		kthread_stop(qseecom.unregister_lsnr_kthread_task);
+		return rc;
+	}
+	atomic_set(&qseecom.unload_app_kthread_state,
+					UNLOAD_APP_KT_SLEEP);
 	return 0;
 }
 
@@ -9247,14 +9386,13 @@ static int qseecom_register_heap_shmbridge(uint32_t heapid, uint64_t *handle)
 	struct device_node *ion_node, *node;
 	struct platform_device *ion_pdev = NULL;
 	struct cma *cma = NULL;
-	int ret = -1;
 	uint32_t ns_vmids[] = {VMID_HLOS};
 	uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
 
 	ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
 	if (!ion_node) {
 		pr_err("Failed to get qcom,msm-ion node\n");
-		return ret;
+		return -ENODEV;
 	}
 
 	for_each_available_child_of_node(ion_node, node) {
@@ -9263,23 +9401,22 @@ static int qseecom_register_heap_shmbridge(uint32_t heapid, uint64_t *handle)
 		ion_pdev = of_find_device_by_node(node);
 		if (!ion_pdev) {
 			pr_err("Failed to find node for heap %d\n", heapid);
-			break;
+			return -ENODEV;
 		}
 		cma = dev_get_cma_area(&ion_pdev->dev);
-		if (cma) {
-			heap_pa = cma_get_base(cma);
-			heap_size = (size_t)cma_get_size(cma);
-		} else {
+		if (!cma) {
 			pr_err("Failed to get Heap %d info\n", heapid);
+			return -ENODEV;
 		}
-		break;
-	}
-
-	if (heap_pa)
-		ret = qtee_shmbridge_register(heap_pa,
+		heap_pa = cma_get_base(cma);
+		heap_size = (size_t)cma_get_size(cma);
+		return qtee_shmbridge_register(heap_pa,
 				heap_size, ns_vmids, ns_vm_perms, 1,
 				PERM_READ | PERM_WRITE, handle);
-	return ret;
+	}
+
+	pr_warn("Could not get heap %d info: No shmbridge created\n", heapid);
+	return 0;
 }
 
 static int qseecom_register_shmbridge(void)
@@ -9287,22 +9424,31 @@ static int qseecom_register_shmbridge(void)
 	if (qseecom_register_heap_shmbridge(ION_QSECOM_TA_HEAP_ID,
 					&qseecom.ta_bridge_handle)) {
 		pr_err("Failed to register shmbridge for ta heap\n");
-		return -ENOMEM;
+		return -EINVAL;
 	}
 
 	if (qseecom_register_heap_shmbridge(ION_QSECOM_HEAP_ID,
 					&qseecom.qseecom_bridge_handle)) {
 		pr_err("Failed to register shmbridge for qseecom heap\n");
 		qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
-		return -ENOMEM;
+		return -EINVAL;
+	}
+
+	if (qseecom_register_heap_shmbridge(ION_USER_CONTIG_HEAP_ID,
+					&qseecom.user_contig_bridge_handle)) {
+		pr_err("Failed to register shmbridge for user contig heap\n");
+		qtee_shmbridge_deregister(qseecom.qseecom_bridge_handle);
+		qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
+		return -EINVAL;
 	}
 	return 0;
 }
 
 static void qseecom_deregister_shmbridge(void)
 {
-	qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
+	qtee_shmbridge_deregister(qseecom.user_contig_bridge_handle);
 	qtee_shmbridge_deregister(qseecom.qseecom_bridge_handle);
+	qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
 }
 
 static int qseecom_probe(struct platform_device *pdev)
@@ -9337,17 +9483,20 @@ static int qseecom_probe(struct platform_device *pdev)
 	if (rc)
 		goto exit_deinit_bus;
 
-	rc = qseecom_create_kthread_unregister_lsnr();
+	rc = qseecom_create_kthreads();
 	if (rc)
 		goto exit_deinit_bus;
 
 	rc = qseecom_register_shmbridge();
 	if (rc)
-		goto exit_deinit_bus;
+		goto exit_stop_kthreads;
 
 	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
 	return 0;
 
+exit_stop_kthreads:
+	kthread_stop(qseecom.unload_app_kthread_task);
+	kthread_stop(qseecom.unregister_lsnr_kthread_task);
 exit_deinit_bus:
 	qseecom_deinit_bus();
 exit_deinit_clock:
@@ -9397,6 +9546,7 @@ static int qseecom_remove(struct platform_device *pdev)
 		qseecom_unload_commonlib_image();
 
 	qseecom_deregister_shmbridge();
+	kthread_stop(qseecom.unload_app_kthread_task);
 	kthread_stop(qseecom.unregister_lsnr_kthread_task);
 	qseecom_deinit_bus();
 	qseecom_deinit_clk();
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
index b3fa738..f005206 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -318,7 +318,8 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
 
 	entry = container_of(resource, struct dbell_entry, resource);
 	if (entry->run_delayed) {
-		schedule_work(&entry->work);
+		if (!schedule_work(&entry->work))
+			vmci_resource_put(resource);
 	} else {
 		entry->notify_cb(entry->client_data);
 		vmci_resource_put(resource);
@@ -366,7 +367,8 @@ static void dbell_fire_entries(u32 notify_idx)
 		    atomic_read(&dbell->active) == 1) {
 			if (dbell->run_delayed) {
 				vmci_resource_get(&dbell->resource);
-				schedule_work(&dbell->work);
+				if (!schedule_work(&dbell->work))
+					vmci_resource_put(&dbell->resource);
 			} else {
 				dbell->notify_cb(dbell->client_data);
 			}
diff --git a/drivers/misc/wigig_sensing.c b/drivers/misc/wigig_sensing.c
new file mode 100644
index 0000000..fe366cf
--- /dev/null
+++ b/drivers/misc/wigig_sensing.c
@@ -0,0 +1,1420 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/cdev.h>
+#include <linux/circ_buf.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/poll.h>
+#include <linux/spi/spi.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <uapi/misc/wigig_sensing_uapi.h>
+#include "wigig_sensing.h"
+
+#define DRV_NAME "wigig_sensing"
+#define CLEAR_LOW_24_BITS (~(BIT(24) - 1))
+#define NUM_RETRIES  5
+#define FW_TIMEOUT_MSECS (1000)
+
+#define circ_cnt(circ, size) \
+	((CIRC_CNT((circ)->head, (circ)->tail, size)) & ~3)
+#define circ_cnt_to_end(circ, size) \
+	((CIRC_CNT_TO_END((circ)->head, (circ)->tail, size)) & ~3)
+#define circ_space(circ, size) \
+	((CIRC_SPACE((circ)->head, (circ)->tail, size)) & ~3)
+#define circ_space_to_end(circ, size) \
+	((CIRC_SPACE_TO_END((circ)->head, (circ)->tail, size)) & ~3)
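+
+/*
+ * The "& ~3" in the helpers above rounds each count down to a whole number
+ * of 32-bit words, since the SPI FIFO is read in 32-bit units. For example,
+ * with head = 10, tail = 0 and size = 16, CIRC_CNT() is 10 and circ_cnt()
+ * yields 8.
+ */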
+
+struct wigig_sensing_platform_data {
+	struct gpio_desc *dri_gpio;
+};
+
+static struct wigig_sensing_ctx *ctx;
+
+static int spis_reset(struct spi_device *spi)
+{
+	struct wigig_sensing_ctx *ctx = spi_get_drvdata(spi);
+	u8 cmd[] = { 0xDA, 0xBA, 0x00, 0x0 };
+	int rc;
+
+	memcpy(ctx->cmd_buf, cmd, sizeof(cmd));
+	rc = spi_write(spi, ctx->cmd_buf, sizeof(cmd));
+
+	return rc;
+}
+
+static int spis_clock_request(struct spi_device *spi)
+{
+	struct wigig_sensing_ctx *ctx = spi_get_drvdata(spi);
+	u8 cmd[] = { 0xDA, 0xBA, 0x80, 0x0 };
+	int rc;
+
+	memcpy(ctx->cmd_buf, cmd, sizeof(cmd));
+	rc = spi_write(spi, ctx->cmd_buf, sizeof(cmd));
+
+	return rc;
+}
+
+static int spis_nop(struct spi_device *spi)
+{
+	struct wigig_sensing_ctx *ctx = spi_get_drvdata(spi);
+	u8 cmd[] = { 0x0 };
+	int rc;
+
+	memcpy(ctx->cmd_buf, cmd, sizeof(cmd));
+	rc = spi_write(spi, ctx->cmd_buf, sizeof(cmd));
+
+	return rc;
+}
+
+static int spis_write_enable(struct spi_device *spi)
+{
+	struct wigig_sensing_ctx *ctx = spi_get_drvdata(spi);
+	u8 cmd[] = { 0x6 };
+	int rc;
+
+	memcpy(ctx->cmd_buf, cmd, sizeof(cmd));
+	rc = spi_write(spi, ctx->cmd_buf, sizeof(cmd));
+
+	return rc;
+}
+
+static int spis_extended_reset(struct spi_device *spi)
+{
+	struct wigig_sensing_ctx *ctx = spi_get_drvdata(spi);
+	u8 cmd[SPIS_EXTENDED_RESET_COMMAND_LEN] = { 0xDA, 0xBA };
+	int rc;
+
+	memcpy(ctx->cmd_buf, cmd, sizeof(cmd));
+	rc = spi_write(spi, ctx->cmd_buf, sizeof(cmd));
+
+	return rc;
+}
+
+static int spis_read_internal_reg(struct spi_device *spi, u8 address, u32 *val)
+{
+	struct wigig_sensing_ctx *ctx = spi_get_drvdata(spi);
+	u8 cmd[] = { 0x81, address, 0, 0 };
+	struct spi_transfer tx_xfer = {
+		.tx_buf = ctx->cmd_buf,
+		.rx_buf	= NULL,
+		.len = 4,
+		.bits_per_word = 8,
+	};
+	struct spi_transfer rx_xfer = {
+		.tx_buf = NULL,
+		.rx_buf	= ctx->cmd_reply_buf,
+		.len = 4,
+		.bits_per_word = 8,
+	};
+	int rc;
+	struct spi_message msg;
+
+	memcpy(ctx->cmd_buf, cmd, sizeof(cmd));
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&tx_xfer, &msg);
+	spi_message_add_tail(&rx_xfer, &msg);
+	rc = spi_sync(ctx->spi_dev, &msg);
+
+	*val = be32_to_cpu(*(u32 *)(ctx->cmd_reply_buf));
+
+	return rc;
+}
+
+static int spis_read_mem(struct spi_device *spi, u32 address, u32 *val)
+{
+	struct wigig_sensing_ctx *ctx = spi_get_drvdata(spi);
+	u8 cmd[] = {
+		0x83,
+		(address >> 16) & 0xFF,
+		(address >>  8) & 0xFF,
+		(address >>  0) & 0xFF,
+		0, 0, 0, 0
+	};
+	struct spi_transfer tx_xfer = {
+		.tx_buf = ctx->cmd_buf,
+		.rx_buf	= NULL,
+		.len = 8,
+		.bits_per_word = 8,
+	};
+	struct spi_transfer rx_xfer = {
+		.tx_buf = NULL,
+		.rx_buf	= ctx->cmd_reply_buf,
+		.len = 4,
+		.bits_per_word = 8,
+	};
+	int rc;
+	struct spi_message msg;
+
+	memcpy(ctx->cmd_buf, cmd, sizeof(cmd));
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&tx_xfer, &msg);
+	spi_message_add_tail(&rx_xfer, &msg);
+	rc = spi_sync(ctx->spi_dev, &msg);
+
+	*val = be32_to_cpu(*(u32 *)(ctx->cmd_reply_buf));
+
+	return rc;
+}
+
+static int spis_write_internal_reg(struct spi_device *spi, u8 address, u32 val)
+{
+	struct wigig_sensing_ctx *ctx = spi_get_drvdata(spi);
+	u8 cmd[] = { 0x82, address, 0, 0, 0, 0 };
+	int rc = spis_write_enable(ctx->spi_dev);
+
+	cmd[2] = (val >> 24) & 0xFF;
+	cmd[3] = (val >> 16) & 0xFF;
+	cmd[4] = (val >>  8) & 0xFF;
+	cmd[5] = (val >>  0) & 0xFF;
+
+	memcpy(ctx->cmd_buf, cmd, sizeof(cmd));
+	rc |= spi_write(spi, ctx->cmd_buf, sizeof(cmd));
+
+	return rc;
+}
+
+static int spis_write_mem(struct spi_device *spi, u32 address, u32 val)
+{
+	struct wigig_sensing_ctx *ctx = spi_get_drvdata(spi);
+	u8 cmd[] = {
+		0x02,
+		(address >> 16) & 0xFF,
+		(address >>  8) & 0xFF,
+		(address >>  0) & 0xFF,
+		(val >> 24) & 0xFF,
+		(val >> 16) & 0xFF,
+		(val >>  8) & 0xFF,
+		(val >>  0) & 0xFF
+	};
+	int rc = spis_write_enable(ctx->spi_dev);
+
+	memcpy(ctx->cmd_buf, cmd, sizeof(cmd));
+	rc |= spi_write(spi, ctx->cmd_buf, sizeof(cmd));
+
+	return rc;
+}
+
+static int spis_write_reg(struct spi_device *spi, u32 addr, u32 val)
+{
+	int rc;
+
+	if (addr < 256)
+		rc = spis_write_internal_reg(spi, addr, val);
+	else
+		rc = spis_write_mem(spi, addr, val);
+
+	pr_debug("write reg 0x%X val 0x%X\n", addr, val);
+
+	return rc;
+}
+
+static int spis_block_read_mem(struct spi_device *spi,
+			       u32 address,
+			       u8 *data,
+			       u32 length,
+			       u32 last_read_length)
+{
+	struct wigig_sensing_ctx *ctx = spi_get_drvdata(spi);
+	int rc;
+	int overhead = OPCODE_WIDTH + ADDR_WIDTH + DUMMY_BYTES_WIDTH;
+	int sz = overhead + length;
+	struct spi_transfer xfer = {
+		.tx_buf = ctx->tx_buf,
+		.rx_buf = ctx->rx_buf,
+		.len = sz,
+		.bits_per_word = 32,
+	};
+	u32 frame = 0xB << 24;
+
+	frame |= address & 0xFFFFFF;
+	frame = cpu_to_le32(frame);
+
+	/* Read length must be in 32 bit units */
+	if (length & 3) {
+		pr_err("Read length must be a multiple of 32 bits\n");
+		return -EINVAL;
+	}
+	if (length > SPI_MAX_TRANSACTION_SIZE) {
+		pr_err("Read length too large\n");
+		return -EINVAL;
+	}
+
+	/* Write transfer length to SPI core */
+	if (length != last_read_length) {
+		rc = spis_write_reg(ctx->spi_dev,
+				    SPIS_TRNS_LEN_REG_ADDR,
+				    length << 16);
+		if (rc) {
+			pr_err("Failed setting SPIS_TRNS_LEN_REG_ADDR\n");
+			return rc;
+		}
+		ctx->last_read_length = length;
+	}
+
+	memcpy(ctx->tx_buf, &frame, sizeof(frame));
+
+	/* Execute transaction */
+	rc = spi_sync_transfer(spi, &xfer, 1);
+	if (rc) {
+		pr_err("SPI transaction failed, rc = %d\n", rc);
+		return rc;
+	}
+
+	memcpy(data, ctx->rx_buf + overhead, length);
+
+	pr_debug("Sent block_read_mem command, addr=0x%X, length=%u\n",
+		 address, length);
+
+	return rc;
+}
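+
+/*
+ * A block read thus carries 8 bytes of overhead (1 opcode + 3 address +
+ * 4 dummy) ahead of the data: reading 64 bytes from the FIFO produces a
+ * 72 byte SPI transfer, with the payload taken from rx_buf + 8.
+ */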
+
+static int spis_read_reg(struct spi_device *spi, u32 addr, u32 *val)
+{
+	int rc;
+
+	if (addr < 256)
+		rc = spis_read_internal_reg(spi, addr, val);
+	else
+		rc = spis_read_mem(spi, addr, val);
+
+	pr_debug("read reg 0x%X, val = 0x%X\n", addr, *val);
+
+	return rc;
+}
+
+#define RGF_SPI_FIFO_CONTROL_ADDR (0x8800A0)
+#define RGF_SPI_FIFO_WR_PTR_ADDR (0x88009C)
+#define RGF_SPI_FIFO_RD_PTR_ADDR (0x880098)
+#define RGF_SPI_FIFO_BASE_ADDR_ADDR (0x880094)
+#define RGF_SPI_CONTROL_ADDR (0x880090)
+#define RGF_SPI_CONFIG_ADDR (0x88008C)
+static int cache_fifo_regs(struct wigig_sensing_ctx *ctx)
+{
+	int rc;
+	struct spi_device *spi = ctx->spi_dev;
+	struct spi_fifo *f = &ctx->spi_fifo;
+
+	rc  = spis_read_reg(spi, RGF_SPI_CONFIG_ADDR, &f->config.v);
+	rc |= spis_read_reg(spi, RGF_SPI_CONTROL_ADDR, &f->control.v);
+	rc |= spis_read_reg(spi, RGF_SPI_FIFO_BASE_ADDR_ADDR, &f->base_addr);
+	rc |= spis_read_reg(spi, RGF_SPI_FIFO_RD_PTR_ADDR, &f->rd_ptr);
+	rc |= spis_read_reg(spi, RGF_SPI_FIFO_WR_PTR_ADDR, &f->wr_ptr);
+	if (rc) {
+		pr_err("%s failed, rc=%u\n", __func__, rc);
+		return -EFAULT;
+	}
+
+	/* Update read pointer to be the FIFO base address */
+	f->rd_ptr = ctx->spi_fifo.base_addr;
+
+	pr_debug("SPI FIFO base address  = 0x%X\n", f->base_addr);
+	pr_debug("SPI FIFO size          = 0x%X\n", f->config.b.size);
+	pr_debug("SPI FIFO enable        = %u\n", f->config.b.enable);
+	pr_debug("SPI FIFO read pointer  = 0x%X\n", f->rd_ptr);
+	pr_debug("SPI FIFO write pointer = 0x%X\n", f->wr_ptr);
+
+	return 0;
+}
+
+static int spis_init(struct wigig_sensing_ctx *ctx)
+{
+	int rc;
+	u32 spis_sanity_reg = 0;
+	u32 jtag_id = 0;
+	struct spi_device *spi = ctx->spi_dev;
+
+	/* Initialize SPI */
+	spi->max_speed_hz = 32000000;
+	spi->bits_per_word = 8;
+	spi->mode = SPI_MODE_0;
+	rc = spi_setup(spi);
+	if (rc) {
+		pr_err("spi_setup() failed (%d)\n", rc);
+		return rc;
+	}
+
+	rc  = spis_reset(spi);
+	rc |= spis_nop(spi);
+	rc |= spis_clock_request(spi);
+	rc |= spis_nop(spi);
+	rc |= spis_read_internal_reg(spi, SPIS_SANITY_REG_ADDR,
+				     &spis_sanity_reg);
+	pr_debug("sanity = 0x%x\n", spis_sanity_reg);
+	/* use 4 dummy bytes to make block read/writes 32-bit aligned */
+	rc |= spis_write_reg(spi, SPIS_CFG_REG_ADDR, SPIS_CONFIG_REG_OPT_VAL);
+	rc |= spis_read_mem(spi, JTAG_ID_REG_ADDR, &jtag_id);
+	pr_debug("jtag_id = 0x%x\n", jtag_id);
+	if (rc || spis_sanity_reg != SPIS_SANITY_REG_VAL ||
+	    jtag_id != JTAG_ID) {
+		pr_err("SPI init failed, sanity=0x%X, jtag_id=0x%X\n",
+		       spis_sanity_reg, jtag_id);
+		return -EFAULT;
+	}
+
+	/* Copy FIFO register values from the HW */
+	rc = cache_fifo_regs(ctx);
+	if (rc)
+		pr_err("cache_fifo_regs() failed\n");
+
+	return rc;
+}
+
+static enum wigig_sensing_stm_e convert_mode_to_state(
+	enum wigig_sensing_mode mode)
+{
+	switch (mode) {
+	case WIGIG_SENSING_MODE_SEARCH:
+		return WIGIG_SENSING_STATE_SEARCH;
+	case WIGIG_SENSING_MODE_FACIAL_RECOGNITION:
+		return WIGIG_SENSING_STATE_FACIAL;
+	case WIGIG_SENSING_MODE_GESTURE_DETECTION:
+		return WIGIG_SENSING_STATE_GESTURE;
+	case WIGIG_SENSING_MODE_STOP:
+		return WIGIG_SENSING_STATE_READY_STOPPED;
+	case WIGIG_SENSING_MODE_CUSTOM:
+		return WIGIG_SENSING_STATE_CUSTOM;
+	default:
+		return WIGIG_SENSING_STATE_MIN;
+	}
+}
+
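+/*
+ * Note on the change mode handshake: the mode/channel request is not
+ * written to the INB mailbox here. It is staged in ctx->inb_cmd and an
+ * extended reset is sent to wake the chip from deep sleep; the DRI
+ * interrupt thread later merges the staged command into its DRI deassert
+ * write once the deep sleep exit interrupt arrives.
+ */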
+static int wigig_sensing_send_change_mode_command(
+	struct wigig_sensing_ctx *ctx,
+	enum wigig_sensing_mode mode,
+	u32 channel)
+{
+	int rc = 0;
+	union user_rgf_spi_mbox_inb inb_reg = { { 0 } };
+
+	pr_debug("mode=%d, channel=%d\n", mode, channel);
+	inb_reg.b.mode = mode;
+	inb_reg.b.channel_request = channel;
+
+	ctx->inb_cmd = inb_reg;
+
+	mutex_lock(&ctx->spi_lock);
+	rc = spis_extended_reset(ctx->spi_dev);
+	pr_debug("Sent extended reset command, rc = %d\n", rc);
+
+	mutex_unlock(&ctx->spi_lock);
+	if (rc) {
+		pr_err("failed to send extended reset\n");
+		return -EFAULT;
+	}
+	ctx->stm.waiting_for_deep_sleep_exit = true;
+
+	return 0;
+}
+
+static int wigig_sensing_change_state(struct wigig_sensing_ctx *ctx,
+				      struct wigig_sensing_stm *state,
+				      enum wigig_sensing_stm_e new_state)
+{
+	enum wigig_sensing_stm_e curr_state;
+	bool transition_allowed = false;
+
+	if (!state) {
+		pr_err("state is NULL\n");
+		return -EINVAL;
+	}
+	if (new_state <= WIGIG_SENSING_STATE_MIN ||
+	    new_state >= WIGIG_SENSING_STATE_MAX) {
+		pr_err("new_state is invalid\n");
+		return -EINVAL;
+	}
+
+	curr_state = state->state;
+	if (new_state == curr_state) {
+		pr_debug("Already in the requested state, bailing out\n");
+		return 0;
+	}
+
+	if ((new_state == WIGIG_SENSING_STATE_SYS_ASSERT &&
+	     !state->fw_is_ready) ||
+	    (new_state == WIGIG_SENSING_STATE_SPI_READY)) {
+		transition_allowed = true;
+	} else {
+		switch (curr_state) {
+		case WIGIG_SENSING_STATE_INITIALIZED:
+			if (new_state == WIGIG_SENSING_STATE_SPI_READY &&
+			    state->fw_is_ready)
+				transition_allowed = true;
+			break;
+		case WIGIG_SENSING_STATE_SPI_READY:
+			if (new_state == WIGIG_SENSING_STATE_READY_STOPPED &&
+			    state->enabled)
+				transition_allowed = true;
+			break;
+		case WIGIG_SENSING_STATE_READY_STOPPED:
+			if (new_state == WIGIG_SENSING_STATE_SEARCH        ||
+			    new_state == WIGIG_SENSING_STATE_FACIAL        ||
+			    new_state == WIGIG_SENSING_STATE_GESTURE       ||
+			    new_state == WIGIG_SENSING_STATE_CUSTOM        ||
+			    new_state == WIGIG_SENSING_STATE_GET_PARAMS)
+				transition_allowed = true;
+			break;
+		case WIGIG_SENSING_STATE_SEARCH:
+			if (new_state == WIGIG_SENSING_STATE_READY_STOPPED ||
+			    new_state == WIGIG_SENSING_STATE_SEARCH ||
+			    new_state == WIGIG_SENSING_STATE_FACIAL ||
+			    new_state == WIGIG_SENSING_STATE_GESTURE)
+				transition_allowed = true;
+			break;
+		case WIGIG_SENSING_STATE_FACIAL:
+			if (new_state == WIGIG_SENSING_STATE_READY_STOPPED ||
+			    new_state == WIGIG_SENSING_STATE_SEARCH)
+				transition_allowed = true;
+			break;
+		case WIGIG_SENSING_STATE_GESTURE:
+			if (new_state == WIGIG_SENSING_STATE_READY_STOPPED ||
+			    new_state == WIGIG_SENSING_STATE_SEARCH)
+				transition_allowed = true;
+			break;
+		case WIGIG_SENSING_STATE_CUSTOM:
+			if (new_state == WIGIG_SENSING_STATE_READY_STOPPED)
+				transition_allowed = true;
+			break;
+		case WIGIG_SENSING_STATE_GET_PARAMS:
+			if (new_state == WIGIG_SENSING_STATE_READY_STOPPED)
+				transition_allowed = true;
+			break;
+		case WIGIG_SENSING_STATE_SYS_ASSERT:
+			if (new_state == WIGIG_SENSING_STATE_READY_STOPPED &&
+			    state->fw_is_ready)
+				transition_allowed = true;
+			break;
+		default:
+			pr_err("new_state is invalid\n");
+			return -EINVAL;
+		}
+	}
+
+	if (transition_allowed) {
+		pr_info("state transition (%d) --> (%d)\n", curr_state,
+			new_state);
+		state->state = new_state;
+	} else {
+		pr_info("state transition rejected (%d) xx> (%d)\n",
+			curr_state, new_state);
+	}
+
+	return 0;
+}
+
+static int wigig_sensing_ioc_set_auto_recovery(struct wigig_sensing_ctx *ctx)
+{
+	pr_info("Handling WIGIG_SENSING_IOCTL_SET_AUTO_RECOVERY\n");
+	pr_info("NOT SUPPORTED\n");
+
+	ctx->stm.auto_recovery = true;
+
+	return 0;
+}
+
+static int wigig_sensing_ioc_get_mode(struct wigig_sensing_ctx *ctx)
+{
+	return ctx->stm.mode;
+}
+
+static int wigig_sensing_ioc_change_mode(struct wigig_sensing_ctx *ctx,
+					 struct wigig_sensing_change_mode req)
+{
+	struct wigig_sensing_stm sim_state;
+	enum wigig_sensing_stm_e new_state;
+	int rc;
+	u32 ch;
+
+	pr_info("mode = %d, channel = %d\n", req.mode, req.channel);
+	if (!ctx)
+		return -EINVAL;
+
+	/* Simulate a state change */
+	new_state = convert_mode_to_state(req.mode);
+	sim_state = ctx->stm;
+	rc = wigig_sensing_change_state(ctx, &sim_state, new_state);
+	if (rc || sim_state.state != new_state) {
+		pr_err("State change not allowed\n");
+		rc = -EFAULT;
+		goto End;
+	}
+
+	/* Send command to FW */
+	ctx->stm.change_mode_in_progress = true;
+	ch = req.has_channel ? req.channel : 0;
+	ctx->stm.burst_size_ready = false;
+	/* Change mode command must not be called during DRI processing */
+	mutex_lock(&ctx->dri_lock);
+	rc = wigig_sensing_send_change_mode_command(ctx, req.mode, ch);
+	mutex_unlock(&ctx->dri_lock);
+	if (rc) {
+		pr_err("wigig_sensing_send_change_mode_command() failed, err %d\n",
+		       rc);
+		goto End;
+	}
+
+	/* Put the calling process to sleep until we get a response from FW */
+	rc = wait_event_interruptible_timeout(
+		ctx->cmd_wait_q,
+		ctx->stm.burst_size_ready,
+		msecs_to_jiffies(FW_TIMEOUT_MSECS));
+	if (rc < 0) {
+		/* Interrupted by a signal */
+		pr_err("wait_event_interruptible_timeout() interrupted by a signal (%d)\n",
+		       rc);
+		return rc;
+	}
+	if (rc == 0) {
+		/* Timeout, FW did not respond in time */
+		pr_err("wait_event_interruptible_timeout() timed out\n");
+		return -ETIME;
+	}
+
+	/* Change internal state */
+	rc = wigig_sensing_change_state(ctx, &ctx->stm, new_state);
+	if (rc || ctx->stm.state != new_state) {
+		pr_err("wigig_sensing_change_state() failed\n");
+		rc = -EFAULT;
+		goto End;
+	}
+
+	ctx->dropped_bursts = 0;
+	ctx->stm.channel_request = ch;
+	ctx->stm.mode = req.mode;
+	ctx->stm.change_mode_in_progress = false;
+
+End:
+	return rc ? rc : ctx->stm.burst_size;
+}
+
+static int wigig_sensing_ioc_clear_data(struct wigig_sensing_ctx *ctx)
+{
+	struct cir_data *d = &ctx->cir_data;
+
+	if (mutex_lock_interruptible(&d->lock))
+		return -ERESTARTSYS;
+	d->b.tail = d->b.head = 0;
+
+	/* Make sure that the write above is visible to other threads */
+	wmb();
+	mutex_unlock(&d->lock);
+
+	return 0;
+}
+
+static int wigig_sensing_ioc_get_num_dropped_bursts(
+	struct wigig_sensing_ctx *ctx)
+{
+	return ctx->dropped_bursts;
+}
+
+static int wigig_sensing_ioc_get_event(struct wigig_sensing_ctx *ctx)
+{
+	return 0;
+}
+
+static int wigig_sensing_open(struct inode *inode, struct file *filp)
+{
+	/* forbid opening more than one instance at a time */
+	if (mutex_lock_interruptible(&ctx->file_lock))
+		return -ERESTARTSYS;
+
+	if (ctx->opened) {
+		mutex_unlock(&ctx->file_lock);
+		return -EBUSY;
+	}
+
+	filp->private_data = ctx;
+	ctx->opened = true;
+	mutex_unlock(&ctx->file_lock);
+
+	return 0;
+}
+
+static unsigned int wigig_sensing_poll(struct file *filp, poll_table *wait)
+{
+	struct wigig_sensing_ctx *ctx = filp->private_data;
+	unsigned int mask = 0;
+
+	if (!ctx->opened)
+		return POLLERR;
+
+	poll_wait(filp, &ctx->data_wait_q, wait);
+
+	if (circ_cnt(&ctx->cir_data.b, ctx->cir_data.size_bytes))
+		mask |= (POLLIN | POLLRDNORM);
+
+	if (ctx->event_pending)
+		mask |= (POLLPRI);
+
+	return mask;
+}
+
+static ssize_t wigig_sensing_read(struct file *filp, char __user *buf,
+				  size_t count, loff_t *f_pos)
+{
+	int rc = 0;
+	struct wigig_sensing_ctx *ctx = filp->private_data;
+	u32 copy_size, size_to_end;
+	int tail;
+	struct cir_data *d = &ctx->cir_data;
+
+	/* Driver not ready to send data */
+	if ((!ctx) ||
+	    (!ctx->spi_dev) ||
+	    (!d->b.buf))
+		return -ENODEV;
+
+	/* No data in the buffer */
+	while (circ_cnt(&d->b, d->size_bytes) == 0) {
+		if (filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+
+		if (wait_event_interruptible(ctx->data_wait_q,
+			circ_cnt(&d->b, d->size_bytes) != 0))
+			return -ERESTARTSYS;
+	}
+
+	if (mutex_lock_interruptible(&d->lock))
+		return -ERESTARTSYS;
+
+	copy_size = min_t(u32, circ_cnt(&d->b, d->size_bytes), count);
+	size_to_end = circ_cnt_to_end(&d->b, d->size_bytes);
+	tail = d->b.tail;
+	pr_debug("copy_size=%u, size_to_end=%u, head=%u, tail=%u\n",
+		 copy_size, size_to_end, d->b.head, tail);
+	if (copy_size <= size_to_end) {
+		rc = copy_to_user(buf, &d->b.buf[tail], copy_size);
+		if (rc) {
+			pr_err("copy_to_user() failed.\n");
+			rc = -EFAULT;
+			goto bail_out;
+		}
+	} else {
+		rc = copy_to_user(buf, &d->b.buf[tail], size_to_end);
+		if (rc) {
+			pr_err("copy_to_user() failed.\n");
+			rc = -EFAULT;
+			goto bail_out;
+		}
+
+		rc = copy_to_user(buf + size_to_end, &d->b.buf[0],
+				  copy_size - size_to_end);
+		if (rc) {
+			pr_err("copy_to_user() failed.\n");
+			rc = -EFAULT;
+			goto bail_out;
+		}
+	}
+
+	/* Increment tail pointer */
+	d->b.tail = (d->b.tail + copy_size) & (d->size_bytes - 1);
+
+bail_out:
+	mutex_unlock(&d->lock);
+	return (rc == 0) ? copy_size : rc;
+}
+
+static int wigig_sensing_release(struct inode *inode, struct file *filp)
+{
+	struct wigig_sensing_ctx *ctx = filp->private_data;
+
+	if (!ctx || !ctx->spi_dev)
+		return -ENODEV;
+
+	mutex_lock(&ctx->file_lock);
+
+	if (!ctx->opened) {
+		mutex_unlock(&ctx->file_lock);
+		return -ENODEV;
+	}
+
+	filp->private_data = NULL;
+	ctx->opened = false;
+
+	mutex_unlock(&ctx->file_lock);
+
+	return 0;
+}
+
+static long wigig_sensing_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	int rc;
+	struct wigig_sensing_ctx *ctx = file->private_data;
+
+	if (!ctx || !ctx->spi_dev)
+		return -ENODEV;
+
+	if (mutex_lock_interruptible(&ctx->ioctl_lock))
+		return -ERESTARTSYS;
+
+	if (!ctx->opened) {
+		mutex_unlock(&ctx->ioctl_lock);
+		return -ENODEV;
+	}
+
+	/* Check type and command number */
+	if (_IOC_TYPE(cmd) != WIGIG_SENSING_IOC_MAGIC) {
+		mutex_unlock(&ctx->ioctl_lock);
+		return -ENOTTY;
+	}
+
+	switch (_IOC_NR(cmd)) {
+	case WIGIG_SENSING_IOCTL_SET_AUTO_RECOVERY:
+		pr_info("Received WIGIG_SENSING_IOCTL_SET_AUTO_RECOVERY command\n");
+		rc = wigig_sensing_ioc_set_auto_recovery(ctx);
+		break;
+	case WIGIG_SENSING_IOCTL_GET_MODE:
+		pr_info("Received WIGIG_SENSING_IOCTL_GET_MODE command\n");
+		rc = wigig_sensing_ioc_get_mode(ctx);
+		break;
+	case WIGIG_SENSING_IOCTL_CHANGE_MODE:
+	{
+		struct wigig_sensing_change_mode req;
+
+		pr_info("Received WIGIG_SENSING_IOCTL_CHANGE_MODE command\n");
+
+		if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = wigig_sensing_ioc_change_mode(ctx, req);
+		break;
+	}
+	case WIGIG_SENSING_IOCTL_CLEAR_DATA:
+		pr_info("Received WIGIG_SENSING_IOCTL_CLEAR_DATA command\n");
+		rc = wigig_sensing_ioc_clear_data(ctx);
+		break;
+	case WIGIG_SENSING_IOCTL_GET_NUM_DROPPED_BURSTS:
+		pr_info("Received WIGIG_SENSING_IOCTL_GET_NUM_DROPPED_BURSTS command\n");
+		rc = wigig_sensing_ioc_get_num_dropped_bursts(ctx);
+		break;
+	case WIGIG_SENSING_IOCTL_GET_EVENT:
+		pr_info("Received WIGIG_SENSING_IOCTL_GET_EVENT command\n");
+		rc = wigig_sensing_ioc_get_event(ctx);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&ctx->ioctl_lock);
+
+	return rc;
+}
+
+static const struct file_operations wigig_sensing_fops = {
+	.owner          = THIS_MODULE,
+	.llseek         = no_llseek,
+	.open           = wigig_sensing_open,
+	.poll           = wigig_sensing_poll,
+	.read           = wigig_sensing_read,
+	.release        = wigig_sensing_release,
+	.unlocked_ioctl = wigig_sensing_ioctl,
+};
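+
+/*
+ * Illustrative userspace flow for the character device above (a sketch,
+ * not part of the driver; the /dev node name follows from device_create()
+ * with DRV_NAME, and the request codes come from wigig_sensing_uapi.h):
+ *
+ *	int fd = open("/dev/wigig_sensing", O_RDONLY);
+ *	struct wigig_sensing_change_mode req = {
+ *		.mode = WIGIG_SENSING_MODE_SEARCH,
+ *	};
+ *	int burst_size = ioctl(fd, WIGIG_SENSING_IOCTL_CHANGE_MODE, &req);
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *	poll(&pfd, 1, -1);
+ *	read(fd, buf, burst_size);
+ *
+ * read() returns CIR data in 32-bit units once poll() reports POLLIN.
+ */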
+
+static int wigig_sensing_alloc_buffer(struct wigig_sensing_ctx *ctx,
+				      u32 buffer_size_bytes)
+{
+	/* Allocate CIR data_buffer */
+	ctx->cir_data.b.buf = vmalloc(buffer_size_bytes);
+	if (!ctx->cir_data.b.buf)
+		return -ENOMEM;
+
+	ctx->cir_data.size_bytes = buffer_size_bytes;
+	mutex_init(&ctx->cir_data.lock);
+
+	return 0;
+}
+
+static int wigig_sensing_deassert_dri(
+	struct wigig_sensing_ctx *ctx,
+	union user_rgf_spi_mbox_inb additional_cmd)
+{
+	union user_rgf_spi_mbox_inb inb_reg;
+	int rc;
+
+	inb_reg = additional_cmd;
+	inb_reg.b.deassert_dri = 1;
+	mutex_lock(&ctx->spi_lock);
+	rc = spis_write_reg(ctx->spi_dev, RGF_USER_SPI_SPI_MBOX_INB, inb_reg.v);
+	mutex_unlock(&ctx->spi_lock);
+	if (rc)
+		pr_err("Fail to write RGF_USER_SPI_SPI_MBOX_INB, err %d\n",
+		       rc);
+
+	return rc;
+}
+
+#define MAX_SPI_READ_CHUNKS (10)
+/*
+ * Calculate an SPI transaction size that is no larger than the maximum
+ * size and equal for every iteration. Equal sizes result in fewer SPI
+ * transactions, since the length register does not need reprogramming.
+ */
+static u32 calc_spi_transaction_size(
+	u32 fill_level, u32 max_spi_transaction_size)
+{
+	int i;
+	u32 res;
+
+	if (fill_level <= max_spi_transaction_size)
+		return fill_level;
+
+	for (i = 2; i < MAX_SPI_READ_CHUNKS; i++) {
+		res = fill_level / i;
+		if (res <= max_spi_transaction_size && fill_level % i == 0)
+			return res;
+	}
+
+	return max_spi_transaction_size;
+}
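+
+/*
+ * Worked example: fill_level = 20480 bytes with an 8192 byte maximum.
+ * 20480 / 2 = 10240 is still too large, 20480 does not divide evenly by 3,
+ * and 20480 / 4 = 5120 fits, so four equal 5120 byte transactions are used.
+ */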
+
+static int wigig_sensing_handle_fifo_ready_dri(struct wigig_sensing_ctx *ctx)
+{
+	int rc = 0;
+	u32 burst_size = 0;
+	u32 spi_transaction_size;
+
+	mutex_lock(&ctx->spi_lock);
+
+	/* Read burst size over SPI */
+	rc = spis_read_reg(ctx->spi_dev, RGF_USER_SPI_SPI_EXT_MBOX_OUTB,
+			   &burst_size);
+	if (rc) {
+		pr_err("Fail to read RGF_USER_SPI_SPI_EXT_MBOX_OUTB, err %d\n",
+		       rc);
+		goto End;
+	}
+
+	if (!ctx->stm.enabled && burst_size != 0) {
+		pr_info("Invalid burst size while disabled %d\n", burst_size);
+		rc = -EFAULT;
+		goto End;
+	}
+
+	ctx->stm.burst_size = burst_size;
+	if (!ctx->stm.enabled ||
+	    ctx->stm.state >= WIGIG_SENSING_STATE_SYS_ASSERT ||
+	    ctx->stm.state < WIGIG_SENSING_STATE_SPI_READY) {
+		pr_err("Received burst_size in an unexpected state\n");
+		rc = -EFAULT;
+		goto End;
+	}
+
+	/* Program burst size into the transfer length register */
+	spi_transaction_size = calc_spi_transaction_size(burst_size,
+					SPI_MAX_TRANSACTION_SIZE);
+	rc = spis_write_reg(ctx->spi_dev, SPIS_TRNS_LEN_REG_ADDR,
+			    spi_transaction_size << 16);
+	if (rc) {
+		pr_err("Failed setting SPIS_TRNS_LEN_REG_ADDR\n");
+		goto End;
+	}
+	ctx->last_read_length = spi_transaction_size;
+
+	ctx->stm.burst_size_ready = true;
+
+	/*
+	 * Allocate a temporary buffer to be used in case of cir_data buffer
+	 * wrap around
+	 */
+	if (burst_size != 0) {
+		ctx->temp_buffer = vmalloc(burst_size);
+		if (!ctx->temp_buffer) {
+			rc = -ENOMEM;
+			goto End;
+		}
+	} else if (ctx->temp_buffer) {
+		vfree(ctx->temp_buffer);
+		ctx->temp_buffer = NULL;
+	}
+
+	wake_up_interruptible(&ctx->cmd_wait_q);
+End:
+	mutex_unlock(&ctx->spi_lock);
+	return rc;
+}
+
+static int wigig_sensing_chip_data_ready(struct wigig_sensing_ctx *ctx,
+					 u16 fill_level, u32 burst_size)
+{
+	int rc = 0;
+	enum wigig_sensing_stm_e stm_state = ctx->stm.state;
+	struct spi_fifo *spi_fifo = &ctx->spi_fifo;
+	struct cir_data *d = &ctx->cir_data;
+	struct circ_buf local;
+	u32 bytes_to_read;
+	u32 idx = 0;
+	u32 spi_transaction_size;
+	u32 available_space_to_end;
+
+	if (stm_state == WIGIG_SENSING_STATE_INITIALIZED ||
+	    stm_state == WIGIG_SENSING_STATE_SPI_READY ||
+	    stm_state == WIGIG_SENSING_STATE_READY_STOPPED ||
+	    stm_state == WIGIG_SENSING_STATE_SYS_ASSERT) {
+		pr_err("Received data ready interrupt in an unexpected stm_state, disregarding\n");
+		return 0;
+	}
+
+	if (!ctx->cir_data.b.buf)
+		return -EFAULT;
+
+	/*
+	 * Read data from the FIFO over SPI.
+	 * Disregard the lowest 2 bits of fill_level, as SPI reads in 32 bit
+	 * units. Additionally, HW reports fill_level as one more than the
+	 * actual value (1 when the FIFO is empty).
+	 */
+	if (fill_level == 0 || fill_level == 1) {
+		pr_err("Wrong fill_level received\n");
+		return -EFAULT;
+	}
+	fill_level = (fill_level - 1) & ~0x3;
+
+	if (fill_level > burst_size) {
+		pr_err("FIFO fill level too large, fill_level = %u, burst_size = %u\n",
+		       fill_level, burst_size);
+		return -EFAULT;
+	}
+
+	/*
+	 * In case there is not enough space in the buffer, discard an old
+	 * burst
+	 */
+	if (circ_space(&d->b, d->size_bytes) < fill_level) {
+		mutex_lock(&d->lock);
+		if (circ_space(&d->b, d->size_bytes) < fill_level) {
+			pr_debug("Buffer full, dropping burst\n");
+			d->b.tail = (d->b.tail + burst_size) &
+				(d->size_bytes - 1);
+			ctx->dropped_bursts++;
+		}
+		mutex_unlock(&d->lock);
+	}
+
+	spi_transaction_size =
+		calc_spi_transaction_size(fill_level, SPI_MAX_TRANSACTION_SIZE);
+	local = d->b;
+	mutex_lock(&ctx->spi_lock);
+	while (fill_level > 0) {
+		bytes_to_read = (fill_level < spi_transaction_size) ?
+			fill_level : spi_transaction_size;
+		available_space_to_end =
+			circ_space_to_end(&local, d->size_bytes);
+		pr_debug("fill_level=%u, bytes_to_read=%u, idx=%u, available_space_to_end = %u\n",
+			 fill_level, bytes_to_read, idx,
+			 available_space_to_end);
+		/* Determine transaction type */
+		if (available_space_to_end >= bytes_to_read) {
+			rc = spis_block_read_mem(ctx->spi_dev,
+						 spi_fifo->base_addr + idx,
+						 &d->b.buf[local.head],
+						 bytes_to_read,
+						 ctx->last_read_length
+			);
+		} else {
+			/*
+			 * There is not enough contiguous space in the CIR
+			 * buffer; copy to a temporary buffer and then split
+			 */
+			rc = spis_block_read_mem(ctx->spi_dev,
+						 spi_fifo->base_addr + idx,
+						 ctx->temp_buffer,
+						 bytes_to_read,
+						 ctx->last_read_length
+			);
+			memcpy(&d->b.buf[local.head], ctx->temp_buffer,
+			       available_space_to_end);
+			memcpy(&d->b.buf[0],
+			       &ctx->temp_buffer[available_space_to_end],
+			       bytes_to_read - available_space_to_end);
+		}
+
+		fill_level -= bytes_to_read;
+		idx += bytes_to_read;
+		local.head = (local.head + bytes_to_read) & (d->size_bytes - 1);
+	}
+	mutex_unlock(&ctx->spi_lock);
+
+	/* Increment destination rd_ptr */
+	mutex_lock(&d->lock);
+	d->b.head = local.head;
+	pr_debug("head=%u, tail=%u\n", d->b.head, d->b.tail);
+	mutex_unlock(&d->lock);
+
+	wake_up_interruptible(&ctx->data_wait_q);
+
+	return 0;
+}
+
+static int wigig_sensing_spi_init(struct wigig_sensing_ctx *ctx)
+{
+	int rc;
+
+	/* Allocate buffers for SPI transactions */
+	ctx->cmd_buf = devm_kzalloc(ctx->dev, SPI_CMD_BUFFER_SIZE, GFP_KERNEL);
+	if (!ctx->cmd_buf)
+		return -ENOMEM;
+
+	ctx->cmd_reply_buf = devm_kzalloc(ctx->dev, SPI_CMD_BUFFER_SIZE,
+					  GFP_KERNEL);
+	if (!ctx->cmd_reply_buf) {
+		rc = -ENOMEM;
+		goto cmd_reply_buf_alloc_failed;
+	}
+
+	ctx->rx_buf = devm_kzalloc(ctx->dev, SPI_BUFFER_SIZE, GFP_KERNEL);
+	if (!ctx->rx_buf) {
+		rc = -ENOMEM;
+		goto rx_buf_alloc_failed;
+	}
+
+	ctx->tx_buf = devm_kzalloc(ctx->dev, SPI_BUFFER_SIZE, GFP_KERNEL);
+	if (!ctx->tx_buf) {
+		rc = -ENOMEM;
+		goto tx_buf_alloc_failed;
+	}
+
+	/* Initialize SPI slave device */
+	mutex_lock(&ctx->spi_lock);
+	rc = spis_init(ctx);
+	mutex_unlock(&ctx->spi_lock);
+	if (rc) {
+		rc = -EFAULT;
+		goto spis_init_failed;
+	}
+
+	return 0;
+
+spis_init_failed:
+	devm_kfree(ctx->dev, ctx->tx_buf);
+	ctx->tx_buf = NULL;
+tx_buf_alloc_failed:
+	devm_kfree(ctx->dev, ctx->rx_buf);
+	ctx->rx_buf = NULL;
+rx_buf_alloc_failed:
+	devm_kfree(ctx->dev, ctx->cmd_reply_buf);
+	ctx->cmd_reply_buf = NULL;
+cmd_reply_buf_alloc_failed:
+	devm_kfree(ctx->dev, ctx->cmd_buf);
+	ctx->cmd_buf = NULL;
+
+	return rc;
+}
+
+static irqreturn_t wigig_sensing_dri_isr_thread(int irq, void *cookie)
+{
+	struct wigig_sensing_ctx *ctx = cookie;
+	union user_rgf_spi_status spi_status;
+	int rc;
+	u32 sanity_reg = 0;
+	union user_rgf_spi_mbox_inb additional_inb_command;
+	u8 num_retries = 0;
+
+	mutex_lock(&ctx->dri_lock);
+
+	pr_debug("Process DRI signal, ctx->stm.state == %d\n", ctx->stm.state);
+	memset(&additional_inb_command, 0, sizeof(additional_inb_command));
+
+	if (ctx->stm.waiting_for_deep_sleep_exit) {
+		pr_debug("waiting_for_deep_sleep_exit is set, sending NOP\n");
+		ctx->stm.waiting_for_deep_sleep_exit_first_pass = true;
+		mutex_lock(&ctx->spi_lock);
+		rc = spis_nop(ctx->spi_dev);
+		/* use 4 dummy bytes to make block read/writes 32-bit aligned */
+		rc |= spis_write_reg(ctx->spi_dev, SPIS_CFG_REG_ADDR,
+				     SPIS_CONFIG_REG_OPT_VAL);
+		mutex_unlock(&ctx->spi_lock);
+	}
+
+	while (num_retries < NUM_RETRIES) {
+		if (ctx->stm.state == WIGIG_SENSING_STATE_INITIALIZED ||
+		    ctx->stm.spi_malfunction) {
+			pr_debug("Initializing SPI for the first time, or SPI malfunction\n");
+
+			rc = wigig_sensing_spi_init(ctx);
+			if (rc) {
+				pr_err("wigig_sensing_spi_init() failed\n");
+				goto bail_out;
+			}
+
+			ctx->stm.spi_malfunction = false;
+			if (ctx->stm.state == WIGIG_SENSING_STATE_INITIALIZED)
+				wigig_sensing_change_state(ctx, &ctx->stm,
+					WIGIG_SENSING_STATE_SPI_READY);
+		}
+
+		pr_debug("Reading SANITY register\n");
+		mutex_lock(&ctx->spi_lock);
+		rc = spis_read_reg(ctx->spi_dev, SPIS_SANITY_REG_ADDR,
+				   &sanity_reg);
+		mutex_unlock(&ctx->spi_lock);
+		if (rc || sanity_reg != SPIS_SANITY_REG_VAL) {
+			pr_err("Fail to read SANITY, expected 0x%X found 0x%X err %d\n",
+			       SPIS_SANITY_REG_VAL, sanity_reg, (int)rc);
+			ctx->stm.spi_malfunction = true;
+		} else {
+			pr_debug("SANITY OK\n");
+			ctx->stm.spi_malfunction = false;
+			break;
+		}
+
+		num_retries++;
+		if (num_retries == NUM_RETRIES) {
+			pr_err("SPI bus malfunction, bailing out\n");
+			goto bail_out;
+		}
+	}
+
+	pr_debug("Reading RGF_USER_SPI_SPI_MBOX_FILL_STATUS register\n");
+	mutex_lock(&ctx->spi_lock);
+	rc = spis_read_reg(ctx->spi_dev, RGF_USER_SPI_SPI_MBOX_FILL_STATUS,
+			   &spi_status.v);
+	mutex_unlock(&ctx->spi_lock);
+
+	if (rc) {
+		pr_err("Fail to read RGF_USER_SPI_SPI_MBOX_FILL_STATUS, err %d\n",
+		       rc);
+		goto bail_out;
+	}
+
+	if (spi_status.b.int_fw_ready) {
+		pr_debug("FW READY INTERRUPT\n");
+		ctx->stm.fw_is_ready = true;
+		ctx->stm.channel_request = 0;
+		ctx->stm.burst_size = 0;
+		ctx->stm.mode = WIGIG_SENSING_MODE_STOP;
+		ctx->stm.enabled = true;
+
+		wigig_sensing_change_state(ctx, &ctx->stm,
+					   WIGIG_SENSING_STATE_READY_STOPPED);
+
+		spi_status.v &= ~INT_FW_READY;
+	}
+	if (spi_status.b.int_data_ready) {
+		pr_debug("DATA READY INTERRUPT\n");
+		if (!ctx->stm.change_mode_in_progress)
+			wigig_sensing_chip_data_ready(ctx,
+						      spi_status.b.fill_level,
+						      ctx->stm.burst_size);
+		else
+			pr_debug("Change mode in progress, aborting data processing\n");
+		spi_status.v &= ~INT_DATA_READY;
+	}
+	if (spi_status.b.int_sysassert) {
+		pr_debug("SYSASSERT INTERRUPT\n");
+		ctx->stm.fw_is_ready = false;
+
+		rc = wigig_sensing_change_state(ctx, &ctx->stm,
+				WIGIG_SENSING_STATE_SYS_ASSERT);
+		if (rc != 0 ||
+		    ctx->stm.state != WIGIG_SENSING_STATE_SYS_ASSERT)
+			pr_err("State change to WIGIG_SENSING_SYS_ASSERT failed\n");
+
+		ctx->stm.spi_malfunction = true;
+		spi_status.v &= ~INT_SYSASSERT;
+	}
+	if (spi_status.b.int_deep_sleep_exit ||
+	    (ctx->stm.waiting_for_deep_sleep_exit &&
+	     ctx->stm.waiting_for_deep_sleep_exit_first_pass)) {
+		if (spi_status.b.int_deep_sleep_exit)
+			pr_debug("DEEP SLEEP EXIT INTERRUPT\n");
+
+		if (ctx->stm.waiting_for_deep_sleep_exit) {
+			additional_inb_command = ctx->inb_cmd;
+			memset(&ctx->inb_cmd, 0, sizeof(ctx->inb_cmd));
+		} else {
+			pr_err("Got deep sleep exit DRI when ctx->stm.waiting_for_deep_sleep_exit is false\n");
+		}
+		ctx->stm.waiting_for_deep_sleep_exit = false;
+		ctx->stm.waiting_for_deep_sleep_exit_first_pass = false;
+		spi_status.v &= ~INT_DEEP_SLEEP_EXIT;
+	}
+	if (spi_status.b.int_fifo_ready) {
+		pr_debug("FIFO READY INTERRUPT\n");
+		wigig_sensing_handle_fifo_ready_dri(ctx);
+
+		spi_status.v &= ~INT_FIFO_READY;
+	}
+
+	/*
+	 * Check for unexpected interrupts; the lowest 24 bits hold the FIFO
+	 * fill level and status and need to be masked out
+	 */
+	if ((spi_status.v & CLEAR_LOW_24_BITS) != 0)
+		pr_err("Unexpected interrupt received, spi_status=0x%X\n",
+		       spi_status.v & CLEAR_LOW_24_BITS);
+
+	/* Notify FW we are done with interrupt handling */
+	rc = wigig_sensing_deassert_dri(ctx, additional_inb_command);
+	if (rc)
+		pr_err("wigig_sensing_deassert_dri() failed, rc=%d\n",
+		       rc);
+
+bail_out:
+	mutex_unlock(&ctx->dri_lock);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t wigig_sensing_dri_isr_hard(int irq, void *cookie)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+static int wigig_sensing_probe(struct spi_device *spi)
+{
+	int rc = 0;
+	struct wigig_sensing_platform_data pdata;
+
+	/* Allocate driver context */
+	ctx = devm_kzalloc(&spi->dev, sizeof(struct wigig_sensing_ctx),
+			   GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	/* Initialize driver context */
+	spi_set_drvdata(spi, ctx);
+	ctx->spi_dev = spi;
+	ctx->opened = false;
+	mutex_init(&ctx->ioctl_lock);
+	mutex_init(&ctx->file_lock);
+	mutex_init(&ctx->spi_lock);
+	mutex_init(&ctx->dri_lock);
+	init_waitqueue_head(&ctx->cmd_wait_q);
+	init_waitqueue_head(&ctx->data_wait_q);
+	ctx->stm.state = WIGIG_SENSING_STATE_INITIALIZED;
+
+	/* Allocate a 2 MB (2^21 bytes) buffer for CIR data */
+	rc = wigig_sensing_alloc_buffer(ctx, 1 << 21);
+	if (rc) {
+		pr_err("wigig_sensing_alloc_buffer() failed (%d)\n", rc);
+		return rc;
+	}
+
+	/* Create device node */
+	rc = alloc_chrdev_region(&ctx->wigig_sensing_dev, 0, 1, DRV_NAME);
+	if (rc < 0) {
+		pr_err("alloc_chrdev_region() failed\n");
+		return rc;
+	}
+
+	ctx->class = class_create(THIS_MODULE, DRV_NAME);
+	if (IS_ERR(ctx->class)) {
+		rc = PTR_ERR(ctx->class);
+		pr_err("Error creating ctx->class: %d\n", rc);
+		goto fail_class_create;
+	}
+
+	ctx->dev = device_create(ctx->class, NULL, ctx->wigig_sensing_dev, ctx,
+				 DRV_NAME);
+	if (IS_ERR(ctx->dev)) {
+		rc = PTR_ERR(ctx->dev);
+		pr_err("device_create failed: %d\n", rc);
+		goto fail_device_create;
+	}
+
+	cdev_init(&ctx->cdev, &wigig_sensing_fops);
+	ctx->cdev.owner = THIS_MODULE;
+	ctx->cdev.ops = &wigig_sensing_fops;
+	rc = cdev_add(&ctx->cdev, ctx->wigig_sensing_dev, 1);
+	if (rc) {
+		pr_err("Error calling cdev_add: %d\n", rc);
+		goto fail_cdev_add;
+	}
+
+	/* Setup DRI - interrupt */
+	pdata.dri_gpio = devm_gpiod_get(&spi->dev, "dri", GPIOD_IN);
+	if (IS_ERR(pdata.dri_gpio)) {
+		pr_err("Could not find dri gpio\n");
+		rc = -EFAULT;
+		goto fail_gpiod_get;
+	}
+	ctx->dri_gpio = pdata.dri_gpio;
+	ctx->dri_irq = gpiod_to_irq(ctx->dri_gpio);
+	pr_debug("dri_irq = %d\n", ctx->dri_irq);
+	rc = devm_request_threaded_irq(&spi->dev, ctx->dri_irq,
+		wigig_sensing_dri_isr_hard, wigig_sensing_dri_isr_thread,
+		IRQF_TRIGGER_RISING, "wigig_sensing_dri", ctx);
+	if (rc) {
+		pr_err("devm_request_threaded_irq() failed (%d)\n", rc);
+		goto fail_gpiod_get;
+	}
+
+	return 0;
+
+fail_gpiod_get:
+	cdev_del(&ctx->cdev);
+fail_cdev_add:
+	device_destroy(ctx->class, ctx->wigig_sensing_dev);
+fail_device_create:
+	class_destroy(ctx->class);
+fail_class_create:
+	unregister_chrdev_region(ctx->wigig_sensing_dev, 1);
+
+	return rc;
+}
+
+static int wigig_sensing_remove(struct spi_device *spi)
+{
+	struct wigig_sensing_ctx *ctx = spi_get_drvdata(spi);
+	struct wigig_sensing_change_mode req = {
+		.mode = WIGIG_SENSING_MODE_STOP,
+		.has_channel = false,
+		.channel = 0,
+	};
+
+	/* Make sure that FW is in STOP mode */
+	wigig_sensing_ioc_change_mode(ctx, req);
+
+	device_destroy(ctx->class, ctx->wigig_sensing_dev);
+	unregister_chrdev_region(ctx->wigig_sensing_dev, 1);
+	class_destroy(ctx->class);
+	cdev_del(&ctx->cdev);
+	vfree(ctx->cir_data.b.buf);
+	spi_set_drvdata(ctx->spi_dev, NULL);
+
+	return 0;
+}
+
+static const struct of_device_id wigig_sensing_match_table[] = {
+	{ .compatible = DRV_NAME },
+	{}
+};
+
+static struct spi_driver radar_spi_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = of_match_ptr(wigig_sensing_match_table),
+	},
+	.probe = wigig_sensing_probe,
+	.remove = wigig_sensing_remove,
+};
+
+module_spi_driver(radar_spi_driver);
+
+MODULE_DESCRIPTION("Wigig sensing slave SPI Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/wigig_sensing.h b/drivers/misc/wigig_sensing.h
new file mode 100644
index 0000000..c4b2910
--- /dev/null
+++ b/drivers/misc/wigig_sensing.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __WIGIG_SENSING_H__
+#define __WIGIG_SENSING_H__
+#include <linux/cdev.h>
+#include <linux/circ_buf.h>
+#include <linux/slab.h>
+#include <uapi/misc/wigig_sensing_uapi.h>
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "[wigig_sensing]: " fmt
+
+/* Registers */
+#define RGF_USER_SPI_SPI_MBOX_FILL_STATUS (0x880080)
+#define RGF_USER_SPI_SPI_EXT_MBOX_OUTB    (0x880084)
+#define RGF_USER_SPI_SPI_MBOX_INB         (0x880088)
+#define RGF_SPI_FIFO_CONTROL_ADDR         (0x8800A0)
+#define RGF_SPI_FIFO_WR_PTR_ADDR          (0x88009C)
+#define RGF_SPI_FIFO_RD_PTR_ADDR          (0x880098)
+#define RGF_SPI_FIFO_BASE_ADDR_ADDR       (0x880094)
+#define RGF_SPI_CONTROL_ADDR              (0x880090)
+#define RGF_SPI_CONFIG_ADDR               (0x88008C)
+
+#define SPIS_TRNS_LEN_REG_ADDR          (0x50)
+#define ADDR_WIDTH                      (3)
+#define DUMMY_BYTES_WIDTH               (4)
+#define OPCODE_WIDTH                    (1)
+#define SPIS_SANITY_REG_ADDR            (0x0)
+#define SPIS_SANITY_REG_VAL             (0xDEADBEEF)
+#define SPIS_CFG_REG_ADDR               (0xC)
+#define JTAG_ID_REG_ADDR                (0x880000)
+#define JTAG_ID                         (0x1007E0E1)
+/* optimized configuration with 4 dummy bytes */
+#define SPIS_CONFIG_REG_OPT_VAL         (0x44200800)
+#define SPIS_EXTENDED_RESET_COMMAND_LEN (225)
+
+#define SPI_MAX_TRANSACTION_SIZE (8*1024)
+#define SPI_CMD_TRANSACTION_SIZE (512)
+#define SPI_BUFFER_SIZE (SPI_MAX_TRANSACTION_SIZE + OPCODE_WIDTH + \
+			 ADDR_WIDTH + DUMMY_BYTES_WIDTH)
+#define SPI_CMD_BUFFER_SIZE (SPI_CMD_TRANSACTION_SIZE + OPCODE_WIDTH + \
+			     ADDR_WIDTH + DUMMY_BYTES_WIDTH)
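+
+/*
+ * For example, SPI_BUFFER_SIZE works out to 8192 + 1 + 3 + 4 = 8200 bytes:
+ * one maximum-size data transaction plus the opcode, address and dummy
+ * bytes of overhead.
+ */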
+
+#define INT_FW_READY          BIT(24)
+#define INT_DATA_READY        BIT(25)
+#define INT_FIFO_READY        BIT(26)
+#define INT_SYSASSERT         BIT(29)
+#define INT_DEEP_SLEEP_EXIT   BIT(30)
+union user_rgf_spi_status {
+	struct {
+		u16 fill_level:16;
+		int reserved1:3;
+		u8 spi_fifo_thr_status:1;
+		u8 spi_fifo_empty_status:1;
+		u8 spi_fifo_full_status:1;
+		u8 spi_fifo_underrun_status:1;
+		u8 spi_fifo_overrun_status:1;
+
+		/* mbox_outb */
+		u8 int_fw_ready:1; /* FW MBOX ready */
+		u8 int_data_ready:1; /* data available on FIFO */
+		u8 int_fifo_ready:1; /* FIFO status update */
+		u8 reserved2:1;
+		u8 reserved3:1;
+		u8 int_sysassert:1; /* SYSASSERT occurred */
+		u8 int_deep_sleep_exit:1;
+		u8 reserved4:1;
+	} __packed b;
+	u32 v;
+} __packed;
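+
+/*
+ * The mbox_outb bitfields above line up with the INT_* masks: fill_level
+ * and the FIFO status bits occupy bits 0-23, so int_fw_ready is BIT(24)
+ * (INT_FW_READY), int_data_ready is BIT(25), int_fifo_ready is BIT(26),
+ * int_sysassert is BIT(29) and int_deep_sleep_exit is BIT(30), matching
+ * what the interrupt thread tests and clears on spi_status.v.
+ */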
+
+union user_rgf_spi_mbox_inb {
+	struct {
+		u8 mode:4;
+		u8 channel_request:4;
+		u32 reserved:23;
+		u8 deassert_dri:1;
+	} __packed b;
+	u32 v;
+} __packed;
+
+union rgf_spi_config {
+	struct {
+		u16 size:16;
+		u8 reserved1:8;
+		u8 mbox_auto_clear_disable:1;
+		u8 status_auto_clear_disable:1;
+		u8 reserved2:5;
+		u8 enable:1;
+	} __packed b;
+	u32 v;
+} __packed;
+
+union rgf_spi_control {
+	struct {
+		u8 read_ptr_clear:1;
+		u8 fill_level_clear:1;
+		u8 thresh_reach_clear:1;
+		u8 status_field_clear:1;
+		u8 mbox_field_clear:1;
+		u32 reserved:27;
+	} __packed b;
+	u32 v;
+} __packed;
+
+struct cir_data {
+	struct circ_buf b;
+	u32 size_bytes;
+	struct mutex lock;
+};
+
+struct spi_fifo {
+	u32 wr_ptr;
+	u32 rd_ptr;
+	u32 base_addr;
+	union rgf_spi_config config;
+	union rgf_spi_control control;
+};
+
+/**
+ * State machine states
+ * TODO: Document states
+ */
+enum wigig_sensing_stm_e {
+	WIGIG_SENSING_STATE_MIN = 0,
+	WIGIG_SENSING_STATE_INITIALIZED,
+	WIGIG_SENSING_STATE_SPI_READY,
+	WIGIG_SENSING_STATE_READY_STOPPED,
+	WIGIG_SENSING_STATE_SEARCH,
+	WIGIG_SENSING_STATE_FACIAL,
+	WIGIG_SENSING_STATE_GESTURE,
+	WIGIG_SENSING_STATE_CUSTOM,
+	WIGIG_SENSING_STATE_GET_PARAMS,
+	WIGIG_SENSING_STATE_SYS_ASSERT,
+	WIGIG_SENSING_STATE_MAX,
+};
+
+struct wigig_sensing_stm {
+	bool auto_recovery;
+	bool enabled;
+	bool fw_is_ready;
+	bool spi_malfunction;
+	bool sys_assert;
+	bool waiting_for_deep_sleep_exit;
+	bool waiting_for_deep_sleep_exit_first_pass;
+	bool burst_size_ready;
+	bool change_mode_in_progress;
+	enum wigig_sensing_stm_e state;
+	enum wigig_sensing_mode mode;
+	u32 burst_size;
+	u32 channel_request;
+};
+
+struct wigig_sensing_ctx {
+	dev_t wigig_sensing_dev;
+	struct cdev cdev;
+	struct class *class;
+	struct device *dev;
+	struct spi_device *spi_dev;
+
+	/* Locks */
+	struct mutex ioctl_lock;
+	struct mutex file_lock;
+	struct mutex spi_lock;
+	struct mutex dri_lock;
+	wait_queue_head_t cmd_wait_q;
+	wait_queue_head_t data_wait_q;
+
+	/* DRI */
+	struct gpio_desc *dri_gpio;
+	int dri_irq;
+	bool opened;
+
+	/* Memory buffers for SPI transactions */
+	u8 *tx_buf;
+	u8 *rx_buf;
+	u8 *cmd_buf;
+	u8 *cmd_reply_buf;
+
+	/* SPI FIFO parameters */
+	struct spi_fifo spi_fifo;
+	struct wigig_sensing_stm stm;
+	u32 last_read_length;
+	union user_rgf_spi_mbox_inb inb_cmd;
+
+	/* CIR buffer */
+	struct cir_data cir_data;
+	u8 *temp_buffer;
+	bool event_pending;
+	u32 dropped_bursts;
+};
+
+#endif /* __WIGIG_SENSING_H__ */
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 18be910..904386c 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2327,7 +2327,11 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
 		}
 		if (!ret)
 			return MMC_REQ_STARTED;
-		return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START;
+
+		if (ret == -EBUSY)
+			return MMC_REQ_BUSY;
+
+		return MMC_REQ_FAILED_TO_START;
 	default:
 		WARN_ON_ONCE(1);
 		return MMC_REQ_FAILED_TO_START;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index a99812a..c5ab22f 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -153,7 +153,9 @@ int mmc_retune(struct mmc_host *host)
 	else
 		return 0;
 
-	if (!host->need_retune || host->doing_retune || !host->card)
+	if (!host->need_retune || host->doing_retune || !host->card
+			|| mmc_card_hs400es(host->card)
+			|| (host->ios.clock <= MMC_HIGH_DDR_MAX_DTR))
 		return 0;
 
 	host->need_retune = 0;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 68f8dff..f8d6083 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -2316,12 +2316,13 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 	return err;
 }
 
-static int mmc_can_sleep(struct mmc_card *card)
+static int mmc_can_sleepawake(struct mmc_host *host)
 {
-	return (card && card->ext_csd.rev >= 3);
+	return host && (host->caps2 & MMC_CAP2_SLEEP_AWAKE) && host->card &&
+		(host->card->ext_csd.rev >= 3);
 }
 
-static int mmc_sleep(struct mmc_host *host)
+static int mmc_sleepawake(struct mmc_host *host, bool sleep)
 {
 	struct mmc_command cmd = {};
 	struct mmc_card *card = host->card;
@@ -2331,13 +2332,16 @@ static int mmc_sleep(struct mmc_host *host)
 	/* Re-tuning can't be done once the card is deselected */
 	mmc_retune_hold(host);
 
-	err = mmc_deselect_cards(host);
-	if (err)
-		goto out_release;
+	if (sleep) {
+		err = mmc_deselect_cards(host);
+		if (err)
+			goto out_release;
+	}
 
 	cmd.opcode = MMC_SLEEP_AWAKE;
 	cmd.arg = card->rca << 16;
-	cmd.arg |= 1 << 15;
+	if (sleep)
+		cmd.arg |= 1 << 15;
 
 	/*
 	 * If the max_busy_timeout of the host is specified, validate it against
@@ -2365,6 +2369,9 @@ static int mmc_sleep(struct mmc_host *host)
 	if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
 		mmc_delay(timeout_ms);
 
+	if (!sleep)
+		err = mmc_select_card(card);
+
 out_release:
 	mmc_retune_release(host);
 	return err;
@@ -2467,6 +2474,69 @@ static void mmc_detect(struct mmc_host *host)
 	}
 }
 
+static int mmc_cache_card_ext_csd(struct mmc_host *host)
+{
+	int err;
+	u8 *ext_csd;
+	struct mmc_card *card = host->card;
+
+	err = mmc_get_ext_csd(card, &ext_csd);
+	if (err || !ext_csd) {
+		pr_err("%s: %s: mmc_get_ext_csd failed (%d)\n",
+			mmc_hostname(host), __func__, err);
+		return err;
+	}
+
+	/* only cache read/write fields that the sw changes */
+	card->ext_csd.raw_ext_csd_cmdq = ext_csd[EXT_CSD_CMDQ_MODE_EN];
+	card->ext_csd.raw_ext_csd_cache_ctrl = ext_csd[EXT_CSD_CACHE_CTRL];
+	card->ext_csd.raw_ext_csd_bus_width = ext_csd[EXT_CSD_BUS_WIDTH];
+	card->ext_csd.raw_ext_csd_hs_timing = ext_csd[EXT_CSD_HS_TIMING];
+
+	kfree(ext_csd);
+
+	return 0;
+}
+
+static int mmc_test_awake_ext_csd(struct mmc_host *host)
+{
+	int err;
+	u8 *ext_csd;
+	struct mmc_card *card = host->card;
+
+	err = mmc_get_ext_csd(card, &ext_csd);
+	if (err) {
+		pr_err("%s: %s: mmc_get_ext_csd failed (%d)\n",
+			mmc_hostname(host), __func__, err);
+		return err;
+	}
+
+	/* only compare read/write fields that the sw changes */
+	pr_debug("%s: %s: type(cached:current) cmdq(%d:%d) cache_ctrl(%d:%d) bus_width (%d:%d) timing(%d:%d)\n",
+		mmc_hostname(host), __func__,
+		card->ext_csd.raw_ext_csd_cmdq,
+		ext_csd[EXT_CSD_CMDQ_MODE_EN],
+		card->ext_csd.raw_ext_csd_cache_ctrl,
+		ext_csd[EXT_CSD_CACHE_CTRL],
+		card->ext_csd.raw_ext_csd_bus_width,
+		ext_csd[EXT_CSD_BUS_WIDTH],
+		card->ext_csd.raw_ext_csd_hs_timing,
+		ext_csd[EXT_CSD_HS_TIMING]);
+
+	err = !((card->ext_csd.raw_ext_csd_cmdq ==
+			ext_csd[EXT_CSD_CMDQ_MODE_EN]) &&
+		(card->ext_csd.raw_ext_csd_cache_ctrl ==
+			ext_csd[EXT_CSD_CACHE_CTRL]) &&
+		(card->ext_csd.raw_ext_csd_bus_width ==
+			ext_csd[EXT_CSD_BUS_WIDTH]) &&
+		(card->ext_csd.raw_ext_csd_hs_timing ==
+			ext_csd[EXT_CSD_HS_TIMING]));
+
+	kfree(ext_csd);
+
+	return err;
+}
+
 static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
 {
 	int err = 0;
@@ -2500,10 +2570,13 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
 	if (err)
 		goto out;
 
-	if (mmc_can_sleep(host->card))
-		err = mmc_sleep(host);
-	else if (!mmc_host_is_spi(host))
+	if (mmc_can_sleepawake(host)) {
+		memcpy(&host->cached_ios, &host->ios, sizeof(host->cached_ios));
+		mmc_cache_card_ext_csd(host);
+		err = mmc_sleepawake(host, true);
+	} else if (!mmc_host_is_spi(host)) {
 		err = mmc_deselect_cards(host);
+	}
 
 	if (!err) {
 		mmc_power_off(host);
@@ -2517,6 +2590,93 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
 	return err;
 }
 
+static int mmc_partial_init(struct mmc_host *host)
+{
+	int err = 0;
+	struct mmc_card *card = host->card;
+
+	pr_debug("%s: %s: starting partial init\n",
+		mmc_hostname(host), __func__);
+
+	mmc_set_bus_width(host, host->cached_ios.bus_width);
+	mmc_set_timing(host, host->cached_ios.timing);
+	mmc_set_clock(host, host->cached_ios.clock);
+	mmc_set_bus_mode(host, host->cached_ios.bus_mode);
+
+	if (mmc_card_hs400(card)) {
+		if (card->ext_csd.strobe_support && host->ops->enhanced_strobe)
+			err = host->ops->enhanced_strobe(host);
+		else if (host->ops->execute_tuning)
+			err = host->ops->execute_tuning(host,
+				MMC_SEND_TUNING_BLOCK_HS200);
+	} else if (mmc_card_hs200(card) && host->ops->execute_tuning) {
+		err = host->ops->execute_tuning(host,
+			MMC_SEND_TUNING_BLOCK_HS200);
+		if (err)
+			pr_warn("%s: %s: tuning execution failed (%d)\n",
+				mmc_hostname(host), __func__, err);
+	}
+
+	/*
+	 * The ext_csd is read to make sure the card did not go through a
+	 * power failure during the sleep period.
+	 * A subset of the W/E_P and W/C_P registers is tested. If these
+	 * register values differ from the values that were cached during
+	 * suspend, we conclude that a power failure occurred and perform
+	 * the full initialization sequence.
+	 * In addition, the full init sequence transfers the ext_csd before
+	 * moving to CMDQ mode, which has a side effect of configuring SDHCI
+	 * registers that need to be set up before moving to CMDQ mode. The
+	 * same registers need to be configured for partial init.
+	 */
+	err = mmc_test_awake_ext_csd(host);
+	if (err) {
+		pr_debug("%s: %s: fail on ext_csd read (%d)\n",
+			mmc_hostname(host), __func__, err);
+		goto out;
+	}
+	pr_debug("%s: %s: reading and comparing ext_csd successful\n",
+		mmc_hostname(host), __func__);
+
+	/*
+	 * Enable Command Queue if supported. Note that Packed Commands cannot
+	 * be used with Command Queue.
+	 */
+	card->ext_csd.cmdq_en = false;
+	if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) {
+		err = mmc_cmdq_enable(card);
+		if (err) {
+			pr_warn("%s: Enabling CMDQ failed\n",
+				mmc_hostname(card->host));
+			card->ext_csd.cmdq_support = false;
+			card->ext_csd.cmdq_depth = 0;
+			goto out;
+		}
+	}
+	/*
+	 * In some cases (e.g. RPMB or mmc_test), the Command Queue must be
+	 * disabled for a time, so a flag is needed to indicate to re-enable the
+	 * Command Queue.
+	 */
+	card->reenable_cmdq = card->ext_csd.cmdq_en;
+
+	if (card->ext_csd.cmdq_en && !host->cqe_enabled) {
+		err = host->cqe_ops->cqe_enable(host, card);
+		if (err) {
+			pr_err("%s: Failed to enable CQE, error %d\n",
+				mmc_hostname(host), err);
+		} else {
+			host->cqe_enabled = true;
+			pr_debug("%s: Command Queue Engine enabled\n",
+				mmc_hostname(host));
+		}
+	}
+out:
+	pr_debug("%s: %s: done partial init (%d)\n",
+		mmc_hostname(host), __func__, err);
+	return err;
+}
+
 /*
  * Suspend callback
  */
@@ -2542,7 +2702,7 @@ static int mmc_suspend(struct mmc_host *host)
  */
 static int _mmc_resume(struct mmc_host *host)
 {
-	int err = 0;
+	int err = -EINVAL;
 	int retries = 3;
 
 	mmc_claim_host(host);
@@ -2555,16 +2715,28 @@ static int _mmc_resume(struct mmc_host *host)
 	mmc_log_string(host, "Enter\n");
 	mmc_power_up(host, host->card->ocr);
 	while (retries) {
-		err = mmc_init_card(host, host->card->ocr, host->card);
+		if (mmc_can_sleepawake(host)) {
+			err = mmc_sleepawake(host, false);
+			if (!err)
+				err = mmc_partial_init(host);
+			if (err)
+				pr_err("%s: %s: awake failed (%d), fallback to full init\n",
+						mmc_hostname(host), __func__,
+						err);
+		}
 		if (err) {
-			pr_err("%s: MMC card re-init failed rc = %d (retries = %d)\n",
-					mmc_hostname(host), err, retries);
-			retries--;
-			mmc_power_off(host);
-			usleep_range(5000, 5500);
-			mmc_power_up(host, host->card->ocr);
-			mmc_select_voltage(host, host->card->ocr);
-			continue;
+			err = mmc_init_card(host, host->card->ocr, host->card);
+			if (err) {
+				pr_err("%s: MMC card re-init failed rc = %d (retries = %d)\n",
+						mmc_hostname(host), err,
+						retries);
+				retries--;
+				mmc_power_off(host);
+				usleep_range(5000, 5500);
+				mmc_power_up(host, host->card->ocr);
+				mmc_select_voltage(host, host->card->ocr);
+				continue;
+			}
 		}
 		break;
 	}
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index c87b821..62d4ad0 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1502,6 +1502,12 @@ int mmc_attach_sd(struct mmc_host *host)
 			goto err;
 	}
 
+	/*
+	 * Some SD cards claim an out-of-spec VDD voltage range. Let's treat
+	 * these bits as invalid, especially bit 7.
+	 */
+	ocr &= ~0x7FFF;
+
 	rocr = mmc_select_voltage(host, ocr);
 
 	/*
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index b299a24..d206f2d 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -35,6 +35,7 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
 {
 	struct mmc_card *card = host->card;
 	int i, ret, count;
+	bool sdio_irq_pending = host->sdio_irq_pending;
 	unsigned char pending;
 	struct sdio_func *func;
 
@@ -42,13 +43,16 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
 	if (mmc_card_suspended(card))
 		return 0;
 
+	/* Clear the flag to indicate that we have processed the IRQ. */
+	host->sdio_irq_pending = false;
+
 	/*
 	 * Optimization, if there is only 1 function interrupt registered
 	 * and we know an IRQ was signaled then call irq handler directly.
 	 * Otherwise do the full probe.
 	 */
 	func = card->sdio_single_irq;
-	if (func && host->sdio_irq_pending) {
+	if (func && sdio_irq_pending) {
 		func->irq_handler(func);
 		return 1;
 	}
@@ -100,7 +104,6 @@ void sdio_run_irqs(struct mmc_host *host)
 {
 	mmc_claim_host(host);
 	if (host->sdio_irqs) {
-		host->sdio_irq_pending = true;
 		process_sdio_pending_irqs(host);
 		if (host->ops->ack_sdio_irq)
 			host->ops->ack_sdio_irq(host);
@@ -119,6 +122,7 @@ void sdio_irq_work(struct work_struct *work)
 
 void sdio_signal_irq(struct mmc_host *host)
 {
+	host->sdio_irq_pending = true;
 	queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
 }
 EXPORT_SYMBOL_GPL(sdio_signal_irq);
@@ -164,7 +168,6 @@ static int sdio_irq_thread(void *_host)
 		if (ret)
 			break;
 		ret = process_sdio_pending_irqs(host);
-		host->sdio_irq_pending = false;
 		mmc_release_host(host);
 
 		/*
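
The reshuffle above is a snapshot-then-clear pattern: sdio_signal_irq() (the producer) sets the pending flag, while process_sdio_pending_irqs() (the consumer) takes a local copy and clears the shared flag before handling, so a signal arriving mid-processing is not wiped afterwards. A hedged userspace sketch of the shape (the kernel serializes via the claimed host rather than C11 atomics):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool irq_pending;

static void signal_irq(void)			/* producer */
{
	atomic_store(&irq_pending, true);
}

static int process_pending_irqs(void)		/* consumer */
{
	/* Snapshot, then clear: a signal during handling stays pending. */
	bool pending = atomic_exchange(&irq_pending, false);

	if (pending)
		printf("fast path: single registered handler\n");
	return pending ? 1 : 0;
}

int main(void)
{
	signal_irq();
	return process_pending_irqs() == 1 ? 0 : 1;
}
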
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
index cc9a7a8..ae24827 100644
--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci.c
@@ -102,6 +102,10 @@ static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
 static void cqhci_dumpregs(struct cqhci_host *cq_host)
 {
 	struct mmc_host *mmc = cq_host->mmc;
+	int offset = 0;
+
+	if (cq_host->offset_changed)
+		offset = CQE_V5_VENDOR_CFG;
 
 	mmc_log_string(mmc,
 	"CQHCI_CTL=0x%08x CQHCI_IS=0x%08x CQHCI_ISTE=0x%08x CQHCI_ISGE=0x%08x CQHCI_TDBR=0x%08x CQHCI_TCN=0x%08x CQHCI_DQS=0x%08x CQHCI_DPT=0x%08x CQHCI_TERRI=0x%08x CQHCI_CRI=0x%08x CQHCI_CRA=0x%08x CQHCI_CRDCT=0x%08x\n",
@@ -147,6 +151,8 @@ static void cqhci_dumpregs(struct cqhci_host *cq_host)
 	CQHCI_DUMP("Resp idx:  0x%08x | Resp arg: 0x%08x\n",
 		   cqhci_readl(cq_host, CQHCI_CRI),
 		   cqhci_readl(cq_host, CQHCI_CRA));
+	CQHCI_DUMP("Vendor cfg 0x%08x\n",
+		   cqhci_readl(cq_host, CQHCI_VENDOR_CFG + offset));
 
 	if (cq_host->ops->dumpregs)
 		cq_host->ops->dumpregs(mmc);
@@ -273,12 +279,16 @@ static void __cqhci_enable(struct cqhci_host *cq_host)
 	if (cqcap & CQHCI_CAP_CS) {
 		/*
 		 * In case host controller supports cryptographic operations
-		 * then, it uses 128bit task descriptor. Upper 64 bits of task
-		 * descriptor would be used to pass crypto specific informaton.
+		 * then, enable crypto support.
 		 */
-		cq_host->caps |= CQHCI_CAP_CRYPTO_SUPPORT |
-				CQHCI_TASK_DESC_SZ_128;
+		cq_host->caps |= CQHCI_CAP_CRYPTO_SUPPORT;
 		cqcfg |= CQHCI_ICE_ENABLE;
+		/*
+		 * For SDHC v5.0 onwards, ICE 3.0 specific registers are added
+		 * in the CQ register space, shifting a few CQ registers. Set
+		 * the offset_changed flag to use the updated addresses.
+		 */
+		cq_host->offset_changed = true;
 	}
 
 	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
@@ -293,6 +303,9 @@ static void __cqhci_enable(struct cqhci_host *cq_host)
 
 	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);
 
+	/* send QSR at a shorter interval than the default */
+	cqhci_writel(cq_host, SEND_QSR_INTERVAL, CQHCI_SSC1);
+
 	cqhci_set_irqs(cq_host, 0);
 
 	mmc->cqe_on = true;
@@ -375,8 +388,11 @@ static void cqhci_off(struct mmc_host *mmc)
 	bool timed_out;
 	u32 reg;
 
-	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
+	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt) {
+		pr_debug("%s: %s: CQE is already %s\n", mmc_hostname(mmc),
+				__func__, mmc->cqe_on ? "on" : "off");
 		return;
+	}
 
 	if (cq_host->ops->disable)
 		cq_host->ops->disable(mmc, false);
@@ -391,7 +407,7 @@ static void cqhci_off(struct mmc_host *mmc)
 			break;
 	}
 
-	if (timed_out)
+	if (timed_out && !(reg & CQHCI_HALT))
 		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
 	else
 		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
@@ -536,7 +552,7 @@ static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
 		resp_type = 0x0;
 		timing = 0x1;
 	} else {
-		if (mrq->cmd->flags & MMC_RSP_R1B) {
+		if (mrq->cmd->flags & MMC_RSP_BUSY) {
 			resp_type = 0x3;
 			timing = 0x0;
 		} else {
@@ -660,6 +676,7 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 		if (cq_host->ops->crypto_cfg) {
 			err = cq_host->ops->crypto_cfg(mmc, mrq, tag, &ice_ctx);
 			if (err) {
+				mmc->err_stats[MMC_ERR_ICE_CFG]++;
 				pr_err("%s: failed to configure crypto: err %d tag %d\n",
 						mmc_hostname(mmc), err, tag);
 				goto out;
@@ -674,7 +691,7 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 		if (err) {
 			pr_err("%s: cqhci: failed to setup tx desc: %d\n",
 			       mmc_hostname(mmc), err);
-			return err;
+			goto end_crypto;
 		}
 		/* PM QoS */
 		sdhci_msm_pm_qos_irq_vote(host);
@@ -695,8 +712,18 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
 	cq_host->qcnt += 1;
 
+	/* Ensure the task descriptor list is flushed before ringing doorbell */
+	wmb();
+	if (cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)) {
+		cqhci_dumpregs(cq_host);
+		BUG();
+	}
 	mmc_log_string(mmc, "tag: %d\n", tag);
+	/* Make sure descriptors are ready before ringing the doorbell */
+	wmb();
 	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
+	/* Commit the doorbell write immediately */
+	wmb();
 	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
 		pr_debug("%s: cqhci: doorbell not set for tag %d\n",
 			 mmc_hostname(mmc), tag);
@@ -705,6 +732,20 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
 	if (err)
 		cqhci_post_req(mmc, mrq);
+
+	goto out;
+
+end_crypto:
+	if (cq_host->ops->crypto_cfg_end && mrq->data) {
+		err = cq_host->ops->crypto_cfg_end(mmc, mrq);
+		if (err)
+			pr_err("%s: failed to end ice config: err %d tag %d\n",
+					mmc_hostname(mmc), err, tag);
+	}
+	if (!(cq_host->caps & CQHCI_CAP_CRYPTO_SUPPORT) &&
+			cq_host->ops->crypto_cfg_reset && mrq->data)
+		cq_host->ops->crypto_cfg_reset(mmc, tag);
+
 out:
 	return err;
 }
@@ -806,8 +847,10 @@ static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
 	struct cqhci_slot *slot = &cq_host->slot[tag];
 	struct mmc_request *mrq = slot->mrq;
 	struct mmc_data *data;
-	int err = 0;
+	int err = 0, offset = 0;
 
+	if (cq_host->offset_changed)
+		offset = CQE_V5_VENDOR_CFG;
 	if (!mrq) {
 		WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
 			  mmc_hostname(mmc), tag);
@@ -837,6 +880,11 @@ static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
 			data->bytes_xfered = 0;
 		else
 			data->bytes_xfered = data->blksz * data->blocks;
+	} else {
+		cqhci_writel(cq_host, cqhci_readl(cq_host,
+				CQHCI_VENDOR_CFG + offset) |
+				CMDQ_SEND_STATUS_TRIGGER,
+				CQHCI_VENDOR_CFG + offset);
 	}
 
 	if (!(cq_host->caps & CQHCI_CAP_CRYPTO_SUPPORT) &&
@@ -990,8 +1038,10 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
 	bool ret;
 	u32 ctl;
 
-	if (cqhci_halted(cq_host))
+	if (cqhci_halted(cq_host)) {
+		pr_debug("%s: CQE is already halted.\n", mmc_hostname(mmc));
 		return true;
+	}
 
 	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);
 
@@ -1007,7 +1057,7 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
 	ret = cqhci_halted(cq_host);
 
 	if (!ret)
-		pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
+		pr_err("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
 
 	mmc_log_string(mmc, "halt done with ret %d\n", ret);
 	return ret;
@@ -1019,7 +1069,7 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
  * layers will need to send a STOP command), so we set the timeout based on a
  * generous command timeout.
  */
-#define CQHCI_START_HALT_TIMEOUT	5
+#define CQHCI_START_HALT_TIMEOUT	5000
 
 static void cqhci_recovery_start(struct mmc_host *mmc)
 {
@@ -1208,6 +1258,7 @@ int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
 	      bool dma64)
 {
 	int err;
+	u32 cqcap = 0;
 
 	cq_host->dma64 = dma64;
 	cq_host->mmc = mmc;
@@ -1222,6 +1273,16 @@ int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
 	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
 		mmc->cqe_qdepth -= 1;
 
+	cqcap = cqhci_readl(cq_host, CQHCI_CAP);
+	if (cqcap & CQHCI_CAP_CS) {
+		/*
+		 * If the host controller supports cryptographic operations,
+		 * it uses a 128-bit task descriptor; the upper 64 bits carry
+		 * crypto-specific information.
+		 */
+		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+	}
+
 	cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
 				     sizeof(*cq_host->slot), GFP_KERNEL);
 	if (!cq_host->slot) {
diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h
index e87e9bc..6b43517 100644
--- a/drivers/mmc/host/cqhci.h
+++ b/drivers/mmc/host/cqhci.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -94,6 +95,12 @@
 /* send status config 2 */
 #define CQHCI_SSC2			0x44
 
+/*
+ * A value of n means the CQE sends CMD13 during the transfer of data
+ * block BLOCK_CNT-n.
+ */
+#define SEND_QSR_INTERVAL 0x70001
+
 /* response for dcmd */
 #define CQHCI_CRDCT			0x48
 
@@ -116,6 +123,14 @@
 /* command response argument */
 #define CQHCI_CRA			0x5C
 
+/*
+ * CQ vendor-specific register addresses. For SDHC v5.0 onwards,
+ * CQE_V5_VENDOR_CFG must be added to the base offset.
+ */
+#define CQE_V5_VENDOR_CFG	0x900
+#define CQHCI_VENDOR_CFG   0x100
+#define CMDQ_SEND_STATUS_TRIGGER (1 << 31)
+
 #define CQHCI_INT_ALL			0xF
 #define CQHCI_IC_DEFAULT_ICCTH		31
 #define CQHCI_IC_DEFAULT_ICTOVAL	1
@@ -183,6 +198,7 @@ struct cqhci_host {
 	bool activated;
 	bool waiting_for_idle;
 	bool recovery_halt;
+	bool offset_changed;
 
 	size_t desc_size;
 	size_t data_size;
@@ -224,7 +240,7 @@ struct cqhci_host_ops {
 
 static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg)
 {
-	if (unlikely(host->ops->write_l))
+	if (unlikely(host->ops && host->ops->write_l))
 		host->ops->write_l(host, val, reg);
 	else
 		writel_relaxed(val, host->mmio + reg);
@@ -232,7 +248,7 @@ static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg)
 
 static inline u32 cqhci_readl(struct cqhci_host *host, int reg)
 {
-	if (unlikely(host->ops->read_l))
+	if (unlikely(host->ops && host->ops->read_l))
 		return host->ops->read_l(host, reg);
 	else
 		return readl_relaxed(host->mmio + reg);
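
To make the offset arithmetic concrete: on SDHC v5.0+ parts the vendor config register sits at CQHCI_VENDOR_CFG + CQE_V5_VENDOR_CFG = 0x100 + 0x900 = 0xA00 instead of 0x100, and the status trigger is a read-modify-write of bit 31. A minimal sketch with the MMIO space modeled as a plain array:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CQHCI_VENDOR_CFG	 0x100
#define CQE_V5_VENDOR_CFG	 0x900
#define CMDQ_SEND_STATUS_TRIGGER (1U << 31)

static uint32_t mmio[0x1000 / 4];	/* stand-in for the register file */

static uint32_t rd(int reg) { return mmio[reg / 4]; }
static void wr(uint32_t val, int reg) { mmio[reg / 4] = val; }

static void send_status_trigger(bool offset_changed)
{
	int reg = CQHCI_VENDOR_CFG + (offset_changed ? CQE_V5_VENDOR_CFG : 0);

	wr(rd(reg) | CMDQ_SEND_STATUS_TRIGGER, reg);	/* read-modify-write */
	printf("trigger written at 0x%03X\n", reg);	/* 0x100 or 0xA00 */
}

int main(void)
{
	send_status_trigger(true);
	return 0;
}
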
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 942da07..22c454c 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -3486,6 +3486,10 @@ int dw_mci_runtime_resume(struct device *dev)
 	/* Force setup bus to guarantee available clock output */
 	dw_mci_setup_bus(host->slot, true);
 
+	/* Re-enable SDIO interrupts. */
+	if (sdio_irq_claimed(host->slot->mmc))
+		__dw_mci_enable_sdio_irq(host->slot, 1);
+
 	/* Now that slots are all setup, we can enable card detect */
 	dw_mci_enable_cd(host);
 
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 45baf5d..61f0fad 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -636,6 +636,13 @@ int renesas_sdhi_probe(struct platform_device *pdev,
 		host->ops.card_busy = renesas_sdhi_card_busy;
 		host->ops.start_signal_voltage_switch =
 			renesas_sdhi_start_signal_voltage_switch;
+
+		/* SDR and HS200/400 registers require HW reset */
+		if (of_data && of_data->scc_offset) {
+			priv->scc_ctl = host->ctl + of_data->scc_offset;
+			host->mmc->caps |= MMC_CAP_HW_RESET;
+			host->hw_reset = renesas_sdhi_hw_reset;
+		}
 	}
 
 	/* Originally registers were 16 bit apart, could be 32 or 64 nowadays */
@@ -693,8 +700,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
 		const struct renesas_sdhi_scc *taps = of_data->taps;
 		bool hit = false;
 
-		host->mmc->caps |= MMC_CAP_HW_RESET;
-
 		for (i = 0; i < of_data->taps_num; i++) {
 			if (taps[i].clk_rate == 0 ||
 			    taps[i].clk_rate == host->mmc->f_max) {
@@ -707,12 +712,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
 		if (!hit)
 			dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
 
-		priv->scc_ctl = host->ctl + of_data->scc_offset;
 		host->init_tuning = renesas_sdhi_init_tuning;
 		host->prepare_tuning = renesas_sdhi_prepare_tuning;
 		host->select_tuning = renesas_sdhi_select_tuning;
 		host->check_scc_error = renesas_sdhi_check_scc_error;
-		host->hw_reset = renesas_sdhi_hw_reset;
 		host->prepare_hs400_tuning =
 			renesas_sdhi_prepare_hs400_tuning;
 		host->hs400_downgrade = renesas_sdhi_disable_scc;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index a2abb5f..28ed3a9 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2278,6 +2278,8 @@ void sdhci_msm_cqe_enable(struct mmc_host *mmc)
 		host->desc_sz = 12;
 
 	sdhci_cqe_enable(mmc);
+	/* Set maximum timeout as per QTI spec */
+	sdhci_writeb(host, 0xF, SDHCI_TIMEOUT_CONTROL);
 }
 
 void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
@@ -2398,10 +2400,9 @@ static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
 
 	msm_host->mmc->caps2 |= MMC_CAP2_CQE;
 	cq_host->ops = &sdhci_msm_cqhci_ops;
+	msm_host->cq_host = cq_host;
 
 	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
-	if (dma64)
-		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
 
 	ret = cqhci_init(cq_host, host->mmc, dma64);
 	if (ret) {
@@ -3948,6 +3949,46 @@ static void sdhci_msm_cache_debug_data(struct sdhci_host *host)
 		sizeof(struct sdhci_host));
 }
 
+#define MAX_TEST_BUS 60
+#define DRV_NAME "cqhci-host"
+static void sdhci_msm_cqe_dump_debug_ram(struct sdhci_host *host)
+{
+	int i;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	struct cqhci_host *cq_host;
+	u32 version;
+	u16 minor;
+	int offset;
+
+	if (!msm_host->cq_host)
+		return;
+
+	cq_host = msm_host->cq_host;
+
+	version = sdhci_msm_readl_relaxed(host,
+		msm_host_offset->CORE_MCI_VERSION);
+	minor = version & CORE_VERSION_TARGET_MASK;
+	/* register offsets changed starting from 4.2.0 */
+	offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
+
+	if (cq_host->offset_changed)
+		offset += CQE_V5_VENDOR_CFG;
+	pr_err("---- Debug RAM dump ----\n");
+	pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
+	       cqhci_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
+	       cqhci_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
+
+	for (i = 0; i < 16; i++) {
+		pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
+		       cqhci_readl(cq_host,
+				   CQ_CMD_DBG_RAM + offset + (4 * i)));
+	}
+	pr_err("-------------------------\n");
+}
+
 void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
 {
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -3962,6 +4003,8 @@ void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
 
 	sdhci_msm_cache_debug_data(host);
 	pr_info("----------- VENDOR REGISTER DUMP -----------\n");
+	if (msm_host->cq_host)
+		sdhci_msm_cqe_dump_debug_ram(host);
 
 	mmc_log_string(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
 		sdhci_msm_readl_relaxed(host,
@@ -5045,8 +5088,14 @@ static int sdhci_msm_probe(struct platform_device *pdev)
 	msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
 	if (!IS_ERR(msm_host->pclk)) {
 		ret = clk_prepare_enable(msm_host->pclk);
-		if (ret)
+		if (ret) {
+			dev_err(&pdev->dev, "Iface clk not enabled (%d)\n"
+					, ret);
 			goto bus_clk_disable;
+		}
+	} else {
+		ret = PTR_ERR(msm_host->pclk);
+		dev_err(&pdev->dev, "Iface clk get failed (%d)\n", ret);
 	}
 	atomic_set(&msm_host->controller_clock, 1);
 
@@ -5068,6 +5117,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
 	msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
 	if (IS_ERR(msm_host->clk)) {
 		ret = PTR_ERR(msm_host->clk);
+		dev_err(&pdev->dev, "Core clk get failed (%d)\n", ret);
 		goto bus_aggr_clk_disable;
 	}
 
@@ -5078,9 +5128,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
 		goto bus_aggr_clk_disable;
 	}
 	ret = clk_prepare_enable(msm_host->clk);
-	if (ret)
+	if (ret) {
+		dev_err(&pdev->dev, "Core clk not enabled (%d)\n", ret);
 		goto bus_aggr_clk_disable;
-
+	}
 	msm_host->clk_rate = sdhci_msm_get_min_clock(host);
 	atomic_set(&msm_host->clks_on, 1);
 
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index 58f5632a..051dfa8 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -225,6 +225,7 @@ struct sdhci_msm_host {
 	atomic_t clks_on; /* Set if clocks are enabled */
 	struct sdhci_msm_pltfm_data *pdata;
 	struct mmc_host  *mmc;
+	struct cqhci_host *cq_host;
 	struct sdhci_msm_debug_data cached_data;
 	struct sdhci_pltfm_data sdhci_msm_pdata;
 	u32 curr_pwr_state;
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 7fdac27..9c77bfe 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -788,7 +788,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
 
 	ret = mmc_of_parse(host->mmc);
 	if (ret) {
-		dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
 		goto unreg_clk;
 	}
 
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 682c573..e284102 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -365,6 +365,9 @@ static int sdhci_at91_probe(struct platform_device *pdev)
 	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
 	pm_runtime_use_autosuspend(&pdev->dev);
 
+	/* HS200 is broken at this moment */
+	host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
+
 	ret = sdhci_add_host(host);
 	if (ret)
 		goto pm_runtime_disable;
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index e5c598ae..6627523 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -480,7 +480,12 @@ static int esdhc_of_enable_dma(struct sdhci_host *host)
 		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
 
 	value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
-	value |= ESDHC_DMA_SNOOP;
+
+	if (of_dma_is_coherent(dev->of_node))
+		value |= ESDHC_DMA_SNOOP;
+	else
+		value &= ~ESDHC_DMA_SNOOP;
+
 	sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
 	return 0;
 }
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index c4115ba..7179439 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1577,6 +1577,8 @@ static const struct pci_device_id pci_ids[] = {
 	SDHCI_PCI_DEVICE(INTEL, CNPH_SD,   intel_byt_sd),
 	SDHCI_PCI_DEVICE(INTEL, ICP_EMMC,  intel_glk_emmc),
 	SDHCI_PCI_DEVICE(INTEL, ICP_SD,    intel_byt_sd),
+	SDHCI_PCI_DEVICE(INTEL, CML_EMMC,  intel_glk_emmc),
+	SDHCI_PCI_DEVICE(INTEL, CML_SD,    intel_byt_sd),
 	SDHCI_PCI_DEVICE(O2, 8120,     o2),
 	SDHCI_PCI_DEVICE(O2, 8220,     o2),
 	SDHCI_PCI_DEVICE(O2, 8221,     o2),
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 2ef0bdc..6f04a62 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -50,6 +50,8 @@
 #define PCI_DEVICE_ID_INTEL_CNPH_SD	0xa375
 #define PCI_DEVICE_ID_INTEL_ICP_EMMC	0x34c4
 #define PCI_DEVICE_ID_INTEL_ICP_SD	0x34f8
+#define PCI_DEVICE_ID_INTEL_CML_EMMC	0x02c4
+#define PCI_DEVICE_ID_INTEL_CML_SD	0x02f5
 
 #define PCI_DEVICE_ID_SYSKONNECT_8000	0x8000
 #define PCI_DEVICE_ID_VIA_95D0		0x95d0
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index db3b237..7fd1626 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2089,7 +2089,9 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
 		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
 	else if (timing == MMC_TIMING_UHS_SDR12)
 		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
-	else if (timing == MMC_TIMING_UHS_SDR25)
+	else if (timing == MMC_TIMING_SD_HS ||
+		 timing == MMC_TIMING_MMC_HS ||
+		 timing == MMC_TIMING_UHS_SDR25)
 		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
 	else if (timing == MMC_TIMING_UHS_SDR50)
 		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
@@ -3292,6 +3294,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
 static void sdhci_adma_show_error(struct sdhci_host *host)
 {
 	void *desc = host->adma_table;
+	dma_addr_t dma = host->adma_addr;
 
 	sdhci_dumpregs(host);
 
@@ -3299,18 +3302,21 @@ static void sdhci_adma_show_error(struct sdhci_host *host)
 		struct sdhci_adma2_64_desc *dma_desc = desc;
 
 		if (host->flags & SDHCI_USE_64_BIT_DMA)
-			DBG("%pK: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
-			    desc, le32_to_cpu(dma_desc->addr_hi),
+			SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
+			    (unsigned long long)dma,
+			    le32_to_cpu(dma_desc->addr_hi),
 			    le32_to_cpu(dma_desc->addr_lo),
 			    le16_to_cpu(dma_desc->len),
 			    le16_to_cpu(dma_desc->cmd));
 		else
-			DBG("%pK: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
-			    desc, le32_to_cpu(dma_desc->addr_lo),
+			SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
+			    (unsigned long long)dma,
+			    le32_to_cpu(dma_desc->addr_lo),
 			    le16_to_cpu(dma_desc->len),
 			    le16_to_cpu(dma_desc->cmd));
 
 		desc += host->desc_sz;
+		dma += host->desc_sz;
 
 		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
 			break;
@@ -3407,7 +3413,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
 		host->mmc->err_stats[MMC_ERR_DAT_CRC]++;
 	}
 	else if (intmask & SDHCI_INT_ADMA_ERROR) {
-		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
+		pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
+		       intmask);
 		sdhci_adma_show_error(host);
 		host->mmc->err_stats[MMC_ERR_ADMA]++;
 		host->data->error = -EIO;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 72428b6..ba44ea6 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -1627,29 +1627,35 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 			continue;
 		}
 
-		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
+		/*
+		 * Test "time_after" together with "!chip_good" so that a
+		 * scheduling delay between polls is not mistaken for a failure.
+		 */
+		if (time_after(jiffies, timeo) && !chip_good(map, adr, datum)) {
 			xip_enable(map, chip, adr);
 			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
 			xip_disable(map, chip, adr);
+			ret = -EIO;
 			break;
 		}
 
-		if (chip_ready(map, adr))
+		if (chip_good(map, adr, datum))
 			break;
 
 		/* Latency issues. Drop the lock, wait a while and retry */
 		UDELAY(map, chip, adr, 1);
 	}
+
 	/* Did we succeed? */
-	if (!chip_good(map, adr, datum)) {
+	if (ret) {
 		/* reset on all failures. */
 		map_write(map, CMD(0xF0), chip->start);
 		/* FIXME - should have reset delay before continuing */
 
-		if (++retry_cnt <= MAX_RETRIES)
+		if (++retry_cnt <= MAX_RETRIES) {
+			ret = 0;
 			goto retry;
-
-		ret = -EIO;
+		}
 	}
 	xip_enable(map, chip, adr);
  op_done:
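
The reordered checks above follow a standard polling idiom: never declare a timeout from the clock alone, re-test the completion condition in the same breath, so a long scheduling delay between polls cannot turn a completed write into -EIO. A generic userspace sketch of the pattern:

#include <stdbool.h>
#include <time.h>

static bool device_done(void)
{
	return true;	/* pretend the write completed while we slept */
}

static int poll_until_done(int timeout_s)
{
	time_t deadline = time(NULL) + timeout_s;

	for (;;) {
		/* A timeout only counts if the work is *still* not done. */
		if (time(NULL) > deadline && !device_done())
			return -1;
		if (device_done())
			return 0;
		/* otherwise back off briefly and poll again */
	}
}

int main(void)
{
	return poll_until_done(1) ? 1 : 0;
}
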
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index dce5b7e..ab5a877 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -863,19 +863,21 @@ static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
 	return mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
 }
 
-static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
+static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start,
+				    u32 sectors)
 {
 	struct nand_chip *chip = mtd_to_nand(mtd);
 	struct mtk_nfc *nfc = nand_get_controller_data(chip);
 	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
 	struct mtk_ecc_stats stats;
+	u32 reg_size = mtk_nand->fdm.reg_size;
 	int rc, i;
 
 	rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
 	if (rc) {
 		memset(buf, 0xff, sectors * chip->ecc.size);
 		for (i = 0; i < sectors; i++)
-			memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
+			memset(oob_ptr(chip, start + i), 0xff, reg_size);
 		return 0;
 	}
 
@@ -895,7 +897,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
 	u32 spare = mtk_nand->spare_per_sector;
 	u32 column, sectors, start, end, reg;
 	dma_addr_t addr;
-	int bitflips;
+	int bitflips = 0;
 	size_t len;
 	u8 *buf;
 	int rc;
@@ -962,14 +964,11 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
 	if (rc < 0) {
 		dev_err(nfc->dev, "subpage done timeout\n");
 		bitflips = -EIO;
-	} else {
-		bitflips = 0;
-		if (!raw) {
-			rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
-			bitflips = rc < 0 ? -ETIMEDOUT :
-				mtk_nfc_update_ecc_stats(mtd, buf, sectors);
-			mtk_nfc_read_fdm(chip, start, sectors);
-		}
+	} else if (!raw) {
+		rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
+		bitflips = rc < 0 ? -ETIMEDOUT :
+			mtk_nfc_update_ecc_stats(mtd, buf, start, sectors);
+		mtk_nfc_read_fdm(chip, start, sectors);
 	}
 
 	dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 8459115..553776c 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -1063,31 +1063,34 @@ EXPORT_SYMBOL(arcnet_interrupt);
 static void arcnet_rx(struct net_device *dev, int bufnum)
 {
 	struct arcnet_local *lp = netdev_priv(dev);
-	struct archdr pkt;
+	union {
+		struct archdr pkt;
+		char buf[512];
+	} rxdata;
 	struct arc_rfc1201 *soft;
 	int length, ofs;
 
-	soft = &pkt.soft.rfc1201;
+	soft = &rxdata.pkt.soft.rfc1201;
 
-	lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
-	if (pkt.hard.offset[0]) {
-		ofs = pkt.hard.offset[0];
+	lp->hw.copy_from_card(dev, bufnum, 0, &rxdata.pkt, ARC_HDR_SIZE);
+	if (rxdata.pkt.hard.offset[0]) {
+		ofs = rxdata.pkt.hard.offset[0];
 		length = 256 - ofs;
 	} else {
-		ofs = pkt.hard.offset[1];
+		ofs = rxdata.pkt.hard.offset[1];
 		length = 512 - ofs;
 	}
 
 	/* get the full header, if possible */
-	if (sizeof(pkt.soft) <= length) {
-		lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(pkt.soft));
+	if (sizeof(rxdata.pkt.soft) <= length) {
+		lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(rxdata.pkt.soft));
 	} else {
-		memset(&pkt.soft, 0, sizeof(pkt.soft));
+		memset(&rxdata.pkt.soft, 0, sizeof(rxdata.pkt.soft));
 		lp->hw.copy_from_card(dev, bufnum, ofs, soft, length);
 	}
 
 	arc_printk(D_DURING, dev, "Buffer #%d: received packet from %02Xh to %02Xh (%d+4 bytes)\n",
-		   bufnum, pkt.hard.source, pkt.hard.dest, length);
+		   bufnum, rxdata.pkt.hard.source, rxdata.pkt.hard.dest, length);
 
 	dev->stats.rx_packets++;
 	dev->stats.rx_bytes += length + ARC_HDR_SIZE;
@@ -1096,13 +1099,13 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
 	if (arc_proto_map[soft->proto]->is_ip) {
 		if (BUGLVL(D_PROTO)) {
 			struct ArcProto
-			*oldp = arc_proto_map[lp->default_proto[pkt.hard.source]],
+			*oldp = arc_proto_map[lp->default_proto[rxdata.pkt.hard.source]],
 			*newp = arc_proto_map[soft->proto];
 
 			if (oldp != newp) {
 				arc_printk(D_PROTO, dev,
 					   "got protocol %02Xh; encap for host %02Xh is now '%c' (was '%c')\n",
-					   soft->proto, pkt.hard.source,
+					   soft->proto, rxdata.pkt.hard.source,
 					   newp->suffix, oldp->suffix);
 			}
 		}
@@ -1111,10 +1114,10 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
 		lp->default_proto[0] = soft->proto;
 
 		/* in striking contrast, the following isn't a hack. */
-		lp->default_proto[pkt.hard.source] = soft->proto;
+		lp->default_proto[rxdata.pkt.hard.source] = soft->proto;
 	}
 	/* call the protocol-specific receiver. */
-	arc_proto_map[soft->proto]->rx(dev, bufnum, &pkt, length);
+	arc_proto_map[soft->proto]->rx(dev, bufnum, &rxdata.pkt, length);
 }
 
 static void null_rx(struct net_device *dev, int bufnum,
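
The union introduced above is a stack-safety fix: the soft-header copy can be up to 512 - ofs bytes, more than sizeof(struct archdr), so overlaying the header on a 512-byte buffer guarantees copy_from_card() can never write past the local variable. A simplified sketch of the idea (struct layout abridged):

#include <stdio.h>
#include <string.h>

struct hdr {				/* abridged stand-in for archdr */
	unsigned char source, dest, offset[2];
};

int main(void)
{
	union {
		struct hdr pkt;
		char buf[512];		/* reserves room for the largest copy */
	} rxdata;

	/* A copy of up to 512 bytes now stays inside rxdata. */
	memset(rxdata.buf, 0, sizeof(rxdata.buf));
	rxdata.pkt.source = 0x2A;
	printf("source %02X, capacity %zu bytes\n",
	       rxdata.pkt.source, sizeof(rxdata.buf));
	return 0;
}
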
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index be0b785..0d2392c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1102,6 +1102,8 @@ static void bond_compute_features(struct bonding *bond)
 done:
 	bond_dev->vlan_features = vlan_features;
 	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+				    NETIF_F_HW_VLAN_CTAG_TX |
+				    NETIF_F_HW_VLAN_STAG_TX |
 				    NETIF_F_GSO_UDP_L4;
 	bond_dev->gso_max_segs = gso_max_segs;
 	netif_set_gso_max_size(bond_dev, gso_max_size);
@@ -2188,6 +2190,15 @@ static void bond_miimon_commit(struct bonding *bond)
 	bond_for_each_slave(bond, slave, iter) {
 		switch (slave->new_link) {
 		case BOND_LINK_NOCHANGE:
+			/* For 802.3ad mode, check current slave speed and
+			 * duplex again in case its port was disabled after
+			 * invalid speed/duplex reporting but recovered before
+			 * link monitoring could make a decision on the actual
+			 * link status
+			 */
+			if (BOND_MODE(bond) == BOND_MODE_8023AD &&
+			    slave->link == BOND_LINK_UP)
+				bond_3ad_adapter_speed_duplex_changed(slave);
 			continue;
 
 		case BOND_LINK_UP:
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c05e4d5..bd127ce 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -1260,6 +1260,8 @@ int register_candev(struct net_device *dev)
 		return -EINVAL;
 
 	dev->rtnl_link_ops = &can_link_ops;
+	netif_carrier_off(dev);
+
 	return register_netdev(dev);
 }
 EXPORT_SYMBOL_GPL(register_candev);
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index b8c39ed..179bfcd 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -487,7 +487,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
 		if (!netdev)
 			continue;
 
-		strncpy(name, netdev->name, IFNAMSIZ);
+		strlcpy(name, netdev->name, IFNAMSIZ);
 
 		unregister_sja1000dev(netdev);
 
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index da64e71..de8d9dc 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -626,7 +626,7 @@ static int mcp251x_setup(struct net_device *net, struct spi_device *spi)
 static int mcp251x_hw_reset(struct spi_device *spi)
 {
 	struct mcp251x_priv *priv = spi_get_drvdata(spi);
-	u8 reg;
+	unsigned long timeout;
 	int ret;
 
 	/* Wait for oscillator startup timer after power up */
@@ -640,10 +640,19 @@ static int mcp251x_hw_reset(struct spi_device *spi)
 	/* Wait for oscillator startup timer after reset */
 	mdelay(MCP251X_OST_DELAY_MS);
 
-	reg = mcp251x_read_reg(spi, CANSTAT);
-	if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
-		return -ENODEV;
+	/* Wait for reset to finish */
+	timeout = jiffies + HZ;
+	while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
+	       CANCTRL_REQOP_CONF) {
+		usleep_range(MCP251X_OST_DELAY_MS * 1000,
+			     MCP251X_OST_DELAY_MS * 1000 * 2);
 
+		if (time_after(jiffies, timeout)) {
+			dev_err(&spi->dev,
+				"MCP251x didn't enter in conf mode after reset\n");
+			return -EBUSY;
+		}
+	}
 	return 0;
 }
 
@@ -678,17 +687,6 @@ static int mcp251x_power_enable(struct regulator *reg, int enable)
 		return regulator_disable(reg);
 }
 
-static void mcp251x_open_clean(struct net_device *net)
-{
-	struct mcp251x_priv *priv = netdev_priv(net);
-	struct spi_device *spi = priv->spi;
-
-	free_irq(spi->irq, priv);
-	mcp251x_hw_sleep(spi);
-	mcp251x_power_enable(priv->transceiver, 0);
-	close_candev(net);
-}
-
 static int mcp251x_stop(struct net_device *net)
 {
 	struct mcp251x_priv *priv = netdev_priv(net);
@@ -954,37 +952,43 @@ static int mcp251x_open(struct net_device *net)
 				   flags | IRQF_ONESHOT, DEVICE_NAME, priv);
 	if (ret) {
 		dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
-		mcp251x_power_enable(priv->transceiver, 0);
-		close_candev(net);
-		goto open_unlock;
+		goto out_close;
 	}
 
 	priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
 				   0);
+	if (!priv->wq) {
+		ret = -ENOMEM;
+		goto out_clean;
+	}
 	INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
 	INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
 
 	ret = mcp251x_hw_reset(spi);
-	if (ret) {
-		mcp251x_open_clean(net);
-		goto open_unlock;
-	}
+	if (ret)
+		goto out_free_wq;
 	ret = mcp251x_setup(net, spi);
-	if (ret) {
-		mcp251x_open_clean(net);
-		goto open_unlock;
-	}
+	if (ret)
+		goto out_free_wq;
 	ret = mcp251x_set_normal_mode(spi);
-	if (ret) {
-		mcp251x_open_clean(net);
-		goto open_unlock;
-	}
+	if (ret)
+		goto out_free_wq;
 
 	can_led_event(net, CAN_LED_EVENT_OPEN);
 
 	netif_wake_queue(net);
+	mutex_unlock(&priv->mcp_lock);
 
-open_unlock:
+	return 0;
+
+out_free_wq:
+	destroy_workqueue(priv->wq);
+out_clean:
+	free_irq(spi->irq, priv);
+	mcp251x_hw_sleep(spi);
+out_close:
+	mcp251x_power_enable(priv->transceiver, 0);
+	close_candev(net);
 	mutex_unlock(&priv->mcp_lock);
 	return ret;
 }
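
The rewritten open path above swaps the one-shot mcp251x_open_clean() helper for the usual kernel unwind ladder: each acquired resource gets a label, and a failure jumps to the label that releases everything taken so far, in reverse order. A skeletal sketch of the pattern (resource names are placeholders):

#include <stdio.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int open_device(void)
{
	int ret;

	ret = acquire("irq");
	if (ret)
		goto out_close;
	ret = acquire("workqueue");
	if (ret)
		goto out_free_irq;
	ret = acquire("hw");
	if (ret)
		goto out_free_wq;
	return 0;

out_free_wq:
	release("workqueue");
out_free_irq:
	release("irq");
out_close:
	release("device");
	return ret;
}

int main(void) { return open_device(); }
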
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 740ef47..43b0fa2b 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -863,7 +863,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
 
 		dev_prev_siblings = dev->prev_siblings;
 		dev->state &= ~PCAN_USB_STATE_CONNECTED;
-		strncpy(name, netdev->name, IFNAMSIZ);
+		strlcpy(name, netdev->name, IFNAMSIZ);
 
 		unregister_netdev(netdev);
 
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index bdd8f2d..33232cc 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -543,7 +543,7 @@ qca8k_setup(struct dsa_switch *ds)
 		    BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
 
 	/* Setup connection between CPU port & user ports */
-	for (i = 0; i < DSA_MAX_PORTS; i++) {
+	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
 		/* CPU port gets connected to all user ports of the switch */
 		if (dsa_is_cpu_port(ds, i)) {
 			qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
@@ -897,7 +897,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
 	if (id != QCA8K_ID_QCA8337)
 		return -ENODEV;
 
-	priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
+	priv->ds = dsa_switch_alloc(&mdiodev->dev, QCA8K_NUM_PORTS);
 	if (!priv->ds)
 		return -ENOMEM;
 
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
index 35b767b..c281c48 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/rtl8366.c
@@ -339,10 +339,12 @@ int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
 			 const struct switchdev_obj_port_vlan *vlan)
 {
 	struct realtek_smi *smi = ds->priv;
+	u16 vid;
 	int ret;
 
-	if (!smi->ops->is_vlan_valid(smi, port))
-		return -EINVAL;
+	for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
+		if (!smi->ops->is_vlan_valid(smi, vid))
+			return -EINVAL;
 
 	dev_info(smi->dev, "prepare VLANs %04x..%04x\n",
 		 vlan->vid_begin, vlan->vid_end);
@@ -370,8 +372,9 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
 	u16 vid;
 	int ret;
 
-	if (!smi->ops->is_vlan_valid(smi, port))
-		return;
+	for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
+		if (!smi->ops->is_vlan_valid(smi, vid))
+			return;
 
 	dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
 		 port,
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
index a4d5049..f4b14b6 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -507,7 +507,8 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
 	irq = of_irq_get(intc, 0);
 	if (irq <= 0) {
 		dev_err(smi->dev, "failed to get parent IRQ\n");
-		return irq ? irq : -EINVAL;
+		ret = irq ? irq : -EINVAL;
+		goto out_put_node;
 	}
 
 	/* This clears the IRQ status register */
@@ -515,7 +516,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
 			  &val);
 	if (ret) {
 		dev_err(smi->dev, "can't read interrupt status\n");
-		return ret;
+		goto out_put_node;
 	}
 
 	/* Fetch IRQ edge information from the descriptor */
@@ -537,7 +538,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
 				 val);
 	if (ret) {
 		dev_err(smi->dev, "could not configure IRQ polarity\n");
-		return ret;
+		goto out_put_node;
 	}
 
 	ret = devm_request_threaded_irq(smi->dev, irq, NULL,
@@ -545,7 +546,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
 					"RTL8366RB", smi);
 	if (ret) {
 		dev_err(smi->dev, "unable to request irq: %d\n", ret);
-		return ret;
+		goto out_put_node;
 	}
 	smi->irqdomain = irq_domain_add_linear(intc,
 					       RTL8366RB_NUM_INTERRUPT,
@@ -553,12 +554,15 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
 					       smi);
 	if (!smi->irqdomain) {
 		dev_err(smi->dev, "failed to create IRQ domain\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out_put_node;
 	}
 	for (i = 0; i < smi->num_ports; i++)
 		irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
 
-	return 0;
+out_put_node:
+	of_node_put(intc);
+	return ret;
 }
 
 static int rtl8366rb_set_addr(struct realtek_smi *smi)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index b41f236..7ce9c69 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -469,13 +469,19 @@ static int __init xgbe_mod_init(void)
 
 	ret = xgbe_platform_init();
 	if (ret)
-		return ret;
+		goto err_platform_init;
 
 	ret = xgbe_pci_init();
 	if (ret)
-		return ret;
+		goto err_pci_init;
 
 	return 0;
+
+err_pci_init:
+	xgbe_platform_exit();
+err_platform_init:
+	unregister_netdevice_notifier(&xgbe_netdev_notifier);
+	return ret;
 }
 
 static void __exit xgbe_mod_exit(void)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index d335c33..82582fa 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -89,6 +89,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
 			}
 		}
 
+err_exit:
 		if (!was_tx_cleaned)
 			work_done = budget;
 
@@ -98,7 +99,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
 					1U << self->aq_ring_param.vec_idx);
 		}
 	}
-err_exit:
+
 	return work_done;
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 33baa17..cf01e73 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3058,12 +3058,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 	/* if VF indicate to PF this function is going down (PF will delete sp
 	 * elements and clear initializations
 	 */
-	if (IS_VF(bp))
+	if (IS_VF(bp)) {
+		bnx2x_clear_vlan_info(bp);
 		bnx2x_vfpf_close_vf(bp);
-	else if (unload_mode != UNLOAD_RECOVERY)
+	} else if (unload_mode != UNLOAD_RECOVERY) {
 		/* if this is a normal/close unload need to clean up chip*/
 		bnx2x_chip_cleanup(bp, unload_mode, keep_link);
-	else {
+	} else {
 		/* Send the UNLOAD_REQUEST to the MCP */
 		bnx2x_send_unload_req(bp, unload_mode);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 0e508e5..ee5159e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp);
 void bnx2x_disable_close_the_gate(struct bnx2x *bp);
 int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
 
+void bnx2x_clear_vlan_info(struct bnx2x *bp);
+
 /**
  * bnx2x_sp_event - handle ramrods completion.
  *
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2c9af0f..68c62e3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8488,11 +8488,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
 	return rc;
 }
 
+void bnx2x_clear_vlan_info(struct bnx2x *bp)
+{
+	struct bnx2x_vlan_entry *vlan;
+
+	/* Mark that hw forgot all entries */
+	list_for_each_entry(vlan, &bp->vlan_reg, link)
+		vlan->hw = false;
+
+	bp->vlan_cnt = 0;
+}
+
 static int bnx2x_del_all_vlans(struct bnx2x *bp)
 {
 	struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
 	unsigned long ramrod_flags = 0, vlan_flags = 0;
-	struct bnx2x_vlan_entry *vlan;
 	int rc;
 
 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8501,10 +8511,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp)
 	if (rc)
 		return rc;
 
-	/* Mark that hw forgot all entries */
-	list_for_each_entry(vlan, &bp->vlan_reg, link)
-		vlan->hw = false;
-	bp->vlan_cnt = 0;
+	bnx2x_clear_vlan_info(bp);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 14b49612a..4dabf37 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -369,6 +369,7 @@ struct bcmgenet_mib_counters {
 #define  EXT_PWR_DOWN_PHY_EN		(1 << 20)
 
 #define EXT_RGMII_OOB_CTRL		0x0C
+#define  RGMII_MODE_EN_V123		(1 << 0)
 #define  RGMII_LINK			(1 << 4)
 #define  OOB_DISABLE			(1 << 5)
 #define  RGMII_MODE_EN			(1 << 6)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index de0e24d..0d527fa 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -261,7 +261,11 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
 	 */
 	if (priv->ext_phy) {
 		reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
-		reg |= RGMII_MODE_EN | id_mode_dis;
+		reg |= id_mode_dis;
+		if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
+			reg |= RGMII_MODE_EN_V123;
+		else
+			reg |= RGMII_MODE_EN;
 		bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
 	}
 
@@ -276,11 +280,12 @@ int bcmgenet_mii_probe(struct net_device *dev)
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	struct device_node *dn = priv->pdev->dev.of_node;
 	struct phy_device *phydev;
-	u32 phy_flags;
+	u32 phy_flags = 0;
 	int ret;
 
 	/* Communicate the integrated PHY revision */
-	phy_flags = priv->gphy_rev;
+	if (priv->internal_phy)
+		phy_flags = priv->gphy_rev;
 
 	/* Initialize link state variables that bcmgenet_mii_setup() uses */
 	priv->old_link = -1;
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index 6aeb104..1ab40c9 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -10,7 +10,7 @@
 
 #include "cavium_ptp.h"
 
-#define DRV_NAME	"Cavium PTP Driver"
+#define DRV_NAME "cavium_ptp"
 
 #define PCI_DEVICE_ID_CAVIUM_PTP	0xA00C
 #define PCI_DEVICE_ID_CAVIUM_RST	0xA00E
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 8f746e1..3deb3c0 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -238,8 +238,10 @@ int octeon_setup_iq(struct octeon_device *oct,
 	}
 
 	oct->num_iqs++;
-	if (oct->fn_list.enable_io_queues(oct))
+	if (oct->fn_list.enable_io_queues(oct)) {
+		octeon_delete_instr_queue(oct, iq_no);
 		return 1;
+	}
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index c34ea38..6be6de0 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3270,7 +3270,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!adapter->regs) {
 		dev_err(&pdev->dev, "cannot map device registers\n");
 		err = -ENOMEM;
-		goto out_free_adapter;
+		goto out_free_adapter_nofail;
 	}
 
 	adapter->pdev = pdev;
@@ -3398,6 +3398,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		if (adapter->port[i])
 			free_netdev(adapter->port[i]);
 
+out_free_adapter_nofail:
+	kfree_skb(adapter->nofail_skb);
+
 out_free_adapter:
 	kfree(adapter);
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 0f72f9c..b429b72 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3276,8 +3276,10 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
 		return -ENOMEM;
 
 	err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
-	if (err)
+	if (err) {
+		kvfree(t);
 		return err;
+	}
 
 	bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
 	kvfree(t);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 4bc2110..dba8a0c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -137,13 +137,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
 static int alloc_uld_rxqs(struct adapter *adap,
 			  struct sge_uld_rxq_info *rxq_info, bool lro)
 {
-	struct sge *s = &adap->sge;
 	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
+	int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
 	struct sge_ofld_rxq *q = rxq_info->uldrxq;
 	unsigned short *ids = rxq_info->rspq_id;
-	unsigned int bmap_idx = 0;
+	struct sge *s = &adap->sge;
 	unsigned int per_chan;
-	int i, err, msi_idx, que_idx = 0;
 
 	per_chan = rxq_info->nrxq / adap->params.nports;
 
@@ -161,6 +160,10 @@ static int alloc_uld_rxqs(struct adapter *adap,
 
 		if (msi_idx >= 0) {
 			bmap_idx = get_msix_idx_from_bmap(adap);
+			if (bmap_idx < 0) {
+				err = -ENOSPC;
+				goto freeout;
+			}
 			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
 		}
 		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 6127697..a91d49d 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -157,6 +157,7 @@ struct hip04_priv {
 	unsigned int reg_inten;
 
 	struct napi_struct napi;
+	struct device *dev;
 	struct net_device *ndev;
 
 	struct tx_desc *tx_desc;
@@ -185,7 +186,7 @@ struct hip04_priv {
 
 static inline unsigned int tx_count(unsigned int head, unsigned int tail)
 {
-	return (head - tail) % (TX_DESC_NUM - 1);
+	return (head - tail) % TX_DESC_NUM;
 }
 
 static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
@@ -387,7 +388,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
 		}
 
 		if (priv->tx_phys[tx_tail]) {
-			dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
+			dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
 					 priv->tx_skb[tx_tail]->len,
 					 DMA_TO_DEVICE);
 			priv->tx_phys[tx_tail] = 0;
@@ -437,8 +438,8 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		return NETDEV_TX_BUSY;
 	}
 
-	phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
-	if (dma_mapping_error(&ndev->dev, phys)) {
+	phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
+	if (dma_mapping_error(priv->dev, phys)) {
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
@@ -497,6 +498,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
 	u16 len;
 	u32 err;
 
+	/* clean up tx descriptors */
+	tx_remaining = hip04_tx_reclaim(ndev, false);
+
 	while (cnt && !last) {
 		buf = priv->rx_buf[priv->rx_head];
 		skb = build_skb(buf, priv->rx_buf_size);
@@ -505,7 +509,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
 			goto refill;
 		}
 
-		dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
+		dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
 				 RX_BUF_SIZE, DMA_FROM_DEVICE);
 		priv->rx_phys[priv->rx_head] = 0;
 
@@ -534,9 +538,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
 		buf = netdev_alloc_frag(priv->rx_buf_size);
 		if (!buf)
 			goto done;
-		phys = dma_map_single(&ndev->dev, buf,
+		phys = dma_map_single(priv->dev, buf,
 				      RX_BUF_SIZE, DMA_FROM_DEVICE);
-		if (dma_mapping_error(&ndev->dev, phys))
+		if (dma_mapping_error(priv->dev, phys))
 			goto done;
 		priv->rx_buf[priv->rx_head] = buf;
 		priv->rx_phys[priv->rx_head] = phys;
@@ -557,8 +561,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
 	}
 	napi_complete_done(napi, rx);
 done:
-	/* clean up tx descriptors and start a new timer if necessary */
-	tx_remaining = hip04_tx_reclaim(ndev, false);
+	/* start a new timer if necessary */
 	if (rx < budget && tx_remaining)
 		hip04_start_tx_timer(priv);
 
@@ -640,9 +643,9 @@ static int hip04_mac_open(struct net_device *ndev)
 	for (i = 0; i < RX_DESC_NUM; i++) {
 		dma_addr_t phys;
 
-		phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
+		phys = dma_map_single(priv->dev, priv->rx_buf[i],
 				      RX_BUF_SIZE, DMA_FROM_DEVICE);
-		if (dma_mapping_error(&ndev->dev, phys))
+		if (dma_mapping_error(priv->dev, phys))
 			return -EIO;
 
 		priv->rx_phys[i] = phys;
@@ -676,7 +679,7 @@ static int hip04_mac_stop(struct net_device *ndev)
 
 	for (i = 0; i < RX_DESC_NUM; i++) {
 		if (priv->rx_phys[i]) {
-			dma_unmap_single(&ndev->dev, priv->rx_phys[i],
+			dma_unmap_single(priv->dev, priv->rx_phys[i],
 					 RX_BUF_SIZE, DMA_FROM_DEVICE);
 			priv->rx_phys[i] = 0;
 		}
@@ -820,6 +823,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	priv = netdev_priv(ndev);
+	priv->dev = d;
 	priv->ndev = ndev;
 	platform_set_drvdata(pdev, ndev);
 	SET_NETDEV_DEV(ndev, &pdev->dev);
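
The tx_count() change above deserves a worked example. The indices are unsigned, so head - tail wraps modulo 2^32; assuming TX_DESC_NUM is 256 (a power of two, so it divides 2^32), taking the difference modulo TX_DESC_NUM yields the in-flight count even across a wrap, whereas the old modulus of TX_DESC_NUM - 1 could report a full ring as empty:

#include <stdio.h>

#define TX_DESC_NUM 256		/* assumed power-of-two ring size */

static unsigned int tx_count(unsigned int head, unsigned int tail)
{
	return (head - tail) % TX_DESC_NUM;
}

int main(void)
{
	/* head has wrapped past tail: 255 descriptors are in flight. */
	printf("%u\n", tx_count(4, 5));	/* prints 255 */
	/*
	 * The old "% (TX_DESC_NUM - 1)" gave 0 here, since 0xFFFFFFFF is
	 * an exact multiple of 255 -- a full ring looked empty.
	 */
	return 0;
}
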
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index baf5cc2..9a3bc09 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -156,11 +156,15 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
 {
 	u32 time_cnt;
 	u32 reg_value;
+	int ret;
 
 	regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val);
 
 	for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
-		regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
+		ret = regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
+		if (ret)
+			return ret;
+
 		reg_value &= st_msk;
 		if ((!!check_st) == (!!reg_value))
 			break;
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
index b69c622..6f0e401 100644
--- a/drivers/net/ethernet/i825xx/lasi_82596.c
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -96,6 +96,8 @@
 
 #define OPT_SWAP_PORT	0x0001	/* Need to wordswp on the MPU port */
 
+#define LIB82596_DMA_ATTR	DMA_ATTR_NON_CONSISTENT
+
 #define DMA_WBACK(ndev, addr, len) \
 	do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
 
@@ -199,7 +201,7 @@ static int __exit lan_remove_chip(struct parisc_device *pdev)
 
 	unregister_netdev (dev);
 	dma_free_attrs(&pdev->dev, sizeof(struct i596_private), lp->dma,
-		       lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+		       lp->dma_addr, LIB82596_DMA_ATTR);
 	free_netdev (dev);
 	return 0;
 }
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index 2f7ae11..d0e8193 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -1065,7 +1065,7 @@ static int i82596_probe(struct net_device *dev)
 
 	dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma),
 			      &lp->dma_addr, GFP_KERNEL,
-			      DMA_ATTR_NON_CONSISTENT);
+			      LIB82596_DMA_ATTR);
 	if (!dma) {
 		printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
 		return -ENOMEM;
@@ -1087,7 +1087,7 @@ static int i82596_probe(struct net_device *dev)
 	i = register_netdev(dev);
 	if (i) {
 		dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma),
-			       dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+			       dma, lp->dma_addr, LIB82596_DMA_ATTR);
 		return i;
 	}
 
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index b2c04a7..43c1fd1 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -23,6 +23,8 @@
 
 static const char sni_82596_string[] = "snirm_82596";
 
+#define LIB82596_DMA_ATTR	0
+
 #define DMA_WBACK(priv, addr, len)     do { } while (0)
 #define DMA_INV(priv, addr, len)       do { } while (0)
 #define DMA_WBACK_INV(priv, addr, len) do { } while (0)
@@ -151,7 +153,7 @@ static int sni_82596_driver_remove(struct platform_device *pdev)
 
 	unregister_netdev(dev);
 	dma_free_attrs(dev->dev.parent, sizeof(struct i596_private), lp->dma,
-		       lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+		       lp->dma_addr, LIB82596_DMA_ATTR);
 	iounmap(lp->ca);
 	iounmap(lp->mpu_port);
 	free_netdev (dev);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index f70cb4d..40ad1e5 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1618,7 +1618,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	struct net_device *netdev;
 	struct ibmveth_adapter *adapter;
 	unsigned char *mac_addr_p;
-	unsigned int *mcastFilterSize_p;
+	__be32 *mcastFilterSize_p;
 	long ret;
 	unsigned long ret_attr;
 
@@ -1640,8 +1640,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 		return -EINVAL;
 	}
 
-	mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
-						VETH_MCAST_FILTER_SIZE, NULL);
+	mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
+							VETH_MCAST_FILTER_SIZE,
+							NULL);
 	if (!mcastFilterSize_p) {
 		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
 			"attribute\n");
@@ -1658,7 +1659,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
 	adapter->vdev = dev;
 	adapter->netdev = netdev;
-	adapter->mcastFilterSize = *mcastFilterSize_p;
+	adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
 	adapter->pool_config = 0;
 
 	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
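
The mcastFilterSize fix above is a straight endianness bug: device-tree attributes are stored big-endian, so dereferencing the raw pointer on a little-endian host yields a byte-swapped value; it must pass through be32_to_cpu(). A userspace sketch using the endian.h equivalents:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A device-tree cell holding 256, as stored on the wire. */
	uint32_t raw = htobe32(256);

	printf("naive read:   %u\n", raw);		/* 65536 on LE hosts */
	printf("correct read: %u\n", be32toh(raw));	/* 256 everywhere */
	return 0;
}
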
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0ae43d2..8fa1473 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1586,6 +1586,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
 					       (u64)tx_buff->indir_dma,
 					       (u64)num_entries);
+		dma_unmap_single(dev, tx_buff->indir_dma,
+				 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
 	} else {
 		tx_buff->num_entries = num_entries;
 		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
@@ -1996,6 +1998,13 @@ static void __ibmvnic_reset(struct work_struct *work)
 
 	rwi = get_next_rwi(adapter);
 	while (rwi) {
+		if (adapter->state == VNIC_REMOVING ||
+		    adapter->state == VNIC_REMOVED) {
+			kfree(rwi);
+			rc = EBUSY;
+			break;
+		}
+
 		if (adapter->force_reset_recovery) {
 			adapter->force_reset_recovery = false;
 			rc = do_hard_reset(adapter, rwi, reset_state);
@@ -2722,12 +2731,10 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
 
 	if (adapter->resetting &&
 	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
-		u64 val = (0xff000000) | scrq->hw_irq;
+		struct irq_desc *desc = irq_to_desc(scrq->irq);
+		struct irq_chip *chip = irq_desc_get_chip(desc);
 
-		rc = plpar_hcall_norets(H_EOI, val);
-		if (rc)
-			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
-				val, rc);
+		chip->irq_eoi(&desc->irq_data);
 	}
 
 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
@@ -2747,7 +2754,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
 	union sub_crq *next;
 	int index;
 	int i, j;
-	u8 *first;
 
 restart_loop:
 	while (pending_scrq(adapter, scrq)) {
@@ -2777,14 +2783,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
 
 				txbuff->data_dma[j] = 0;
 			}
-			/* if sub_crq was sent indirectly */
-			first = &txbuff->indir_arr[0].generic.first;
-			if (*first == IBMVNIC_CRQ_CMD) {
-				dma_unmap_single(dev, txbuff->indir_dma,
-						 sizeof(txbuff->indir_arr),
-						 DMA_TO_DEVICE);
-				*first = 0;
-			}
 
 			if (txbuff->last_frag) {
 				dev_kfree_skb_any(txbuff->skb);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index cdae0ef..7998a73b 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1429,6 +1429,16 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 			else
 				phy_reg |= 0xFA;
 			e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);
+
+			if (speed == SPEED_1000) {
+				hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
+							    &phy_reg);
+
+				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
+
+				hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
+							     phy_reg);
+			}
 		}
 		hw->phy.ops.release(hw);
 
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index eb09c755f..1502895 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -210,7 +210,7 @@
 
 /* PHY Power Management Control */
 #define HV_PM_CTRL		PHY_REG(770, 17)
-#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA	0x100
+#define HV_PM_CTRL_K1_CLK_REQ		0x200
 #define HV_PM_CTRL_K1_ENABLE		0x4000
 
 #define I217_PLL_CLOCK_GATE_REG	PHY_REG(772, 28)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 4e04985..055562c9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2566,6 +2566,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
 		return;
 	if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
 		return;
+	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
+		set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
+		return;
+	}
 
 	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		if (pf->vsi[v] &&
@@ -2580,6 +2584,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
 			}
 		}
 	}
+	clear_bit(__I40E_VF_DISABLE, pf->state);
 }
 
 /**
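
[Editor's note] __I40E_VF_DISABLE acts as a bit lock in the hunk above: if another path already holds it, the subtask re-arms itself by setting the sync-pending bit again and returns instead of racing with the VF-disable path. The generic shape, with BUSY and WORK as illustrative bit names:

	if (test_and_set_bit(BUSY, state)) {
		set_bit(WORK, state);	/* lock held elsewhere: retry on the next pass */
		return;
	}
	/* ... do the synchronization work ... */
	clear_bit(BUSY, state);
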
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 410d5d3..8528076 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -34,6 +34,7 @@
 #include <net/tc_act/tc_mirred.h>
 #include <net/vxlan.h>
 #include <net/mpls.h>
+#include <net/xfrm.h>
 
 #include "ixgbe.h"
 #include "ixgbe_common.h"
@@ -2625,7 +2626,7 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
 		/* 16K ints/sec to 9.2K ints/sec */
 		avg_wire_size *= 15;
 		avg_wire_size += 11452;
-	} else if (avg_wire_size <= 1980) {
+	} else if (avg_wire_size < 1968) {
 		/* 9.2K ints/sec to 8K ints/sec */
 		avg_wire_size *= 5;
 		avg_wire_size += 22420;
@@ -2658,6 +2659,8 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
 	case IXGBE_LINK_SPEED_2_5GB_FULL:
 	case IXGBE_LINK_SPEED_1GB_FULL:
 	case IXGBE_LINK_SPEED_10_FULL:
+		if (avg_wire_size > 8064)
+			avg_wire_size = 8064;
 		itr += DIV_ROUND_UP(avg_wire_size,
 				    IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
 		       IXGBE_ITR_ADAPTIVE_MIN_INC;
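
[Editor's note] The clamp above bounds the adaptive-ITR increment at low link speeds. Assuming IXGBE_ITR_ADAPTIVE_MIN_INC == 2, the worked arithmetic for the capped input is:

	DIV_ROUND_UP(8064, 2 * 64) * 2 = 63 * 2 = 126

which appears to match the ceiling the adaptive algorithm targets; without the cap, a large avg_wire_size could push the computed interval past what the ITR register can represent.
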
@@ -8599,7 +8602,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 #endif /* IXGBE_FCOE */
 
 #ifdef CONFIG_XFRM_OFFLOAD
-	if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
+	if (xfrm_offload(skb) &&
+	    !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
 		goto out_drop;
 #endif
 	tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
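
[Editor's note] xfrm_offload() returns the packet's crypto-offload context (or NULL), which is a stricter test than the old skb->sp check: a secpath can be present without any hardware offload in play. A hedged usage sketch:

	#include <net/xfrm.h>

	struct xfrm_offload *xo = xfrm_offload(skb);

	if (xo) {
		/* packet carries an offloaded xfrm state: take the HW path */
	}
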
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 6455511..9b608d2 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4412,9 +4412,9 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
 	if (state->pause & MLO_PAUSE_RX)
 		ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
 
-	ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
-	ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC |
-		 MVPP22_XLG_CTRL4_EN_IDLE_CHECK;
+	ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
+		   MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
+	ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
 
 	writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
 	writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 15dea48..d6f8a41 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3122,7 +3122,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 	skb_put(skb, len);
 
 	if (dev->features & NETIF_F_RXCSUM) {
-		skb->csum = csum;
+		skb->csum = le16_to_cpu(csum);
 		skb->ip_summed = CHECKSUM_COMPLETE;
 	}
 
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 4ade864..d013f30 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4954,6 +4954,13 @@ static const struct dmi_system_id msi_blacklist[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "P6T"),
 		},
 	},
+	{
+		.ident = "ASUS P6X",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "P6X"),
+		},
+	},
 	{}
 };
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index f5cd953..45d9a5f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1190,7 +1190,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
 	if (err) {
 		en_err(priv, "Failed to allocate RSS indirection QP\n");
-		goto rss_err;
+		goto qp_alloc_err;
 	}
 
 	rss_map->indir_qp->event = mlx4_en_sqp_event;
@@ -1244,6 +1244,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 		       MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
 	mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
 	mlx4_qp_free(mdev->dev, rss_map->indir_qp);
+qp_alloc_err:
 	kfree(rss_map->indir_qp);
 	rss_map->indir_qp = NULL;
 rss_err:
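
[Editor's note] The new qp_alloc_err label above is the usual kernel unwind ladder: each label releases exactly what was acquired before the failing step, so a failed mlx4_qp_alloc() now frees only the indir_qp allocation instead of tearing down QP state that was never set up. The generic shape, with alloc_a()/setup_b() as illustrative names:

	a = alloc_a();
	if (!a)
		return -ENOMEM;
	err = setup_b(a);
	if (err)
		goto err_free_a;	/* b never existed: skip its teardown */
	return 0;

err_free_a:
	kfree(a);
	return err;
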
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index da52e60..d79e177 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -210,6 +210,7 @@ static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
 	"tx_cqe_moder",
 	"rx_cqe_compress",
 	"rx_striding_rq",
+	"rx_no_csum_complete",
 };
 
 enum mlx5e_priv_flag {
@@ -217,6 +218,7 @@ enum mlx5e_priv_flag {
 	MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
 	MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
 	MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3),
+	MLX5E_PFLAG_RX_NO_CSUM_COMPLETE = (1 << 4),
 };
 
 #define MLX5E_SET_PFLAG(params, pflag, enable)			\
@@ -298,6 +300,7 @@ struct mlx5e_dcbx_dp {
 enum {
 	MLX5E_RQ_STATE_ENABLED,
 	MLX5E_RQ_STATE_AM,
+	MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
 };
 
 struct mlx5e_cq {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 45cdde6..a4be04d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
 	return &arfs_t->rules_hash[bucket_idx];
 }
 
-static u8 arfs_get_ip_proto(const struct sk_buff *skb)
-{
-	return (skb->protocol == htons(ETH_P_IP)) ?
-		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
-}
-
 static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
 					 u8 ip_proto, __be16 etype)
 {
@@ -599,31 +593,9 @@ static void arfs_handle_work(struct work_struct *work)
 	arfs_may_expire_flow(priv);
 }
 
-/* return L4 destination port from ip4/6 packets */
-static __be16 arfs_get_dst_port(const struct sk_buff *skb)
-{
-	char *transport_header;
-
-	transport_header = skb_transport_header(skb);
-	if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
-		return ((struct tcphdr *)transport_header)->dest;
-	return ((struct udphdr *)transport_header)->dest;
-}
-
-/* return L4 source port from ip4/6 packets */
-static __be16 arfs_get_src_port(const struct sk_buff *skb)
-{
-	char *transport_header;
-
-	transport_header = skb_transport_header(skb);
-	if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
-		return ((struct tcphdr *)transport_header)->source;
-	return ((struct udphdr *)transport_header)->source;
-}
-
 static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
 					 struct arfs_table *arfs_t,
-					 const struct sk_buff *skb,
+					 const struct flow_keys *fk,
 					 u16 rxq, u32 flow_id)
 {
 	struct arfs_rule *rule;
@@ -638,19 +610,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
 	INIT_WORK(&rule->arfs_work, arfs_handle_work);
 
 	tuple = &rule->tuple;
-	tuple->etype = skb->protocol;
+	tuple->etype = fk->basic.n_proto;
+	tuple->ip_proto = fk->basic.ip_proto;
 	if (tuple->etype == htons(ETH_P_IP)) {
-		tuple->src_ipv4 = ip_hdr(skb)->saddr;
-		tuple->dst_ipv4 = ip_hdr(skb)->daddr;
+		tuple->src_ipv4 = fk->addrs.v4addrs.src;
+		tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
 	} else {
-		memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+		memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
 		       sizeof(struct in6_addr));
-		memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+		memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
 		       sizeof(struct in6_addr));
 	}
-	tuple->ip_proto = arfs_get_ip_proto(skb);
-	tuple->src_port = arfs_get_src_port(skb);
-	tuple->dst_port = arfs_get_dst_port(skb);
+	tuple->src_port = fk->ports.src;
+	tuple->dst_port = fk->ports.dst;
 
 	rule->flow_id = flow_id;
 	rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
@@ -661,37 +633,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
 	return rule;
 }
 
-static bool arfs_cmp_ips(struct arfs_tuple *tuple,
-			 const struct sk_buff *skb)
+static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
 {
-	if (tuple->etype == htons(ETH_P_IP) &&
-	    tuple->src_ipv4 == ip_hdr(skb)->saddr &&
-	    tuple->dst_ipv4 == ip_hdr(skb)->daddr)
-		return true;
-	if (tuple->etype == htons(ETH_P_IPV6) &&
-	    (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
-		     sizeof(struct in6_addr))) &&
-	    (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
-		     sizeof(struct in6_addr))))
-		return true;
+	if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
+		return false;
+	if (tuple->etype != fk->basic.n_proto)
+		return false;
+	if (tuple->etype == htons(ETH_P_IP))
+		return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
+		       tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
+	if (tuple->etype == htons(ETH_P_IPV6))
+		return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
+			       sizeof(struct in6_addr)) &&
+		       !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
+			       sizeof(struct in6_addr));
 	return false;
 }
 
 static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
-					const struct sk_buff *skb)
+					const struct flow_keys *fk)
 {
 	struct arfs_rule *arfs_rule;
 	struct hlist_head *head;
-	__be16 src_port = arfs_get_src_port(skb);
-	__be16 dst_port = arfs_get_dst_port(skb);
 
-	head = arfs_hash_bucket(arfs_t, src_port, dst_port);
+	head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
 	hlist_for_each_entry(arfs_rule, head, hlist) {
-		if (arfs_rule->tuple.src_port == src_port &&
-		    arfs_rule->tuple.dst_port == dst_port &&
-		    arfs_cmp_ips(&arfs_rule->tuple, skb)) {
+		if (arfs_cmp(&arfs_rule->tuple, fk))
 			return arfs_rule;
-		}
 	}
 
 	return NULL;
@@ -704,20 +672,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
 	struct arfs_table *arfs_t;
 	struct arfs_rule *arfs_rule;
+	struct flow_keys fk;
 
-	if (skb->protocol != htons(ETH_P_IP) &&
-	    skb->protocol != htons(ETH_P_IPV6))
+	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+		return -EPROTONOSUPPORT;
+
+	if (fk.basic.n_proto != htons(ETH_P_IP) &&
+	    fk.basic.n_proto != htons(ETH_P_IPV6))
 		return -EPROTONOSUPPORT;
 
 	if (skb->encapsulation)
 		return -EPROTONOSUPPORT;
 
-	arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
+	arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
 	if (!arfs_t)
 		return -EPROTONOSUPPORT;
 
 	spin_lock_bh(&arfs->arfs_lock);
-	arfs_rule = arfs_find_rule(arfs_t, skb);
+	arfs_rule = arfs_find_rule(arfs_t, &fk);
 	if (arfs_rule) {
 		if (arfs_rule->rxq == rxq_index) {
 			spin_unlock_bh(&arfs->arfs_lock);
@@ -725,8 +697,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 		}
 		arfs_rule->rxq = rxq_index;
 	} else {
-		arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
-					    rxq_index, flow_id);
+		arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
 		if (!arfs_rule) {
 			spin_unlock_bh(&arfs->arfs_lock);
 			return -ENOMEM;
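
[Editor's note] Replacing the hand-rolled header walkers with the flow-keys dissector means the 5-tuple is extracted by code that validates header presence first; the removed helpers read transport headers that fragmented or truncated packets may not actually carry. A hedged usage sketch:

	#include <net/flow_dissector.h>

	struct flow_keys fk;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;	/* could not parse the packet */
	/* fk.basic.n_proto, fk.basic.ip_proto, fk.addrs and fk.ports
	 * are now filled in and safe to use.
	 */
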
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 792bb8b..10d72c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1083,6 +1083,9 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int err;
 
+	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+		return -EOPNOTSUPP;
+
 	if (pauseparam->autoneg)
 		return -EINVAL;
 
@@ -1507,6 +1510,28 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
 	return 0;
 }
 
+static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5e_channels *channels = &priv->channels;
+	struct mlx5e_channel *c;
+	int i;
+
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state) ||
+	    priv->channels.params.xdp_prog)
+		return 0;
+
+	for (i = 0; i < channels->num; i++) {
+		c = channels->c[i];
+		if (enable)
+			__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+		else
+			__clear_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+	}
+
+	return 0;
+}
+
 static int mlx5e_handle_pflag(struct net_device *netdev,
 			      u32 wanted_flags,
 			      enum mlx5e_priv_flag flag,
@@ -1558,6 +1583,12 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
 	err = mlx5e_handle_pflag(netdev, pflags,
 				 MLX5E_PFLAG_RX_STRIDING_RQ,
 				 set_pflag_rx_striding_rq);
+	if (err)
+		goto out;
+
+	err = mlx5e_handle_pflag(netdev, pflags,
+				 MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
+				 set_pflag_rx_no_csum_complete);
 
 out:
 	mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 83ab2c0..7e67063 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -934,6 +934,13 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
 	if (params->rx_dim_enabled)
 		__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
+	/* We disable csum_complete when XDP is enabled since
+	 * XDP programs might manipulate packets which will render
+	 * skb->checksum incorrect.
+	 */
+	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
+		__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+
 	return 0;
 
 err_destroy_rq:
@@ -4533,6 +4540,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
 		params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
 
 	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
+	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);
 
 	/* RQ */
 	/* Prefer Striding RQ, unless any of the following holds:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index d3f794d..df49dc1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -37,6 +37,7 @@
 #include <net/busy_poll.h>
 #include <net/ip6_checksum.h>
 #include <net/page_pool.h>
+#include <net/inet_ecn.h>
 #include "en.h"
 #include "en_tc.h"
 #include "eswitch.h"
@@ -688,27 +689,110 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
 	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
 }
 
-static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
+static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
+					__be16 *proto)
 {
-	__be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;
+	*proto = ((struct ethhdr *)skb->data)->h_proto;
+	*proto = __vlan_get_protocol(skb, *proto, network_depth);
 
-	ethertype = __vlan_get_protocol(skb, ethertype, network_depth);
-	return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
+	if (*proto == htons(ETH_P_IP))
+		return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
+
+	if (*proto == htons(ETH_P_IPV6))
+		return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
+
+	return false;
 }
 
-static u32 mlx5e_get_fcs(const struct sk_buff *skb)
+static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
 {
-	const void *fcs_bytes;
-	u32 _fcs_bytes;
+	int network_depth = 0;
+	__be16 proto;
+	void *ip;
+	int rc;
 
-	fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
-				       ETH_FCS_LEN, &_fcs_bytes);
+	if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
+		return;
 
-	return __get_unaligned_cpu32(fcs_bytes);
+	ip = skb->data + network_depth;
+	rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
+					 IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));
+
+	rq->stats->ecn_mark += !!rc;
+}
+
+static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
+{
+	void *ip_p = skb->data + network_depth;
+
+	return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
+					    ((struct ipv6hdr *)ip_p)->nexthdr;
 }
 
 #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
 
+#define MAX_PADDING 8
+
+static void
+tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
+		       struct mlx5e_rq_stats *stats)
+{
+	stats->csum_complete_tail_slow++;
+	skb->csum = csum_block_add(skb->csum,
+				   skb_checksum(skb, offset, len, 0),
+				   offset);
+}
+
+static void
+tail_padding_csum(struct sk_buff *skb, int offset,
+		  struct mlx5e_rq_stats *stats)
+{
+	u8 tail_padding[MAX_PADDING];
+	int len = skb->len - offset;
+	void *tail;
+
+	if (unlikely(len > MAX_PADDING)) {
+		tail_padding_csum_slow(skb, offset, len, stats);
+		return;
+	}
+
+	tail = skb_header_pointer(skb, offset, len, tail_padding);
+	if (unlikely(!tail)) {
+		tail_padding_csum_slow(skb, offset, len, stats);
+		return;
+	}
+
+	stats->csum_complete_tail++;
+	skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
+}
+
+static void
+mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto,
+		       struct mlx5e_rq_stats *stats)
+{
+	struct ipv6hdr *ip6;
+	struct iphdr   *ip4;
+	int pkt_len;
+
+	switch (proto) {
+	case htons(ETH_P_IP):
+		ip4 = (struct iphdr *)(skb->data + network_depth);
+		pkt_len = network_depth + ntohs(ip4->tot_len);
+		break;
+	case htons(ETH_P_IPV6):
+		ip6 = (struct ipv6hdr *)(skb->data + network_depth);
+		pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
+		break;
+	default:
+		return;
+	}
+
+	if (likely(pkt_len >= skb->len))
+		return;
+
+	tail_padding_csum(skb, pkt_len, stats);
+}
+
 static inline void mlx5e_handle_csum(struct net_device *netdev,
 				     struct mlx5_cqe64 *cqe,
 				     struct mlx5e_rq *rq,
@@ -717,6 +801,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 {
 	struct mlx5e_rq_stats *stats = rq->stats;
 	int network_depth = 0;
+	__be16 proto;
 
 	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
 		goto csum_none;
@@ -727,6 +812,10 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 		return;
 	}
 
+	/* True when explicitly set via priv flag, or XDP prog is loaded */
+	if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
+		goto csum_unnecessary;
+
 	/* CQE csum doesn't cover padding octets in short ethernet
 	 * frames. And the pad field is appended prior to calculating
 	 * and appending the FCS field.
@@ -738,7 +827,10 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 	if (short_frame(skb->len))
 		goto csum_unnecessary;
 
-	if (likely(is_last_ethertype_ip(skb, &network_depth))) {
+	if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
+		if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
+			goto csum_unnecessary;
+
 		skb->ip_summed = CHECKSUM_COMPLETE;
 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
 		if (network_depth > ETH_HLEN)
@@ -749,10 +841,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 			skb->csum = csum_partial(skb->data + ETH_HLEN,
 						 network_depth - ETH_HLEN,
 						 skb->csum);
-		if (unlikely(netdev->features & NETIF_F_RXFCS))
-			skb->csum = csum_block_add(skb->csum,
-						   (__force __wsum)mlx5e_get_fcs(skb),
-						   skb->len - ETH_FCS_LEN);
+
+		mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
 		stats->csum_complete++;
 		return;
 	}
@@ -775,6 +865,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 	stats->csum_none++;
 }
 
+#define MLX5E_CE_BIT_MASK 0x80
+
 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 				      u32 cqe_bcnt,
 				      struct mlx5e_rq *rq,
@@ -819,6 +911,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 	skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
 
 	mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
+	/* Check the ECN CE bit in the CQE - the MSB of the ml_path field */
+	if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
+		mlx5e_enable_ecn(rq, skb);
+
 	skb->protocol = eth_type_trans(skb, netdev);
 }
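
[Editor's note] The tail-padding helpers above exist because the hardware checksum stops at the end of the IP datagram, while CHECKSUM_COMPLETE promises a checksum over every byte of the skb; short frames padded out past pkt_len would otherwise fail validation. The core of the fixup reduces to:

	int pad_len = skb->len - pkt_len;	/* bytes past the IP datagram */

	if (pad_len > 0)
		skb->csum = csum_block_add(skb->csum,
					   skb_checksum(skb, pkt_len, pad_len, 0),
					   pkt_len);

with the MAX_PADDING fast path avoiding the full skb_checksum() walk for the common pad of at most 8 bytes.
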
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 7047cc2..8255d79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -53,10 +53,13 @@ static const struct counter_desc sw_stats_desc[] = {
 
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
@@ -144,9 +147,12 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 		s->rx_bytes	+= rq_stats->bytes;
 		s->rx_lro_packets += rq_stats->lro_packets;
 		s->rx_lro_bytes	+= rq_stats->lro_bytes;
+		s->rx_ecn_mark	+= rq_stats->ecn_mark;
 		s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
 		s->rx_csum_none	+= rq_stats->csum_none;
 		s->rx_csum_complete += rq_stats->csum_complete;
+		s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
+		s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
 		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
 		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
 		s->rx_xdp_drop     += rq_stats->xdp_drop;
@@ -1137,6 +1143,8 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
@@ -1144,6 +1152,7 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 0ad7a16..3ea8033 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -66,10 +66,13 @@ struct mlx5e_sw_stats {
 	u64 tx_nop;
 	u64 rx_lro_packets;
 	u64 rx_lro_bytes;
+	u64 rx_ecn_mark;
 	u64 rx_removed_vlan_packets;
 	u64 rx_csum_unnecessary;
 	u64 rx_csum_none;
 	u64 rx_csum_complete;
+	u64 rx_csum_complete_tail;
+	u64 rx_csum_complete_tail_slow;
 	u64 rx_csum_unnecessary_inner;
 	u64 rx_xdp_drop;
 	u64 rx_xdp_redirect;
@@ -179,11 +182,14 @@ struct mlx5e_rq_stats {
 	u64 packets;
 	u64 bytes;
 	u64 csum_complete;
+	u64 csum_complete_tail;
+	u64 csum_complete_tail_slow;
 	u64 csum_unnecessary;
 	u64 csum_unnecessary_inner;
 	u64 csum_none;
 	u64 lro_packets;
 	u64 lro_bytes;
+	u64 ecn_mark;
 	u64 removed_vlan_packets;
 	u64 xdp_drop;
 	u64 xdp_redirect;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 0e820cf..231ed50 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1642,6 +1642,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
 	{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF},	/* ConnectX-6 VF */
 	{ PCI_VDEVICE(MELLANOX, 0xa2d2) },			/* BlueField integrated ConnectX-5 network controller */
 	{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},	/* BlueField integrated ConnectX-5 network controller VF */
+	{ PCI_VDEVICE(MELLANOX, 0xa2d6) },			/* BlueField-2 integrated ConnectX-6 Dx network controller */
 	{ 0, }
 };
 
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index b2d2ec8..6789eed 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3922,7 +3922,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * setup (if available). */
 	status = myri10ge_request_irq(mgp);
 	if (status != 0)
-		goto abort_with_firmware;
+		goto abort_with_slices;
 	myri10ge_free_irq(mgp);
 
 	/* Save configuration space to be restored if the
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index e57d237..c19e88e 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -259,6 +259,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
 		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
 		if (!repr_priv) {
 			err = -ENOMEM;
+			nfp_repr_free(repr);
 			goto err_reprs_clean;
 		}
 
@@ -271,6 +272,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
 		port = nfp_port_alloc(app, port_type, repr);
 		if (IS_ERR(port)) {
 			err = PTR_ERR(port);
+			kfree(repr_priv);
 			nfp_repr_free(repr);
 			goto err_reprs_clean;
 		}
@@ -291,6 +293,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
 		err = nfp_repr_init(app, repr,
 				    port_id, port, priv->nn->dp.netdev);
 		if (err) {
+			kfree(repr_priv);
 			nfp_port_free(port);
 			nfp_repr_free(repr);
 			goto err_reprs_clean;
@@ -373,6 +376,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
 		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
 		if (!repr_priv) {
 			err = -ENOMEM;
+			nfp_repr_free(repr);
 			goto err_reprs_clean;
 		}
 
@@ -382,11 +386,13 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
 		port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
 		if (IS_ERR(port)) {
 			err = PTR_ERR(port);
+			kfree(repr_priv);
 			nfp_repr_free(repr);
 			goto err_reprs_clean;
 		}
 		err = nfp_port_init_phy_port(app->pf, app, port, i);
 		if (err) {
+			kfree(repr_priv);
 			nfp_port_free(port);
 			nfp_repr_free(repr);
 			goto err_reprs_clean;
@@ -399,6 +405,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
 		err = nfp_repr_init(app, repr,
 				    cmsg_port_id, port, priv->nn->dp.netdev);
 		if (err) {
+			kfree(repr_priv);
 			nfp_port_free(port);
 			nfp_repr_free(repr);
 			goto err_reprs_clean;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 08381ef..41d30f5 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1371,13 +1371,14 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
 	pldat->dma_buff_base_p = dma_handle;
 
 	netdev_dbg(ndev, "IO address space     :%pR\n", res);
-	netdev_dbg(ndev, "IO address size      :%d\n", resource_size(res));
+	netdev_dbg(ndev, "IO address size      :%zd\n",
+			(size_t)resource_size(res));
 	netdev_dbg(ndev, "IO address (mapped)  :0x%p\n",
 			pldat->net_base);
 	netdev_dbg(ndev, "IRQ number           :%d\n", ndev->irq);
-	netdev_dbg(ndev, "DMA buffer size      :%d\n", pldat->dma_buff_size);
-	netdev_dbg(ndev, "DMA buffer P address :0x%08x\n",
-			pldat->dma_buff_base_p);
+	netdev_dbg(ndev, "DMA buffer size      :%zd\n", pldat->dma_buff_size);
+	netdev_dbg(ndev, "DMA buffer P address :%pad\n",
+			&pldat->dma_buff_base_p);
 	netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
 			pldat->dma_buff_base_v);
 
@@ -1424,8 +1425,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_out_unregister_netdev;
 
-	netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
-	       res->start, ndev->irq);
+	netdev_info(ndev, "LPC mac at 0x%08lx irq %d\n",
+	       (unsigned long)res->start, ndev->irq);
 
 	phydev = ndev->phydev;
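
[Editor's note] The lpc_eth hunks apply the kernel's printk-formats rules rather than guessing integer widths: size_t takes %zd/%zu, dma_addr_t takes %pad and is passed by reference, and resource_size_t is either printed via %pa or cast explicitly, as the patch does for res->start. For example:

	netdev_dbg(ndev, "DMA buffer size      :%zd\n", pldat->dma_buff_size);
	netdev_dbg(ndev, "DMA buffer P address :%pad\n", &pldat->dma_buff_base_p);
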
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index b22f464..f9e4750 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -939,7 +939,7 @@ static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
 						snprintf(bit_name, 30,
 							 p_aeu->bit_name, num);
 					else
-						strncpy(bit_name,
+						strlcpy(bit_name,
 							p_aeu->bit_name, 30);
 
 					/* We now need to pass bitmask in its
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index cf3b0e3..637687b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1150,7 +1150,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 					      &drv_version);
 		if (rc) {
 			DP_NOTICE(cdev, "Failed sending drv version command\n");
-			return rc;
+			goto err4;
 		}
 	}
 
@@ -1158,6 +1158,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 
 	return 0;
 
+err4:
+	qed_ll2_dealloc_if(cdev);
 err3:
 	qed_hw_stop(cdev);
 err2:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 13802b8..909422d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -442,7 +442,7 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
 	/* Vendor specific information */
 	dev->vendor_id = cdev->vendor_id;
 	dev->vendor_part_id = cdev->device_id;
-	dev->hw_ver = 0;
+	dev->hw_ver = cdev->chip_rev;
 	dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
 		      (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
 
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 10b075b..783ee6a 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2788,6 +2788,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
 				netdev_err(qdev->ndev,
 					   "PCI mapping failed with error: %d\n",
 					   err);
+				dev_kfree_skb_irq(skb);
 				ql_free_large_buffers(qdev);
 				return -ENOMEM;
 			}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/Makefile b/drivers/net/ethernet/qualcomm/rmnet/Makefile
index ac76d0e..3f714f3 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/Makefile
+++ b/drivers/net/ethernet/qualcomm/rmnet/Makefile
@@ -10,4 +10,5 @@
 rmnet-y		 += rmnet_map_data.o
 rmnet-y		 += rmnet_map_command.o
 rmnet-y		 += rmnet_descriptor.o
+rmnet-y		 += rmnet_genl.o
 obj-$(CONFIG_RMNET) += rmnet.o
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 72f3e4b..6f6ebfb 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -15,6 +15,7 @@
 #include "rmnet_private.h"
 #include "rmnet_map.h"
 #include "rmnet_descriptor.h"
+#include "rmnet_genl.h"
 #include <soc/qcom/rmnet_qmi.h>
 #include <soc/qcom/qmi_rmnet.h>
 
@@ -79,6 +80,8 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
 	if (port->nr_rmnet_devs)
 		return -EINVAL;
 
+	netdev_rx_handler_unregister(real_dev);
+
 	rmnet_map_cmd_exit(port);
 	rmnet_map_tx_aggregate_exit(port);
 
@@ -86,8 +89,6 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
 
 	kfree(port);
 
-	netdev_rx_handler_unregister(real_dev);
-
 	/* release reference on real_dev */
 	dev_put(real_dev);
 
@@ -255,9 +256,9 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
 	if (!port->nr_rmnet_devs)
 		qmi_rmnet_qmi_exit(port->qmi_info, port);
 
-	rmnet_unregister_real_device(real_dev, port);
+	unregister_netdevice(dev);
 
-	unregister_netdevice_queue(dev, head);
+	rmnet_unregister_real_device(real_dev, port);
 }
 
 static void rmnet_force_unassociate_device(struct net_device *dev)
@@ -287,7 +288,9 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
 		synchronize_rcu();
 		kfree(ep);
 	}
-
+	/* Unregister the devices synchronously, in this context, before
+	 * the port is freed. If this API ever becomes asynchronous, the
+	 * ordering here must be revisited.
+	 */
 	unregister_netdevice_many(&list);
 
 	rmnet_unregister_real_device(real_dev, port);
@@ -726,6 +729,8 @@ static int __init rmnet_init(void)
 		unregister_netdevice_notifier(&rmnet_dev_notifier);
 		return rc;
 	}
+	rmnet_core_genl_init();
+
 	return rc;
 }
 
@@ -733,6 +738,7 @@ static void __exit rmnet_exit(void)
 {
 	unregister_netdevice_notifier(&rmnet_dev_notifier);
 	rtnl_link_unregister(&rmnet_link_ops);
+	rmnet_core_genl_deinit();
 }
 
 module_init(rmnet_init)
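
[Editor's note] The reordering in rmnet_unregister_real_device() follows the usual teardown rule: stop new work before freeing the state that work reads. netdev_rx_handler_unregister() synchronizes with in-flight receive paths before returning, so once it completes the port can be freed safely; the old order freed the port while the handler could still run. Sketch of the shape:

	netdev_rx_handler_unregister(real_dev);	/* no rx handler calls after this */
	/* ... tear down per-port machinery ... */
	kfree(port);
	dev_put(real_dev);
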
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c
index 40238e6..751a50a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c
@@ -34,8 +34,9 @@ rmnet_get_frag_descriptor(struct rmnet_port *port)
 {
 	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
 	struct rmnet_frag_descriptor *frag_desc;
+	unsigned long flags;
 
-	spin_lock(&port->desc_pool_lock);
+	spin_lock_irqsave(&port->desc_pool_lock, flags);
 	if (!list_empty(&pool->free_list)) {
 		frag_desc = list_first_entry(&pool->free_list,
 					     struct rmnet_frag_descriptor,
@@ -52,7 +53,7 @@ rmnet_get_frag_descriptor(struct rmnet_port *port)
 	}
 
 out:
-	spin_unlock(&port->desc_pool_lock);
+	spin_unlock_irqrestore(&port->desc_pool_lock, flags);
 	return frag_desc;
 }
 EXPORT_SYMBOL(rmnet_get_frag_descriptor);
@@ -62,6 +63,7 @@ void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
 {
 	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
 	struct page *page = skb_frag_page(&frag_desc->frag);
+	unsigned long flags;
 
 	list_del(&frag_desc->list);
 	if (page)
@@ -70,9 +72,9 @@ void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
 	memset(frag_desc, 0, sizeof(*frag_desc));
 	INIT_LIST_HEAD(&frag_desc->list);
 	INIT_LIST_HEAD(&frag_desc->sub_frags);
-	spin_lock(&port->desc_pool_lock);
+	spin_lock_irqsave(&port->desc_pool_lock, flags);
 	list_add_tail(&frag_desc->list, &pool->free_list);
-	spin_unlock(&port->desc_pool_lock);
+	spin_unlock_irqrestore(&port->desc_pool_lock, flags);
 }
 EXPORT_SYMBOL(rmnet_recycle_frag_descriptor);
 
@@ -457,14 +459,14 @@ static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
 		if (frag_desc->trans_len)
 			skb_set_transport_header(head_skb, frag_desc->ip_len);
 
-		/* Packets that have no data portion don't need any frags */
-		if (hdr_len == skb_frag_size(&frag_desc->frag))
-			goto skip_frags;
-
 		/* If the headers we added are the start of the page,
 		 * we don't want to add them twice
 		 */
 		if (frag_desc->hdr_ptr == rmnet_frag_data_ptr(frag_desc)) {
+			/* "Header only" packets can be fast-forwarded */
+			if (hdr_len == skb_frag_size(&frag_desc->frag))
+				goto skip_frags;
+
 			if (!rmnet_frag_pull(frag_desc, port, hdr_len)) {
 				kfree_skb(head_skb);
 				return NULL;
@@ -553,9 +555,12 @@ static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
 	}
 
 	/* Handle csum offloading */
-	if (frag_desc->csum_valid) {
+	if (frag_desc->csum_valid && frag_desc->hdrs_valid) {
 		/* Set the partial checksum information */
 		rmnet_frag_partial_csum(head_skb, frag_desc);
+	} else if (frag_desc->csum_valid) {
+		/* Non-RSB/RSC/perf packet. The current checksum is fine */
+		head_skb->ip_summed = CHECKSUM_UNNECESSARY;
 	} else if (frag_desc->hdrs_valid &&
 		   (frag_desc->trans_proto == IPPROTO_TCP ||
 		    frag_desc->trans_proto == IPPROTO_UDP)) {
@@ -670,6 +675,12 @@ static void __rmnet_frag_segment_data(struct rmnet_frag_descriptor *coal_desc,
 		new_frag->tcp_seq_set = 1;
 		new_frag->tcp_seq = htonl(ntohl(th->seq) +
 					  coal_desc->data_offset);
+	} else if (coal_desc->trans_proto == IPPROTO_UDP) {
+		struct udphdr *uh;
+
+		uh = (struct udphdr *)(hdr_start + coal_desc->ip_len);
+		if (coal_desc->ip_proto == 4 && !uh->check)
+			csum_valid = true;
 	}
 
 	if (coal_desc->ip_proto == 4) {
@@ -734,6 +745,7 @@ rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
 	u8 pkt, total_pkt = 0;
 	u8 nlo;
 	bool gro = coal_desc->dev->features & NETIF_F_GRO_HW;
+	bool zero_csum = false;
 
 	/* Pull off the headers we no longer need */
 	if (!rmnet_frag_pull(coal_desc, port, sizeof(struct rmnet_map_header)))
@@ -794,7 +806,12 @@ rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
 		th = (struct tcphdr *)((u8 *)iph + coal_desc->ip_len);
 		coal_desc->trans_len = th->doff * 4;
 	} else if (coal_desc->trans_proto == IPPROTO_UDP) {
-		coal_desc->trans_len = sizeof(struct udphdr);
+		struct udphdr *uh;
+
+		uh = (struct udphdr *)((u8 *)iph + coal_desc->ip_len);
+		coal_desc->trans_len = sizeof(*uh);
+		if (coal_desc->ip_proto == 4 && !uh->check)
+			zero_csum = true;
 	} else {
 		priv->stats.coal.coal_trans_invalid++;
 		return;
@@ -802,7 +819,7 @@ rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
 
 	coal_desc->hdrs_valid = 1;
 
-	if (rmnet_map_v5_csum_buggy(coal_hdr)) {
+	if (rmnet_map_v5_csum_buggy(coal_hdr) && !zero_csum) {
 		/* Mark the checksum as valid if it checks out */
 		if (rmnet_frag_validate_csum(coal_desc))
 			coal_desc->csum_valid = true;
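
[Editor's note] The descriptor-pool hunks earlier in this file switch to the irqsave lock variants because the pool can be touched from interrupt context; if a process-context holder of a plain spin_lock is interrupted by that path on the same CPU, the system deadlocks. The required pattern:

	unsigned long flags;

	spin_lock_irqsave(&port->desc_pool_lock, flags);	/* local IRQs off */
	/* ... manipulate the free list ... */
	spin_unlock_irqrestore(&port->desc_pool_lock, flags);
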
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_genl.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_genl.c
new file mode 100644
index 0000000..1eb72ca
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_genl.c
@@ -0,0 +1,410 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * RMNET Data Generic Netlink
+ *
+ */
+
+#include "rmnet_genl.h"
+#include <net/sock.h>
+#include <linux/skbuff.h>
+
+/* Static Functions and Definitions */
+static struct nla_policy rmnet_genl_attr_policy[RMNET_CORE_GENL_ATTR_MAX +
+						1] = {
+	[RMNET_CORE_GENL_ATTR_INT]  = { .type = NLA_S32 },
+	[RMNET_CORE_GENL_ATTR_PID_BPS] = { .len =
+				sizeof(struct rmnet_core_pid_bps_resp) },
+	[RMNET_CORE_GENL_ATTR_PID_BOOST] = { .len =
+				sizeof(struct rmnet_core_pid_boost_req) },
+	[RMNET_CORE_GENL_ATTR_STR]  = { .type = NLA_NUL_STRING },
+};
+
+#define RMNET_CORE_GENL_OP(_cmd, _func)			\
+	{						\
+		.cmd	= _cmd,				\
+		.policy	= rmnet_genl_attr_policy,	\
+		.doit	= _func,			\
+		.dumpit	= NULL,				\
+		.flags	= 0,				\
+	}
+
+static const struct genl_ops rmnet_core_genl_ops[] = {
+	RMNET_CORE_GENL_OP(RMNET_CORE_GENL_CMD_PID_BPS_REQ,
+			   rmnet_core_genl_pid_bps_req_hdlr),
+	RMNET_CORE_GENL_OP(RMNET_CORE_GENL_CMD_PID_BOOST_REQ,
+			   rmnet_core_genl_pid_boost_req_hdlr),
+};
+
+struct genl_family rmnet_core_genl_family = {
+	.hdrsize = 0,
+	.name    = RMNET_CORE_GENL_FAMILY_NAME,
+	.version = RMNET_CORE_GENL_VERSION,
+	.maxattr = RMNET_CORE_GENL_ATTR_MAX,
+	.ops     = rmnet_core_genl_ops,
+	.n_ops   = ARRAY_SIZE(rmnet_core_genl_ops),
+};
+
+#define RMNET_PID_STATS_HT_SIZE (8)
+#define RMNET_PID_STATS_HT rmnet_pid_ht
+DEFINE_HASHTABLE(rmnet_pid_ht, RMNET_PID_STATS_HT_SIZE);
+DEFINE_SPINLOCK(rmnet_pid_ht_splock); /* Protects the pid hash table */
+
+#define RMNET_GENL_SEC_TO_MSEC(x)   ((x) * 1000)
+#define RMNET_GENL_SEC_TO_NSEC(x)   ((x) * 1000000000)
+#define RMNET_GENL_BYTES_TO_BITS(x) ((x) * 8)
+#define RMNET_GENL_NSEC_TO_SEC(x) ({\
+	u64 __quotient = (x); \
+	do_div(__quotient, 1000000000); \
+	__quotient; \
+})
+
+int rmnet_core_userspace_connected;
+#define RMNET_QUERY_PERIOD_SEC (1) /* Period of pid/bps queries */
+
+struct rmnet_pid_node_s {
+	struct hlist_node list;
+	time_t timestamp_last_query;
+	u64 tx_bytes;
+	u64 tx_bytes_last_query;
+	u64 tx_bps;
+	u64 sched_boost_period_ms;
+	int sched_boost_remaining_ms;
+	int sched_boost_enable;
+	pid_t pid;
+};
+
+void rmnet_update_pid_and_check_boost(pid_t pid, unsigned int len,
+				      int *boost_enable, u64 *boost_period)
+{
+	struct hlist_node *tmp;
+	struct rmnet_pid_node_s *node_p;
+	unsigned long ht_flags;
+	u8 is_match_found = 0;
+	u64 tx_bytes = 0;
+
+	*boost_enable = 0;
+	*boost_period = 0;
+
+	/* Use do-while so the spinlock is taken and released only once */
+	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
+	do {
+		hash_for_each_possible_safe(RMNET_PID_STATS_HT, node_p, tmp,
+					    list, pid) {
+			if (pid != node_p->pid)
+				continue;
+
+			/* PID Match found */
+			is_match_found = 1;
+			node_p->tx_bytes += len;
+			tx_bytes = node_p->tx_bytes;
+
+			if (node_p->sched_boost_enable) {
+				rm_err("boost triggered for pid %d",
+				       pid);
+				/* Just triggered boost, don't re-trigger */
+				node_p->sched_boost_enable = 0;
+				*boost_enable = 1;
+				*boost_period = node_p->sched_boost_period_ms;
+				node_p->sched_boost_remaining_ms =
+							(int)*boost_period;
+			}
+
+			break;
+		}
+
+		if (is_match_found)
+			break;
+
+		/* No PID match */
+		node_p = kzalloc(sizeof(*node_p), GFP_ATOMIC);
+		if (!node_p)
+			break;
+
+		node_p->pid = pid;
+		node_p->tx_bytes = len;
+		node_p->sched_boost_enable = 0;
+		node_p->sched_boost_period_ms = 0;
+		node_p->sched_boost_remaining_ms = 0;
+		hash_add_rcu(RMNET_PID_STATS_HT, &node_p->list, pid);
+		break;
+	} while (0);
+	spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
+}
+
+void rmnet_boost_for_pid(pid_t pid, int boost_enable,
+			 u64 boost_period)
+{
+	struct hlist_node *tmp;
+	struct rmnet_pid_node_s *node_p;
+	unsigned long ht_flags;
+
+	/* Use do-while so the spinlock is taken and released only once */
+	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
+	do {
+		hash_for_each_possible_safe(RMNET_PID_STATS_HT, node_p, tmp,
+					    list, pid) {
+			if (pid != node_p->pid)
+				continue;
+
+			/* PID Match found */
+			rm_err("CORE_BOOST: enable boost for pid %d for %d ms",
+			       pid, boost_period);
+			node_p->sched_boost_enable = boost_enable;
+			node_p->sched_boost_period_ms = boost_period;
+			break;
+		}
+
+		break;
+	} while (0);
+	spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
+}
+
+static void rmnet_create_pid_bps_resp(struct rmnet_core_pid_bps_resp
+				      *pid_bps_resp_ptr)
+{
+	struct timespec time;
+	struct hlist_node *tmp;
+	struct rmnet_pid_node_s *node_p;
+	unsigned long ht_flags;
+	u64 tx_bytes_cur, byte_diff, time_diff_ns, tmp_bits;
+	int i;
+	u16 bkt;
+
+	(void)getnstimeofday(&time);
+	pid_bps_resp_ptr->timestamp = RMNET_GENL_SEC_TO_NSEC(time.tv_sec) +
+		   time.tv_nsec;
+
+	/* Use do-while so the spinlock is taken and released only once */
+	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
+	do {
+		i = 0;
+
+		hash_for_each_safe(RMNET_PID_STATS_HT, bkt, tmp,
+				   node_p, list) {
+			tx_bytes_cur = node_p->tx_bytes;
+			if (tx_bytes_cur <= node_p->tx_bytes_last_query) {
+				/* Don't send inactive PIDs to userspace */
+				/* TODO: can remove from hash table probably */
+				node_p->tx_bps = 0;
+				node_p->timestamp_last_query =
+					pid_bps_resp_ptr->timestamp;
+				node_p->sched_boost_remaining_ms = 0;
+				continue;
+			}
+
+			/* Compute bits per second */
+			byte_diff = (node_p->tx_bytes -
+				     node_p->tx_bytes_last_query);
+			time_diff_ns = (pid_bps_resp_ptr->timestamp -
+					node_p->timestamp_last_query);
+
+			tmp_bits = RMNET_GENL_BYTES_TO_BITS(byte_diff);
+			/* do_div() assigns the quotient to its first
+			 * argument in place and returns the remainder,
+			 * so a temporary copy (tmp_bits) is used here.
+			 */
+			do_div(tmp_bits, RMNET_GENL_NSEC_TO_SEC(time_diff_ns));
+			node_p->tx_bps = tmp_bits;
+
+			if (node_p->sched_boost_remaining_ms >=
+			    RMNET_GENL_SEC_TO_MSEC(RMNET_QUERY_PERIOD_SEC)) {
+				node_p->sched_boost_remaining_ms -=
+				RMNET_GENL_SEC_TO_MSEC(RMNET_QUERY_PERIOD_SEC);
+
+				rm_err("CORE_BOOST: enabling boost for pid %d\n"
+				       "sched boost remaining = %d ms",
+				       node_p->pid,
+				       node_p->sched_boost_remaining_ms);
+			} else {
+				node_p->sched_boost_remaining_ms = 0;
+			}
+
+			pid_bps_resp_ptr->list[i].pid = node_p->pid;
+			pid_bps_resp_ptr->list[i].tx_bps = node_p->tx_bps;
+			pid_bps_resp_ptr->list[i].boost_remaining_ms =
+					node_p->sched_boost_remaining_ms;
+
+			node_p->timestamp_last_query =
+				pid_bps_resp_ptr->timestamp;
+			node_p->tx_bytes_last_query = tx_bytes_cur;
+			i++;
+
+			/* Copy at most RMNET_CORE_GENL_MAX_PIDS active PIDs */
+			if (i >= RMNET_CORE_GENL_MAX_PIDS)
+				break;
+		}
+		break;
+	} while (0);
+	spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
+
+	pid_bps_resp_ptr->list_len = i;
+}
+
+int rmnet_core_genl_send_resp(struct genl_info *info,
+			      struct rmnet_core_pid_bps_resp *pid_bps_resp)
+{
+	struct sk_buff *skb;
+	void *msg_head;
+	int rc;
+
+	if (!info || !pid_bps_resp) {
+		rm_err("%s", "SHS_GNL: Invalid params\n");
+		goto out;
+	}
+
+	skb = genlmsg_new(sizeof(struct rmnet_core_pid_bps_resp), GFP_KERNEL);
+	if (!skb)
+		goto out;
+
+	msg_head = genlmsg_put(skb, 0, info->snd_seq + 1,
+			       &rmnet_core_genl_family,
+			       0, RMNET_CORE_GENL_CMD_PID_BPS_REQ);
+	if (!msg_head) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	rc = nla_put(skb, RMNET_CORE_GENL_ATTR_PID_BPS,
+		     sizeof(struct rmnet_core_pid_bps_resp),
+		     pid_bps_resp);
+	if (rc != 0)
+		goto out;
+
+	genlmsg_end(skb, msg_head);
+
+	rc = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
+	if (rc != 0)
+		goto out;
+
+	rm_err("%s", "SHS_GNL: Successfully sent pid/bytes info\n");
+	return RMNET_GENL_SUCCESS;
+
+out:
+	/* TODO: Need to free skb?? */
+	rm_err("%s", "SHS_GNL: FAILED to send pid/bytes info\n");
+	rmnet_core_userspace_connected = 0;
+	return RMNET_GENL_FAILURE;
+}
+
+int rmnet_core_genl_pid_bps_req_hdlr(struct sk_buff *skb_2,
+				     struct genl_info *info)
+{
+	struct nlattr *na;
+	struct rmnet_core_pid_bps_req  pid_bps_req;
+	struct rmnet_core_pid_bps_resp pid_bps_resp;
+	int is_req_valid = 0;
+
+	rm_err("CORE_GNL: %s connected = %d", __func__,
+	       rmnet_core_userspace_connected);
+
+	if (!info) {
+		rm_err("%s", "CORE_GNL: error - info is null");
+		pid_bps_resp.valid = 0;
+	} else {
+		na = info->attrs[RMNET_CORE_GENL_ATTR_PID_BPS];
+		if (na) {
+			if (nla_memcpy(&pid_bps_req, na,
+				       sizeof(pid_bps_req)) > 0) {
+				is_req_valid = 1;
+			} else {
+				rm_err("CORE_GNL: nla_memcpy failed %d\n",
+				       RMNET_CORE_GENL_ATTR_PID_BPS);
+			}
+		} else {
+			rm_err("CORE_GNL: no info->attrs %d\n",
+			       RMNET_CORE_GENL_ATTR_PID_BPS);
+		}
+	}
+
+	if (!rmnet_core_userspace_connected)
+		rmnet_core_userspace_connected = 1;
+
+	/* Copy the pid/byte list into the payload */
+	if (is_req_valid) {
+		memset(&pid_bps_resp, 0x0,
+		       sizeof(pid_bps_resp));
+		rmnet_create_pid_bps_resp(&pid_bps_resp);
+	}
+	pid_bps_resp.valid = 1;
+
+	rmnet_core_genl_send_resp(info, &pid_bps_resp);
+
+	return RMNET_GENL_SUCCESS;
+}
+
+int rmnet_core_genl_pid_boost_req_hdlr(struct sk_buff *skb_2,
+				       struct genl_info *info)
+{
+	struct nlattr *na;
+	struct rmnet_core_pid_boost_req pid_boost_req;
+	int is_req_valid = 0;
+	u16 boost_pid_cnt = RMNET_CORE_GENL_MAX_PIDS;
+	u16 i = 0;
+
+	rm_err("%s", "CORE_GNL: %s", __func__);
+
+	if (!info) {
+		rm_err("%s", "CORE_GNL: error - info is null");
+		return RMNET_GENL_FAILURE;
+	}
+
+	na = info->attrs[RMNET_CORE_GENL_ATTR_PID_BOOST];
+	if (na) {
+		if (nla_memcpy(&pid_boost_req, na, sizeof(pid_boost_req)) > 0) {
+			is_req_valid = 1;
+		} else {
+			rm_err("CORE_GNL: nla_memcpy failed %d\n",
+			       RMNET_CORE_GENL_ATTR_PID_BOOST);
+			return RMNET_GENL_FAILURE;
+		}
+	} else {
+		rm_err("CORE_GNL: no info->attrs %d\n",
+		       RMNET_CORE_GENL_ATTR_PID_BOOST);
+		return RMNET_GENL_FAILURE;
+	}
+
+	if (pid_boost_req.list_len < RMNET_CORE_GENL_MAX_PIDS)
+		boost_pid_cnt = pid_boost_req.list_len;
+
+	if (!pid_boost_req.valid)
+		boost_pid_cnt = 0;
+
+	for (i = 0; i < boost_pid_cnt; i++) {
+		if (pid_boost_req.list[i].boost_enabled) {
+			rmnet_boost_for_pid(pid_boost_req.list[i].pid, 1,
+					    pid_boost_req.list[i].boost_period);
+		}
+	}
+
+	return RMNET_GENL_SUCCESS;
+}
+
+/* register new rmnet core driver generic netlink family */
+int rmnet_core_genl_init(void)
+{
+	int ret;
+
+	ret = genl_register_family(&rmnet_core_genl_family);
+	if (ret != 0) {
+		rm_err("CORE_GNL: register family failed: %i", ret);
+		genl_unregister_family(&rmnet_core_genl_family);
+		return RMNET_GENL_FAILURE;
+	}
+
+	rm_err("CORE_GNL: successfully registered generic netlink family: %s",
+	       RMNET_CORE_GENL_FAMILY_NAME);
+
+	return RMNET_GENL_SUCCESS;
+}
+
+/* Unregister the generic netlink family */
+int rmnet_core_genl_deinit(void)
+{
+	int ret;
+
+	ret = genl_unregister_family(&rmnet_core_genl_family);
+	if (ret != 0)
+		rm_err("CORE_GNL: unregister family failed: %i\n", ret);
+
+	return RMNET_GENL_SUCCESS;
+}
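
[Editor's note] The bits-per-second math in rmnet_create_pid_bps_resp() above leans on do_div() semantics that are easy to misread: the macro divides a 64-bit lvalue in place, leaving the quotient in its first argument and returning the remainder. Hence the temporary copy in the code. A minimal sketch, with byte_diff and secs standing in for the function's locals:

	u64 bits = RMNET_GENL_BYTES_TO_BITS(byte_diff);	/* copy, not the original */
	u32 rem;

	rem = do_div(bits, secs);	/* bits is now the quotient, rem the remainder */
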
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_genl.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_genl.h
new file mode 100644
index 0000000..2a735fa
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_genl.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * RMNET Data Generic Netlink
+ *
+ */
+
+#ifndef _RMNET_GENL_H_
+#define _RMNET_GENL_H_
+
+#include <net/genetlink.h>
+
+#define RMNET_CORE_DEBUG 0
+
+#define rm_err(fmt, ...)  \
+	do { if (RMNET_CORE_DEBUG) pr_err(fmt, __VA_ARGS__); } while (0)
+
+/* Generic Netlink Definitions */
+#define RMNET_CORE_GENL_VERSION 1
+#define RMNET_CORE_GENL_FAMILY_NAME "RMNET_CORE"
+
+#define RMNET_CORE_GENL_MAX_PIDS 32
+
+#define RMNET_GENL_SUCCESS (0)
+#define RMNET_GENL_FAILURE (-1)
+
+extern int rmnet_core_userspace_connected;
+
+enum {
+	RMNET_CORE_GENL_CMD_UNSPEC,
+	RMNET_CORE_GENL_CMD_PID_BPS_REQ,
+	RMNET_CORE_GENL_CMD_PID_BOOST_REQ,
+	__RMNET_CORE_GENL_CMD_MAX,
+};
+
+enum {
+	RMNET_CORE_GENL_ATTR_UNSPEC,
+	RMNET_CORE_GENL_ATTR_STR,
+	RMNET_CORE_GENL_ATTR_INT,
+	RMNET_CORE_GENL_ATTR_PID_BPS,
+	RMNET_CORE_GENL_ATTR_PID_BOOST,
+	__RMNET_CORE_GENL_ATTR_MAX,
+};
+
+#define RMNET_CORE_GENL_ATTR_MAX (__RMNET_CORE_GENL_ATTR_MAX - 1)
+
+struct rmnet_core_pid_bps_info {
+	u64 tx_bps;
+	u32 pid;
+	u32 boost_remaining_ms;
+};
+
+struct rmnet_core_pid_boost_info {
+	u32 boost_enabled;
+	/* Boost period in ms */
+	u32 boost_period;
+	u32 pid;
+};
+
+struct rmnet_core_pid_bps_req {
+	struct rmnet_core_pid_bps_info list[RMNET_CORE_GENL_MAX_PIDS];
+	u64 timestamp;
+	u16 list_len;
+	u8 valid;
+};
+
+struct rmnet_core_pid_bps_resp {
+	struct rmnet_core_pid_bps_info list[RMNET_CORE_GENL_MAX_PIDS];
+	u64 timestamp;
+	u16 list_len;
+	u8 valid;
+};
+
+struct rmnet_core_pid_boost_req {
+	struct rmnet_core_pid_boost_info list[RMNET_CORE_GENL_MAX_PIDS];
+	u64 timestamp;
+	u16 list_len;
+	u8 valid;
+};
+
+/* Function Prototypes */
+int rmnet_core_genl_pid_bps_req_hdlr(struct sk_buff *skb_2,
+				     struct genl_info *info);
+
+int rmnet_core_genl_pid_boost_req_hdlr(struct sk_buff *skb_2,
+				       struct genl_info *info);
+
+/* Called by vnd select queue */
+void rmnet_update_pid_and_check_boost(pid_t pid, unsigned int len,
+				      int *boost_enable, u64 *boost_period);
+
+int rmnet_core_genl_init(void);
+
+int rmnet_core_genl_deinit(void);
+
+#endif /* _RMNET_GENL_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 694e596..da5e47c 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -716,6 +716,7 @@ __rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
 	struct rmnet_priv *priv = netdev_priv(coal_skb->dev);
 	__sum16 *check = NULL;
 	u32 alloc_len;
+	bool zero_csum = false;
 
 	/* We can avoid copying the data if the SKB we got from the lower-level
 	 * drivers was nonlinear.
@@ -747,6 +748,8 @@ __rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
 
 		uh->len = htons(skbn->len);
 		check = &uh->check;
+		if (coal_meta->ip_proto == 4 && !uh->check)
+			zero_csum = true;
 	}
 
 	/* Push IP header and update necessary fields */
@@ -767,7 +770,7 @@ __rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
 	}
 
 	/* Handle checksum status */
-	if (likely(csum_valid)) {
+	if (likely(csum_valid) || zero_csum) {
 		/* Set the partial checksum information */
 		rmnet_map_partial_csum(skbn, coal_meta);
 	} else if (check) {
@@ -865,6 +868,7 @@ static void rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
 	u8 pkt, total_pkt = 0;
 	u8 nlo;
 	bool gro = coal_skb->dev->features & NETIF_F_GRO_HW;
+	bool zero_csum = false;
 
 	memset(&coal_meta, 0, sizeof(coal_meta));
 
@@ -926,12 +930,15 @@ static void rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
 		uh = (struct udphdr *)((u8 *)iph + coal_meta.ip_len);
 		coal_meta.trans_len = sizeof(*uh);
 		coal_meta.trans_header = uh;
+		/* Check for v4 zero checksum */
+		if (coal_meta.ip_proto == 4 && !uh->check)
+			zero_csum = true;
 	} else {
 		priv->stats.coal.coal_trans_invalid++;
 		return;
 	}
 
-	if (rmnet_map_v5_csum_buggy(coal_hdr)) {
+	if (rmnet_map_v5_csum_buggy(coal_hdr) && !zero_csum) {
 		rmnet_map_move_headers(coal_skb);
 		/* Mark as valid if it checks out */
 		if (rmnet_map_validate_csum(coal_skb, &coal_meta))
@@ -1270,6 +1277,7 @@ static void rmnet_free_agg_pages(struct rmnet_port *port)
 	struct rmnet_agg_page *agg_page, *idx;
 
 	list_for_each_entry_safe(agg_page, idx, &port->agg_list, list) {
+		list_del(&agg_page->list);
 		put_page(agg_page->page);
 		kfree(agg_page);
 	}
@@ -1329,6 +1337,7 @@ static struct rmnet_agg_page *__rmnet_alloc_agg_pages(struct rmnet_port *port)
 	}
 
 	agg_page->page = page;
+	INIT_LIST_HEAD(&agg_page->list);
 
 	return agg_page;
 }
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index f00f1ce..59aa361 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -14,6 +14,7 @@
 #include "rmnet_private.h"
 #include "rmnet_map.h"
 #include "rmnet_vnd.h"
+#include "rmnet_genl.h"
 #include "rmnet_trace.h"
 
 #include <soc/qcom/qmi_rmnet.h>
@@ -158,12 +159,24 @@ static u16 rmnet_vnd_select_queue(struct net_device *dev,
 				  struct net_device *sb_dev,
 				  select_queue_fallback_t fallback)
 {
+	u64 boost_period = 0;
+	int boost_trigger = 0;
 	struct rmnet_priv *priv = netdev_priv(dev);
 	int txq = 0;
 
 	if (priv->real_dev)
 		txq = qmi_rmnet_get_queue(dev, skb);
 
+	if (rmnet_core_userspace_connected) {
+		rmnet_update_pid_and_check_boost(task_pid_nr(current),
+						 skb->len,
+						 &boost_trigger,
+						 &boost_period);
+
+		if (boost_trigger)
+			set_task_boost(1, boost_period);
+	}
+
 	return (txq < dev->real_num_tx_queues) ? txq : 0;
 }
 
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 5f092bb..5462d2e 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Renesas Ethernet AVB device driver
  *
- * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ * Copyright (C) 2014-2019 Renesas Electronics Corporation
  * Copyright (C) 2015 Renesas Solutions Corp.
  * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
@@ -514,7 +514,10 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
 			kfree(ts_skb);
 			if (tag == tfa_tag) {
 				skb_tstamp_tx(skb, &shhwtstamps);
+				dev_consume_skb_any(skb);
 				break;
+			} else {
+				dev_kfree_skb_any(skb);
 			}
 		}
 		ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
@@ -1556,7 +1559,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 					 DMA_TO_DEVICE);
 			goto unmap;
 		}
-		ts_skb->skb = skb;
+		ts_skb->skb = skb_get(skb);
 		ts_skb->tag = priv->ts_skb_tag++;
 		priv->ts_skb_tag &= 0x3ff;
 		list_add_tail(&ts_skb->list, &priv->ts_skb_list);
@@ -1685,6 +1688,7 @@ static int ravb_close(struct net_device *ndev)
 	/* Clear the timestamp list */
 	list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
 		list_del(&ts_skb->list);
+		kfree_skb(ts_skb->skb);
 		kfree(ts_skb);
 	}
 
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 696037d..ad557f4 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -793,15 +793,16 @@ static int sgiseeq_probe(struct platform_device *pdev)
 		printk(KERN_ERR "Sgiseeq: Cannot register net device, "
 		       "aborting.\n");
 		err = -ENODEV;
-		goto err_out_free_page;
+		goto err_out_free_attrs;
 	}
 
 	printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
 
 	return 0;
 
-err_out_free_page:
-	free_page((unsigned long) sp->srings);
+err_out_free_attrs:
+	dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
+		       sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
 err_out_free_dev:
 	free_netdev(dev);
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 3b174ea..f45df6d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1203,10 +1203,8 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
 	int ret;
 	struct device *dev = &bsp_priv->pdev->dev;
 
-	if (!ldo) {
-		dev_err(dev, "no regulator found\n");
-		return -1;
-	}
+	if (!ldo)
+		return 0;
 
 	if (enable) {
 		ret = regulator_enable(ldo);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index d0e6e15..48cf5e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -88,6 +88,8 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
 	u32 value;
 
 	base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
+	if (queue >= 4)
+		queue -= 4;
 
 	value = readl(ioaddr + base_register);
 
@@ -105,6 +107,8 @@ static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
 	u32 value;
 
 	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
+	if (queue >= 4)
+		queue -= 4;
 
 	value = readl(ioaddr + base_register);
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index d182f82..870302a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -106,6 +106,8 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
 	u32 value, reg;
 
 	reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
+	if (queue >= 4)
+		queue -= 4;
 
 	value = readl(ioaddr + reg);
 	value &= ~XGMAC_PSRQ(queue);
@@ -169,6 +171,8 @@ static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
 	u32 value, reg;
 
 	reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
+	if (queue >= 4)
+		queue -= 4;
 
 	value = readl(ioaddr + reg);
 	value &= ~XGMAC_QxMDMACH(queue);
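Both the dwmac4 and dwxgmac2 fixes above follow the same pattern: four queue fields are packed into each 32-bit register, so once the upper register has been selected the queue index must be rebased into the 0-3 range before the field mask is computed. A minimal stand-alone sketch of that rebase, with made-up field names and widths rather than the real register layout:

    #include <stdint.h>

    #define FIELDS_PER_REG  4
    #define FIELD_WIDTH     8   /* illustrative: 8 bits per queue field */

    /* Write 'prio' into the field for 'queue' inside a packed register
     * value. 'queue % FIELDS_PER_REG' is the same rebase the patches
     * perform with 'queue -= 4' after choosing the second register. */
    static uint32_t set_queue_prio(uint32_t regval, unsigned int queue,
                                   uint32_t prio)
    {
        unsigned int field = queue % FIELDS_PER_REG;
        uint32_t mask = 0xffu << (field * FIELD_WIDTH);

        regval &= ~mask;
        regval |= (prio << (field * FIELD_WIDTH)) & mask;
        return regval;
    }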
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0101eba..014fe93 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4522,8 +4522,10 @@ int stmmac_suspend(struct device *dev)
 		stmmac_mac_set(priv, priv->ioaddr, false);
 		pinctrl_pm_select_sleep_state(priv->device);
 		/* Disable clock in case of PWM is off */
-		clk_disable(priv->plat->pclk);
-		clk_disable(priv->plat->stmmac_clk);
+		if (priv->plat->clk_ptp_ref)
+			clk_disable_unprepare(priv->plat->clk_ptp_ref);
+		clk_disable_unprepare(priv->plat->pclk);
+		clk_disable_unprepare(priv->plat->stmmac_clk);
 	}
 	mutex_unlock(&priv->lock);
 
@@ -4588,8 +4590,10 @@ int stmmac_resume(struct device *dev)
 	} else {
 		pinctrl_pm_select_default_state(priv->device);
 		/* enable the clk previously disabled */
-		clk_enable(priv->plat->stmmac_clk);
-		clk_enable(priv->plat->pclk);
+		clk_prepare_enable(priv->plat->stmmac_clk);
+		clk_prepare_enable(priv->plat->pclk);
+		if (priv->plat->clk_ptp_ref)
+			clk_prepare_enable(priv->plat->clk_ptp_ref);
 		/* reset the phy so that it's ready */
 		if (priv->mii)
 			stmmac_mdio_reset(priv->mii);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 58ea18a..37c0bc6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -37,7 +37,7 @@ static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
 		entry = &priv->tc_entries[i];
 		if (!entry->in_use && !first && free)
 			first = entry;
-		if (entry->handle == loc && !free)
+		if ((entry->handle == loc) && !free && !entry->is_frag)
 			dup = entry;
 	}
 
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index cce9c9e..9146068 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1497,7 +1497,7 @@ tc35815_rx(struct net_device *dev, int limit)
 			pci_unmap_single(lp->pci_dev,
 					 lp->rx_skbs[cur_bd].skb_dma,
 					 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
-			if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
+			if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
 				memmove(skb->data, skb->data - NET_IP_ALIGN,
 					pkt_len);
 			data = skb_put(skb, pkt_len);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index edcd1e6..f076050 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -383,9 +383,10 @@ tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
 static void tsi108_stat_carry(struct net_device *dev)
 {
 	struct tsi108_prv_data *data = netdev_priv(dev);
+	unsigned long flags;
 	u32 carry1, carry2;
 
-	spin_lock_irq(&data->misclock);
+	spin_lock_irqsave(&data->misclock, flags);
 
 	carry1 = TSI_READ(TSI108_STAT_CARRY1);
 	carry2 = TSI_READ(TSI108_STAT_CARRY2);
@@ -453,7 +454,7 @@ static void tsi108_stat_carry(struct net_device *dev)
 			      TSI108_STAT_TXPAUSEDROP_CARRY,
 			      &data->tx_pause_drop);
 
-	spin_unlock_irq(&data->misclock);
+	spin_unlock_irqrestore(&data->misclock, flags);
 }
 
 /* Read a stat counter atomically with respect to carries.
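The tsi108 change swaps spin_lock_irq() for spin_lock_irqsave() because the stat-carry path can be reached from contexts where interrupts are already disabled; unconditionally re-enabling them on unlock would be wrong. A hedged, generic sketch of the pattern (kernel-style, names illustrative):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(stats_lock);

    /* Safe regardless of the caller's IRQ state: the previous state is
     * captured in 'flags' and restored on unlock, instead of being
     * forced back on as spin_unlock_irq() would do. */
    static void stats_bump(unsigned long *counter)
    {
        unsigned long flags;

        spin_lock_irqsave(&stats_lock, flags);
        (*counter)++;
        spin_unlock_irqrestore(&stats_lock, flags);
    }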
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index cc60ef9..6f6c0db 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1248,12 +1248,15 @@ static void netvsc_get_stats64(struct net_device *net,
 			       struct rtnl_link_stats64 *t)
 {
 	struct net_device_context *ndev_ctx = netdev_priv(net);
-	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
+	struct netvsc_device *nvdev;
 	struct netvsc_vf_pcpu_stats vf_tot;
 	int i;
 
+	rcu_read_lock();
+
+	nvdev = rcu_dereference(ndev_ctx->nvdev);
 	if (!nvdev)
-		return;
+		goto out;
 
 	netdev_stats_to_stats64(t, &net->stats);
 
@@ -1292,6 +1295,8 @@ static void netvsc_get_stats64(struct net_device *net,
 		t->rx_packets	+= packets;
 		t->multicast	+= multicast;
 	}
+out:
+	rcu_read_unlock();
 }
 
 static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
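The netvsc fix illustrates the standard RCU read-side contract: rcu_dereference() is only legal inside an RCU read-side critical section, while rcu_dereference_rtnl() instead relies on holding RTNL, which the stats path does not guarantee. A minimal reader-side sketch with hypothetical types and a hypothetical consumer:

    #include <linux/rcupdate.h>

    struct item;
    void consume(struct item *it);          /* hypothetical consumer */

    struct ctx {
        struct item __rcu *item;
    };

    /* Reader side: the pointer and everything reached through it stay
     * valid until rcu_read_unlock(), even if a writer swaps the pointer
     * concurrently. */
    static void reader(struct ctx *c)
    {
        struct item *it;

        rcu_read_lock();
        it = rcu_dereference(c->item);
        if (it)
            consume(it);
        rcu_read_unlock();
    }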
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 4f684cb..078027b 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -1140,10 +1140,11 @@ static void atusb_disconnect(struct usb_interface *interface)
 
 	ieee802154_unregister_hw(atusb->hw);
 
+	usb_put_dev(atusb->usb_dev);
+
 	ieee802154_free_hw(atusb->hw);
 
 	usb_set_intfdata(interface, NULL);
-	usb_put_dev(atusb->usb_dev);
 
 	pr_debug("%s done\n", __func__);
 }
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index b2ff903..38a4165 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -3151,12 +3151,12 @@ static int ca8210_probe(struct spi_device *spi_device)
 		goto error;
 	}
 
+	priv->spi->dev.platform_data = pdata;
 	ret = ca8210_get_platform_data(priv->spi, pdata);
 	if (ret) {
 		dev_crit(&spi_device->dev, "ca8210_get_platform_data failed\n");
 		goto error;
 	}
-	priv->spi->dev.platform_data = pdata;
 
 	ret = ca8210_dev_com_init(priv);
 	if (ret) {
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index f1ed174..be1f1a8 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -821,7 +821,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
 		err = hwsim_subscribe_all_others(phy);
 		if (err < 0) {
 			mutex_unlock(&hwsim_phys_lock);
-			goto err_reg;
+			goto err_subscribe;
 		}
 	}
 	list_add_tail(&phy->list, &hwsim_phys);
@@ -831,6 +831,8 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
 
 	return idx;
 
+err_subscribe:
+	ieee802154_unregister_hw(phy->hw);
 err_reg:
 	kfree(pib);
 err_pib:
@@ -920,9 +922,9 @@ static __init int hwsim_init_module(void)
 	return 0;
 
 platform_drv:
-	genl_unregister_family(&hwsim_genl_family);
-platform_dev:
 	platform_device_unregister(mac802154hwsim_dev);
+platform_dev:
+	genl_unregister_family(&hwsim_genl_family);
 	return rc;
 }
 
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 2c97135..0dc92d2 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1238,6 +1238,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
 		macsec_rxsa_put(rx_sa);
 	macsec_rxsc_put(rx_sc);
 
+	skb_orphan(skb);
 	ret = gro_cells_receive(&macsec->gro_cells, skb);
 	if (ret == NET_RX_SUCCESS)
 		count_rx(dev, skb->len);
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 2b1e336..bf4070e 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -110,14 +110,17 @@ static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
 
 static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
 {
+	u16 lb_dis = BIT(1);
+
 	if (disable)
-		ns_exp_write(phydev, 0x1c0, ns_exp_read(phydev, 0x1c0) | 1);
+		ns_exp_write(phydev, 0x1c0,
+			     ns_exp_read(phydev, 0x1c0) | lb_dis);
 	else
 		ns_exp_write(phydev, 0x1c0,
-			     ns_exp_read(phydev, 0x1c0) & 0xfffe);
+			     ns_exp_read(phydev, 0x1c0) & ~lb_dis);
 
 	pr_debug("10BASE-T HDX loopback %s\n",
-		 (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
+		 (ns_exp_read(phydev, 0x1c0) & lb_dis) ? "off" : "on");
 }
 
 static int ns_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index 491efc1..7278eca 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -58,8 +58,9 @@ void phy_led_trigger_change_speed(struct phy_device *phy)
 		if (!phy->last_triggered)
 			led_trigger_event(&phy->led_link_trigger->trigger,
 					  LED_FULL);
+		else
+			led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
 
-		led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
 		led_trigger_event(&plt->trigger, LED_FULL);
 		phy->last_triggered = plt;
 	}
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 2e8056d..723611a 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -380,8 +380,8 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat
  *  Local device  Link partner
  *  Pause AsymDir Pause AsymDir Result
  *    1     X       1     X     TX+RX
- *    0     1       1     1     RX
- *    1     1       0     1     TX
+ *    0     1       1     1     TX
+ *    1     1       0     1     RX
  */
 static void phylink_resolve_flow(struct phylink *pl,
 				 struct phylink_link_state *state)
@@ -402,7 +402,7 @@ static void phylink_resolve_flow(struct phylink *pl,
 			new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
 		else if (pause & MLO_PAUSE_ASYM)
 			new_pause = state->pause & MLO_PAUSE_SYM ?
-				 MLO_PAUSE_RX : MLO_PAUSE_TX;
+				 MLO_PAUSE_TX : MLO_PAUSE_RX;
 	} else {
 		new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK;
 	}
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 8b1ef1b..afaa7d1 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1419,6 +1419,8 @@ static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
 			netif_wake_queue(ppp->dev);
 		else
 			netif_stop_queue(ppp->dev);
+	} else {
+		kfree_skb(skb);
 	}
 	ppp_xmit_unlock(ppp);
 }
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index dc30f11..3feb49b 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1011,6 +1011,8 @@ static void __team_compute_features(struct team *team)
 
 	team->dev->vlan_features = vlan_features;
 	team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+				     NETIF_F_HW_VLAN_CTAG_TX |
+				     NETIF_F_HW_VLAN_STAG_TX |
 				     NETIF_F_GSO_UDP_L4;
 	team->dev->hard_header_len = max_hard_header_len;
 
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2c80972..37edf7a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -801,7 +801,8 @@ static void tun_detach_all(struct net_device *dev)
 }
 
 static int tun_attach(struct tun_struct *tun, struct file *file,
-		      bool skip_filter, bool napi, bool napi_frags)
+		      bool skip_filter, bool napi, bool napi_frags,
+		      bool publish_tun)
 {
 	struct tun_file *tfile = file->private_data;
 	struct net_device *dev = tun->dev;
@@ -881,7 +882,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
 	 * initialized tfile; otherwise we risk using half-initialized
 	 * object.
 	 */
-	rcu_assign_pointer(tfile->tun, tun);
+	if (publish_tun)
+		rcu_assign_pointer(tfile->tun, tun);
 	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
 	tun->numqueues++;
 	tun_set_real_num_queues(tun);
@@ -2557,7 +2559,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
 		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
 				 ifr->ifr_flags & IFF_NAPI,
-				 ifr->ifr_flags & IFF_NAPI_FRAGS);
+				 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
 		if (err < 0)
 			return err;
 
@@ -2656,13 +2658,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
 		INIT_LIST_HEAD(&tun->disabled);
 		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
-				 ifr->ifr_flags & IFF_NAPI_FRAGS);
+				 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
 		if (err < 0)
 			goto err_free_flow;
 
 		err = register_netdevice(tun->dev);
 		if (err < 0)
 			goto err_detach;
+		/* free_netdev() won't check the refcnt; to avoid a race
+		 * with dev_put() we must publish tun only after registration.
+		 */
+		rcu_assign_pointer(tfile->tun, tun);
 	}
 
 	netif_carrier_on(tun->dev);
@@ -2806,7 +2812,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
 		if (ret < 0)
 			goto unlock;
 		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
-				 tun->flags & IFF_NAPI_FRAGS);
+				 tun->flags & IFF_NAPI_FRAGS, true);
 	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
 		tun = rtnl_dereference(tfile->tun);
 		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
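The tun change is the writer-side counterpart of the RCU rule above: an object must be fully set up (here, the netdevice successfully registered) before it is published with rcu_assign_pointer(), since publication is what makes it reachable by lock-free readers. A generic sketch under the same hypothetical types, with an illustrative initializer:

    #include <linux/slab.h>
    #include <linux/rcupdate.h>

    struct item { int val; };
    struct ctx { struct item __rcu *item; };

    void init_item(struct item *it);        /* hypothetical initializer */

    /* Writer side: initialize everything first, publish last.
     * rcu_assign_pointer() has release semantics, so a reader that sees
     * the pointer also sees the completed initialization. */
    static int create_and_publish(struct ctx *c)
    {
        struct item *it = kzalloc(sizeof(*it), GFP_KERNEL);

        if (!it)
            return -ENOMEM;
        init_item(it);
        rcu_assign_pointer(c->item, it);
        return 0;
    }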
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 5c42cf8..85fba64 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -221,9 +221,16 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
 		goto bad_desc;
 	}
 skip:
-	if (	rndis &&
-		header.usb_cdc_acm_descriptor &&
-		header.usb_cdc_acm_descriptor->bmCapabilities) {
+	/* Communication class functions with bmCapabilities are not
+	 * RNDIS.  But some Wireless class RNDIS functions use
+	 * bmCapabilities for their own purpose. The failsafe is
+	 * therefore applied only to Communication class RNDIS
+	 * functions.  The rndis test is redundant, but a cheap
+	 * optimization.
+	 */
+	if (rndis && is_rndis(&intf->cur_altsetting->desc) &&
+	    header.usb_cdc_acm_descriptor &&
+	    header.usb_cdc_acm_descriptor->bmCapabilities) {
 			dev_dbg(&intf->dev,
 				"ACM capabilities %02x, not really RNDIS?\n",
 				header.usb_cdc_acm_descriptor->bmCapabilities);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 1eaec64..f53e3e4 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -681,8 +681,12 @@ cdc_ncm_find_endpoints(struct usbnet *dev, struct usb_interface *intf)
 	u8 ep;
 
 	for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
-
 		e = intf->cur_altsetting->endpoint + ep;
+
+		/* ignore endpoints which cannot transfer data */
+		if (!usb_endpoint_maxp(&e->desc))
+			continue;
+
 		switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
 		case USB_ENDPOINT_XFER_INT:
 			if (usb_endpoint_dir_in(&e->desc)) {
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 947bea8..dfbdea2 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -175,7 +175,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
 	}
 	if (!timeout) {
 		dev_err(&udev->dev, "firmware not ready in time\n");
-		return -ETIMEDOUT;
+		ret = -ETIMEDOUT;
+		goto err;
 	}
 
 	/* enable ethernet mode (?) */
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index d6916f7..5251c5f 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2634,14 +2634,18 @@ static struct hso_device *hso_create_bulk_serial_device(
 		 */
 		if (serial->tiocmget) {
 			tiocmget = serial->tiocmget;
+			tiocmget->endp = hso_get_ep(interface,
+						    USB_ENDPOINT_XFER_INT,
+						    USB_DIR_IN);
+			if (!tiocmget->endp) {
+				dev_err(&interface->dev, "Failed to find INT IN ep\n");
+				goto exit;
+			}
+
 			tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
 			if (tiocmget->urb) {
 				mutex_init(&tiocmget->mutex);
 				init_waitqueue_head(&tiocmget->waitq);
-				tiocmget->endp = hso_get_ep(
-					interface,
-					USB_ENDPOINT_XFER_INT,
-					USB_DIR_IN);
 			} else
 				hso_free_tiomget(serial);
 		}
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index bd2ba36..0cc6993 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -117,16 +117,16 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
 	status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1),
 					 usb_buf, 24);
 	if (status != 0)
-		return status;
+		goto out;
 
 	memcpy(usb_buf, init_msg_2, 12);
 	status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2),
 					 usb_buf, 28);
 	if (status != 0)
-		return status;
+		goto out;
 
 	memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
-
+out:
 	kfree(usb_buf);
 	return status;
 }
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index d2f94ea..267978e 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3788,7 +3788,7 @@ static int lan78xx_probe(struct usb_interface *intf,
 	ret = register_netdev(netdev);
 	if (ret != 0) {
 		netif_err(dev, probe, netdev, "couldn't register the device\n");
-		goto out3;
+		goto out4;
 	}
 
 	usb_set_intfdata(intf, dev);
@@ -3803,12 +3803,14 @@ static int lan78xx_probe(struct usb_interface *intf,
 
 	ret = lan78xx_phy_init(dev);
 	if (ret < 0)
-		goto out4;
+		goto out5;
 
 	return 0;
 
-out4:
+out5:
 	unregister_netdev(netdev);
+out4:
+	usb_free_urb(dev->urb_intr);
 out3:
 	lan78xx_unbind(dev, intf);
 out2:
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index f4247b2..b7a0df9 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -285,7 +285,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
 static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
 {
 	int i;
-	__u8 tmp;
+	__u8 tmp = 0;
 	__le16 retdatai;
 	int ret;
 
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 128c8a3..6f517e6 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1231,6 +1231,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x2001, 0x7e35, 4)},	/* D-Link DWM-222 */
 	{QMI_FIXED_INTF(0x2020, 0x2031, 4)},	/* Olicard 600 */
 	{QMI_FIXED_INTF(0x2020, 0x2033, 4)},	/* BroadMobi BM806U */
+	{QMI_FIXED_INTF(0x2020, 0x2060, 4)},	/* BroadMobi BM818 */
 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
@@ -1285,6 +1286,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)},	/* Cinterion PHxx,PXxx (2 RmNet) */
 	{QMI_FIXED_INTF(0x1e2d, 0x0082, 5)},	/* Cinterion PHxx,PXxx (2 RmNet) */
 	{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)},	/* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
+	{QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)},	/* Cinterion CLS8 */
 	{QMI_FIXED_INTF(0x413c, 0x81a2, 8)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81a3, 8)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81a4, 8)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index f1b5201..a291e5f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -788,8 +788,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
 	ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
 			      RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
 			      value, index, tmp, size, 500);
+	if (ret < 0)
+		memset(data, 0xff, size);
+	else
+		memcpy(data, tmp, size);
 
-	memcpy(data, tmp, size);
 	kfree(tmp);
 
 	return ret;
@@ -4471,10 +4474,9 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
 	struct r8152 *tp = usb_get_intfdata(intf);
 
 	clear_bit(SELECTIVE_SUSPEND, &tp->flags);
-	mutex_lock(&tp->control);
 	tp->rtl_ops.init(tp);
 	queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
-	mutex_unlock(&tp->control);
+	set_ethernet_addr(tp);
 	return rtl8152_resume(intf);
 }
 
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 1085497..84b354f 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -112,6 +112,11 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
 			int				intr = 0;
 
 			e = alt->endpoint + ep;
+
+			/* ignore endpoints which cannot transfer data */
+			if (!usb_endpoint_maxp(&e->desc))
+				continue;
+
 			switch (e->desc.bmAttributes) {
 			case USB_ENDPOINT_XFER_INT:
 				if (!usb_endpoint_dir_in(&e->desc))
@@ -351,6 +356,8 @@ void usbnet_update_max_qlen(struct usbnet *dev)
 {
 	enum usb_device_speed speed = dev->udev->speed;
 
+	if (!dev->rx_urb_size || !dev->hard_mtu)
+		goto insanity;
 	switch (speed) {
 	case USB_SPEED_HIGH:
 		dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
@@ -367,6 +374,7 @@ void usbnet_update_max_qlen(struct usbnet *dev)
 		dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
 		break;
 	default:
+insanity:
 		dev->rx_qlen = dev->tx_qlen = 4;
 	}
 }
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index e9fc168..489cba9 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -351,13 +351,15 @@ int i2400m_barker_db_init(const char *_options)
 			}
 			result = i2400m_barker_db_add(barker);
 			if (result < 0)
-				goto error_add;
+				goto error_parse_add;
 		}
 		kfree(options_orig);
 	}
 	return 0;
 
+error_parse_add:
 error_parse:
+	kfree(options_orig);
 error_add:
 	kfree(i2400m_barker_db);
 	return result;
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index ba15afd..05d8c8e 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -18,6 +18,22 @@
 #define WIL_BRD_SUFFIX_CN "CN"
 #define WIL_BRD_SUFFIX_FCC "FCC"
 
+#define WIL_EDMG_CHANNEL_9_SUBCHANNELS	(BIT(0) | BIT(1))
+#define WIL_EDMG_CHANNEL_10_SUBCHANNELS	(BIT(1) | BIT(2))
+#define WIL_EDMG_CHANNEL_11_SUBCHANNELS	(BIT(2) | BIT(3))
+
+/* WIL_EDMG_BW_CONFIGURATION defines the allowed channel bandwidth
+ * configurations as defined by IEEE 802.11 section 9.4.2.251, Table 13.
+ * The value 5 allows CB1 and CB2 of adjacent channels.
+ */
+#define WIL_EDMG_BW_CONFIGURATION 5
+
+/* WIL_EDMG_CHANNELS is a bitmap that indicates the 2.16 GHz channel(s) that
+ * are allowed to be used for EDMG transmissions in the BSS as defined by
+ * IEEE 802.11 section 9.4.2.251.
+ */
+#define WIL_EDMG_CHANNELS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
 bool disable_ap_sme;
 module_param(disable_ap_sme, bool, 0444);
 MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME");
@@ -56,6 +72,39 @@ static struct ieee80211_channel wil_60ghz_channels[] = {
 	CHAN60G(4, 0),
 };
 
+/* Rx channel bonding mode */
+enum wil_rx_cb_mode {
+	WIL_RX_CB_MODE_DMG,
+	WIL_RX_CB_MODE_EDMG,
+	WIL_RX_CB_MODE_WIDE,
+};
+
+static int wil_rx_cb_mode_to_n_bonded(u8 cb_mode)
+{
+	switch (cb_mode) {
+	case WIL_RX_CB_MODE_DMG:
+	case WIL_RX_CB_MODE_EDMG:
+		return 1;
+	case WIL_RX_CB_MODE_WIDE:
+		return 2;
+	default:
+		return 1;
+	}
+}
+
+static int wil_tx_cb_mode_to_n_bonded(u8 cb_mode)
+{
+	switch (cb_mode) {
+	case WMI_TX_MODE_DMG:
+	case WMI_TX_MODE_EDMG_CB1:
+		return 1;
+	case WMI_TX_MODE_EDMG_CB2:
+		return 2;
+	default:
+		return 1;
+	}
+}
+
 static void
 wil_memdup_ie(u8 **pdst, size_t *pdst_len, const u8 *src, size_t src_len)
 {
@@ -167,6 +216,13 @@ void update_supported_bands(struct wil6210_priv *wil)
 
 	wiphy->bands[NL80211_BAND_60GHZ]->n_channels =
 						wil_num_supported_channels(wil);
+
+	if (test_bit(WMI_FW_CAPABILITY_CHANNEL_BONDING, wil->fw_capabilities)) {
+		wiphy->bands[NL80211_BAND_60GHZ]->edmg_cap.channels =
+							WIL_EDMG_CHANNELS;
+		wiphy->bands[NL80211_BAND_60GHZ]->edmg_cap.bw_config =
+						      WIL_EDMG_BW_CONFIGURATION;
+	}
 }
 
 /* Vendor id to be used in vendor specific command and events
@@ -593,6 +649,7 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
 	} __packed reply;
 	struct wil_net_stats *stats = &wil->sta[cid].stats;
 	int rc;
+	u8 txflag = RATE_INFO_FLAGS_DMG;
 
 	memset(&reply, 0, sizeof(reply));
 
@@ -606,7 +663,8 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
 		    "  MCS %d TSF 0x%016llx\n"
 		    "  BF status 0x%08x RSSI %d SQI %d%%\n"
 		    "  Tx Tpt %d goodput %d Rx goodput %d\n"
-		    "  Sectors(rx:tx) my %d:%d peer %d:%d\n""}\n",
+		    "  Sectors(rx:tx) my %d:%d peer %d:%d\n"
+		    "  Tx mode %d}\n",
 		    cid, vif->mid, le16_to_cpu(reply.evt.bf_mcs),
 		    le64_to_cpu(reply.evt.tsf), reply.evt.status,
 		    reply.evt.rssi,
@@ -617,7 +675,8 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
 		    le16_to_cpu(reply.evt.my_rx_sector),
 		    le16_to_cpu(reply.evt.my_tx_sector),
 		    le16_to_cpu(reply.evt.other_rx_sector),
-		    le16_to_cpu(reply.evt.other_tx_sector));
+		    le16_to_cpu(reply.evt.other_tx_sector),
+		    reply.evt.tx_mode);
 
 	sinfo->generation = wil->sinfo_gen;
 
@@ -630,9 +689,16 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
 			BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) |
 			BIT_ULL(NL80211_STA_INFO_TX_FAILED);
 
-	sinfo->txrate.flags = RATE_INFO_FLAGS_60G;
+	if (wil->use_enhanced_dma_hw && reply.evt.tx_mode != WMI_TX_MODE_DMG)
+		txflag = RATE_INFO_FLAGS_EDMG;
+
+	sinfo->txrate.flags = txflag;
 	sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
 	sinfo->rxrate.mcs = stats->last_mcs_rx;
+	sinfo->txrate.n_bonded_ch =
+				  wil_tx_cb_mode_to_n_bonded(reply.evt.tx_mode);
+	sinfo->rxrate.n_bonded_ch =
+			     wil_rx_cb_mode_to_n_bonded(stats->last_cb_mode_rx);
 	sinfo->rx_bytes = stats->rx_bytes;
 	sinfo->rx_packets = stats->rx_packets;
 	sinfo->rx_dropped_misc = stats->rx_dropped;
@@ -1310,6 +1376,33 @@ static int wil_ft_connect(struct wiphy *wiphy,
 	return rc;
 }
 
+static int wil_get_wmi_edmg_channel(struct wil6210_priv *wil, u8 edmg_bw_config,
+				    u8 edmg_channels, u8 *wmi_ch)
+{
+	if (!edmg_bw_config) {
+		*wmi_ch = 0;
+		return 0;
+	} else if (edmg_bw_config == WIL_EDMG_BW_CONFIGURATION) {
+		/* convert from edmg channel bitmap into edmg channel number */
+		switch (edmg_channels) {
+		case WIL_EDMG_CHANNEL_9_SUBCHANNELS:
+			return wil_spec2wmi_ch(9, wmi_ch);
+		case WIL_EDMG_CHANNEL_10_SUBCHANNELS:
+			return wil_spec2wmi_ch(10, wmi_ch);
+		case WIL_EDMG_CHANNEL_11_SUBCHANNELS:
+			return wil_spec2wmi_ch(11, wmi_ch);
+		default:
+			wil_err(wil, "Unsupported edmg channel bitmap 0x%x\n",
+				edmg_channels);
+			return -EINVAL;
+		}
+	} else {
+		wil_err(wil, "Unsupported EDMG BW configuration %d\n",
+			edmg_bw_config);
+		return -EINVAL;
+	}
+}
+
 static int wil_cfg80211_connect(struct wiphy *wiphy,
 				struct net_device *ndev,
 				struct cfg80211_connect_params *sme)
@@ -1455,7 +1548,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
 	memcpy(conn.ssid, ssid_eid+2, conn.ssid_len);
 	conn.channel = ch - 1;
 
-	if (test_bit(WMI_FW_CAPABILITY_CHANNEL_BONDING, wil->fw_capabilities))
+	if (test_bit(WMI_FW_CAPABILITY_CHANNEL_BONDING, wil->fw_capabilities)) {
 		if (wil->force_edmg_channel) {
 			rc = wil_spec2wmi_ch(wil->force_edmg_channel,
 					     &conn.edmg_channel);
@@ -1463,7 +1556,15 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
 				wil_err(wil,
 					"wmi channel for channel %d not found",
 					wil->force_edmg_channel);
+		} else {
+			rc = wil_get_wmi_edmg_channel(wil,
+						      sme->edmg.bw_config,
+						      sme->edmg.channels,
+						      &conn.edmg_channel);
+			if (rc < 0)
+				return rc;
 		}
+	}
 
 	ether_addr_copy(conn.bssid, bss->bssid);
 	ether_addr_copy(conn.dst_mac, bss->bssid);
@@ -2032,7 +2133,7 @@ static int _wil_cfg80211_set_ies(struct wil6210_vif *vif,
 static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
 				  struct net_device *ndev,
 				  const u8 *ssid, size_t ssid_len, u32 privacy,
-				  int bi, u8 chan,
+				  int bi, u8 chan, u8 wmi_edmg_channel,
 				  struct cfg80211_beacon_data *bcon,
 				  u8 hidden_ssid, u32 pbss)
 {
@@ -2049,7 +2150,8 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
 	if (pbss)
 		wmi_nettype = WMI_NETTYPE_P2P;
 
-	wil_dbg_misc(wil, "start_ap: mid=%d, is_go=%d\n", vif->mid, is_go);
+	wil_dbg_misc(wil, "start_ap: mid=%d, is_go=%d ap_ps=%d\n", vif->mid,
+		     is_go, wil->ap_ps);
 	if (is_go && !pbss) {
 		wil_err(wil, "P2P GO must be in PBSS\n");
 		return -ENOTSUPP;
@@ -2110,6 +2212,7 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
 
 	vif->privacy = privacy;
 	vif->channel = chan;
+	vif->wmi_edmg_channel = wmi_edmg_channel;
 	vif->hidden_ssid = hidden_ssid;
 	vif->pbss = pbss;
 	vif->bi = bi;
@@ -2131,7 +2234,8 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
 	}
 
 
-	rc = wmi_pcp_start(vif, bi, wmi_nettype, chan, hidden_ssid, is_go);
+	rc = wmi_pcp_start(vif, bi, wmi_nettype, chan, wmi_edmg_channel,
+			   hidden_ssid, is_go);
 	if (rc)
 		goto err_pcp_start;
 
@@ -2139,6 +2243,14 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
 	if (rc)
 		goto err_bcast;
 
+	if (test_bit(WMI_FW_CAPABILITY_AP_POWER_MANAGEMENT,
+		     wil->fw_capabilities)) {
+		enum wmi_ps_profile_type ps_profile = wil->ap_ps ?
+			wil->ps_profile : WMI_PS_PROFILE_TYPE_PS_DISABLED;
+
+		wil_ps_update(wil, ps_profile);
+	}
+
 	goto out; /* success */
 
 err_bcast:
@@ -2188,7 +2300,8 @@ void wil_cfg80211_ap_recovery(struct wil6210_priv *wil)
 		rc = _wil_cfg80211_start_ap(wiphy, ndev,
 					    vif->ssid, vif->ssid_len,
 					    vif->privacy, vif->bi,
-					    vif->channel, &bcon,
+					    vif->channel,
+					    vif->wmi_edmg_channel, &bcon,
 					    vif->hidden_ssid, vif->pbss);
 		if (rc) {
 			wil_err(wil, "vif %d recovery failed (%d)\n", i, rc);
@@ -2238,7 +2351,8 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
 		rc = _wil_cfg80211_start_ap(wiphy, ndev, vif->ssid,
 					    vif->ssid_len, privacy,
 					    wdev->beacon_interval,
-					    vif->channel, bcon,
+					    vif->channel,
+					    vif->wmi_edmg_channel, bcon,
 					    vif->hidden_ssid,
 					    vif->pbss);
 	} else {
@@ -2257,10 +2371,17 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
 	struct ieee80211_channel *channel = info->chandef.chan;
 	struct cfg80211_beacon_data *bcon = &info->beacon;
 	struct cfg80211_crypto_settings *crypto = &info->crypto;
+	u8 wmi_edmg_channel;
 	u8 hidden_ssid;
 
 	wil_dbg_misc(wil, "start_ap\n");
 
+	rc = wil_get_wmi_edmg_channel(wil, info->chandef.edmg.bw_config,
+				      info->chandef.edmg.channels,
+				      &wmi_edmg_channel);
+	if (rc < 0)
+		return rc;
+
 	if (!channel) {
 		wil_err(wil, "AP: No channel???\n");
 		return -EINVAL;
@@ -2300,7 +2421,8 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
 	rc = _wil_cfg80211_start_ap(wiphy, ndev,
 				    info->ssid, info->ssid_len, info->privacy,
 				    info->beacon_interval, channel->hw_value,
-				    bcon, hidden_ssid, info->pbss);
+				    wmi_edmg_channel, bcon, hidden_ssid,
+				    info->pbss);
 
 	return rc;
 }
@@ -3663,9 +3785,7 @@ static int wil_nl_60g_handle_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
 				     "NL_60G_GEN_FW_RESET, resetting...\n");
 
 			mutex_lock(&wil->mutex);
-			down_write(&wil->mem_lock);
 			rc = wil_reset(wil, true);
-			up_write(&wil->mem_lock);
 			mutex_unlock(&wil->mutex);
 
 			break;
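For reference, the EDMG bitmap-to-channel conversion that wil_get_wmi_edmg_channel() performs can be summarized compactly: each CB2 channel number 9-11 denotes a pair of adjacent 2.16 GHz primary channels, and the bitmap names that pair. An illustrative stand-alone helper, not the driver function:

    /* Hypothetical helper mirroring the switch in
     * wil_get_wmi_edmg_channel(): a bitmap of two adjacent primary
     * channels (BIT(0)..BIT(3) for channels 1..4) maps to the EDMG
     * CB2 channel number from IEEE 802.11 section 9.4.2.251. */
    static int edmg_bitmap_to_channel(unsigned int bitmap)
    {
        switch (bitmap) {
        case 0x3:       /* channels 1+2 */
            return 9;
        case 0x6:       /* channels 2+3 */
            return 10;
        case 0xc:       /* channels 3+4 */
            return 11;
        default:
            return -1;  /* not a supported adjacent pair */
        }
    }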
diff --git a/drivers/net/wireless/ath/wil6210/config.c b/drivers/net/wireless/ath/wil6210/config.c
index a5788c10..6e47f45 100644
--- a/drivers/net/wireless/ath/wil6210/config.c
+++ b/drivers/net/wireless/ath/wil6210/config.c
@@ -209,6 +209,10 @@ static struct wil_config_entry config_table[] = {
 			     wil_ini_param_type_signed, &agg_wsize, 0,
 			     sizeof(agg_wsize), WIL_CONFIG_AGG_WSIZE_MIN,
 			     WIL_CONFIG_AGG_WSIZE_MAX),
+	WIL_CONFIG_INI_PARAM(WIL_CONFIG_AC_QUEUES_NAME,
+			     wil_ini_param_type_unsigned, &ac_queues, 0,
+			     WIL_CONFIG_BOOL_SIZE, WIL_CONFIG_BOOL_MIN,
+			     WIL_CONFIG_BOOL_MAX),
 	WIL_CONFIG_INI_PARAM(WIL_CONFIG_DROP_IF_FULL_NAME,
 			     wil_ini_param_type_unsigned, &drop_if_ring_full,
 			     0, WIL_CONFIG_BOOL_SIZE, WIL_CONFIG_BOOL_MIN,
@@ -664,7 +668,7 @@ int wil_parse_config_ini(struct wil6210_priv *wil)
 
 		/* parse new line */
 		name = strsep(&buffer, "=");
-		if (!name) {
+		if (!name || !buffer) {
 			wil_err(wil, "file parse error at line %d. expecting '='\n",
 				line_index);
 			rc = -EINVAL;
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 7a33dab..8606c64 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -99,6 +99,22 @@ static void wil_print_ring(struct seq_file *s, struct wil6210_priv *wil,
 
 		v = (ring_id % 2 ? (v >> 16) : (v & 0xffff));
 		seq_printf(s, "  hwhead = %u\n", v);
+		if (!ring->is_rx) {
+			struct wil_ring_tx_data *txdata =
+				&wil->ring_tx_data[ring_id];
+
+			seq_printf(s, "  available = %d\n",
+				   wil_ring_avail_tx(ring) -
+				   txdata->tx_reserved_count);
+			seq_printf(s, "  used = %d\n",
+				   wil_ring_used_tx(ring));
+			seq_printf(s, "\n  tx_res_count = %d\n",
+				   txdata->tx_reserved_count);
+			seq_printf(s, "  tx_res_count_used = %d\n",
+				   txdata->tx_reserved_count_used);
+			seq_printf(s, "  tx_res_count_unavail = %d\n",
+				   txdata->tx_reserved_count_not_avail);
+		}
 	}
 	seq_printf(s, "  hwtail = [0x%08x] -> ", ring->hwtail);
 	x = wmi_addr(wil, ring->hwtail);
@@ -1028,6 +1044,18 @@ static const struct file_operations fops_pmcdata = {
 	.llseek		= wil_pmc_llseek,
 };
 
+static int wil_pmcring_seq_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, wil_pmcring_read, inode->i_private);
+}
+
+static const struct file_operations fops_pmcring = {
+	.open		= wil_pmcring_seq_open,
+	.release	= single_release,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+};
+
 /*---tx_mgmt---*/
 /* Write mgmt frame to this file to send it */
 static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
@@ -2569,6 +2597,7 @@ static const struct {
 	{"back",	0644,		&fops_back},
 	{"pmccfg",	0644,		&fops_pmccfg},
 	{"pmcdata",	0444,		&fops_pmcdata},
+	{"pmcring",	0444,		&fops_pmcring},
 	{"temp",	0444,		&fops_temp},
 	{"freq",	0444,		&fops_freq},
 	{"link",	0444,		&fops_link},
@@ -2639,6 +2668,8 @@ static const struct dbg_off dbg_wil_off[] = {
 	WIL_FIELD(rx_buff_id_count, 0644,	doff_u32),
 	WIL_FIELD(amsdu_en, 0644,	doff_u8),
 	WIL_FIELD(force_edmg_channel, 0644,	doff_u8),
+	WIL_FIELD(ap_ps, 0644, doff_u8),
+	WIL_FIELD(tx_reserved_entries, 0644, doff_u32),
 	{},
 };
 
diff --git a/drivers/net/wireless/ath/wil6210/ipa.c b/drivers/net/wireless/ath/wil6210/ipa.c
index 4c089ea..01f045c 100644
--- a/drivers/net/wireless/ath/wil6210/ipa.c
+++ b/drivers/net/wireless/ath/wil6210/ipa.c
@@ -128,7 +128,7 @@ static void wil_ipa_rx(struct wil_ipa *ipa, struct sk_buff *skb)
 
 	stats = &wil->sta[cid].stats;
 
-	wil_netif_rx(skb, ndev, cid, stats, false);
+	wil_netif_rx(skb, ndev, cid, stats, ipa->lan_rx_napi);
 
 	return;
 
@@ -901,6 +901,7 @@ static int wil_ipa_wigig_init(struct wil_ipa *ipa)
 	}
 
 	ipa->uc_db_pa = out.uc_db_pa;
+	ipa->lan_rx_napi = out.lan_rx_napi_enable;
 
 	return 0;
 
diff --git a/drivers/net/wireless/ath/wil6210/ipa.h b/drivers/net/wireless/ath/wil6210/ipa.h
index 5b9fa84..5b3273c 100644
--- a/drivers/net/wireless/ath/wil6210/ipa.h
+++ b/drivers/net/wireless/ath/wil6210/ipa.h
@@ -53,6 +53,7 @@ struct wil_ipa {
 	struct wil_ipa_rx_buf rx_buf; /* contiguous memory split into rx bufs */
 	struct msi_msg orig_msi_msg;
 	atomic_t outstanding_pkts;
+	u8 lan_rx_napi;
 };
 
 static inline bool wil_ipa_offload(void) {return ipa_offload; }
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 1c2227c..cbd2ad5 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -181,7 +181,8 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
 	}
 }
 
-/* Device memory access is prohibited while reset or suspend.
+/* Device memory access is prohibited while reset, suspend or
+ * pci linkdown.
  * wil_mem_access_lock protects accessing device memory in these cases
  */
 int wil_mem_access_lock(struct wil6210_priv *wil)
@@ -190,7 +191,8 @@ int wil_mem_access_lock(struct wil6210_priv *wil)
 		return -EBUSY;
 
 	if (test_bit(wil_status_suspending, wil->status) ||
-	    test_bit(wil_status_suspended, wil->status)) {
+	    test_bit(wil_status_suspended, wil->status) ||
+	    test_bit(wil_status_pci_linkdown, wil->status)) {
 		up_read(&wil->mem_lock);
 		return -EBUSY;
 	}
@@ -1307,6 +1309,8 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
 	}
 
 	update_supported_bands(wil);
+
+	wil->ap_ps = test_bit(WIL_PLATFORM_CAPA_AP_PS, wil->platform_capa);
 }
 
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
@@ -1717,6 +1721,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 		wil_dbg_misc(wil, "notify FW to enable SPI for sensing\n");
 		wil_s(wil, RGF_USER_USAGE_6, BIT_SPI_SENSING_SUPPORT);
 		wmi_reset_spi_slave(wil);
+	} else {
+		wil_dbg_misc(wil, "notify FW to disable SPI for sensing\n");
+		wil_c(wil, RGF_USER_USAGE_6, BIT_SPI_SENSING_SUPPORT);
 	}
 
 	if (wil->platform_ops.notify) {
@@ -1745,6 +1752,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 	/* Disable device led before reset*/
 	wmi_led_cfg(wil, false);
 
+	down_write(&wil->mem_lock);
+
 	/* prevent NAPI from being scheduled and prevent wmi commands */
 	mutex_lock(&wil->wmi_mutex);
 	if (test_bit(wil_status_suspending, wil->status))
@@ -1800,6 +1809,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 
 		if  (wil->secured_boot) {
 			wil_err(wil, "secured boot is not supported\n");
+			up_write(&wil->mem_lock);
 			return -ENOTSUPP;
 		}
 
@@ -1830,6 +1840,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 
 	clear_bit(wil_status_resetting, wil->status);
 
+	up_write(&wil->mem_lock);
+
 	if (load_fw) {
 		wil_unmask_irq(wil);
 
@@ -1883,6 +1895,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 						 ftm->rx_offset);
 		}
 
+		wil->tx_reserved_entries = ((drop_if_ring_full || ac_queues) ?
+					    WIL_DEFAULT_TX_RESERVED_ENTRIES :
+					    0);
+
 		if (wil->platform_ops.notify) {
 			rc = wil->platform_ops.notify(wil->platform_handle,
 						      WIL_PLATFORM_EVT_FW_RDY);
@@ -1897,6 +1913,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 	return rc;
 
 out:
+	up_write(&wil->mem_lock);
 	clear_bit(wil_status_resetting, wil->status);
 	return rc;
 }
@@ -1922,9 +1939,7 @@ int __wil_up(struct wil6210_priv *wil)
 
 	WARN_ON(!mutex_is_locked(&wil->mutex));
 
-	down_write(&wil->mem_lock);
 	rc = wil_reset(wil, true);
-	up_write(&wil->mem_lock);
 	if (rc)
 		return rc;
 
@@ -2017,9 +2032,7 @@ int __wil_down(struct wil6210_priv *wil)
 	wil_abort_scan_all_vifs(wil, false);
 	mutex_unlock(&wil->vif_mutex);
 
-	down_write(&wil->mem_lock);
 	rc = wil_reset(wil, false);
-	up_write(&wil->mem_lock);
 
 	return rc;
 }
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 7913124..d94c651 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -12,10 +12,17 @@
 #include "ipa.h"
 #include "config.h"
 
+#define WIL6210_TX_QUEUES (4)
+
 static bool alt_ifname; /* = false; */
 module_param(alt_ifname, bool, 0444);
 MODULE_PARM_DESC(alt_ifname, " use an alternate interface name wigigN instead of wlanN");
 
+/* Enable access-category queues for transmit packets. This parameter may be
+ * controlled via the wigig.ini config file; enabled by default.
+ */
+bool ac_queues = true;
+
 bool wil_has_other_active_ifaces(struct wil6210_priv *wil,
 				 struct net_device *ndev, bool up, bool ok)
 {
@@ -94,10 +101,45 @@ static int wil_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
 	return wil_ioctl(wil, ifr->ifr_data, cmd);
 }
 
+/**
+ * AC to queue mapping
+ *
+ * AC_VO -> queue 3
+ * AC_VI -> queue 2
+ * AC_BE -> queue 1
+ * AC_BK -> queue 0
+ */
+static u16 wil_select_queue(struct net_device *ndev,
+			    struct sk_buff *skb,
+			    struct net_device *sb_dev,
+			    select_queue_fallback_t fallback)
+{
+	static const u16 wil_1d_to_queue[8] = {1, 0, 0, 1, 2, 2, 3, 3};
+	struct wil6210_priv *wil = ndev_to_wil(ndev);
+	u16 qid;
+
+	if (!ac_queues)
+		return 0;
+
+	/* determine the priority */
+	if (wil_is_special_packet(skb))
+		skb->priority = 7;
+	else if (skb->priority == 0 || skb->priority > 7)
+		skb->priority = cfg80211_classify8021d(skb, NULL);
+
+	qid = wil_1d_to_queue[skb->priority];
+
+	wil_dbg_txrx(wil, "select queue for priority %d -> queue %d\n",
+		     skb->priority, qid);
+
+	return qid;
+}
+
 static const struct net_device_ops wil_netdev_ops = {
 	.ndo_open		= wil_open,
 	.ndo_stop		= wil_stop,
 	.ndo_start_xmit		= wil_start_xmit,
+	.ndo_select_queue	= wil_select_queue,
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_do_ioctl		= wil_do_ioctl,
@@ -336,8 +378,12 @@ wil_vif_alloc(struct wil6210_priv *wil, const char *name,
 		return ERR_PTR(-EINVAL);
 	}
 
-	ndev = alloc_netdev(sizeof(*vif), name, name_assign_type,
-			    wil_dev_setup);
+	if (ac_queues)
+		ndev = alloc_netdev_mqs(sizeof(*vif), name, name_assign_type,
+					wil_dev_setup, WIL6210_TX_QUEUES, 1);
+	else
+		ndev = alloc_netdev(sizeof(*vif), name, name_assign_type,
+				    wil_dev_setup);
 	if (!ndev) {
 		dev_err(wil_to_dev(wil), "alloc_netdev failed\n");
 		return ERR_PTR(-ENOMEM);
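The AC-to-queue table in wil_select_queue() can be sanity-checked in isolation. A tiny user-space sketch that prints the mapping, assuming priorities have already been normalized into 0-7 as the driver does before indexing:

    #include <stdio.h>

    int main(void)
    {
        /* same table as wil_1d_to_queue in the patch */
        static const unsigned short q[8] = {1, 0, 0, 1, 2, 2, 3, 3};
        static const char *const ac[4] = {"AC_BK", "AC_BE", "AC_VI", "AC_VO"};

        for (int prio = 0; prio < 8; prio++)
            printf("802.1d priority %d -> queue %d (%s)\n",
                   prio, q[prio], ac[q[prio]]);
        return 0;
    }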
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index c49f798..e779f78 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018,2019 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -18,6 +18,7 @@
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
+#include <linux/seq_file.h>
 #include "wmi.h"
 #include "wil6210.h"
 #include "txrx.h"
@@ -431,3 +432,28 @@ loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
 
 	return newpos;
 }
+
+int wil_pmcring_read(struct seq_file *s, void *data)
+{
+	struct wil6210_priv *wil = s->private;
+	struct pmc_ctx *pmc = &wil->pmc;
+	size_t pmc_ring_size =
+		sizeof(struct vring_rx_desc) * pmc->num_descriptors;
+
+	mutex_lock(&pmc->lock);
+
+	if (!wil_is_pmc_allocated(pmc)) {
+		wil_err(wil, "error, pmc is not allocated!\n");
+		pmc->last_cmd_status = -EPERM;
+		mutex_unlock(&pmc->lock);
+		return -EPERM;
+	}
+
+	wil_dbg_misc(wil, "pmcring_read: size %zu\n", pmc_ring_size);
+
+	seq_write(s, pmc->pring_va, pmc_ring_size);
+
+	mutex_unlock(&pmc->lock);
+
+	return 0;
+}
diff --git a/drivers/net/wireless/ath/wil6210/pmc.h b/drivers/net/wireless/ath/wil6210/pmc.h
index bebc8d5..3ec8635 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.h
+++ b/drivers/net/wireless/ath/wil6210/pmc.h
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -25,3 +26,4 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd);
 int wil_pmc_last_cmd_status(struct wil6210_priv *wil);
 ssize_t wil_pmc_read(struct file *, char __user *, size_t, loff_t *);
 loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence);
+int wil_pmcring_read(struct seq_file *s, void *data);
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index d7d761b..659e30c3 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -981,7 +981,8 @@ static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa,
 	return 0;
 }
 
-void wil_tx_data_init(struct wil_ring_tx_data *txdata)
+void wil_tx_data_init(const struct wil6210_priv *wil,
+		      struct wil_ring_tx_data *txdata)
 {
 	spin_lock_bh(&txdata->lock);
 	txdata->dot1x_open = 0;
@@ -994,6 +995,9 @@ void wil_tx_data_init(struct wil_ring_tx_data *txdata)
 	txdata->agg_amsdu = 0;
 	txdata->addba_in_progress = false;
 	txdata->mid = U8_MAX;
+	txdata->tx_reserved_count = wil->tx_reserved_entries;
+	txdata->tx_reserved_count_used = 0;
+	txdata->tx_reserved_count_not_avail = 0;
 	spin_unlock_bh(&txdata->lock);
 }
 
@@ -1048,7 +1052,7 @@ static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
 		goto out;
 	}
 
-	wil_tx_data_init(txdata);
+	wil_tx_data_init(wil, txdata);
 	vring->is_rx = false;
 	vring->size = size;
 	rc = wil_vring_alloc(wil, vring);
@@ -1217,7 +1221,7 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
 		goto out;
 	}
 
-	wil_tx_data_init(txdata);
+	wil_tx_data_init(wil, txdata);
 	vring->is_rx = false;
 	vring->size = size;
 	rc = wil_vring_alloc(wil, vring);
@@ -1858,6 +1862,20 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
 	return rc;
 }
 
+bool wil_is_special_packet(const struct sk_buff *skb)
+{
+	if (skb->protocol == cpu_to_be16(ETH_P_ARP) ||
+	    skb->protocol == cpu_to_be16(ETH_P_RARP) ||
+	    (skb->protocol == cpu_to_be16(ETH_P_IP) &&
+	     ip_hdr(skb)->protocol == IPPROTO_ICMP) ||
+	    (skb->protocol == cpu_to_be16(ETH_P_IPV6) &&
+	     ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) ||
+	    skb->protocol == cpu_to_be16(ETH_P_PAE))
+		return true;
+
+	return false;
+}
+
 static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
 			 struct wil_ring *ring, struct sk_buff *skb)
 {
@@ -1865,7 +1883,6 @@ static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
 	struct vring_tx_desc dd, *d = &dd;
 	volatile struct vring_tx_desc *_d;
 	u32 swhead = ring->swhead;
-	int avail = wil_ring_avail_tx(ring);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	uint f = 0;
 	int ring_index = ring - wil->ring_tx;
@@ -1875,6 +1892,11 @@ static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
 	int used;
 	bool mcast = (ring_index == vif->bcast_ring);
 	uint len = skb_headlen(skb);
+	bool special_packet = (wil->tx_reserved_entries != 0 &&
+			       wil_is_special_packet(skb));
+	int avail = wil_ring_avail_tx(ring) -
+		(special_packet ? 0 : txdata->tx_reserved_count);
+	u8 ctx_flags = special_packet ? WIL_CTX_FLAG_RESERVED_USED : 0;
 
 	wil_dbg_txrx(wil, "tx_ring: %d bytes to ring %d, nr_frags %d\n",
 		     skb->len, ring_index, nr_frags);
@@ -1883,9 +1905,17 @@ static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
 		return -EINVAL;
 
 	if (unlikely(avail < 1 + nr_frags)) {
-		wil_err_ratelimited(wil,
-				    "Tx ring[%2d] full. No space for %d fragments\n",
-				    ring_index, 1 + nr_frags);
+		if (special_packet) {
+			txdata->tx_reserved_count_not_avail++;
+			wil_err_ratelimited(wil,
+					    "TX ring[%2d] full. No space for %d fragments for special packet. Tx-reserved-count is %d\n",
+					    ring_index, 1 + nr_frags,
+					    txdata->tx_reserved_count);
+		} else {
+			wil_err_ratelimited(wil,
+					    "Tx ring[%2d] full. No space for %d fragments\n",
+					    ring_index, 1 + nr_frags);
+		}
 		return -ENOMEM;
 	}
 	_d = &ring->va[i].tx.legacy;
@@ -1900,6 +1930,7 @@ static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
 	if (unlikely(dma_mapping_error(dev, pa)))
 		return -EINVAL;
 	ring->ctx[i].mapped_as = wil_mapped_as_single;
+	ring->ctx[i].flags = ctx_flags;
 	/* 1-st segment */
 	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, len,
 				   ring_index);
@@ -1938,6 +1969,8 @@ static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
 			goto dma_error;
 		}
 		ring->ctx[i].mapped_as = wil_mapped_as_page;
+		ring->ctx[i].flags = ctx_flags;
+
 		wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
 					   pa, len, ring_index);
 		/* no need to check return code -
@@ -1970,6 +2003,14 @@ static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
 			     ring_index, used, used + nr_frags + 1);
 	}
 
+	if (special_packet) {
+		txdata->tx_reserved_count -= (f + 1);
+		txdata->tx_reserved_count_used += (f + 1);
+		wil_dbg_txrx(wil,
+			     "Ring[%2d] tx_reserved_count: %d, reduced by %d\n",
+			     ring_index, txdata->tx_reserved_count, f + 1);
+	}
+
 	/* Make sure to advance the head only after descriptor update is done.
 	 * This will prevent a race condition where the completion thread
 	 * will see the DU bit set from previous run and will handle the
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 0c71827..4e9e6f6 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -650,10 +650,12 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
 						int size, u16 ssn);
 void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
 			   struct wil_tid_ampdu_rx *r);
-void wil_tx_data_init(struct wil_ring_tx_data *txdata);
+void wil_tx_data_init(const struct wil6210_priv *wil,
+		      struct wil_ring_tx_data *txdata);
 void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil);
 void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
 			 struct wil_sta_info *sta);
 int wil_get_cid_by_ring(struct wil6210_priv *wil, struct wil_ring *ring);
+bool wil_is_special_packet(const struct sk_buff *skb);
 
 #endif /* WIL6210_TXRX_H */
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 273e57f..64e7f3a1 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -750,7 +750,7 @@ static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
 		     "init TX ring: ring_id=%u, cid=%u, tid=%u, sring_id=%u\n",
 		     ring_id, cid, tid, wil->tx_sring_idx);
 
-	wil_tx_data_init(txdata);
+	wil_tx_data_init(wil, txdata);
 	ring->size = size;
 	rc = wil_ring_alloc_desc_ring(wil, ring, true);
 	if (rc)
@@ -1080,6 +1080,8 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
 		stats->last_mcs_rx = wil_rx_status_get_mcs(msg);
 		if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
 			stats->rx_per_mcs[stats->last_mcs_rx]++;
+
+		stats->last_cb_mode_rx = wil_rx_status_get_cb_mode(msg);
 	}
 
 	if (!wil->use_rx_hw_reordering && !wil->use_compressed_rx_status &&
@@ -1291,6 +1293,9 @@ int wil_tx_sring_handler(struct wil6210_priv *wil,
 					  (const void *)&msg, sizeof(msg),
 					  false);
 
+			if (ctx->flags & WIL_CTX_FLAG_RESERVED_USED)
+				txdata->tx_reserved_count++;
+
 			wil_tx_desc_unmap_edma(dev,
 					       (union wil_tx_desc *)d,
 					       ctx);
@@ -1453,7 +1458,7 @@ static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil,
 	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	int min_desc_required = nr_frags + 2; /* Headers, Head, Fragments */
-	int used, avail = wil_ring_avail_tx(ring);
+	int used, avail = wil_ring_avail_tx(ring) - txdata->tx_reserved_count;
 	int f, hdrlen, headlen;
 	int gso_type;
 	bool is_ipv4;
@@ -1615,7 +1620,7 @@ static int wil_ring_init_bcast_edma(struct wil6210_vif *vif, int ring_id,
 		wil_ipa_set_bcast_sring_id(wil, sring_id);
 	}
 
-	wil_tx_data_init(txdata);
+	wil_tx_data_init(wil, txdata);
 	ring->size = size;
 	ring->is_rx = false;
 	rc = wil_ring_alloc_desc_ring(wil, ring, true);
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
index e9ab926..af6de29 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.h
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -354,6 +354,12 @@ static inline u8 wil_rx_status_get_mcs(void *msg)
 			    16, 21);
 }
 
+static inline u8 wil_rx_status_get_cb_mode(void *msg)
+{
+	return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
+			    22, 23);
+}
+
 static inline u16 wil_rx_status_get_flow_id(void *msg)
 {
 	return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 5120b46..743b2d4 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -37,6 +37,7 @@ extern bool drop_if_ring_full;
 extern int n_msi;
 extern uint max_assoc_sta;
 extern bool drop_if_ring_full;
+extern bool ac_queues;
 extern uint rx_ring_order;
 extern uint tx_ring_order;
 extern uint bcast_ring_order;
@@ -105,6 +106,7 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
 #define WIL6210_MAX_STATUS_RINGS	(8)
 #define WIL6210_MAX_HEADROOM_SIZE      (256)
 #define WIL_WMI_CALL_GENERAL_TO_MS 100
+#define WIL_DEFAULT_TX_RESERVED_ENTRIES (16)
 
 /* Hardware offload block adds the following:
  * 26 bytes - 3-address QoS data header
@@ -473,7 +475,7 @@ static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
  */
 static inline bool wil_cid_valid(int cid)
 {
-	return (cid >= 0 && cid < max_assoc_sta);
+	return (cid >= 0 && cid < max_assoc_sta && cid < WIL6210_MAX_CID);
 }
 
 struct wil6210_mbox_ring {
@@ -523,13 +525,17 @@ enum { /* for wil_ctx.mapped_as */
 	wil_mapped_as_page = 2,
 };
 
+/* for wil_ctx.flags */
+#define WIL_CTX_FLAG_RESERVED_USED 0x01
+
 /**
  * struct wil_ctx - software context for ring descriptor
  */
 struct wil_ctx {
 	struct sk_buff *skb;
 	u8 nr_frags;
-	u8 mapped_as;
+	u8 mapped_as:4;
+	u8 flags:4;
 };
 
 struct wil_desc_ring_rx_swtail { /* relevant for enhanced DMA only */
@@ -604,6 +610,7 @@ struct wil_net_stats {
 	unsigned long	rx_amsdu_error; /* eDMA specific */
 	unsigned long	rx_csum_err;
 	u16 last_mcs_rx;
+	u8 last_cb_mode_rx;
 	u64 rx_per_mcs[WIL_MCS_MAX + 1];
 	u32 ft_roams; /* relevant in STA mode */
 };
@@ -662,6 +669,9 @@ struct wil_ring_tx_data {
 	bool addba_in_progress; /* if set, agg_xxx is for request in progress */
 	u8 mid;
 	spinlock_t lock;
+	u32 tx_reserved_count; /* available reserved tx entries */
+	u32 tx_reserved_count_used;
+	u32 tx_reserved_count_not_avail;
 };
 
 enum { /* for wil6210_priv.status */
@@ -866,6 +876,7 @@ struct wil6210_vif {
 	DECLARE_BITMAP(status, wil_vif_status_last);
 	u32 privacy; /* secure connection? */
 	u16 channel; /* relevant in AP mode */
+	u8 wmi_edmg_channel; /* relevant in AP mode */
 	u8 hidden_ssid; /* relevant in AP mode */
 	u32 ap_isolate; /* no intra-BSS communication */
 	bool pbss;
@@ -1041,6 +1052,7 @@ struct wil6210_priv {
 	void *platform_handle;
 	struct wil_platform_ops platform_ops;
 	bool keep_radio_on_during_sleep;
+	u8 ap_ps; /* AP mode power save enabled */
 
 	struct pmc_ctx pmc;
 
@@ -1103,6 +1115,8 @@ struct wil6210_priv {
 
 	struct work_struct pci_linkdown_recovery_worker;
 	void *ipa_handle;
+
+	u32 tx_reserved_entries; /* Used only in Talyn code-path */
 };
 
 #define wil_to_wiphy(i) (i->wiphy)
@@ -1396,7 +1410,7 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil);
 
 int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
 int wmi_pcp_start(struct wil6210_vif *vif, int bi, u8 wmi_nettype, u8 chan,
-		  u8 hidden_ssid, u8 is_go);
+		  u8 edmg_chan, u8 hidden_ssid, u8 is_go);
 int wmi_pcp_stop(struct wil6210_vif *vif);
 int wmi_led_cfg(struct wil6210_priv *wil, bool enable);
 int wmi_abort_scan(struct wil6210_vif *vif);
@@ -1506,6 +1520,7 @@ int wmi_rbufcap_cfg(struct wil6210_priv *wil, bool enable, u16 threshold);
 
 int wil_wmi2spec_ch(u8 wmi_ch, u8 *spec_ch);
 int wil_spec2wmi_ch(u8 spec_ch, u8 *wmi_ch);
+void wil_update_supported_bands(struct wil6210_priv *wil);
 
 int reverse_memcmp(const void *cs, const void *ct, size_t count);
 
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index 1332eb8..92e503f 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -46,7 +46,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
 
 int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
 {
-	int i, rc;
+	int i;
 	const struct fw_map *map;
 	void *data;
 	u32 host_min, dump_size, offset, len;
@@ -62,9 +62,16 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
 		return -EINVAL;
 	}
 
-	rc = wil_mem_access_lock(wil);
-	if (rc)
-		return rc;
+	down_write(&wil->mem_lock);
+
+	if (test_bit(wil_status_suspending, wil->status) ||
+	    test_bit(wil_status_suspended, wil->status) ||
+	    test_bit(wil_status_pci_linkdown, wil->status)) {
+		wil_err(wil,
+			"suspend/resume/pci linkdown in progress. cannot copy crash dump\n");
+		up_write(&wil->mem_lock);
+		return -EBUSY;
+	}
 
 	/* copy to crash dump area */
 	for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
@@ -84,7 +91,8 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
 		wil_memcpy_fromio_32((void * __force)(dest + offset),
 				     (const void __iomem * __force)data, len);
 	}
-	wil_mem_access_unlock(wil);
+
+	up_write(&wil->mem_lock);
 
 	return 0;
 }
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index d381649..e6730af 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -32,6 +32,7 @@ enum wil_platform_capa {
 	WIL_PLATFORM_CAPA_T_PWR_ON_0 = 1,
 	WIL_PLATFORM_CAPA_EXT_CLK = 2,
 	WIL_PLATFORM_CAPA_SMMU = 3,
+	WIL_PLATFORM_CAPA_AP_PS = 4,
 	WIL_PLATFORM_CAPA_MAX,
 };
 
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 96dff3d..45d7934 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -199,8 +199,8 @@ const struct fw_map talyn_mb_fw_mapping[] = {
 	{0x8c0000, 0x8c0210, 0x8c0000, "dum_user_rgf", true, true},
 	/* DMA OFU 296b */
 	{0x8c2000, 0x8c2128, 0x8c2000, "dma_ofu", true, true},
-	/* ucode debug 4k */
-	{0x8c3000, 0x8c4000, 0x8c3000, "ucode_debug", true, true},
+	/* ucode debug 256b */
+	{0x8c3000, 0x8c3100, 0x8c3000, "ucode_debug", true, true},
 	/* upper area 1536k */
 	{0x900000, 0xa80000, 0x900000, "upper", true, true},
 	/* UCODE areas - accessible by debugfs blobs but not by
@@ -2259,8 +2259,8 @@ int wmi_rbufcap_cfg(struct wil6210_priv *wil, bool enable, u16 threshold)
 	return rc;
 }
 
-int wmi_pcp_start(struct wil6210_vif *vif,
-		  int bi, u8 wmi_nettype, u8 chan, u8 hidden_ssid, u8 is_go)
+int wmi_pcp_start(struct wil6210_vif *vif, int bi, u8 wmi_nettype,
+		  u8 chan, u8 wmi_edmg_chan, u8 hidden_ssid, u8 is_go)
 {
 	struct wil6210_priv *wil = vif_to_wil(vif);
 	int rc;
@@ -2270,6 +2270,7 @@ int wmi_pcp_start(struct wil6210_vif *vif,
 		.network_type = wmi_nettype,
 		.disable_sec_offload = 1,
 		.channel = chan - 1,
+		.edmg_channel = wmi_edmg_chan,
 		.pcp_max_assoc_sta = max_assoc_sta,
 		.hidden_ssid = hidden_ssid,
 		.is_go = is_go,
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index ceb0c5b..fc28f4b 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -99,6 +99,7 @@ enum wmi_fw_capability {
 	WMI_FW_CAPABILITY_CHANNEL_4			= 26,
 	WMI_FW_CAPABILITY_IPA				= 27,
 	WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF		= 30,
+	WMI_FW_CAPABILITY_AP_POWER_MANAGEMENT		= 32,
 	WMI_FW_CAPABILITY_MAX,
 };
 
diff --git a/drivers/net/wireless/cnss2/bus.c b/drivers/net/wireless/cnss2/bus.c
index 5601883..f97c3f0 100644
--- a/drivers/net/wireless/cnss2/bus.c
+++ b/drivers/net/wireless/cnss2/bus.c
@@ -186,6 +186,36 @@ int cnss_bus_force_fw_assert_hdlr(struct cnss_plat_data *plat_priv)
 	}
 }
 
+int cnss_bus_qmi_send_get(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_qmi_send_get(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_qmi_send_put(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_qmi_send_put(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
 void cnss_bus_fw_boot_timeout_hdlr(struct timer_list *t)
 {
 	struct cnss_plat_data *plat_priv =
@@ -387,3 +417,37 @@ int cnss_bus_is_device_down(struct cnss_plat_data *plat_priv)
 		return 0;
 	}
 }
+
+int cnss_bus_debug_reg_read(struct cnss_plat_data *plat_priv, u32 offset,
+			    u32 *val)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_debug_reg_read(plat_priv->bus_priv, offset,
+					       val);
+	default:
+		cnss_pr_dbg("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return 0;
+	}
+}
+
+int cnss_bus_debug_reg_write(struct cnss_plat_data *plat_priv, u32 offset,
+			     u32 val)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_debug_reg_write(plat_priv->bus_priv, offset,
+						val);
+	default:
+		cnss_pr_dbg("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return 0;
+	}
+}
diff --git a/drivers/net/wireless/cnss2/bus.h b/drivers/net/wireless/cnss2/bus.h
index 8ec4887..7c719f5 100644
--- a/drivers/net/wireless/cnss2/bus.h
+++ b/drivers/net/wireless/cnss2/bus.h
@@ -30,6 +30,8 @@ int cnss_bus_alloc_qdss_mem(struct cnss_plat_data *plat_priv);
 void cnss_bus_free_qdss_mem(struct cnss_plat_data *plat_priv);
 u32 cnss_bus_get_wake_irq(struct cnss_plat_data *plat_priv);
 int cnss_bus_force_fw_assert_hdlr(struct cnss_plat_data *plat_priv);
+int cnss_bus_qmi_send_get(struct cnss_plat_data *plat_priv);
+int cnss_bus_qmi_send_put(struct cnss_plat_data *plat_priv);
 void cnss_bus_fw_boot_timeout_hdlr(struct timer_list *t);
 void cnss_bus_collect_dump_info(struct cnss_plat_data *plat_priv,
 				bool in_panic);
@@ -46,5 +48,9 @@ int cnss_bus_call_driver_modem_status(struct cnss_plat_data *plat_priv,
 int cnss_bus_update_status(struct cnss_plat_data *plat_priv,
 			   enum cnss_driver_status status);
 int cnss_bus_is_device_down(struct cnss_plat_data *plat_priv);
+int cnss_bus_debug_reg_read(struct cnss_plat_data *plat_priv, u32 offset,
+			    u32 *val);
+int cnss_bus_debug_reg_write(struct cnss_plat_data *plat_priv, u32 offset,
+			     u32 val);
 
 #endif /* _CNSS_BUS_H */
diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c
index 6f0efc7..4d577aa 100644
--- a/drivers/net/wireless/cnss2/debug.c
+++ b/drivers/net/wireless/cnss2/debug.c
@@ -5,9 +5,12 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include "main.h"
+#include "bus.h"
 #include "debug.h"
 #include "pci.h"
 
+#define MMIO_REG_ACCESS_MEM_TYPE		0xFF
+
 void *cnss_ipc_log_context;
 void *cnss_ipc_log_long_context;
 
@@ -107,6 +110,9 @@ static int cnss_stats_show_state(struct seq_file *s,
 		case CNSS_IN_SUSPEND_RESUME:
 			seq_puts(s, "IN_SUSPEND_RESUME");
 			continue;
+		case CNSS_IN_REBOOT:
+			seq_puts(s, "IN_REBOOT");
+			continue;
 		}
 
 		seq_printf(s, "UNKNOWN-%d", i);
@@ -238,6 +244,8 @@ static int cnss_reg_read_debug_show(struct seq_file *s, void *data)
 	mutex_lock(&plat_priv->dev_lock);
 	if (!plat_priv->diag_reg_read_buf) {
 		seq_puts(s, "\nUsage: echo <mem_type> <offset> <data_len> > <debugfs_path>/cnss/reg_read\n");
+		seq_puts(s, "Use mem_type = 0xff for register read by IO access, data_len will be ignored\n");
+		seq_puts(s, "Use other mem_type for register read by QMI\n");
 		mutex_unlock(&plat_priv->dev_lock);
 		return 0;
 	}
@@ -269,16 +277,11 @@ static ssize_t cnss_reg_read_debug_write(struct file *fp,
 	char *sptr, *token;
 	unsigned int len = 0;
 	u32 reg_offset, mem_type;
-	u32 data_len = 0;
+	u32 data_len = 0, reg_val = 0;
 	u8 *reg_buf = NULL;
 	const char *delim = " ";
 	int ret = 0;
 
-	if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
-		cnss_pr_err("Firmware is not ready yet\n");
-		return -EINVAL;
-	}
-
 	len = min(count, sizeof(buf) - 1);
 	if (copy_from_user(buf, user_buf, len))
 		return -EFAULT;
@@ -313,6 +316,20 @@ static ssize_t cnss_reg_read_debug_write(struct file *fp,
 	if (kstrtou32(token, 0, &data_len))
 		return -EINVAL;
 
+	if (mem_type == MMIO_REG_ACCESS_MEM_TYPE) {
+		ret = cnss_bus_debug_reg_read(plat_priv, reg_offset, &reg_val);
+		if (ret)
+			return ret;
+		cnss_pr_dbg("Read 0x%x from register offset 0x%x\n", reg_val,
+			    reg_offset);
+		return count;
+	}
+
+	if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+		cnss_pr_err("Firmware is not ready yet\n");
+		return -EINVAL;
+	}
+
 	mutex_lock(&plat_priv->dev_lock);
 	kfree(plat_priv->diag_reg_read_buf);
 	plat_priv->diag_reg_read_buf = NULL;
@@ -357,6 +374,8 @@ static const struct file_operations cnss_reg_read_debug_fops = {
 static int cnss_reg_write_debug_show(struct seq_file *s, void *data)
 {
 	seq_puts(s, "\nUsage: echo <mem_type> <offset> <reg_val> > <debugfs_path>/cnss/reg_write\n");
+	seq_puts(s, "Use mem_type = 0xff for register write by IO access\n");
+	seq_puts(s, "Use other mem_type for register write by QMI\n");
 
 	return 0;
 }
@@ -374,11 +393,6 @@ static ssize_t cnss_reg_write_debug_write(struct file *fp,
 	const char *delim = " ";
 	int ret = 0;
 
-	if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
-		cnss_pr_err("Firmware is not ready yet\n");
-		return -EINVAL;
-	}
-
 	len = min(count, sizeof(buf) - 1);
 	if (copy_from_user(buf, user_buf, len))
 		return -EFAULT;
@@ -413,6 +427,20 @@ static ssize_t cnss_reg_write_debug_write(struct file *fp,
 	if (kstrtou32(token, 0, &reg_val))
 		return -EINVAL;
 
+	if (mem_type == MMIO_REG_ACCESS_MEM_TYPE) {
+		ret = cnss_bus_debug_reg_write(plat_priv, reg_offset, reg_val);
+		if (ret)
+			return ret;
+		cnss_pr_dbg("Wrote 0x%x to register offset 0x%x\n", reg_val,
+			    reg_offset);
+		return count;
+	}
+
+	if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+		cnss_pr_err("Firmware is not ready yet\n");
+		return -EINVAL;
+	}
+
 	ret = cnss_wlfw_athdiag_write_send_sync(plat_priv, reg_offset, mem_type,
 						sizeof(u32),
 						(u8 *)&reg_val);
@@ -477,6 +505,10 @@ static ssize_t cnss_runtime_pm_debug_write(struct file *fp,
 		cnss_pci_pm_runtime_put_noidle(pci_priv);
 	} else if (sysfs_streq(cmd, "mark_last_busy")) {
 		cnss_pci_pm_runtime_mark_last_busy(pci_priv);
+	} else if (sysfs_streq(cmd, "resume_bus")) {
+		cnss_pci_resume_bus(pci_priv);
+	} else if (sysfs_streq(cmd, "suspend_bus")) {
+		cnss_pci_suspend_bus(pci_priv);
 	} else {
 		cnss_pr_err("Runtime PM debugfs command is invalid\n");
 		ret = -EINVAL;
@@ -500,6 +532,8 @@ static int cnss_runtime_pm_debug_show(struct seq_file *s, void *data)
 	seq_puts(s, "put_noidle: do runtime PM put noidle\n");
 	seq_puts(s, "put_autosuspend: do runtime PM put autosuspend\n");
 	seq_puts(s, "mark_last_busy: do runtime PM mark last busy\n");
+	seq_puts(s, "resume_bus: do bus resume only\n");
+	seq_puts(s, "suspend_bus: do bus suspend only\n");
 
 	return 0;
 }
@@ -557,6 +591,8 @@ static ssize_t cnss_control_params_debug_write(struct file *fp,
 		plat_priv->ctrl_params.quirks = val;
 	else if (strcmp(cmd, "mhi_timeout") == 0)
 		plat_priv->ctrl_params.mhi_timeout = val;
+	else if (strcmp(cmd, "mhi_m2_timeout") == 0)
+		plat_priv->ctrl_params.mhi_m2_timeout = val;
 	else if (strcmp(cmd, "qmi_timeout") == 0)
 		plat_priv->ctrl_params.qmi_timeout = val;
 	else if (strcmp(cmd, "bdf_type") == 0)
@@ -615,6 +651,12 @@ static int cnss_show_quirks_state(struct seq_file *s,
 		case DISABLE_DRV:
 			seq_puts(s, "DISABLE_DRV");
 			continue;
+		case DISABLE_IO_COHERENCY:
+			seq_puts(s, "DISABLE_IO_COHERENCY");
+			continue;
+		case IGNORE_PCI_LINK_FAILURE:
+			seq_puts(s, "IGNORE_PCI_LINK_FAILURE");
+			continue;
 		}
 
 		seq_printf(s, "UNKNOWN-%d", i);
@@ -638,6 +680,8 @@ static int cnss_control_params_debug_show(struct seq_file *s, void *data)
 	seq_puts(s, "\nCurrent value:\n");
 	cnss_show_quirks_state(s, cnss_priv);
 	seq_printf(s, "mhi_timeout: %u\n", cnss_priv->ctrl_params.mhi_timeout);
+	seq_printf(s, "mhi_m2_timeout: %u\n",
+		   cnss_priv->ctrl_params.mhi_m2_timeout);
 	seq_printf(s, "qmi_timeout: %u\n", cnss_priv->ctrl_params.qmi_timeout);
 	seq_printf(s, "bdf_type: %u\n", cnss_priv->ctrl_params.bdf_type);
 	seq_printf(s, "time_sync_period: %u\n",
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index 0215f59..f5d3b38 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -7,9 +7,11 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/pm_wakeup.h>
+#include <linux/reboot.h>
 #include <linux/rwsem.h>
 #include <linux/suspend.h>
 #include <linux/timer.h>
+#include <soc/qcom/minidump.h>
 #include <soc/qcom/ramdump.h>
 #include <soc/qcom/subsystem_notif.h>
 
@@ -31,12 +33,13 @@
 #define CNSS_EVENT_PENDING		2989
 #define COLD_BOOT_CAL_SHUTDOWN_DELAY_MS	50
 
-#define CNSS_QUIRKS_DEFAULT		0
+#define CNSS_QUIRKS_DEFAULT		BIT(DISABLE_IO_COHERENCY)
 #ifdef CONFIG_CNSS_EMULATION
 #define CNSS_MHI_TIMEOUT_DEFAULT	90000
 #else
 #define CNSS_MHI_TIMEOUT_DEFAULT	0
 #endif
+#define CNSS_MHI_M2_TIMEOUT_DEFAULT	25
 #define CNSS_QMI_TIMEOUT_DEFAULT	10000
 #define CNSS_BDF_TYPE_DEFAULT		CNSS_BDF_ELF
 #define CNSS_TIME_SYNC_PERIOD_DEFAULT	900000
@@ -268,6 +271,7 @@ EXPORT_SYMBOL(cnss_wlan_enable);
 int cnss_wlan_disable(struct device *dev, enum cnss_driver_mode mode)
 {
 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+	int ret = 0;
 
 	if (plat_priv->device_id == QCA6174_DEVICE_ID)
 		return 0;
@@ -275,7 +279,10 @@ int cnss_wlan_disable(struct device *dev, enum cnss_driver_mode mode)
 	if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks))
 		return 0;
 
-	return cnss_wlfw_wlan_mode_send_sync(plat_priv, CNSS_OFF);
+	ret = cnss_wlfw_wlan_mode_send_sync(plat_priv, CNSS_OFF);
+	cnss_bus_free_qdss_mem(plat_priv);
+
+	return ret;
 }
 EXPORT_SYMBOL(cnss_wlan_disable);
 
@@ -1048,6 +1055,12 @@ static int cnss_driver_recovery_hdlr(struct cnss_plat_data *plat_priv,
 		goto out;
 	}
 
+	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
+		cnss_pr_err("Reboot is in progress, ignore recovery\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
 		cnss_pr_err("Recovery is already in progress\n");
 		ret = -EINVAL;
@@ -1207,6 +1220,54 @@ int cnss_force_collect_rddm(struct device *dev)
 }
 EXPORT_SYMBOL(cnss_force_collect_rddm);
 
+int cnss_qmi_send_get(struct device *dev)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	if (!test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state))
+		return 0;
+
+	return cnss_bus_qmi_send_get(plat_priv);
+}
+EXPORT_SYMBOL(cnss_qmi_send_get);
+
+int cnss_qmi_send_put(struct device *dev)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	if (!test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state))
+		return 0;
+
+	return cnss_bus_qmi_send_put(plat_priv);
+}
+EXPORT_SYMBOL(cnss_qmi_send_put);
+
+int cnss_qmi_send(struct device *dev, int type, void *cmd,
+		  int cmd_len, void *cb_ctx,
+		  int (*cb)(void *ctx, void *event, int event_len))
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+	int ret;
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	if (!test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state))
+		return -EINVAL;
+
+	plat_priv->get_info_cb = cb;
+	plat_priv->get_info_cb_ctx = cb_ctx;
+
+	ret = cnss_wlfw_get_info_send_sync(plat_priv, type, cmd, cmd_len);
+	if (ret) {
+		plat_priv->get_info_cb = NULL;
+		plat_priv->get_info_cb_ctx = NULL;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(cnss_qmi_send);
+
 static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
 {
 	int ret = 0;
@@ -1253,6 +1314,7 @@ static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv,
 	}
 
 	cnss_wlfw_wlan_mode_send_sync(plat_priv, CNSS_OFF);
+	cnss_bus_free_qdss_mem(plat_priv);
 	cnss_release_antenna_sharing(plat_priv);
 	cnss_bus_dev_shutdown(plat_priv);
 	msleep(COLD_BOOT_CAL_SHUTDOWN_DELAY_MS);
@@ -1502,6 +1564,25 @@ static void cnss_driver_event_work(struct work_struct *work)
 	cnss_pm_relax(plat_priv);
 }
 
+int cnss_va_to_pa(struct device *dev, size_t size, void *va, dma_addr_t dma,
+		  phys_addr_t *pa, unsigned long attrs)
+{
+	struct sg_table sgt;
+	int ret;
+
+	ret = dma_get_sgtable_attrs(dev, &sgt, va, dma, size, attrs);
+	if (ret) {
+		cnss_pr_err("Failed to get sgtable for va: 0x%pK, dma: %pa, size: 0x%zx, attrs: 0x%lx\n",
+			    va, &dma, size, attrs);
+		return -EINVAL;
+	}
+
+	*pa = page_to_phys(sg_page(sgt.sgl));
+	sg_free_table(&sgt);
+
+	return 0;
+}
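A usage sketch for this helper, matching how cnss_pci_add_dump_seg() calls it later in this patch (dev, va, dma, size, seg_no and plat_priv are assumed to be in scope):

	/* Translate a coherent DMA buffer to a physical address so the
	 * segment can be registered with the minidump table. */
	phys_addr_t pa;

	if (!cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS))
		cnss_minidump_add_region(plat_priv, CNSS_FW_RDDM, seg_no,
					 va, pa, size);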
+
 int cnss_register_subsys(struct cnss_plat_data *plat_priv)
 {
 	int ret = 0;
@@ -1752,6 +1833,87 @@ void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv)
 	}
 }
 
+int cnss_minidump_add_region(struct cnss_plat_data *plat_priv,
+			     enum cnss_fw_dump_type type, int seg_no,
+			     void *va, phys_addr_t pa, size_t size)
+{
+	struct md_region md_entry;
+	int ret;
+
+	switch (type) {
+	case CNSS_FW_IMAGE:
+		snprintf(md_entry.name, sizeof(md_entry.name), "FBC_%X",
+			 seg_no);
+		break;
+	case CNSS_FW_RDDM:
+		snprintf(md_entry.name, sizeof(md_entry.name), "RDDM_%X",
+			 seg_no);
+		break;
+	case CNSS_FW_REMOTE_HEAP:
+		snprintf(md_entry.name, sizeof(md_entry.name), "RHEAP_%X",
+			 seg_no);
+		break;
+	default:
+		cnss_pr_err("Unknown dump type ID: %d\n", type);
+		return -EINVAL;
+	}
+
+	md_entry.phys_addr = pa;
+	md_entry.virt_addr = (uintptr_t)va;
+	md_entry.size = size;
+	md_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
+
+	cnss_pr_dbg("Mini dump region: %s, va: %pK, pa: %pa, size: 0x%zx\n",
+		    md_entry.name, va, &pa, size);
+
+	ret = msm_minidump_add_region(&md_entry);
+	if (ret)
+		cnss_pr_err("Failed to add mini dump region, err = %d\n", ret);
+
+	return ret;
+}
+
+int cnss_minidump_remove_region(struct cnss_plat_data *plat_priv,
+				enum cnss_fw_dump_type type, int seg_no,
+				void *va, phys_addr_t pa, size_t size)
+{
+	struct md_region md_entry;
+	int ret;
+
+	switch (type) {
+	case CNSS_FW_IMAGE:
+		snprintf(md_entry.name, sizeof(md_entry.name), "FBC_%X",
+			 seg_no);
+		break;
+	case CNSS_FW_RDDM:
+		snprintf(md_entry.name, sizeof(md_entry.name), "RDDM_%X",
+			 seg_no);
+		break;
+	case CNSS_FW_REMOTE_HEAP:
+		snprintf(md_entry.name, sizeof(md_entry.name), "RHEAP_%X",
+			 seg_no);
+		break;
+	default:
+		cnss_pr_err("Unknown dump type ID: %d\n", type);
+		return -EINVAL;
+	}
+
+	md_entry.phys_addr = pa;
+	md_entry.virt_addr = (uintptr_t)va;
+	md_entry.size = size;
+	md_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
+
+	cnss_pr_dbg("Remove mini dump region: %s, va: %pK, pa: %pa, size: 0x%zx\n",
+		    md_entry.name, va, &pa, size);
+
+	ret = msm_minidump_remove_region(&md_entry);
+	if (ret)
+		cnss_pr_err("Failed to remove mini dump region, err = %d\n",
+			    ret);
+
+	return ret;
+}
+
 static int cnss_register_bus_scale(struct cnss_plat_data *plat_priv)
 {
 	int ret = 0;
@@ -1873,6 +2035,19 @@ static void cnss_event_work_deinit(struct cnss_plat_data *plat_priv)
 	destroy_workqueue(plat_priv->event_wq);
 }
 
+static int cnss_reboot_notifier(struct notifier_block *nb,
+				unsigned long action,
+				void *data)
+{
+	struct cnss_plat_data *plat_priv =
+		container_of(nb, struct cnss_plat_data, reboot_nb);
+
+	set_bit(CNSS_IN_REBOOT, &plat_priv->driver_state);
+	cnss_pr_dbg("Reboot is in progress with action %lu\n", action);
+
+	return NOTIFY_DONE;
+}
+
 static int cnss_misc_init(struct cnss_plat_data *plat_priv)
 {
 	int ret;
@@ -1880,7 +2055,15 @@ static int cnss_misc_init(struct cnss_plat_data *plat_priv)
 	timer_setup(&plat_priv->fw_boot_timer,
 		    cnss_bus_fw_boot_timeout_hdlr, 0);
 
-	register_pm_notifier(&cnss_pm_notifier);
+	ret = register_pm_notifier(&cnss_pm_notifier);
+	if (ret)
+		cnss_pr_err("Failed to register PM notifier, err = %d\n", ret);
+
+	plat_priv->reboot_nb.notifier_call = cnss_reboot_notifier;
+	ret = register_reboot_notifier(&plat_priv->reboot_nb);
+	if (ret)
+		cnss_pr_err("Failed to register reboot notifier, err = %d\n",
+			    ret);
 
 	ret = device_init_wakeup(&plat_priv->plat_dev->dev, true);
 	if (ret)
@@ -1903,6 +2086,7 @@ static void cnss_misc_deinit(struct cnss_plat_data *plat_priv)
 	complete_all(&plat_priv->cal_complete);
 	complete_all(&plat_priv->power_up_complete);
 	device_init_wakeup(&plat_priv->plat_dev->dev, false);
+	unregister_reboot_notifier(&plat_priv->reboot_nb);
 	unregister_pm_notifier(&cnss_pm_notifier);
 	del_timer(&plat_priv->fw_boot_timer);
 }
@@ -1911,6 +2095,7 @@ static void cnss_init_control_params(struct cnss_plat_data *plat_priv)
 {
 	plat_priv->ctrl_params.quirks = CNSS_QUIRKS_DEFAULT;
 	plat_priv->ctrl_params.mhi_timeout = CNSS_MHI_TIMEOUT_DEFAULT;
+	plat_priv->ctrl_params.mhi_m2_timeout = CNSS_MHI_M2_TIMEOUT_DEFAULT;
 	plat_priv->ctrl_params.qmi_timeout = CNSS_QMI_TIMEOUT_DEFAULT;
 	plat_priv->ctrl_params.bdf_type = CNSS_BDF_TYPE_DEFAULT;
 	plat_priv->ctrl_params.time_sync_period = CNSS_TIME_SYNC_PERIOD_DEFAULT;
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index d11f099..431ea90 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -129,6 +129,7 @@ struct cnss_fw_mem {
 	phys_addr_t pa;
 	u8 valid;
 	u32 type;
+	unsigned long attrs;
 };
 
 struct wlfw_rf_chip_info {
@@ -203,6 +204,7 @@ enum cnss_driver_state {
 	CNSS_COEX_CONNECTED,
 	CNSS_IMS_CONNECTED,
 	CNSS_IN_SUSPEND_RESUME,
+	CNSS_IN_REBOOT,
 };
 
 struct cnss_recovery_data {
@@ -239,6 +241,8 @@ enum cnss_debug_quirks {
 	FBC_BYPASS,
 	ENABLE_DAEMON_SUPPORT,
 	DISABLE_DRV,
+	DISABLE_IO_COHERENCY,
+	IGNORE_PCI_LINK_FAILURE,
 };
 
 enum cnss_bdf_type {
@@ -260,6 +264,7 @@ struct cnss_cal_info {
 struct cnss_control_params {
 	unsigned long quirks;
 	unsigned int mhi_timeout;
+	unsigned int mhi_m2_timeout;
 	unsigned int qmi_timeout;
 	unsigned int bdf_type;
 	unsigned int time_sync_period;
@@ -303,6 +308,7 @@ struct cnss_plat_data {
 	struct cnss_esoc_info esoc_info;
 	struct cnss_bus_bw_info bus_bw_info;
 	struct notifier_block modem_nb;
+	struct notifier_block reboot_nb;
 	struct cnss_platform_cap cap;
 	struct pm_qos_request qos_request;
 	struct cnss_device_version device_version;
@@ -351,6 +357,8 @@ struct cnss_plat_data {
 	struct qmi_handle ims_qmi;
 	struct qmi_txn txn;
 	u64 dynamic_feature;
+	void *get_info_cb_ctx;
+	int (*get_info_cb)(void *ctx, void *event, int event_len);
 };
 
 #ifdef CONFIG_ARCH_QCOM
@@ -400,5 +408,13 @@ void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv);
 void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv);
 int cnss_get_cpr_info(struct cnss_plat_data *plat_priv);
 int cnss_update_cpr_info(struct cnss_plat_data *plat_priv);
+int cnss_va_to_pa(struct device *dev, size_t size, void *va, dma_addr_t dma,
+		  phys_addr_t *pa, unsigned long attrs);
+int cnss_minidump_add_region(struct cnss_plat_data *plat_priv,
+			     enum cnss_fw_dump_type type, int seg_no,
+			     void *va, phys_addr_t pa, size_t size);
+int cnss_minidump_remove_region(struct cnss_plat_data *plat_priv,
+				enum cnss_fw_dump_type type, int seg_no,
+				void *va, phys_addr_t pa, size_t size);
 
 #endif /* _CNSS_MAIN_H */
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 841a4aa..6f506d7 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -1,12 +1,14 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. */
 
+#include <linux/cma.h>
 #include <linux/firmware.h>
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/msi.h>
 #include <linux/of.h>
+#include <linux/of_reserved_mem.h>
 #include <linux/pm_runtime.h>
 #include <linux/memblock.h>
 #include <linux/completion.h>
@@ -55,13 +57,19 @@
 
 static DEFINE_SPINLOCK(pci_link_down_lock);
 static DEFINE_SPINLOCK(pci_reg_window_lock);
+static DEFINE_SPINLOCK(time_sync_lock);
+static DEFINE_SPINLOCK(pm_qos_lock);
 
 #define MHI_TIMEOUT_OVERWRITE_MS	(plat_priv->ctrl_params.mhi_timeout)
+#define MHI_M2_TIMEOUT_MS		(plat_priv->ctrl_params.mhi_m2_timeout)
 
 #define FORCE_WAKE_DELAY_MIN_US			4000
 #define FORCE_WAKE_DELAY_MAX_US			6000
 #define FORCE_WAKE_DELAY_TIMEOUT_US		60000
 
+#define POWER_ON_RETRY_MAX_TIMES		3
+#define POWER_ON_RETRY_DELAY_MS			200
+
 static struct cnss_pci_reg ce_src[] = {
 	{ "SRC_RING_BASE_LSB", QCA6390_CE_SRC_RING_BASE_LSB_OFFSET },
 	{ "SRC_RING_BASE_MSB", QCA6390_CE_SRC_RING_BASE_MSB_OFFSET },
@@ -387,7 +395,6 @@ static int cnss_pci_reg_read(struct cnss_pci_data *pci_priv,
 			     u32 offset, u32 *val)
 {
 	int ret;
-	unsigned long flags;
 
 	ret = cnss_pci_check_link_status(pci_priv);
 	if (ret)
@@ -399,12 +406,12 @@ static int cnss_pci_reg_read(struct cnss_pci_data *pci_priv,
 		return 0;
 	}
 
-	spin_lock_irqsave(&pci_reg_window_lock, flags);
+	spin_lock_bh(&pci_reg_window_lock);
 	cnss_pci_select_window(pci_priv, offset);
 
 	*val = readl_relaxed(pci_priv->bar + WINDOW_START +
 			     (offset & WINDOW_RANGE_MASK));
-	spin_unlock_irqrestore(&pci_reg_window_lock, flags);
+	spin_unlock_bh(&pci_reg_window_lock);
 
 	return 0;
 }
@@ -413,7 +420,6 @@ static int cnss_pci_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
 			      u32 val)
 {
 	int ret;
-	unsigned long flags;
 
 	ret = cnss_pci_check_link_status(pci_priv);
 	if (ret)
@@ -425,12 +431,12 @@ static int cnss_pci_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
 		return 0;
 	}
 
-	spin_lock_irqsave(&pci_reg_window_lock, flags);
+	spin_lock_bh(&pci_reg_window_lock);
 	cnss_pci_select_window(pci_priv, offset);
 
 	writel_relaxed(val, pci_priv->bar + WINDOW_START +
 		       (offset & WINDOW_RANGE_MASK));
-	spin_unlock_irqrestore(&pci_reg_window_lock, flags);
+	spin_unlock_bh(&pci_reg_window_lock);
 
 	return 0;
 }
@@ -443,7 +449,8 @@ static int cnss_pci_force_wake_get(struct cnss_pci_data *pci_priv)
 
 	ret = cnss_pci_force_wake_request(dev);
 	if (ret) {
-		cnss_pr_err("Failed to request force wake\n");
+		if (ret != -EAGAIN)
+			cnss_pr_err("Failed to request force wake\n");
 		return ret;
 	}
 
@@ -467,12 +474,74 @@ static int cnss_pci_force_wake_put(struct cnss_pci_data *pci_priv)
 	int ret;
 
 	ret = cnss_pci_force_wake_release(dev);
-	if (ret)
+	if (ret && ret != -EAGAIN)
 		cnss_pr_err("Failed to release force wake\n");
 
 	return ret;
 }
 
+int cnss_pci_debug_reg_read(struct cnss_pci_data *pci_priv, u32 offset,
+			    u32 *val)
+{
+	int ret = 0;
+
+	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
+	if (ret)
+		goto out;
+
+	ret = cnss_pci_pm_runtime_get_sync(pci_priv);
+	if (ret < 0)
+		goto runtime_pm_put;
+
+	cnss_pci_force_wake_get(pci_priv);
+
+	ret = cnss_pci_reg_read(pci_priv, offset, val);
+	if (ret) {
+		cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
+			    offset, ret);
+		goto force_wake_put;
+	}
+
+force_wake_put:
+	cnss_pci_force_wake_put(pci_priv);
+runtime_pm_put:
+	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
+	cnss_pci_pm_runtime_put_autosuspend(pci_priv);
+out:
+	return ret;
+}
+
+int cnss_pci_debug_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
+			     u32 val)
+{
+	int ret = 0;
+
+	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
+	if (ret)
+		goto out;
+
+	ret = cnss_pci_pm_runtime_get_sync(pci_priv);
+	if (ret < 0)
+		goto runtime_pm_put;
+
+	cnss_pci_force_wake_get(pci_priv);
+
+	ret = cnss_pci_reg_write(pci_priv, offset, val);
+	if (ret) {
+		cnss_pr_err("Failed to write 0x%x to register offset 0x%x, err = %d\n",
+			    val, offset, ret);
+		goto force_wake_put;
+	}
+
+force_wake_put:
+	cnss_pci_force_wake_put(pci_priv);
+runtime_pm_put:
+	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
+	cnss_pci_pm_runtime_put_autosuspend(pci_priv);
+out:
+	return ret;
+}
+
 static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
 {
 	struct pci_dev *pci_dev = pci_priv->pci_dev;
@@ -644,8 +713,10 @@ int cnss_resume_pci_link(struct cnss_pci_data *pci_priv)
 	}
 
 	ret = cnss_set_pci_link(pci_priv, PCI_LINK_UP);
-	if (ret)
+	if (ret) {
+		ret = -EAGAIN;
 		goto out;
+	}
 
 	pci_priv->pci_link_state = PCI_LINK_UP;
 
@@ -738,6 +809,18 @@ int cnss_pci_is_device_down(struct device *dev)
 }
 EXPORT_SYMBOL(cnss_pci_is_device_down);
 
+void cnss_pci_lock_reg_window(struct device *dev, unsigned long *flags)
+{
+	spin_lock_bh(&pci_reg_window_lock);
+}
+EXPORT_SYMBOL(cnss_pci_lock_reg_window);
+
+void cnss_pci_unlock_reg_window(struct device *dev, unsigned long *flags)
+{
+	spin_unlock_bh(&pci_reg_window_lock);
+}
+EXPORT_SYMBOL(cnss_pci_unlock_reg_window);
+
 static char *cnss_mhi_state_to_str(enum cnss_mhi_state mhi_state)
 {
 	switch (mhi_state) {
@@ -747,6 +830,8 @@ static char *cnss_mhi_state_to_str(enum cnss_mhi_state mhi_state)
 		return "DEINIT";
 	case CNSS_MHI_POWER_ON:
 		return "POWER_ON";
+	case CNSS_MHI_POWERING_OFF:
+		return "POWERING_OFF";
 	case CNSS_MHI_POWER_OFF:
 		return "POWER_OFF";
 	case CNSS_MHI_FORCE_POWER_OFF:
@@ -807,6 +892,8 @@ static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv,
 	cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n",
 		    cnss_mhi_state_to_str(mhi_state), mhi_state,
 		    pci_priv->mhi_state);
+	if (mhi_state != CNSS_MHI_TRIGGER_RDDM)
+		CNSS_ASSERT(0);
 
 	return -EINVAL;
 }
@@ -824,9 +911,13 @@ static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv,
 	case CNSS_MHI_POWER_ON:
 		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
 		break;
+	case CNSS_MHI_POWERING_OFF:
+		set_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
+		break;
 	case CNSS_MHI_POWER_OFF:
 	case CNSS_MHI_FORCE_POWER_OFF:
 		clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
+		clear_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
 		clear_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
 		clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
 		break;
@@ -877,6 +968,11 @@ static int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
 		break;
 	case CNSS_MHI_POWER_ON:
 		ret = mhi_sync_power_up(pci_priv->mhi_ctrl);
+		/* -ETIMEDOUT means MHI power up has succeeded but timed out
+		 * for firmware mission mode event, so handle it properly.
+		 */
+		if (ret == -ETIMEDOUT)
+			ret = 0;
 		break;
 	case CNSS_MHI_POWER_OFF:
 		mhi_power_down(pci_priv->mhi_ctrl, true);
@@ -937,6 +1033,7 @@ int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv)
 
 	if (MHI_TIMEOUT_OVERWRITE_MS)
 		pci_priv->mhi_ctrl->timeout_ms = MHI_TIMEOUT_OVERWRITE_MS;
+	pci_priv->mhi_ctrl->m2_timeout_ms = MHI_M2_TIMEOUT_MS;
 
 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_INIT);
 	if (ret)
@@ -960,6 +1057,7 @@ static void cnss_pci_power_off_mhi(struct cnss_pci_data *pci_priv)
 		return;
 
 	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_RESUME);
+	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_POWERING_OFF);
 
 	if (!pci_priv->pci_link_down_ind)
 		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_OFF);
@@ -989,11 +1087,6 @@ static int cnss_pci_get_device_timestamp(struct cnss_pci_data *pci_priv,
 		return -EINVAL;
 	}
 
-	cnss_pci_reg_write(pci_priv, QCA6390_WLAON_GLOBAL_COUNTER_CTRL5,
-			   QCA6390_TIME_SYNC_CLEAR);
-	cnss_pci_reg_write(pci_priv, QCA6390_WLAON_GLOBAL_COUNTER_CTRL5,
-			   QCA6390_TIME_SYNC_ENABLE);
-
 	cnss_pci_reg_read(pci_priv, QCA6390_WLAON_GLOBAL_COUNTER_CTRL3, &low);
 	cnss_pci_reg_read(pci_priv, QCA6390_WLAON_GLOBAL_COUNTER_CTRL4, &high);
 
@@ -1004,9 +1097,22 @@ static int cnss_pci_get_device_timestamp(struct cnss_pci_data *pci_priv,
 	return 0;
 }
 
+static void cnss_pci_enable_time_sync_counter(struct cnss_pci_data *pci_priv)
+{
+	cnss_pci_reg_write(pci_priv, QCA6390_WLAON_GLOBAL_COUNTER_CTRL5,
+			   QCA6390_TIME_SYNC_ENABLE);
+}
+
+static void cnss_pci_clear_time_sync_counter(struct cnss_pci_data *pci_priv)
+{
+	cnss_pci_reg_write(pci_priv, QCA6390_WLAON_GLOBAL_COUNTER_CTRL5,
+			   QCA6390_TIME_SYNC_CLEAR);
+}
+
 static int cnss_pci_update_timestamp(struct cnss_pci_data *pci_priv)
 {
 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	unsigned long flags = 0;
 	u64 host_time_us, device_time_us, offset;
 	u32 low, high;
 	int ret;
@@ -1019,8 +1125,13 @@ static int cnss_pci_update_timestamp(struct cnss_pci_data *pci_priv)
 	if (ret)
 		return ret;
 
+	spin_lock_irqsave(&time_sync_lock, flags);
+	cnss_pci_clear_time_sync_counter(pci_priv);
+	cnss_pci_enable_time_sync_counter(pci_priv);
 	host_time_us = cnss_get_host_timestamp(plat_priv);
 	ret = cnss_pci_get_device_timestamp(pci_priv, &device_time_us);
+	cnss_pci_clear_time_sync_counter(pci_priv);
+	spin_unlock_irqrestore(&time_sync_lock, flags);
 	if (ret)
 		goto force_wake_put;
 
@@ -1109,6 +1220,73 @@ static void cnss_pci_stop_time_sync_update(struct cnss_pci_data *pci_priv)
 	cancel_delayed_work_sync(&pci_priv->time_sync_work);
 }
 
+static int cnss_pci_pm_qos_notify(struct notifier_block *nb,
+				  unsigned long curr_val, void *cpus)
+{
+	struct cnss_pci_data *pci_priv =
+		container_of(nb, struct cnss_pci_data, pm_qos_nb);
+	unsigned long flags;
+
+	spin_lock_irqsave(&pm_qos_lock, flags);
+
+	if (!pci_priv->runtime_pm_prevented &&
+	    curr_val != PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE) {
+		cnss_pci_pm_runtime_get_noresume(pci_priv);
+		pci_priv->runtime_pm_prevented = true;
+	} else if (pci_priv->runtime_pm_prevented &&
+		   curr_val == PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE) {
+		cnss_pci_pm_runtime_put_noidle(pci_priv);
+		pci_priv->runtime_pm_prevented = false;
+	}
+
+	spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+	return NOTIFY_DONE;
+}
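The runtime_pm_prevented flag keeps the runtime-PM usage count balanced no matter how many times the notifier fires: at most one reference is taken while a non-default CPU DMA latency request is active, and it is dropped exactly once when the request goes away. A generic sketch of the pattern (names here are illustrative, not from the driver):

	#include <stdbool.h>

	struct pm_guard {
		bool held; /* at most one outstanding reference */
	};

	/* Take or drop a single reference as the requested state
	 * toggles; repeated calls with the same state are no-ops. */
	static void pm_guard_update(struct pm_guard *g, bool want_held,
				    void (*get)(void), void (*put)(void))
	{
		if (!g->held && want_held) {
			get();
			g->held = true;
		} else if (g->held && !want_held) {
			put();
			g->held = false;
		}
	}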
+
+static int cnss_pci_pm_qos_add_notifier(struct cnss_pci_data *pci_priv)
+{
+	int ret;
+
+	if (pci_priv->device_id == QCA6174_DEVICE_ID)
+		return 0;
+
+	pci_priv->pm_qos_nb.notifier_call = cnss_pci_pm_qos_notify;
+	ret = pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
+				  &pci_priv->pm_qos_nb);
+	if (ret)
+		cnss_pr_err("Failed to add qos notifier, err = %d\n",
+			    ret);
+
+	return ret;
+}
+
+static int cnss_pci_pm_qos_remove_notifier(struct cnss_pci_data *pci_priv)
+{
+	int ret;
+	unsigned long flags;
+
+	if (pci_priv->device_id == QCA6174_DEVICE_ID)
+		return 0;
+
+	ret = pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
+				     &pci_priv->pm_qos_nb);
+	if (ret)
+		cnss_pr_dbg("Failed to remove qos notifier, err = %d\n",
+			    ret);
+
+	spin_lock_irqsave(&pm_qos_lock, flags);
+
+	if (pci_priv->runtime_pm_prevented) {
+		cnss_pci_pm_runtime_put_noidle(pci_priv);
+		pci_priv->runtime_pm_prevented = false;
+	}
+
+	spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+	return ret;
+}
+
 int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv)
 {
 	int ret = 0;
@@ -1170,6 +1348,7 @@ int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv)
 	}
 
 	cnss_pci_start_time_sync_update(pci_priv);
+	cnss_pci_pm_qos_add_notifier(pci_priv);
 
 	return 0;
 
@@ -1187,8 +1366,6 @@ int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv)
 
 	plat_priv = pci_priv->plat_priv;
 
-	cnss_pci_stop_time_sync_update(pci_priv);
-
 	if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state) ||
 	    test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
 	    test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
@@ -1201,6 +1378,9 @@ int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv)
 		return -EINVAL;
 	}
 
+	cnss_pci_pm_qos_remove_notifier(pci_priv);
+	cnss_pci_stop_time_sync_update(pci_priv);
+
 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
 	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
 		pci_priv->driver_ops->shutdown(pci_priv->pci_dev);
@@ -1217,6 +1397,9 @@ int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv)
 		}
 	}
 
+	plat_priv->get_info_cb_ctx = NULL;
+	plat_priv->get_info_cb = NULL;
+
 	return 0;
 }
 
@@ -1469,13 +1652,14 @@ static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv)
 	int ret = 0;
 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
 	unsigned int timeout;
+	int retry = 0;
 
-	if (plat_priv->ramdump_info_v2.dump_data_valid ||
-	    test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+	if (plat_priv->ramdump_info_v2.dump_data_valid) {
 		cnss_pci_clear_dump_info(pci_priv);
 		cnss_pci_deinit_mhi(pci_priv);
 	}
 
+retry:
 	ret = cnss_power_on_device(plat_priv);
 	if (ret) {
 		cnss_pr_err("Failed to power on device, err = %d\n", ret);
@@ -1485,6 +1669,18 @@ static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv)
 	ret = cnss_resume_pci_link(pci_priv);
 	if (ret) {
 		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
+		if (test_bit(IGNORE_PCI_LINK_FAILURE,
+			     &plat_priv->ctrl_params.quirks)) {
+			cnss_pr_dbg("Ignore PCI link resume failure\n");
+			ret = 0;
+			goto out;
+		}
+		if (ret == -EAGAIN && retry++ < POWER_ON_RETRY_MAX_TIMES) {
+			cnss_power_off_device(plat_priv);
+			cnss_pr_dbg("Retry to resume PCI link #%d\n", retry);
+			msleep(POWER_ON_RETRY_DELAY_MS * retry);
+			goto retry;
+		}
 		goto power_off;
 	}
 
@@ -1513,6 +1709,8 @@ static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv)
 		if (ret)
 			goto stop_mhi;
 	} else if (timeout) {
+		if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state))
+			timeout = timeout << 1;
 		mod_timer(&plat_priv->fw_boot_timer,
 			  jiffies + msecs_to_jiffies(timeout << 1));
 	}
@@ -1554,16 +1752,21 @@ static int cnss_qca6290_shutdown(struct cnss_pci_data *pci_priv)
 		cnss_pci_collect_dump(pci_priv);
 	}
 
+	if (!cnss_is_device_powered_on(plat_priv)) {
+		cnss_pr_dbg("Device is already powered off, ignore\n");
+		goto skip_power_off;
+	}
+
 	cnss_pci_power_off_mhi(pci_priv);
 	ret = cnss_suspend_pci_link(pci_priv);
 	if (ret)
 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
-	if (!plat_priv->ramdump_info_v2.dump_data_valid &&
-	    !test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state))
+	if (!plat_priv->ramdump_info_v2.dump_data_valid)
 		cnss_pci_deinit_mhi(pci_priv);
 
 	cnss_power_off_device(plat_priv);
 
+skip_power_off:
 	pci_priv->remap_window = 0;
 
 	clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
@@ -1780,7 +1983,7 @@ int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
 
 	timeout = cnss_get_boot_timeout(&pci_priv->pci_dev->dev);
 	ret = wait_for_completion_timeout(&plat_priv->cal_complete,
-					  msecs_to_jiffies(timeout));
+					  msecs_to_jiffies(timeout) << 2);
 	if (!ret) {
 		cnss_pr_err("Timeout waiting for calibration to complete\n");
 		cal_info->cal_status = CNSS_CAL_TIMEOUT;
@@ -2009,7 +2212,7 @@ static int cnss_pci_resume_driver(struct cnss_pci_data *pci_priv)
 	return ret;
 }
 
-static int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv)
+int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv)
 {
 	struct pci_dev *pci_dev = pci_priv->pci_dev;
 	int ret = 0;
@@ -2054,7 +2257,7 @@ static int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv)
 	return ret;
 }
 
-static int cnss_pci_resume_bus(struct cnss_pci_data *pci_priv)
+int cnss_pci_resume_bus(struct cnss_pci_data *pci_priv)
 {
 	struct pci_dev *pci_dev = pci_priv->pci_dev;
 	int ret = 0;
@@ -2480,11 +2683,16 @@ int cnss_auto_suspend(struct device *dev)
 	if (!plat_priv)
 		return -ENODEV;
 
+	mutex_lock(&pci_priv->bus_lock);
 	ret = cnss_pci_suspend_bus(pci_priv);
-	if (ret)
+	if (ret) {
+		mutex_unlock(&pci_priv->bus_lock);
 		return ret;
+	}
 
 	cnss_pci_set_auto_suspended(pci_priv, 1);
+	mutex_unlock(&pci_priv->bus_lock);
+
 	cnss_pci_set_monitor_wake_intr(pci_priv, true);
 
 	bus_bw_info = &plat_priv->bus_bw_info;
@@ -2510,11 +2718,15 @@ int cnss_auto_resume(struct device *dev)
 	if (!plat_priv)
 		return -ENODEV;
 
+	mutex_lock(&pci_priv->bus_lock);
 	ret = cnss_pci_resume_bus(pci_priv);
-	if (ret)
+	if (ret) {
+		mutex_unlock(&pci_priv->bus_lock);
 		return ret;
+	}
 
 	cnss_pci_set_auto_suspended(pci_priv, 0);
+	mutex_unlock(&pci_priv->bus_lock);
 
 	bus_bw_info = &plat_priv->bus_bw_info;
 	msm_bus_scale_client_update_request(bus_bw_info->bus_client,
@@ -2607,18 +2819,60 @@ int cnss_pci_force_wake_release(struct device *dev)
 }
 EXPORT_SYMBOL(cnss_pci_force_wake_release);
 
+int cnss_pci_qmi_send_get(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	mutex_lock(&pci_priv->bus_lock);
+	if (!cnss_pci_get_auto_suspended(pci_priv))
+		goto out;
+
+	cnss_pr_vdbg("Starting to handle get info prepare\n");
+
+	ret = cnss_pci_resume_bus(pci_priv);
+
+out:
+	mutex_unlock(&pci_priv->bus_lock);
+	return ret;
+}
+
+int cnss_pci_qmi_send_put(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	mutex_lock(&pci_priv->bus_lock);
+	if (!cnss_pci_get_auto_suspended(pci_priv))
+		goto out;
+
+	cnss_pr_vdbg("Starting to handle get info done\n");
+
+	ret = cnss_pci_suspend_bus(pci_priv);
+
+out:
+	mutex_unlock(&pci_priv->bus_lock);
+	return ret;
+}
+
 int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
 {
 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
+	struct device *dev = &pci_priv->pci_dev->dev;
 	int i;
 
 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
 		if (!fw_mem[i].va && fw_mem[i].size) {
 			fw_mem[i].va =
-				dma_alloc_coherent(&pci_priv->pci_dev->dev,
-						   fw_mem[i].size,
-						   &fw_mem[i].pa, GFP_KERNEL);
+				dma_alloc_attrs(dev, fw_mem[i].size,
+						&fw_mem[i].pa, GFP_KERNEL,
+						fw_mem[i].attrs);
+
 			if (!fw_mem[i].va) {
 				cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx, type: %u\n",
 					    fw_mem[i].size, fw_mem[i].type);
@@ -2631,6 +2885,31 @@ int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
 	return 0;
 }
 
+static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
+	struct device *dev = &pci_priv->pci_dev->dev;
+	int i;
+
+	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
+		if (fw_mem[i].va && fw_mem[i].size) {
+			cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
+				    fw_mem[i].va, &fw_mem[i].pa,
+				    fw_mem[i].size, fw_mem[i].type);
+			dma_free_attrs(dev, fw_mem[i].size,
+				       fw_mem[i].va, fw_mem[i].pa,
+				       fw_mem[i].attrs);
+			fw_mem[i].va = NULL;
+			fw_mem[i].pa = 0;
+			fw_mem[i].size = 0;
+			fw_mem[i].type = 0;
+		}
+	}
+
+	plat_priv->fw_mem_seg_len = 0;
+}
+
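dma_free_attrs() must be given the same attrs the buffer was allocated with, which is why struct cnss_fw_mem gains an attrs field in main.h. A minimal sketch of the pairing (DMA_ATTR_FORCE_CONTIGUOUS is one plausible value; the dump-segment code below assumes it when translating these buffers for minidump):

	/* Allocate and release FW memory with matching DMA attributes. */
	unsigned long attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t pa;
	void *va = dma_alloc_attrs(dev, size, &pa, GFP_KERNEL, attrs);

	if (va)
		dma_free_attrs(dev, size, va, pa, attrs);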
 int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv)
 {
 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
@@ -2688,30 +2967,6 @@ void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv)
 	plat_priv->qdss_mem_seg_len = 0;
 }
 
-static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv)
-{
-	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
-	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
-	int i;
-
-	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
-		if (fw_mem[i].va && fw_mem[i].size) {
-			cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
-				    fw_mem[i].va, &fw_mem[i].pa,
-				    fw_mem[i].size, fw_mem[i].type);
-			dma_free_coherent(&pci_priv->pci_dev->dev,
-					  fw_mem[i].size, fw_mem[i].va,
-					  fw_mem[i].pa);
-			fw_mem[i].va = NULL;
-			fw_mem[i].pa = 0;
-			fw_mem[i].size = 0;
-			fw_mem[i].type = 0;
-		}
-	}
-
-	plat_priv->fw_mem_seg_len = 0;
-}
-
 int cnss_pci_load_m3(struct cnss_pci_data *pci_priv)
 {
 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
@@ -2765,45 +3020,24 @@ static void cnss_pci_free_m3_mem(struct cnss_pci_data *pci_priv)
 	m3_mem->size = 0;
 }
 
-int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv)
-{
-	int ret;
-	struct cnss_plat_data *plat_priv;
-
-	if (!pci_priv)
-		return -ENODEV;
-
-	plat_priv = pci_priv->plat_priv;
-	if (!plat_priv)
-		return -ENODEV;
-
-	cnss_auto_resume(&pci_priv->pci_dev->dev);
-	cnss_pci_dump_misc_reg(pci_priv);
-	cnss_pci_dump_shadow_reg(pci_priv);
-
-	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
-	if (ret) {
-		cnss_fatal_err("Failed to trigger RDDM, err = %d\n", ret);
-		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
-				       CNSS_REASON_DEFAULT);
-		return ret;
-	}
-
-	if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
-		mod_timer(&plat_priv->fw_boot_timer,
-			  jiffies + msecs_to_jiffies(FW_ASSERT_TIMEOUT));
-	}
-
-	return 0;
-}
-
 void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv)
 {
+	struct cnss_plat_data *plat_priv;
+
 	if (!pci_priv)
 		return;
 
 	cnss_fatal_err("Timeout waiting for FW ready indication\n");
 
+	plat_priv = pci_priv->plat_priv;
+	if (!plat_priv)
+		return;
+
+	if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state)) {
+		cnss_pr_dbg("Ignore FW ready timeout for calibration mode\n");
+		return;
+	}
+
 	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
 			       CNSS_REASON_TIMEOUT);
 }
@@ -2814,7 +3048,7 @@ static int cnss_pci_smmu_fault_handler(struct iommu_domain *domain,
 {
 	struct cnss_pci_data *pci_priv = handler_token;
 
-	cnss_pr_err("SMMU fault happened with IOVA 0x%lx\n", iova);
+	cnss_fatal_err("SMMU fault happened with IOVA 0x%lx\n", iova);
 
 	if (!pci_priv) {
 		cnss_pr_err("pci_priv is NULL\n");
@@ -2902,6 +3136,7 @@ int cnss_smmu_map(struct device *dev,
 		  phys_addr_t paddr, uint32_t *iova_addr, size_t size)
 {
 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
+	struct cnss_plat_data *plat_priv;
 	unsigned long iova;
 	size_t len;
 	int ret = 0;
@@ -2919,6 +3154,8 @@ int cnss_smmu_map(struct device *dev,
 		return -EINVAL;
 	}
 
+	plat_priv = pci_priv->plat_priv;
+
 	len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE);
 	iova = roundup(pci_priv->smmu_iova_ipa_start, PAGE_SIZE);
 
@@ -2931,15 +3168,23 @@ int cnss_smmu_map(struct device *dev,
 		return -ENOMEM;
 	}
 
-	root_port = pci_find_pcie_root_port(pci_priv->pci_dev);
-	root_of_node = root_port->dev.of_node;
-	if (root_of_node->parent) {
-		dma_coherent = of_property_read_bool(root_of_node->parent,
-						     "dma-coherent");
-		cnss_pr_dbg("dma-coherent is %s\n",
-			    dma_coherent ? "enabled" : "disabled");
-		if (dma_coherent)
-			flag |= IOMMU_CACHE;
+	if (!test_bit(DISABLE_IO_COHERENCY,
+		      &plat_priv->ctrl_params.quirks)) {
+		root_port = pci_find_pcie_root_port(pci_priv->pci_dev);
+		if (!root_port) {
+			cnss_pr_err("Root port is null, so dma_coherent is disabled\n");
+		} else {
+			root_of_node = root_port->dev.of_node;
+			if (root_of_node && root_of_node->parent) {
+				dma_coherent =
+				    of_property_read_bool(root_of_node->parent,
+							  "dma-coherent");
+				cnss_pr_dbg("dma-coherent is %s\n",
+					    dma_coherent ? "enabled" : "disabled");
+				if (dma_coherent)
+					flag |= IOMMU_CACHE;
+			}
+		}
 	}
 
 	ret = iommu_map(pci_priv->iommu_domain, iova,
@@ -3310,6 +3555,84 @@ static void cnss_pci_dump_registers(struct cnss_pci_data *pci_priv)
 	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_10);
 }
 
+int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv)
+{
+	int ret;
+	struct cnss_plat_data *plat_priv;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	plat_priv = pci_priv->plat_priv;
+	if (!plat_priv)
+		return -ENODEV;
+
+	if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
+	    test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state))
+		return -EINVAL;
+
+	cnss_auto_resume(&pci_priv->pci_dev->dev);
+	cnss_pci_dump_misc_reg(pci_priv);
+	cnss_pci_dump_shadow_reg(pci_priv);
+
+	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
+	if (ret) {
+		if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
+		    test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state)) {
+			cnss_pr_dbg("MHI is not powered on, ignore RDDM failure\n");
+			return 0;
+		}
+		cnss_fatal_err("Failed to trigger RDDM, err = %d\n", ret);
+		cnss_pci_dump_registers(pci_priv);
+		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
+				       CNSS_REASON_DEFAULT);
+		return ret;
+	}
+
+	if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
+		mod_timer(&plat_priv->fw_boot_timer,
+			  jiffies + msecs_to_jiffies(FW_ASSERT_TIMEOUT));
+	}
+
+	return 0;
+}
+
+static void cnss_pci_add_dump_seg(struct cnss_pci_data *pci_priv,
+				  struct cnss_dump_seg *dump_seg,
+				  enum cnss_fw_dump_type type, int seg_no,
+				  void *va, dma_addr_t dma, size_t size)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	struct device *dev = &pci_priv->pci_dev->dev;
+	phys_addr_t pa;
+
+	dump_seg->address = dma;
+	dump_seg->v_address = va;
+	dump_seg->size = size;
+	dump_seg->type = type;
+
+	cnss_pr_dbg("Seg: %x, va: %pK, dma: %pa, size: 0x%zx\n",
+		    seg_no, va, &dma, size);
+
+	if (cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS))
+		return;
+
+	cnss_minidump_add_region(plat_priv, type, seg_no, va, pa, size);
+}
+
+static void cnss_pci_remove_dump_seg(struct cnss_pci_data *pci_priv,
+				     struct cnss_dump_seg *dump_seg,
+				     enum cnss_fw_dump_type type, int seg_no,
+				     void *va, dma_addr_t dma, size_t size)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	struct device *dev = &pci_priv->pci_dev->dev;
+	phys_addr_t pa;
+
+	cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS);
+	cnss_minidump_remove_region(plat_priv, type, seg_no, va, pa, size);
+}
+
 void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
 {
 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
@@ -3319,7 +3642,7 @@ void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
 		plat_priv->ramdump_info_v2.dump_data_vaddr;
 	struct image_info *fw_image, *rddm_image;
 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
-	int ret, i;
+	int ret, i, j;
 
 	if (test_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state)) {
 		cnss_pr_dbg("RAM dump is already collected, skip\n");
@@ -3348,13 +3671,10 @@ void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
 		    fw_image->entries);
 
 	for (i = 0; i < fw_image->entries; i++) {
-		dump_seg->address = fw_image->mhi_buf[i].dma_addr;
-		dump_seg->v_address = fw_image->mhi_buf[i].buf;
-		dump_seg->size = fw_image->mhi_buf[i].len;
-		dump_seg->type = CNSS_FW_IMAGE;
-		cnss_pr_dbg("seg-%d: address 0x%lx, v_address %pK, size 0x%lx\n",
-			    i, dump_seg->address,
-			    dump_seg->v_address, dump_seg->size);
+		cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
+				      fw_image->mhi_buf[i].buf,
+				      fw_image->mhi_buf[i].dma_addr,
+				      fw_image->mhi_buf[i].len);
 		dump_seg++;
 	}
 
@@ -3364,31 +3684,28 @@ void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
 		    rddm_image->entries);
 
 	for (i = 0; i < rddm_image->entries; i++) {
-		dump_seg->address = rddm_image->mhi_buf[i].dma_addr;
-		dump_seg->v_address = rddm_image->mhi_buf[i].buf;
-		dump_seg->size = rddm_image->mhi_buf[i].len;
-		dump_seg->type = CNSS_FW_RDDM;
-		cnss_pr_dbg("seg-%d: address 0x%lx, v_address %pK, size 0x%lx\n",
-			    i, dump_seg->address,
-			    dump_seg->v_address, dump_seg->size);
+		cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
+				      rddm_image->mhi_buf[i].buf,
+				      rddm_image->mhi_buf[i].dma_addr,
+				      rddm_image->mhi_buf[i].len);
 		dump_seg++;
 	}
 
 	dump_data->nentries += rddm_image->entries;
 
+	mhi_dump_sfr(pci_priv->mhi_ctrl);
+
 	cnss_pr_dbg("Collect remote heap dump segment\n");
 
-	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
+	for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
 		if (fw_mem[i].type == CNSS_MEM_TYPE_DDR) {
-			dump_seg->address = fw_mem[i].pa;
-			dump_seg->v_address = fw_mem[i].va;
-			dump_seg->size = fw_mem[i].size;
-			dump_seg->type = CNSS_FW_REMOTE_HEAP;
-			cnss_pr_dbg("seg-%d: address 0x%lx, v_address %pK, size 0x%lx\n",
-				    i, dump_seg->address, dump_seg->v_address,
-				    dump_seg->size);
+			cnss_pci_add_dump_seg(pci_priv, dump_seg,
+					      CNSS_FW_REMOTE_HEAP, j,
+					      fw_mem[i].va, fw_mem[i].pa,
+					      fw_mem[i].size);
 			dump_seg++;
 			dump_data->nentries++;
+			j++;
 		}
 	}
 
@@ -3402,6 +3719,41 @@ void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
 void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
 {
 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	struct cnss_dump_seg *dump_seg =
+		plat_priv->ramdump_info_v2.dump_data_vaddr;
+	struct image_info *fw_image, *rddm_image;
+	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
+	int i, j;
+
+	fw_image = pci_priv->mhi_ctrl->fbc_image;
+	rddm_image = pci_priv->mhi_ctrl->rddm_image;
+
+	for (i = 0; i < fw_image->entries; i++) {
+		cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
+					 fw_image->mhi_buf[i].buf,
+					 fw_image->mhi_buf[i].dma_addr,
+					 fw_image->mhi_buf[i].len);
+		dump_seg++;
+	}
+
+	for (i = 0; i < rddm_image->entries; i++) {
+		cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
+					 rddm_image->mhi_buf[i].buf,
+					 rddm_image->mhi_buf[i].dma_addr,
+					 rddm_image->mhi_buf[i].len);
+		dump_seg++;
+	}
+
+	for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
+		if (fw_mem[i].type == CNSS_MEM_TYPE_DDR) {
+			cnss_pci_remove_dump_seg(pci_priv, dump_seg,
+						 CNSS_FW_REMOTE_HEAP, j,
+						 fw_mem[i].va, fw_mem[i].pa,
+						 fw_mem[i].size);
+			dump_seg++;
+			j++;
+		}
+	}
 
 	plat_priv->ramdump_info_v2.dump_data.nentries = 0;
 	plat_priv->ramdump_info_v2.dump_data_valid = false;
@@ -3544,7 +3896,7 @@ static int cnss_pci_get_mhi_msi(struct cnss_pci_data *pci_priv)
 	return 0;
 }
 
-static void cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv)
+static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv)
 {
 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
 	struct mhi_controller *mhi_ctrl = pci_priv->mhi_ctrl;
@@ -3560,14 +3912,25 @@ static void cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv)
 		    plat_priv->device_version.major_version,
 		    plat_priv->device_version.minor_version);
 
-	if (pci_priv->device_id == QCA6390_DEVICE_ID &&
-	    plat_priv->device_version.major_version >= FW_V2_NUMBER) {
+	switch (pci_priv->device_id) {
+	case QCA6390_DEVICE_ID:
+		if (plat_priv->device_version.major_version < FW_V2_NUMBER) {
+			cnss_pr_dbg("Device ID:version (0x%lx:%d) is not supported\n",
+				    pci_priv->device_id,
+				    plat_priv->device_version.major_version);
+			return -EINVAL;
+		}
 		scnprintf(plat_priv->firmware_name,
 			  sizeof(plat_priv->firmware_name), FW_V2_FILE_NAME);
 		mhi_ctrl->fw_image = plat_priv->firmware_name;
+		break;
+	default:
+		break;
 	}
 
 	cnss_pr_dbg("Firmware name is %s\n", mhi_ctrl->fw_image);
+
+	return 0;
 }
 
 static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
@@ -3601,8 +3964,8 @@ static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
 
 	ret = cnss_pci_get_mhi_msi(pci_priv);
 	if (ret) {
-		cnss_pr_err("Failed to get MSI for MHI\n");
-		return ret;
+		cnss_pr_err("Failed to get MSI for MHI, err = %d\n", ret);
+		goto free_mhi_ctrl;
 	}
 
 	if (pci_priv->smmu_s1_enable) {
@@ -3632,12 +3995,25 @@ static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
 	ret = of_register_mhi_controller(mhi_ctrl);
 	if (ret) {
 		cnss_pr_err("Failed to register to MHI bus, err = %d\n", ret);
-		return ret;
+		goto destroy_ipc;
 	}
 
-	cnss_pci_update_fw_name(pci_priv);
+	ret = cnss_pci_update_fw_name(pci_priv);
+	if (ret)
+		goto unreg_mhi;
 
 	return 0;
+
+unreg_mhi:
+	mhi_unregister_mhi_controller(mhi_ctrl);
+destroy_ipc:
+	if (mhi_ctrl->log_buf)
+		ipc_log_context_destroy(mhi_ctrl->log_buf);
+	kfree(mhi_ctrl->irq);
+free_mhi_ctrl:
+	mhi_free_controller(mhi_ctrl);
+
+	return ret;
 }
 
 static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv)
@@ -3645,8 +4021,10 @@ static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv)
 	struct mhi_controller *mhi_ctrl = pci_priv->mhi_ctrl;
 
 	mhi_unregister_mhi_controller(mhi_ctrl);
-	ipc_log_context_destroy(mhi_ctrl->log_buf);
+	if (mhi_ctrl->log_buf)
+		ipc_log_context_destroy(mhi_ctrl->log_buf);
 	kfree(mhi_ctrl->irq);
+	mhi_free_controller(mhi_ctrl);
 }
 
 static void cnss_pci_config_regs(struct cnss_pci_data *pci_priv)
@@ -3680,12 +4058,12 @@ static int cnss_pci_probe(struct pci_dev *pci_dev,
 	int ret = 0;
 	struct cnss_pci_data *pci_priv;
 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+	struct device *dev = &pci_dev->dev;
 
 	cnss_pr_dbg("PCI is probing, vendor ID: 0x%x, device ID: 0x%x\n",
 		    id->vendor, pci_dev->device);
 
-	pci_priv = devm_kzalloc(&pci_dev->dev, sizeof(*pci_priv),
-				GFP_KERNEL);
+	pci_priv = devm_kzalloc(dev, sizeof(*pci_priv), GFP_KERNEL);
 	if (!pci_priv) {
 		ret = -ENOMEM;
 		goto out;
@@ -3702,6 +4080,14 @@ static int cnss_pci_probe(struct pci_dev *pci_dev,
 	plat_priv->bus_priv = pci_priv;
 	snprintf(plat_priv->firmware_name, sizeof(plat_priv->firmware_name),
 		 DEFAULT_FW_FILE_NAME);
+	mutex_init(&pci_priv->bus_lock);
+
+	ret = of_reserved_mem_device_init(dev);
+	if (ret)
+		cnss_pr_err("Failed to init reserved mem device, err = %d\n",
+			    ret);
+	if (dev->cma_area)
+		cnss_pr_dbg("CMA area is %s\n", cma_get_name(dev->cma_area));
 
 	ret = cnss_register_subsys(plat_priv);
 	if (ret)
@@ -3818,9 +4204,14 @@ static void cnss_pci_remove(struct pci_dev *pci_dev)
 	cnss_pci_disable_bus(pci_priv);
 	cnss_dereg_pci_event(pci_priv);
 	cnss_pci_deinit_smmu(pci_priv);
-	cnss_unregister_ramdump(plat_priv);
-	cnss_unregister_subsys(plat_priv);
-	plat_priv->bus_priv = NULL;
+	if (plat_priv) {
+		cnss_unregister_ramdump(plat_priv);
+		cnss_unregister_subsys(plat_priv);
+		plat_priv->bus_priv = NULL;
+	} else {
+		cnss_pr_err("Plat_priv is null, Unable to unregister ramdump,subsys\n");
+	}
+
 }
 
 static const struct pci_device_id cnss_pci_id_table[] = {
@@ -3876,7 +4267,16 @@ int cnss_pci_init(struct cnss_plat_data *plat_priv)
 		goto out;
 	}
 
+	if (!plat_priv->bus_priv) {
+		cnss_pr_err("Failed to probe PCI driver\n");
+		ret = -ENODEV;
+		goto unreg_pci;
+	}
+
 	return 0;
+
+unreg_pci:
+	pci_unregister_driver(&cnss_pci_driver);
 out:
 	return ret;
 }
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index 8dcb14a6..31a91b4 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -16,6 +16,7 @@ enum cnss_mhi_state {
 	CNSS_MHI_INIT,
 	CNSS_MHI_DEINIT,
 	CNSS_MHI_POWER_ON,
+	CNSS_MHI_POWERING_OFF,
 	CNSS_MHI_POWER_OFF,
 	CNSS_MHI_FORCE_POWER_OFF,
 	CNSS_MHI_SUSPEND,
@@ -71,6 +72,8 @@ struct cnss_pci_data {
 	struct pci_saved_state *saved_state;
 	struct pci_saved_state *default_state;
 	struct msm_pcie_register_event msm_pci_event;
+	struct notifier_block pm_qos_nb;
+	u8 runtime_pm_prevented;
 	atomic_t auto_suspended;
 	atomic_t drv_connected;
 	u8 drv_connected_last;
@@ -92,6 +95,7 @@ struct cnss_pci_data {
 	struct timer_list dev_rddm_timer;
 	struct delayed_work time_sync_work;
 	u8 disable_pc;
+	struct mutex bus_lock; /* mutex for suspend and resume bus */
 	struct cnss_pci_debug_reg *debug_reg;
 	struct cnss_misc_reg *wcss_reg;
 	u32 wcss_reg_size;
@@ -173,6 +177,8 @@ void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic);
 void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv);
 u32 cnss_pci_get_wake_msi(struct cnss_pci_data *pci_priv);
 int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv);
+int cnss_pci_qmi_send_get(struct cnss_pci_data *pci_priv);
+int cnss_pci_qmi_send_put(struct cnss_pci_data *pci_priv);
 void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv);
 int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv);
 int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv);
@@ -196,5 +202,11 @@ void cnss_pci_pm_runtime_mark_last_busy(struct cnss_pci_data *pci_priv);
 int cnss_pci_update_status(struct cnss_pci_data *pci_priv,
 			   enum cnss_driver_status status);
 int cnss_pcie_is_device_down(struct cnss_pci_data *pci_priv);
+int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv);
+int cnss_pci_resume_bus(struct cnss_pci_data *pci_priv);
+int cnss_pci_debug_reg_read(struct cnss_pci_data *pci_priv, u32 offset,
+			    u32 *val);
+int cnss_pci_debug_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
+			     u32 val);
 
 #endif /* _CNSS_PCI_H */
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index bb2bffc..77654ef 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -92,6 +92,8 @@ static int cnss_wlfw_ind_register_send_sync(struct cnss_plat_data *plat_priv)
 	req->qdss_trace_save_enable = 1;
 	req->qdss_trace_free_enable_valid = 1;
 	req->qdss_trace_free_enable = 1;
+	req->respond_get_info_enable_valid = 1;
+	req->respond_get_info_enable = 1;
 
 	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
 			   wlfw_ind_register_resp_msg_v01_ei, resp);
@@ -489,7 +491,7 @@ int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
 	struct wlfw_bdf_download_resp_msg_v01 *resp;
 	struct qmi_txn txn;
 	char filename[MAX_BDF_FILE_NAME];
-	const struct firmware *fw_entry;
+	const struct firmware *fw_entry = NULL;
 	const u8 *temp;
 	unsigned int remaining;
 	int ret = 0;
@@ -539,7 +541,7 @@ int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
 		req->data_valid = 1;
 		req->end_valid = 1;
 		req->bdf_type_valid = 1;
-		req->bdf_type = plat_priv->ctrl_params.bdf_type;
+		req->bdf_type = bdf_type;
 
 		if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
 			req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
@@ -594,7 +596,7 @@ int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
 	return 0;
 
 err_send:
-	if (plat_priv->ctrl_params.bdf_type != CNSS_BDF_DUMMY)
+	if (bdf_type != CNSS_BDF_DUMMY)
 		release_firmware(fw_entry);
 err_req_fw:
 	if (bdf_type != CNSS_BDF_REGDB)
@@ -1458,6 +1460,77 @@ int cnss_wlfw_dynamic_feature_mask_send_sync(struct cnss_plat_data *plat_priv)
 	return ret;
 }
 
+int cnss_wlfw_get_info_send_sync(struct cnss_plat_data *plat_priv, int type,
+				 void *cmd, int cmd_len)
+{
+	struct wlfw_get_info_req_msg_v01 *req;
+	struct wlfw_get_info_resp_msg_v01 *resp;
+	struct qmi_txn txn;
+	int ret = 0;
+
+	cnss_pr_vdbg("Sending get info message, type: %d, cmd length: %d, state: 0x%lx\n",
+		     type, cmd_len, plat_priv->driver_state);
+
+	if (cmd_len > QMI_WLFW_MAX_DATA_SIZE_V01)
+		return -EINVAL;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	req->type = type;
+	req->data_len = cmd_len;
+	memcpy(req->data, cmd, req->data_len);
+
+	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+			   wlfw_get_info_resp_msg_v01_ei, resp);
+	if (ret < 0) {
+		cnss_pr_err("Failed to initialize txn for get info request, err: %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+			       QMI_WLFW_GET_INFO_REQ_V01,
+			       WLFW_GET_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+			       wlfw_get_info_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		cnss_pr_err("Failed to send get info request, err: %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+	if (ret < 0) {
+		cnss_pr_err("Failed to wait for response of get info request, err: %d\n",
+			    ret);
+		goto out;
+	}
+
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("Get info request failed, result: %d, err: %d\n",
+			    resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+		goto out;
+	}
+
+	kfree(req);
+	kfree(resp);
+	return 0;
+
+out:
+	kfree(req);
+	kfree(resp);
+	return ret;
+}
+
 unsigned int cnss_get_qmi_timeout(struct cnss_plat_data *plat_priv)
 {
 	cnss_pr_dbg("QMI timeout is %u ms\n", QMI_WLFW_TIMEOUT_MS);
@@ -1487,6 +1560,9 @@ static void cnss_wlfw_request_mem_ind_cb(struct qmi_handle *qmi_wlfw,
 			    ind_msg->mem_seg[i].size, ind_msg->mem_seg[i].type);
 		plat_priv->fw_mem[i].type = ind_msg->mem_seg[i].type;
 		plat_priv->fw_mem[i].size = ind_msg->mem_seg[i].size;
+		if (plat_priv->fw_mem[i].type == CNSS_MEM_TYPE_DDR)
+			plat_priv->fw_mem[i].attrs |=
+				DMA_ATTR_FORCE_CONTIGUOUS;
 	}
 
 	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_REQUEST_MEM,
@@ -1717,6 +1793,31 @@ static void cnss_wlfw_qdss_trace_free_ind_cb(struct qmi_handle *qmi_wlfw,
 			       0, NULL);
 }
 
+static void cnss_wlfw_respond_get_info_ind_cb(struct qmi_handle *qmi_wlfw,
+					      struct sockaddr_qrtr *sq,
+					      struct qmi_txn *txn,
+					      const void *data)
+{
+	struct cnss_plat_data *plat_priv =
+		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+	const struct wlfw_respond_get_info_ind_msg_v01 *ind_msg = data;
+
+	cnss_pr_vdbg("Received QMI WLFW respond get info indication\n");
+
+	if (!txn) {
+		cnss_pr_err("Spurious indication\n");
+		return;
+	}
+
+	cnss_pr_vdbg("Extract message with event length: %d, type: %d, is last: %d, seq no: %d\n",
+		     ind_msg->data_len, ind_msg->type,
+		     ind_msg->is_last, ind_msg->seq_no);
+
+	if (plat_priv->get_info_cb_ctx && plat_priv->get_info_cb)
+		plat_priv->get_info_cb(plat_priv->get_info_cb_ctx,
+				       (void *)ind_msg->data,
+				       ind_msg->data_len);
+}
 static struct qmi_msg_handler qmi_wlfw_msg_handlers[] = {
 	{
 		.type = QMI_INDICATION,
@@ -1785,6 +1886,14 @@ static struct qmi_msg_handler qmi_wlfw_msg_handlers[] = {
 		sizeof(struct wlfw_qdss_trace_free_ind_msg_v01),
 		.fn = cnss_wlfw_qdss_trace_free_ind_cb
 	},
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_WLFW_RESPOND_GET_INFO_IND_V01,
+		.ei = wlfw_respond_get_info_ind_msg_v01_ei,
+		.decoded_size =
+		sizeof(struct wlfw_respond_get_info_ind_msg_v01),
+		.fn = cnss_wlfw_respond_get_info_ind_cb
+	},
 	{}
 };
 
diff --git a/drivers/net/wireless/cnss2/qmi.h b/drivers/net/wireless/cnss2/qmi.h
index a064660..fc2a2c6 100644
--- a/drivers/net/wireless/cnss2/qmi.h
+++ b/drivers/net/wireless/cnss2/qmi.h
@@ -59,6 +59,8 @@ int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv,
 int cnss_wlfw_antenna_switch_send_sync(struct cnss_plat_data *plat_priv);
 int cnss_wlfw_antenna_grant_send_sync(struct cnss_plat_data *plat_priv);
 int cnss_wlfw_dynamic_feature_mask_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_get_info_send_sync(struct cnss_plat_data *plat_priv, int type,
+				 void *cmd, int cmd_len);
 int cnss_register_coex_service(struct cnss_plat_data *plat_priv);
 void cnss_unregister_coex_service(struct cnss_plat_data *plat_priv);
 int coex_antenna_switch_to_wlan_send_sync_msg(struct cnss_plat_data *plat_priv);
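
The new cnss_wlfw_get_info_send_sync() is the synchronous half of the get-info plumbing: a caller pushes an opaque, firmware-defined payload, and data flows back later through the QMI_WLFW_RESPOND_GET_INFO_IND_V01 indication and plat_priv->get_info_cb. A hypothetical caller sketch (the payload contents and type value are illustrative only, not driver API):

/* Hypothetical: forward an opaque command blob to firmware. Anything
 * longer than QMI_WLFW_MAX_DATA_SIZE_V01 fails fast with -EINVAL.
 */
static int example_send_get_info(struct cnss_plat_data *plat_priv)
{
	u8 cmd[8] = { 0 };	/* firmware-defined payload, illustrative */

	return cnss_wlfw_get_info_send_sync(plat_priv, 0 /* type */,
					    cmd, sizeof(cmd));
}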
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
index 03a418e..682a20b 100644
--- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
@@ -704,6 +704,24 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = {
 					   qdss_trace_free_enable),
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x1F,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   respond_get_info_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x1F,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   respond_get_info_enable),
+	},
+	{
 		.data_type      = QMI_EOTI,
 		.array_type       = NO_ARRAY,
 		.tlv_type       = QMI_COMMON_TLV_TYPE,
@@ -3608,3 +3626,143 @@ struct qmi_elem_info wlfw_wfc_call_status_resp_msg_v01_ei[] = {
 	},
 };
 
+struct qmi_elem_info wlfw_get_info_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_get_info_req_msg_v01,
+					   type),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_get_info_req_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(u8),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_get_info_req_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info wlfw_get_info_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_get_info_resp_msg_v01,
+					   resp),
+		.ei_array      = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info wlfw_respond_get_info_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+					   wlfw_respond_get_info_ind_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(u8),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+					   wlfw_respond_get_info_ind_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   wlfw_respond_get_info_ind_msg_v01,
+					   type_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   wlfw_respond_get_info_ind_msg_v01,
+					   type),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+					   wlfw_respond_get_info_ind_msg_v01,
+					   is_last_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+					   wlfw_respond_get_info_ind_msg_v01,
+					   is_last),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct
+					   wlfw_respond_get_info_ind_msg_v01,
+					   seq_no_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct
+					   wlfw_respond_get_info_ind_msg_v01,
+					   seq_no),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
index dacdfdb..0e18d80 100644
--- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
@@ -13,6 +13,7 @@
 #define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025
 #define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
 #define QMI_WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_V01 0x0044
+#define QMI_WLFW_GET_INFO_REQ_V01 0x004A
 #define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A
 #define QMI_WLFW_CAL_DONE_IND_V01 0x003E
 #define QMI_WLFW_WFC_CALL_STATUS_RESP_V01 0x0049
@@ -23,6 +24,7 @@
 #define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038
 #define QMI_WLFW_ANTENNA_GRANT_RESP_V01 0x0048
 #define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026
+#define QMI_WLFW_RESPOND_GET_INFO_IND_V01 0x004B
 #define QMI_WLFW_M3_INFO_RESP_V01 0x003C
 #define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029
 #define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027
@@ -49,6 +51,7 @@
 #define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031
 #define QMI_WLFW_WLAN_MODE_RESP_V01 0x0022
 #define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036
+#define QMI_WLFW_GET_INFO_RESP_V01 0x004A
 #define QMI_WLFW_QDSS_TRACE_MODE_RESP_V01 0x0045
 #define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C
 #define QMI_WLFW_FW_READY_IND_V01 0x0021
@@ -272,9 +275,11 @@ struct wlfw_ind_register_req_msg_v01 {
 	u8 qdss_trace_save_enable;
 	u8 qdss_trace_free_enable_valid;
 	u8 qdss_trace_free_enable;
+	u8 respond_get_info_enable_valid;
+	u8 respond_get_info_enable;
 };
 
-#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 66
+#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 70
 extern struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[];
 
 struct wlfw_ind_register_resp_msg_v01 {
@@ -949,4 +954,34 @@ struct wlfw_wfc_call_status_resp_msg_v01 {
 #define WLFW_WFC_CALL_STATUS_RESP_MSG_V01_MAX_MSG_LEN 7
 extern struct qmi_elem_info wlfw_wfc_call_status_resp_msg_v01_ei[];
 
+struct wlfw_get_info_req_msg_v01 {
+	u8 type;
+	u32 data_len;
+	u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+};
+
+#define WLFW_GET_INFO_REQ_MSG_V01_MAX_MSG_LEN 6153
+extern struct qmi_elem_info wlfw_get_info_req_msg_v01_ei[];
+
+struct wlfw_get_info_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_GET_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_get_info_resp_msg_v01_ei[];
+
+struct wlfw_respond_get_info_ind_msg_v01 {
+	u32 data_len;
+	u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+	u8 type_valid;
+	u8 type;
+	u8 is_last_valid;
+	u8 is_last;
+	u8 seq_no_valid;
+	u32 seq_no;
+};
+
+#define WLFW_RESPOND_GET_INFO_IND_MSG_V01_MAX_MSG_LEN 6164
+extern struct qmi_elem_info wlfw_respond_get_info_ind_msg_v01_ei[];
+
 #endif
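
The MAX_MSG_LEN values above follow from standard QMI TLV framing, assuming the usual 1-byte type plus 2-byte length per TLV header and QMI_WLFW_MAX_DATA_SIZE_V01 == 6144 (which these totals imply):

  get_info req:   type TLV (3 + 1) + data TLV (3 + 2 + 6144)        = 6153
  get_info resp:  result TLV (3 + 4)                                = 7
  get_info ind:   data TLV (3 + 2 + 6144) + type TLV (3 + 1)
                  + is_last TLV (3 + 1) + seq_no TLV (3 + 4)        = 6164
  ind_register:   66 + respond_get_info_enable TLV (3 + 1)          = 70

The QMI_DATA_LEN element that shares a tlv_type with a VAR_LEN_ARRAY element (as in wlfw_get_info_req_msg_v01_ei) is the on-wire 2-byte count prefix for that array; the _valid flags are host-side only and are never serialized.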
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 91ca77c..b4347806 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -77,10 +77,13 @@
 #define IWL_22000_HR_FW_PRE		"iwlwifi-Qu-a0-hr-a0-"
 #define IWL_22000_HR_CDB_FW_PRE		"iwlwifi-QuIcp-z0-hrcdb-a0-"
 #define IWL_22000_HR_A_F0_FW_PRE	"iwlwifi-QuQnj-f0-hr-a0-"
-#define IWL_22000_HR_B_FW_PRE		"iwlwifi-Qu-b0-hr-b0-"
+#define IWL_22000_HR_B_F0_FW_PRE	"iwlwifi-Qu-b0-hr-b0-"
+#define IWL_22000_QU_B_HR_B_FW_PRE	"iwlwifi-Qu-b0-hr-b0-"
+#define IWL_22000_HR_B_FW_PRE		"iwlwifi-QuQnj-b0-hr-b0-"
 #define IWL_22000_JF_B0_FW_PRE		"iwlwifi-QuQnj-a0-jf-b0-"
 #define IWL_22000_HR_A0_FW_PRE		"iwlwifi-QuQnj-a0-hr-a0-"
 #define IWL_22000_SU_Z0_FW_PRE		"iwlwifi-su-z0-"
+#define IWL_QU_B_JF_B_FW_PRE		"iwlwifi-Qu-b0-jf-b0-"
 
 #define IWL_22000_HR_MODULE_FIRMWARE(api) \
 	IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
@@ -88,7 +91,11 @@
 	IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \
 	IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
+#define IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(api) \
+	IWL_22000_HR_B_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(api) \
+	IWL_22000_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api)	\
 	IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
 	IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
@@ -96,6 +103,8 @@
 	IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
 	IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
+#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
+	IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_22000		10
 
@@ -190,7 +199,54 @@ const struct iwl_cfg iwl22000_2ac_cfg_jf = {
 
 const struct iwl_cfg iwl22000_2ax_cfg_hr = {
 	.name = "Intel(R) Dual Band Wireless AX 22000",
-	.fw_name_pre = IWL_22000_HR_FW_PRE,
+	.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
+	IWL_DEVICE_22500,
+	/*
+	 * This device doesn't support receiving BlockAck with a large bitmap
+	 * so we need to restrict the size of transmitted aggregation to the
+	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
+	 */
+	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+/*
+ * All JF radio modules are part of the 9000 series, but the MAC part
+ * looks more like 22000.  That's why this device is here, but called
+ * 9560 nevertheless.
+ */
+const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0 = {
+	.name = "Intel(R) Wireless-AC 9461",
+	.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+	IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0 = {
+	.name = "Intel(R) Wireless-AC 9462",
+	.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+	IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0 = {
+	.name = "Intel(R) Wireless-AC 9560",
+	.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+	IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = {
+	.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
+	.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+	IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
+	.name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
+	.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+	IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_jf = {
+	.name = "Intel(R) Dual Band Wireless AX 22000",
+	.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
 	IWL_DEVICE_22500,
 	/*
 	 * This device doesn't support receiving BlockAck with a large bitmap
@@ -264,7 +320,10 @@ const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = {
 MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
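
Each MODULE_FIRMWARE entry resolves to its prefix with the ucode API number and extension appended. A worked expansion (the API number 43 is illustrative; IWL_22000_UCODE_API_MAX is defined outside this hunk):

/*
 * IWL_QU_B_JF_B_MODULE_FIRMWARE(43)
 *   -> IWL_QU_B_JF_B_FW_PRE __stringify(43) ".ucode"
 *   -> "iwlwifi-Qu-b0-jf-b0-" "43" ".ucode"
 *   -> "iwlwifi-Qu-b0-jf-b0-43.ucode"
 */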
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 12fddcf..2e9fd7a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -574,11 +574,18 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
 extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
 extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
 extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
+extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_jf;
 extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0;
 extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
 extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
 extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
 extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb;
-#endif /* CONFIG_IWLMVM */
+#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
 
 #endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 3fe7605..9cb9f05 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -843,11 +843,13 @@ static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
 	 * firmware versions.  Unfortunately, we don't have a TLV API
 	 * flag to rely on, so rely on the major version which is in
 	 * the first byte of ucode_ver.  This was implemented
-	 * initially on version 38 and then backported to 36, 29 and
-	 * 17.
+	 * initially on version 38 and then backported to 29 and 17.
+	 * The intention was to have it in 36 as well, but not all
+	 * 8000 family got this feature enabled.  The 8000 family is
+	 * the only one using version 36, so skip this version
+	 * entirely.
 	 */
 	return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
-	       IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
 	       IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
 	       IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
 }
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index d1c1a80..5e1e671 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -315,7 +315,7 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
 }
 
 void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-		     enum nl80211_band band)
+		     enum nl80211_band band, bool update)
 {
 	struct ieee80211_hw *hw = mvm->hw;
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -324,7 +324,8 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	struct ieee80211_supported_band *sband;
 	struct iwl_tlc_config_cmd cfg_cmd = {
 		.sta_id = mvmsta->sta_id,
-		.max_ch_width = rs_fw_bw_from_sta_bw(sta),
+		.max_ch_width = update ?
+			rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20,
 		.flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
 		.chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
 		.max_mpdu_len = cpu_to_le16(sta->max_amsdu_len),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 6b9c670..6f4508d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -4113,7 +4113,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 			  enum nl80211_band band, bool update)
 {
 	if (iwl_mvm_has_tlc_offload(mvm))
-		rs_fw_rate_init(mvm, sta, band);
+		rs_fw_rate_init(mvm, sta, band, update);
 	else
 		rs_drv_rate_init(mvm, sta, band, update);
 }
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index 8e7f993..d0f4789 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -461,7 +461,7 @@ void rs_remove_sta_debugfs(void *mvm, void *mvm_sta);
 
 void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta);
 void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-		     enum nl80211_band band);
+		     enum nl80211_band band, bool update);
 int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
 			bool enable);
 void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index ffae299..5615ce5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -671,7 +671,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 		if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
 		    info.control.vif->type == NL80211_IFTYPE_AP ||
 		    info.control.vif->type == NL80211_IFTYPE_ADHOC) {
-			if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE)
+			if (!ieee80211_is_data(hdr->frame_control))
 				sta_id = mvmvif->bcast_sta.sta_id;
 			else
 				sta_id = mvmvif->mcast_sta.sta_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 5d65500..0982bd9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -601,6 +601,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x4018, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
@@ -696,34 +697,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)},
 	{IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)},
 	{IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)},
-	{IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+
+	{IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
 	{IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_cfg_soc)},
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 7cd428c..ce2dd06 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3502,10 +3502,12 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
 		hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
 				  cb->nlh->nlmsg_seq, &hwsim_genl_family,
 				  NLM_F_MULTI, HWSIM_CMD_GET_RADIO);
-		if (!hdr)
+		if (hdr) {
+			genl_dump_check_consistent(cb, hdr);
+			genlmsg_end(skb, hdr);
+		} else {
 			res = -EMSGSIZE;
-		genl_dump_check_consistent(cb, hdr);
-		genlmsg_end(skb, hdr);
+		}
 	}
 
 done:
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index 3dbfce9..9e82ec1 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -49,7 +49,8 @@ static const struct lbs_fw_table fw_table[] = {
 	{ MODEL_8388, "libertas/usb8388_v5.bin", NULL },
 	{ MODEL_8388, "libertas/usb8388.bin", NULL },
 	{ MODEL_8388, "usb8388.bin", NULL },
-	{ MODEL_8682, "libertas/usb8682.bin", NULL }
+	{ MODEL_8682, "libertas/usb8682.bin", NULL },
+	{ 0, NULL, NULL }
 };
 
 static const struct usb_device_id if_usb_table[] = {
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index 801a2d7..a3f4a5e 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -241,6 +241,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
 		}
 
 		vs_ie = (struct ieee_types_header *)vendor_ie;
+		if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 >
+			IEEE_MAX_IE_SIZE)
+			return -EINVAL;
 		memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
 		       vs_ie, vs_ie->len + 2);
 		le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2);
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index 18f7d9b..0939a8c 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -265,6 +265,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
 
 	rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
 	if (rate_ie) {
+		if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
+			return;
 		memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
 		rate_len = rate_ie->len;
 	}
@@ -272,8 +274,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
 	rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
 					   params->beacon.tail,
 					   params->beacon.tail_len);
-	if (rate_ie)
+	if (rate_ie) {
+		if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
+			return;
 		memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
+	}
 
 	return;
 }
@@ -391,6 +396,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
 					    params->beacon.tail_len);
 	if (vendor_ie) {
 		wmm_ie = vendor_ie;
+		if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
+			return;
 		memcpy(&bss_cfg->wmm_info, wmm_ie +
 		       sizeof(struct ieee_types_header), *(wmm_ie + 1));
 		priv->wmm_enabled = 1;
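
The three mwifiex hunks above (ie.c and uap_cmd.c) are instances of one pattern: an IE length byte parsed from frame data is attacker-controlled, so it must be checked against the space left in the fixed destination buffer before the memcpy. A minimal sketch of that pattern under illustrative names (not mwifiex API):

#include <linux/string.h>
#include <linux/errno.h>

static int copy_ie_bounded(u8 *dst, size_t dst_size, size_t used,
			   const u8 *ie_data, u8 ie_len)
{
	/* Reject an oversized, untrusted IE before touching dst. */
	if (used > dst_size || ie_len > dst_size - used)
		return -EINVAL;
	memcpy(dst + used, ie_data, ie_len);
	return 0;
}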
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index 0a3e046..da2ba51 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -369,7 +369,7 @@ static void mt76x0_stop_hardware(struct mt76x0_dev *dev)
 	mt76x0_chip_onoff(dev, false, false);
 }
 
-int mt76x0_init_hardware(struct mt76x0_dev *dev)
+int mt76x0_init_hardware(struct mt76x0_dev *dev, bool reset)
 {
 	static const u16 beacon_offsets[16] = {
 		/* 512 byte per beacon */
@@ -382,7 +382,7 @@ int mt76x0_init_hardware(struct mt76x0_dev *dev)
 
 	dev->beacon_offsets = beacon_offsets;
 
-	mt76x0_chip_onoff(dev, true, true);
+	mt76x0_chip_onoff(dev, true, reset);
 
 	ret = mt76x0_wait_asic_ready(dev);
 	if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
index fc9857f..f9dfe50 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -279,7 +279,7 @@ void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr);
 
 /* Init */
 struct mt76x0_dev *mt76x0_alloc_device(struct device *dev);
-int mt76x0_init_hardware(struct mt76x0_dev *dev);
+int mt76x0_init_hardware(struct mt76x0_dev *dev, bool reset);
 int mt76x0_register_device(struct mt76x0_dev *dev);
 void mt76x0_cleanup(struct mt76x0_dev *dev);
 void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 54ae1f1..5aacb1f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -300,7 +300,7 @@ static int mt76x0_probe(struct usb_interface *usb_intf,
 	if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
 		dev_warn(dev->mt76.dev, "Warning: eFUSE not present\n");
 
-	ret = mt76x0_init_hardware(dev);
+	ret = mt76x0_init_hardware(dev, true);
 	if (ret)
 		goto err;
 
@@ -354,7 +354,7 @@ static int mt76x0_resume(struct usb_interface *usb_intf)
 	struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
 	int ret;
 
-	ret = mt76x0_init_hardware(dev);
+	ret = mt76x0_init_hardware(dev, false);
 	if (ret)
 		return ret;
 
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
index 6542644..cec31f0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
@@ -402,7 +402,7 @@ void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
 		ccmp_pn[6] = pn >> 32;
 		ccmp_pn[7] = pn >> 40;
 		txwi->iv = *((__le32 *)&ccmp_pn[0]);
-		txwi->eiv = *((__le32 *)&ccmp_pn[1]);
+		txwi->eiv = *((__le32 *)&ccmp_pn[4]);
 	}
 
 	spin_lock_bh(&dev->mt76.lock);
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 479a4cf..5f998ea 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -775,6 +775,9 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
 				return;
 			} else {
 				noa_num = (noa_len - 2) / 13;
+				if (noa_num > P2P_MAX_NOA_NUM)
+					noa_num = P2P_MAX_NOA_NUM;
+
 			}
 			noa_index = ie[3];
 			if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
@@ -869,6 +872,9 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
 				return;
 			} else {
 				noa_num = (noa_len - 2) / 13;
+				if (noa_num > P2P_MAX_NOA_NUM)
+					noa_num = P2P_MAX_NOA_NUM;
+
 			}
 			noa_index = ie[3];
 			if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index f360690..14e56be 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -643,7 +643,6 @@ static int rsi_init_usb_interface(struct rsi_hw *adapter,
 	kfree(rsi_dev->tx_buffer);
 
 fail_eps:
-	kfree(rsi_dev);
 
 	return status;
 }
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 82add0a..27b6b14 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -718,7 +718,6 @@ int xenvif_connect_data(struct xenvif_queue *queue,
 	xenvif_unmap_frontend_data_rings(queue);
 	netif_napi_del(&queue->napi);
 err:
-	module_put(THIS_MODULE);
 	return err;
 }
 
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index d5081ff..1c84910 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -925,6 +925,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
 			nskb = xenvif_alloc_skb(0);
 			if (unlikely(nskb == NULL)) {
+				skb_shinfo(skb)->nr_frags = 0;
 				kfree_skb(skb);
 				xenvif_tx_err(queue, &txreq, extra_count, idx);
 				if (net_ratelimit())
@@ -940,6 +941,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
 			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
 				/* Failure in xenvif_set_skb_gso is fatal. */
+				skb_shinfo(skb)->nr_frags = 0;
 				kfree_skb(skb);
 				kfree_skb(nskb);
 				break;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 5b97cc9..6b4675a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -890,9 +890,9 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
 	return 0;
 }
 
-static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
-				  struct sk_buff *skb,
-				  struct sk_buff_head *list)
+static int xennet_fill_frags(struct netfront_queue *queue,
+			     struct sk_buff *skb,
+			     struct sk_buff_head *list)
 {
 	RING_IDX cons = queue->rx.rsp_cons;
 	struct sk_buff *nskb;
@@ -909,9 +909,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 		}
 		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
-			queue->rx.rsp_cons = ++cons;
+			queue->rx.rsp_cons = ++cons + skb_queue_len(list);
 			kfree_skb(nskb);
-			return ~0U;
+			return -ENOENT;
 		}
 
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
@@ -922,7 +922,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 		kfree_skb(nskb);
 	}
 
-	return cons;
+	queue->rx.rsp_cons = cons;
+
+	return 0;
 }
 
 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
@@ -1048,8 +1050,7 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 		skb->data_len = rx->status;
 		skb->len += rx->status;
 
-		i = xennet_fill_frags(queue, skb, &tmpq);
-		if (unlikely(i == ~0U))
+		if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
 			goto err;
 
 		if (rx->flags & XEN_NETRXF_csum_blank)
@@ -1059,7 +1060,7 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 
 		__skb_queue_tail(&rxq, skb);
 
-		queue->rx.rsp_cons = ++i;
+		i = ++queue->rx.rsp_cons;
 		work_done++;
 	}
 
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index 7581eb8..1d49640 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -41,21 +41,9 @@ static const struct of_device_id msm_match_table[] = {
 
 MODULE_DEVICE_TABLE(of, msm_match_table);
 
-#define DEV_COUNT	1
-#define DEVICE_NAME	"nq-nci"
-#define CLASS_NAME	"nqx"
-#define MAX_BUFFER_SIZE			(320)
-#define WAKEUP_SRC_TIMEOUT		(2000)
-#define MAX_RETRY_COUNT			3
-#define NCI_RESET_CMD_LEN		4
-#define NCI_RESET_RSP_LEN		4
-#define NCI_RESET_NTF_LEN		13
-#define NCI_GET_VERSION_CMD_LEN		8
-#define NCI_GET_VERSION_RSP_LEN		12
-#define MAX_IRQ_WAIT_TIME		(90)	//in ms
-
 struct nqx_dev {
 	wait_queue_head_t	read_wq;
+	wait_queue_head_t	cold_reset_read_wq;
 	struct	mutex		read_mutex;
 	struct	mutex		dev_ref_mutex;
 	struct	i2c_client	*client;
@@ -72,10 +60,14 @@ struct nqx_dev {
 	unsigned int		ese_gpio;
 	/* NFC VEN pin state powered by Nfc */
 	bool			nfc_ven_enabled;
+	/* NFC state reflected from MW */
+	bool			nfc_enabled;
 	/* NFC_IRQ state */
 	bool			irq_enabled;
 	/* NFC_IRQ wake-up state */
 	bool			irq_wake_up;
+	bool			cold_reset_rsp_pending;
+	uint8_t			cold_reset_status;
 	spinlock_t		irq_enabled_lock;
 	unsigned int		count_irq;
 	/* NFC_IRQ Count */
@@ -97,6 +89,9 @@ static int nfcc_reboot(struct notifier_block *notifier, unsigned long val,
 static int nqx_clock_select(struct nqx_dev *nqx_dev);
 /*clock disable function*/
 static int nqx_clock_deselect(struct nqx_dev *nqx_dev);
+static int nqx_standby_write(struct nqx_dev *nqx_dev,
+				const unsigned char *buf, size_t len);
+
 static struct notifier_block nfcc_notifier = {
 	.notifier_call	= nfcc_reboot,
 	.next			= NULL,
@@ -159,6 +154,92 @@ static irqreturn_t nqx_dev_irq_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static int is_data_available_for_read(struct nqx_dev *nqx_dev)
+{
+	int ret;
+
+	nqx_enable_irq(nqx_dev);
+	ret = wait_event_interruptible_timeout(nqx_dev->read_wq,
+		!nqx_dev->irq_enabled, msecs_to_jiffies(MAX_IRQ_WAIT_TIME));
+	return ret;
+}
+
+static int send_cold_reset_cmd(struct nqx_dev *nqx_dev)
+{
+	int ret;
+	char *cold_reset_cmd = NULL;
+
+	if (gpio_get_value(nqx_dev->firm_gpio)) {
+		dev_err(&nqx_dev->client->dev, "FW download in-progress\n");
+		return -EBUSY;
+	}
+	if (!gpio_get_value(nqx_dev->en_gpio)) {
+		dev_err(&nqx_dev->client->dev, "VEN LOW - NFCC powered off\n");
+		return -ENODEV;
+	}
+	cold_reset_cmd = kzalloc(COLD_RESET_CMD_LEN, GFP_DMA | GFP_KERNEL);
+	if (!cold_reset_cmd)
+		return -ENOMEM;
+
+	cold_reset_cmd[0] = COLD_RESET_CMD_GID;
+	cold_reset_cmd[1] = COLD_RESET_OID;
+	cold_reset_cmd[2] = COLD_RESET_CMD_PAYLOAD_LEN;
+
+	ret = nqx_standby_write(nqx_dev, cold_reset_cmd, COLD_RESET_CMD_LEN);
+	if (ret < 0) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: write failed after max retry\n", __func__);
+	}
+	kfree(cold_reset_cmd);
+	return ret;
+}
+
+static void read_cold_reset_rsp(struct nqx_dev *nqx_dev, bool isNfcEnabled,
+				char *header)
+{
+	int ret = -1;
+	char *cold_reset_rsp = NULL;
+
+	cold_reset_rsp = kzalloc(COLD_RESET_RSP_LEN, GFP_DMA | GFP_KERNEL);
+	if (!cold_reset_rsp)
+		return;
+
+	/*
+	 * If NFC is disabled, read the NCI header here as well; in
+	 * the enabled case the nfc_read thread has already read it.
+	 */
+	if (!isNfcEnabled) {
+		ret = i2c_master_recv(nqx_dev->client, cold_reset_rsp,
+					NCI_HEADER_LEN);
+		if (ret != NCI_HEADER_LEN) {
+			dev_err(&nqx_dev->client->dev,
+				"%s: failure to read cold reset rsp header\n",
+				__func__);
+			goto error;
+		}
+	} else {
+		memcpy(cold_reset_rsp, header, NCI_HEADER_LEN);
+	}
+
+	if ((NCI_HEADER_LEN + cold_reset_rsp[2]) > COLD_RESET_RSP_LEN) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: - invalid response for cold_reset\n", __func__);
+		ret = -EINVAL;
+		goto error;
+	}
+	ret = i2c_master_recv(nqx_dev->client, &cold_reset_rsp[NCI_PAYLOAD_IDX],
+				cold_reset_rsp[2]);
+	if (ret != cold_reset_rsp[2]) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: failure to read cold reset rsp status\n",
+			__func__);
+		goto error;
+	}
+	nqx_dev->cold_reset_status = cold_reset_rsp[NCI_PAYLOAD_IDX];
+error:
+	kfree(cold_reset_rsp);
+}
+
 static ssize_t nfc_read(struct file *filp, char __user *buf,
 					size_t count, loff_t *offset)
 {
@@ -232,6 +313,24 @@ static ssize_t nfc_read(struct file *filp, char __user *buf,
 		ret = -EIO;
 		goto err;
 	}
+	/* Check if this is the response to the cold reset command;
+	 * the NFC HAL process must not receive this data, as the
+	 * command was sent by the eSE HAL.
+	 */
+	if (nqx_dev->cold_reset_rsp_pending
+		&& (tmp[0] == COLD_RESET_RSP_GID)
+		&& (tmp[1] == COLD_RESET_OID)) {
+		read_cold_reset_rsp(nqx_dev, true, tmp);
+		nqx_dev->cold_reset_rsp_pending = false;
+		wake_up_interruptible(&nqx_dev->cold_reset_read_wq);
+		mutex_unlock(&nqx_dev->read_mutex);
+		/*
+		 * The NFC process does not know a cold reset command
+		 * was sent, since the eSE process initiated it, so
+		 * return no data to the NFC process.
+		 */
+		return 0;
+	}
 #ifdef NFC_KERNEL_BU
 		dev_dbg(&nqx_dev->client->dev, "%s : NfcNciRx %x %x %x\n",
 			__func__, tmp[0], tmp[1], tmp[2]);
@@ -326,17 +425,19 @@ static int nqx_standby_write(struct nqx_dev *nqx_dev,
 	return ret;
 }
 
+
 /*
  * Power management of the SN100 eSE
  * eSE and NFCC both are powered using VEN gpio in SN100,
  * VEN HIGH - eSE and NFCC both are powered on
  * VEN LOW - eSE and NFCC both are power down
  */
+
 static int sn100_ese_pwr(struct nqx_dev *nqx_dev, unsigned long arg)
 {
 	int r = -1;
 
-	if (arg == 0) {
+	if (arg == ESE_POWER_ON) {
 		/**
 		 * Let's store the NFC VEN pin state
 		 * will check stored value in case of eSE power off request,
@@ -355,7 +456,7 @@ static int sn100_ese_pwr(struct nqx_dev *nqx_dev, unsigned long arg)
 			dev_dbg(&nqx_dev->client->dev, "en_gpio already HIGH\n");
 		}
 		r = 0;
-	} else if (arg == 1) {
+	} else if (arg == ESE_POWER_OFF) {
 		if (!nqx_dev->nfc_ven_enabled) {
 			dev_dbg(&nqx_dev->client->dev, "NFC not enabled, disabling en_gpio\n");
 			gpio_set_value(nqx_dev->en_gpio, 0);
@@ -365,7 +466,40 @@ static int sn100_ese_pwr(struct nqx_dev *nqx_dev, unsigned long arg)
 			dev_dbg(&nqx_dev->client->dev, "keep en_gpio high as NFC is enabled\n");
 		}
 		r = 0;
-	} else if (arg == 3) {
+	} else if (arg == ESE_COLD_RESET) {
+		// set default value for status as failure
+		nqx_dev->cold_reset_status = EIO;
+
+		r = send_cold_reset_cmd(nqx_dev);
+		if (r <= 0) {
+			dev_err(&nqx_dev->client->dev,
+				"failed to send cold reset command\n");
+			return nqx_dev->cold_reset_status;
+		}
+		nqx_dev->cold_reset_rsp_pending = true;
+		// check if NFC is enabled
+		if (nqx_dev->nfc_enabled) {
+			/*
+			 * The nfc_read thread will read the cold reset
+			 * response and signal when it is available.
+			 */
+			wait_event_interruptible(nqx_dev->cold_reset_read_wq,
+				!nqx_dev->cold_reset_rsp_pending);
+		} else {
+			/*
+			 * Read data as NFC thread is not active
+			 */
+			r = is_data_available_for_read(nqx_dev);
+			if (r <= 0) {
+				nqx_disable_irq(nqx_dev);
+				nqx_dev->cold_reset_rsp_pending = false;
+				return nqx_dev->cold_reset_status;
+			}
+			read_cold_reset_rsp(nqx_dev, false, NULL);
+			nqx_dev->cold_reset_rsp_pending = false;
+		}
+		r = nqx_dev->cold_reset_status;
+	} else if (arg == ESE_POWER_STATE) {
 		// eSE power state
 		r = gpio_get_value(nqx_dev->en_gpio);
 	}
@@ -556,7 +690,7 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
 	int r = 0;
 	struct nqx_dev *nqx_dev = filp->private_data;
 
-	if (arg == 0) {
+	if (arg == NFC_POWER_OFF) {
 		/*
 		 * We are attempting a hardware reset so let us disable
 		 * interrupts to avoid spurious notifications to upper
@@ -590,7 +724,7 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
 				dev_err(&nqx_dev->client->dev, "unable to disable clock\n");
 		}
 		nqx_dev->nfc_ven_enabled = false;
-	} else if (arg == 1) {
+	} else if (arg == NFC_POWER_ON) {
 		nqx_enable_irq(nqx_dev);
 		dev_dbg(&nqx_dev->client->dev,
 			"gpio_set_value enable: %s: info: %p\n",
@@ -607,7 +741,7 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
 				dev_err(&nqx_dev->client->dev, "unable to enable clock\n");
 		}
 		nqx_dev->nfc_ven_enabled = true;
-	} else if (arg == 2) {
+	} else if (arg == NFC_FW_DWL_VEN_TOGGLE) {
 		/*
 		 * We are switching to Download Mode, toggle the enable pin
 		 * in order to set the NFCC in the new mode
@@ -629,7 +763,7 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
 		usleep_range(10000, 10100);
 		gpio_set_value(nqx_dev->en_gpio, 1);
 		usleep_range(10000, 10100);
-	} else if (arg == 4) {
+	} else if (arg == NFC_FW_DWL_HIGH) {
 		/*
 		 * Setting firmware download gpio to HIGH for SN100U
 		 * before FW download start
@@ -641,7 +775,7 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
 		} else
 			dev_err(&nqx_dev->client->dev,
 				"firm_gpio is invalid\n");
-	} else if (arg == 6) {
+	} else if (arg == NFC_FW_DWL_LOW) {
 		/*
 		 * Setting firmware download gpio to LOW for SN100U
 		 * FW download finished
@@ -654,6 +788,16 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
 			dev_err(&nqx_dev->client->dev,
 				"firm_gpio is invalid\n");
 		}
+	} else if (arg == NFC_ENABLE) {
+		/*
+		 * Setting flag true when NFC is enabled
+		 */
+		nqx_dev->nfc_enabled = true;
+	} else if (arg == NFC_DISABLE) {
+		/*
+		 * Setting flag false when NFC is disabled
+		 */
+		nqx_dev->nfc_enabled = false;
 	} else {
 		r = -ENOIOCTLCMD;
 	}
@@ -897,9 +1041,8 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
 		}
 		goto err_nfcc_reset_failed;
 	}
-	nqx_enable_irq(nqx_dev);
-	ret = wait_event_interruptible_timeout(nqx_dev->read_wq,
-		!nqx_dev->irq_enabled, msecs_to_jiffies(MAX_IRQ_WAIT_TIME));
+
+	ret = is_data_available_for_read(nqx_dev);
 	if (ret <= 0) {
 		nqx_disable_irq(nqx_dev);
 		goto err_nfcc_hw_check;
@@ -915,9 +1058,8 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
 			goto reset_enable_gpio;
 		goto err_nfcc_hw_check;
 	}
-	nqx_enable_irq(nqx_dev);
-	ret = wait_event_interruptible_timeout(nqx_dev->read_wq,
-		!nqx_dev->irq_enabled, msecs_to_jiffies(MAX_IRQ_WAIT_TIME));
+
+	ret = is_data_available_for_read(nqx_dev);
 	if (ret <= 0) {
 		nqx_disable_irq(nqx_dev);
 		goto err_nfcc_hw_check;
@@ -1280,6 +1422,7 @@ static int nqx_probe(struct i2c_client *client,
 
 	/* init mutex and queues */
 	init_waitqueue_head(&nqx_dev->read_wq);
+	init_waitqueue_head(&nqx_dev->cold_reset_read_wq);
 	mutex_init(&nqx_dev->read_mutex);
 	mutex_init(&nqx_dev->dev_ref_mutex);
 	spin_lock_init(&nqx_dev->irq_enabled_lock);
@@ -1362,6 +1505,8 @@ static int nqx_probe(struct i2c_client *client,
 	device_set_wakeup_capable(&client->dev, true);
 	i2c_set_clientdata(client, nqx_dev);
 	nqx_dev->irq_wake_up = false;
+	nqx_dev->cold_reset_rsp_pending = false;
+	nqx_dev->nfc_enabled = false;
 
 	dev_err(&client->dev,
 	"%s: probing NFCC NQxxx exited successfully\n",
diff --git a/drivers/nfc/nq-nci.h b/drivers/nfc/nq-nci.h
index 12b0737..8d807ec 100644
--- a/drivers/nfc/nq-nci.h
+++ b/drivers/nfc/nq-nci.h
@@ -24,12 +24,65 @@
 #define SET_EMULATOR_TEST_POINT		_IOW(0xE9, 0x05, unsigned int)
 #define NFCC_INITIAL_CORE_RESET_NTF	_IOW(0xE9, 0x10, unsigned int)
 
+#define DEV_COUNT			1
+#define DEVICE_NAME			"nq-nci"
+#define CLASS_NAME			"nqx"
+#define MAX_BUFFER_SIZE			(320)
+#define WAKEUP_SRC_TIMEOUT		(2000)
+#define NCI_HEADER_LEN			3
+#define NCI_PAYLOAD_IDX			3
+#define MAX_RETRY_COUNT			3
+#define NCI_RESET_CMD_LEN		4
+#define NCI_RESET_RSP_LEN		4
+#define NCI_RESET_NTF_LEN		13
+#define NCI_GET_VERSION_CMD_LEN		8
+#define NCI_GET_VERSION_RSP_LEN		12
+#define MAX_IRQ_WAIT_TIME		(90)	//in ms
+#define COLD_RESET_CMD_LEN		3
+#define COLD_RESET_RSP_LEN		4
+#define COLD_RESET_CMD_GID		0x2F
+#define COLD_RESET_CMD_PAYLOAD_LEN	0x00
+#define COLD_RESET_RSP_GID		0x4F
+#define COLD_RESET_OID			0x1E
+
 #define NFC_RX_BUFFER_CNT_START		(0x0)
 #define PAYLOAD_HEADER_LENGTH		(0x3)
 #define PAYLOAD_LENGTH_MAX		(256)
 #define BYTE				(0x8)
 #define NCI_IDENTIFIER			(0x10)
 
+enum ese_ioctl_request {
+	/* eSE POWER ON */
+	ESE_POWER_ON = 0,
+	/* eSE POWER OFF */
+	ESE_POWER_OFF,
+	/* eSE COLD RESET */
+	ESE_COLD_RESET,
+	/* eSE POWER STATE */
+	ESE_POWER_STATE
+};
+
+enum nfcc_ioctl_request {
+	/* NFC disable request with VEN LOW */
+	NFC_POWER_OFF = 0,
+	/* NFC enable request with VEN Toggle */
+	NFC_POWER_ON,
+	/* firmware download request with VEN Toggle */
+	NFC_FW_DWL_VEN_TOGGLE,
+	/* ISO reset request */
+	NFC_ISO_RESET,
+	/* request for firmware download gpio HIGH */
+	NFC_FW_DWL_HIGH,
+	/* hard reset request */
+	NFC_HARD_RESET,
+	/* request for firmware download gpio LOW */
+	NFC_FW_DWL_LOW,
+	/* NFC enable without VEN gpio modification */
+	NFC_ENABLE,
+	/* NFC disable without VEN gpio modification */
+	NFC_DISABLE
+};
+
 enum nfcc_initial_core_reset_ntf {
 	TIMEDOUT_INITIAL_CORE_RESET_NTF = 0, /* 0*/
 	ARRIVED_INITIAL_CORE_RESET_NTF, /* 1 */
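The new NFC_ENABLE/NFC_DISABLE requests only flip nqx_dev->nfc_enabled; the patch does not show which ioctl command number userspace passes them through. A minimal userspace sketch under that caveat — NFC_SET_PWR and the device-node path are assumptions for illustration, only the request values come from the enum above:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define NFC_SET_PWR	_IOW(0xE9, 0x01, unsigned int)	/* assumed command */

enum { NFC_ENABLE = 7, NFC_DISABLE = 8 };	/* values from the nq-nci.h enum */

int main(void)
{
	int fd = open("/dev/nq-nci", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, NFC_SET_PWR, NFC_ENABLE) < 0)	/* driver sets nfc_enabled */
		perror("NFC_ENABLE");
	if (ioctl(fd, NFC_SET_PWR, NFC_DISABLE) < 0)	/* driver clears it */
		perror("NFC_DISABLE");
	close(fd);
	return 0;
}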
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index f55d082..5d6e7e9 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -344,6 +344,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
 
 		transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
 					    skb->len - 2, GFP_KERNEL);
+		if (!transaction)
+			return -ENOMEM;
 
 		transaction->aid_len = skb->data[1];
 		memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index acdce23..569475a 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -335,6 +335,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
 
 		transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
 						   skb->len - 2, GFP_KERNEL);
+		if (!transaction)
+			return -ENOMEM;
 
 		transaction->aid_len = skb->data[1];
 		memcpy(transaction->aid, &skb->data[2],
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 2a9d6b0..80508da3 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -1373,7 +1373,7 @@ static int perf_setup_peer_mw(struct perf_peer *peer)
 	int ret;
 
 	/* Get outbound MW parameters and map it */
-	ret = ntb_peer_mw_get_addr(perf->ntb, peer->gidx, &phys_addr,
+	ret = ntb_peer_mw_get_addr(perf->ntb, perf->gidx, &phys_addr,
 				   &peer->outbuf_size);
 	if (ret)
 		return ret;
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 2ba22cd..54a633e 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -189,7 +189,7 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
 	sector_t sector;
 
 	/* make sure device is a region */
-	if (!is_nd_pmem(dev))
+	if (!is_memory(dev))
 		return 0;
 
 	nd_region = to_nd_region(dev);
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index f9130cc..22224b2 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -42,7 +42,7 @@ static int nd_region_probe(struct device *dev)
 	if (rc)
 		return rc;
 
-	if (is_nd_pmem(&nd_region->dev)) {
+	if (is_memory(&nd_region->dev)) {
 		struct resource ndr_res;
 
 		if (devm_init_badblocks(dev, &nd_region->bb))
@@ -131,7 +131,7 @@ static void nd_region_notify(struct device *dev, enum nvdimm_event event)
 		struct nd_region *nd_region = to_nd_region(dev);
 		struct resource res;
 
-		if (is_nd_pmem(&nd_region->dev)) {
+		if (is_memory(&nd_region->dev)) {
 			res.start = nd_region->ndr_start;
 			res.end = nd_region->ndr_start +
 				nd_region->ndr_size - 1;
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 0303296..609fc45 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -633,11 +633,11 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
 	if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
 		return 0;
 
-	if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
+	if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
 		return 0;
 
 	if (a == &dev_attr_resource.attr) {
-		if (is_nd_pmem(dev))
+		if (is_memory(dev))
 			return 0400;
 		else
 			return 0;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index e26d119..5d0f99b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -111,10 +111,13 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
 	 */
 	if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
 		return;
-	revalidate_disk(ns->disk);
 	blk_set_queue_dying(ns->queue);
 	/* Forcibly unquiesce queues to avoid blocking dispatch */
 	blk_mq_unquiesce_queue(ns->queue);
+	/*
+	 * Revalidate after unblocking dispatchers that may be holding bd_mutex
+	 */
+	revalidate_disk(ns->disk);
 }
 
 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
@@ -1183,6 +1186,9 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	 */
 	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
 		mutex_lock(&ctrl->scan_lock);
+		mutex_lock(&ctrl->subsys->lock);
+		nvme_mpath_start_freeze(ctrl->subsys);
+		nvme_mpath_wait_freeze(ctrl->subsys);
 		nvme_start_freeze(ctrl);
 		nvme_wait_freeze(ctrl);
 	}
@@ -1213,6 +1219,8 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
 		nvme_update_formats(ctrl);
 	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
 		nvme_unfreeze(ctrl);
+		nvme_mpath_unfreeze(ctrl->subsys);
+		mutex_unlock(&ctrl->subsys->lock);
 		mutex_unlock(&ctrl->scan_lock);
 	}
 	if (effects & NVME_CMD_EFFECTS_CCC)
@@ -1557,6 +1565,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
 	if (ns->head->disk) {
 		nvme_update_disk_info(ns->head->disk, ns, id);
 		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+		revalidate_disk(ns->head->disk);
 	}
 #endif
 }
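Reading the two passthru hunks together, the bracket around LBA-content or namespace-altering commands now looks like this — a condensed restatement of the patched flow, not a new API:

	mutex_lock(&ctrl->scan_lock);
	mutex_lock(&ctrl->subsys->lock);
	nvme_mpath_start_freeze(ctrl->subsys);	/* freeze mpath queues first */
	nvme_mpath_wait_freeze(ctrl->subsys);
	nvme_start_freeze(ctrl);		/* then the per-namespace queues */
	nvme_wait_freeze(ctrl);
	/* ... issue the command, nvme_update_formats() ... */
	nvme_unfreeze(ctrl);			/* unwind strictly in reverse */
	nvme_mpath_unfreeze(ctrl->subsys);
	mutex_unlock(&ctrl->subsys->lock);
	mutex_unlock(&ctrl->scan_lock);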
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 67dec88..565bddc 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -206,7 +206,7 @@ static LIST_HEAD(nvme_fc_lport_list);
 static DEFINE_IDA(nvme_fc_local_port_cnt);
 static DEFINE_IDA(nvme_fc_ctrl_cnt);
 
-
+static struct workqueue_struct *nvme_fc_wq;
 
 /*
  * These items are short-term. They will eventually be moved into
@@ -2053,7 +2053,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
 	 */
 	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
 		active = atomic_xchg(&ctrl->err_work_active, 1);
-		if (!active && !schedule_work(&ctrl->err_work)) {
+		if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
 			atomic_set(&ctrl->err_work_active, 0);
 			WARN_ON(1);
 		}
@@ -3321,6 +3321,10 @@ static int __init nvme_fc_init_module(void)
 {
 	int ret;
 
+	nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
+	if (!nvme_fc_wq)
+		return -ENOMEM;
+
 	/*
 	 * NOTE:
 	 * It is expected that in the future the kernel will combine
@@ -3338,7 +3342,8 @@ static int __init nvme_fc_init_module(void)
 	fc_class = class_create(THIS_MODULE, "fc");
 	if (IS_ERR(fc_class)) {
 		pr_err("couldn't register class fc\n");
-		return PTR_ERR(fc_class);
+		ret = PTR_ERR(fc_class);
+		goto out_destroy_wq;
 	}
 
 	/*
@@ -3362,6 +3367,9 @@ static int __init nvme_fc_init_module(void)
 	device_destroy(fc_class, MKDEV(0, 0));
 out_destroy_class:
 	class_destroy(fc_class);
+out_destroy_wq:
+	destroy_workqueue(nvme_fc_wq);
+
 	return ret;
 }
 
@@ -3378,6 +3386,7 @@ static void __exit nvme_fc_exit_module(void)
 
 	device_destroy(fc_class, MKDEV(0, 0));
 	class_destroy(fc_class);
+	destroy_workqueue(nvme_fc_wq);
 }
 
 module_init(nvme_fc_init_module);
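The fc.c changes follow the standard lifecycle for a module-scoped workqueue: allocate it before anything that can fail, destroy it on every later error path and in module exit. A compilable skeleton of just that shape (the demo_* names are placeholders, not from this driver):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;	/* placeholder name */

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;
	/* any later failure here must unwind through destroy_workqueue() */
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);	/* mirrors every init error path */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");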
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index a11e210..892ef52 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -20,6 +20,36 @@ module_param(multipath, bool, 0444);
 MODULE_PARM_DESC(multipath,
 	"turn on native support for multiple controllers per subsystem");
 
+void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
+{
+	struct nvme_ns_head *h;
+
+	lockdep_assert_held(&subsys->lock);
+	list_for_each_entry(h, &subsys->nsheads, entry)
+		if (h->disk)
+			blk_mq_unfreeze_queue(h->disk->queue);
+}
+
+void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
+{
+	struct nvme_ns_head *h;
+
+	lockdep_assert_held(&subsys->lock);
+	list_for_each_entry(h, &subsys->nsheads, entry)
+		if (h->disk)
+			blk_mq_freeze_queue_wait(h->disk->queue);
+}
+
+void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
+{
+	struct nvme_ns_head *h;
+
+	lockdep_assert_held(&subsys->lock);
+	list_for_each_entry(h, &subsys->nsheads, entry)
+		if (h->disk)
+			blk_freeze_queue_start(h->disk->queue);
+}
+
 /*
  * If multipathing is enabled we need to always use the subsystem instance
  * number for numbering our devices to avoid conflicts between subsystems that
@@ -293,6 +323,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
 				 "failed to create id group.\n");
 	}
 
+	synchronize_srcu(&ns->head->srcu);
 	kblockd_schedule_work(&ns->head->requeue_work);
 }
 
@@ -373,14 +404,16 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
 
 	down_write(&ctrl->namespaces_rwsem);
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		if (ns->head->ns_id != le32_to_cpu(desc->nsids[n]))
+		unsigned nsid = le32_to_cpu(desc->nsids[n]);
+
+		if (ns->head->ns_id < nsid)
 			continue;
-		nvme_update_ns_ana_state(desc, ns);
+		if (ns->head->ns_id == nsid)
+			nvme_update_ns_ana_state(desc, ns);
 		if (++n == nr_nsids)
 			break;
 	}
 	up_write(&ctrl->namespaces_rwsem);
-	WARN_ON_ONCE(n < nr_nsids);
 	return 0;
 }
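The rewritten nvme_update_ana_state() loop is a merge-style walk over two lists that are both assumed sorted ascending by nsid: namespaces below the current descriptor entry are skipped, a matching namespace is updated, and a log nsid with no attached namespace no longer trips the removed WARN_ON_ONCE(); the loop simply runs out. A standalone trace of the matching walk:

#include <stdio.h>

/* Both arrays sorted ascending, mirroring the patched loop. */
static const unsigned int ns_ids[] = { 1, 2, 3, 5 };	/* ctrl->namespaces */
static const unsigned int nsids[]  = { 2, 5 };		/* desc->nsids */

int main(void)
{
	const unsigned int nr_nsids = sizeof(nsids) / sizeof(nsids[0]);
	unsigned int n = 0;

	for (unsigned int i = 0; i < sizeof(ns_ids) / sizeof(ns_ids[0]); i++) {
		unsigned int nsid = nsids[n];

		if (ns_ids[i] < nsid)
			continue;	/* ns below this ANA group entry */
		if (ns_ids[i] == nsid)
			printf("update ANA state of ns %u\n", ns_ids[i]);
		if (++n == nr_nsids)
			break;		/* descriptor exhausted */
	}
	return 0;	/* prints ns 2 and ns 5 */
}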
 
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index d5e29b5..2653e1f 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -469,6 +469,9 @@ static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
 	return ctrl->ana_log_buf != NULL;
 }
 
+void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
+void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
+void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
 void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
 			struct nvme_ctrl *ctrl, int *flags);
 void nvme_failover_req(struct request *req);
@@ -553,6 +556,15 @@ static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
 static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
 {
 }
+static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
+{
+}
+static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
+{
+}
+static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
+{
+}
 #endif /* CONFIG_NVME_MULTIPATH */
 
 #ifdef CONFIG_NVM
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0a5d064..a64a8bc 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2468,7 +2468,7 @@ static void nvme_async_probe(void *data, async_cookie_t cookie)
 {
 	struct nvme_dev *dev = data;
 
-	nvme_reset_ctrl_sync(&dev->ctrl);
+	flush_work(&dev->ctrl.reset_work);
 	flush_work(&dev->ctrl.scan_work);
 	nvme_put_ctrl(&dev->ctrl);
 }
@@ -2535,6 +2535,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
+	nvme_reset_ctrl(&dev->ctrl);
 	nvme_get_ctrl(&dev->ctrl);
 	async_schedule(nvme_async_probe, dev);
 
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 2008fa6..a8eb878 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -68,9 +68,11 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
 		goto out;
 
 	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
-	data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
+	data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
+		sectors[READ]), 1000);
 	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
-	data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
+	data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
+		sectors[WRITE]), 1000);
 
 	put_unaligned_le64(host_reads, &slog->host_reads[0]);
 	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
@@ -98,11 +100,11 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
 		if (!ns->bdev)
 			continue;
 		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
-		data_units_read +=
-			part_stat_read(ns->bdev->bd_part, sectors[READ]);
+		data_units_read += DIV_ROUND_UP(
+			part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
 		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
-		data_units_written +=
-			part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
+		data_units_written += DIV_ROUND_UP(
+			part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
 
 	}
 	rcu_read_unlock();
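For context on the unit change: the NVMe spec defines the SMART log's "data units" as thousands of 512-byte sectors, rounded up so that any nonzero traffic reports at least one unit; the old code returned raw sector counts. A standalone worked example:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* sectors transferred -> SMART data units (1000 * 512 bytes each) */
	unsigned long long sectors[] = { 1, 999, 1000, 1001, 3000 };

	for (int i = 0; i < 5; i++)
		printf("%4llu sectors -> %llu data units\n",
		       sectors[i], DIV_ROUND_UP(sectors[i], 1000ULL));
	return 0;	/* 1, 1, 1, 2, 3 */
}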
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9908082..137a27f 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -678,6 +678,14 @@ static void nvme_loop_remove_port(struct nvmet_port *port)
 	mutex_lock(&nvme_loop_ports_mutex);
 	list_del_init(&port->entry);
 	mutex_unlock(&nvme_loop_ports_mutex);
+
+	/*
+	 * Ensure any ctrls that are in the process of being
+	 * deleted are in fact deleted before we return
+	 * and free the port. This is to prevent active
+	 * ctrls from using a port after it's freed.
+	 */
+	flush_workqueue(nvme_delete_wq);
 }
 
 static const struct nvmet_fabrics_ops nvme_loop_ops = {
diff --git a/drivers/nvmem/nvmem-sysfs.c b/drivers/nvmem/nvmem-sysfs.c
index 978bacd..5a9e1e8 100644
--- a/drivers/nvmem/nvmem-sysfs.c
+++ b/drivers/nvmem/nvmem-sysfs.c
@@ -198,10 +198,17 @@ int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
 	if (!config->base_dev)
 		return -EINVAL;
 
-	if (nvmem->read_only)
-		nvmem->eeprom = bin_attr_ro_root_nvmem;
-	else
-		nvmem->eeprom = bin_attr_rw_root_nvmem;
+	if (nvmem->read_only) {
+		if (config->root_only)
+			nvmem->eeprom = bin_attr_ro_root_nvmem;
+		else
+			nvmem->eeprom = bin_attr_ro_nvmem;
+	} else {
+		if (config->root_only)
+			nvmem->eeprom = bin_attr_rw_root_nvmem;
+		else
+			nvmem->eeprom = bin_attr_rw_nvmem;
+	}
 	nvmem->eeprom.attr.name = "eeprom";
 	nvmem->eeprom.size = nvmem->size;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
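The regression being fixed: read_only alone chose the attribute, so a config without root_only still got the root-restricted mode. Both flags have to participate; the patched if/else ladder is equivalent to this condensed form:

	nvmem->eeprom = nvmem->read_only
		? (config->root_only ? bin_attr_ro_root_nvmem : bin_attr_ro_nvmem)
		: (config->root_only ? bin_attr_rw_root_nvmem : bin_attr_rw_nvmem);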
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 7390fb8..29df6ab 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -160,6 +160,15 @@ struct dino_device
 	(struct dino_device *)__pdata; })
 
 
+/* Check if PCI device is behind a Card-mode Dino. */
+static int pci_dev_is_behind_card_dino(struct pci_dev *dev)
+{
+	struct dino_device *dino_dev;
+
+	dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge));
+	return is_card_dino(&dino_dev->hba.dev->id);
+}
+
 /*
  * Dino Configuration Space Accessor Functions
  */
@@ -442,6 +451,21 @@ static void quirk_cirrus_cardbus(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus );
 
+#ifdef CONFIG_TULIP
+static void pci_fixup_tulip(struct pci_dev *dev)
+{
+	if (!pci_dev_is_behind_card_dino(dev))
+		return;
+	if (!(pci_resource_flags(dev, 1) & IORESOURCE_MEM))
+		return;
+	pr_warn("%s: HP HSC-PCI Cards with card-mode Dino not yet supported.\n",
+		pci_name(dev));
+	/* Disable this card by zeroing the PCI resources */
+	memset(&dev->resource[0], 0, sizeof(dev->resource[0]));
+	memset(&dev->resource[1], 0, sizeof(dev->resource[1]));
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_DEC, PCI_ANY_ID, pci_fixup_tulip);
+#endif /* CONFIG_TULIP */
 
 static void __init
 dino_bios_init(void)
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index cee5f2f..14a6ba4 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -465,7 +465,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
 
 	ep->phy = devm_of_phy_get(dev, np, NULL);
 	if (IS_ERR(ep->phy)) {
-		if (PTR_ERR(ep->phy) == -EPROBE_DEFER)
+		if (PTR_ERR(ep->phy) != -ENODEV)
 			return PTR_ERR(ep->phy);
 
 		ep->phy = NULL;
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 3826b44..3b2ceb5 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -807,8 +807,8 @@ static int imx6_pcie_probe(struct platform_device *pdev)
 
 	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
 	if (IS_ERR(imx6_pcie->vpcie)) {
-		if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
+		if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
+			return PTR_ERR(imx6_pcie->vpcie);
 		imx6_pcie->vpcie = NULL;
 	}
 
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index acd5092..b57ee79 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -356,7 +356,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
 		dev_err(dev, "Missing *config* reg space\n");
 	}
 
-	bridge = pci_alloc_host_bridge(0);
+	bridge = devm_pci_alloc_host_bridge(dev, 0);
 	if (!bridge)
 		return -ENOMEM;
 
@@ -367,7 +367,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
 
 	ret = devm_request_pci_bus_resources(dev, &bridge->windows);
 	if (ret)
-		goto error;
+		return ret;
 
 	/* Get the I/O and memory ranges from DT */
 	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
@@ -411,8 +411,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
 						resource_size(pp->cfg));
 		if (!pci->dbi_base) {
 			dev_err(dev, "Error with ioremap\n");
-			ret = -ENOMEM;
-			goto error;
+			return -ENOMEM;
 		}
 	}
 
@@ -423,8 +422,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
 					pp->cfg0_base, pp->cfg0_size);
 		if (!pp->va_cfg0_base) {
 			dev_err(dev, "Error with ioremap in function\n");
-			ret = -ENOMEM;
-			goto error;
+			return -ENOMEM;
 		}
 	}
 
@@ -434,8 +432,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
 						pp->cfg1_size);
 		if (!pp->va_cfg1_base) {
 			dev_err(dev, "Error with ioremap\n");
-			ret = -ENOMEM;
-			goto error;
+			return -ENOMEM;
 		}
 	}
 
@@ -458,14 +455,14 @@ int dw_pcie_host_init(struct pcie_port *pp)
 			    pp->num_vectors == 0) {
 				dev_err(dev,
 					"Invalid number of vectors\n");
-				goto error;
+				return -EINVAL;
 			}
 		}
 
 		if (!pp->ops->msi_host_init) {
 			ret = dw_pcie_allocate_domains(pp);
 			if (ret)
-				goto error;
+				return ret;
 
 			if (pp->msi_irq)
 				irq_set_chained_handler_and_data(pp->msi_irq,
@@ -474,7 +471,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
 		} else {
 			ret = pp->ops->msi_host_init(pp);
 			if (ret < 0)
-				goto error;
+				return ret;
 		}
 	}
 
@@ -514,8 +511,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
 err_free_msi:
 	if (pci_msi_enabled() && !pp->ops->msi_host_init)
 		dw_pcie_free_msi(pp);
-error:
-	pci_free_host_bridge(bridge);
 	return ret;
 }
 
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 7b32e61..a348983 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -340,8 +340,8 @@ static int histb_pcie_probe(struct platform_device *pdev)
 
 	hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie");
 	if (IS_ERR(hipcie->vpcie)) {
-		if (PTR_ERR(hipcie->vpcie) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
+		if (PTR_ERR(hipcie->vpcie) != -ENODEV)
+			return PTR_ERR(hipcie->vpcie);
 		hipcie->vpcie = NULL;
 	}
 
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index 5352e0c..9b59929 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -467,8 +467,8 @@ static int kirin_pcie_add_msi(struct dw_pcie *pci,
 	return 0;
 }
 
-static int __init kirin_add_pcie_port(struct dw_pcie *pci,
-				      struct platform_device *pdev)
+static int kirin_add_pcie_port(struct dw_pcie *pci,
+			       struct platform_device *pdev)
 {
 	int ret;
 
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 87a8887..e292801 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1091,7 +1091,6 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
 	struct qcom_pcie *pcie = to_qcom_pcie(pci);
 	int ret;
 
-	pm_runtime_get_sync(pci->dev);
 	qcom_ep_reset_assert(pcie);
 
 	ret = pcie->ops->init(pcie);
@@ -1128,7 +1127,6 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
 	phy_power_off(pcie->phy);
 err_deinit:
 	pcie->ops->deinit(pcie);
-	pm_runtime_put(pci->dev);
 
 	return ret;
 }
@@ -1218,6 +1216,12 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	pm_runtime_enable(dev);
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		pm_runtime_disable(dev);
+		return ret;
+	}
+
 	pci->dev = dev;
 	pci->ops = &dw_pcie_ops;
 	pp = &pci->pp;
@@ -1226,45 +1230,57 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 
 	pcie->ops = of_device_get_match_data(dev);
 
-	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
-	if (IS_ERR(pcie->reset))
-		return PTR_ERR(pcie->reset);
+	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
+	if (IS_ERR(pcie->reset)) {
+		ret = PTR_ERR(pcie->reset);
+		goto err_pm_runtime_put;
+	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
 	pcie->parf = devm_ioremap_resource(dev, res);
-	if (IS_ERR(pcie->parf))
-		return PTR_ERR(pcie->parf);
+	if (IS_ERR(pcie->parf)) {
+		ret = PTR_ERR(pcie->parf);
+		goto err_pm_runtime_put;
+	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
 	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
-	if (IS_ERR(pci->dbi_base))
-		return PTR_ERR(pci->dbi_base);
+	if (IS_ERR(pci->dbi_base)) {
+		ret = PTR_ERR(pci->dbi_base);
+		goto err_pm_runtime_put;
+	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
 	pcie->elbi = devm_ioremap_resource(dev, res);
-	if (IS_ERR(pcie->elbi))
-		return PTR_ERR(pcie->elbi);
+	if (IS_ERR(pcie->elbi)) {
+		ret = PTR_ERR(pcie->elbi);
+		goto err_pm_runtime_put;
+	}
 
 	pcie->phy = devm_phy_optional_get(dev, "pciephy");
-	if (IS_ERR(pcie->phy))
-		return PTR_ERR(pcie->phy);
+	if (IS_ERR(pcie->phy)) {
+		ret = PTR_ERR(pcie->phy);
+		goto err_pm_runtime_put;
+	}
 
 	ret = pcie->ops->get_resources(pcie);
 	if (ret)
-		return ret;
+		goto err_pm_runtime_put;
 
 	pp->ops = &qcom_pcie_dw_ops;
 
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
-		if (pp->msi_irq < 0)
-			return pp->msi_irq;
+		if (pp->msi_irq < 0) {
+			ret = pp->msi_irq;
+			goto err_pm_runtime_put;
+		}
 	}
 
 	ret = phy_init(pcie->phy);
-	if (ret) {
-		pm_runtime_disable(&pdev->dev);
-		return ret;
-	}
+	if (ret)
+		goto err_pm_runtime_put;
 
 	platform_set_drvdata(pdev, pcie);
@@ -1273,10 +1289,16 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 	if (ret) {
 		dev_err(dev, "cannot initialize host\n");
-		pm_runtime_disable(&pdev->dev);
-		return ret;
+		goto err_pm_runtime_put;
 	}
 
 	return 0;
+
+err_pm_runtime_put:
+	pm_runtime_put(dev);
+	pm_runtime_disable(dev);
+
+	return ret;
 }
 
 static const struct of_device_id qcom_pcie_match[] = {
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 5dadc96..5c28498 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -2706,8 +2706,8 @@ static int hv_pci_remove(struct hv_device *hdev)
 		/* Remove the bus from PCI's point of view. */
 		pci_lock_rescan_remove();
 		pci_stop_root_bus(hbus->pci_bus);
-		pci_remove_root_bus(hbus->pci_bus);
 		hv_pci_remove_slots(hbus);
+		pci_remove_root_bus(hbus->pci_bus);
 		pci_unlock_rescan_remove();
 		hbus->state = hv_pcibus_removed;
 	}
diff --git a/drivers/pci/controller/pci-msm.c b/drivers/pci/controller/pci-msm.c
index 4db3c0e..12aba0c 100644
--- a/drivers/pci/controller/pci-msm.c
+++ b/drivers/pci/controller/pci-msm.c
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
+#include <soc/qcom/subsystem_notif.h>
 
 #include "../pci.h"
 
@@ -43,6 +44,7 @@
 #define PCIE20_PARF_DBI_BASE_ADDR (0x350)
 #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE (0x358)
 
+#define PCIE_GEN3_PRESET_DEFAULT (0x55555555)
 #define PCIE_GEN3_SPCIE_CAP (0x0154)
 #define PCIE_GEN3_GEN2_CTRL (0x080c)
 #define PCIE_GEN3_RELATED (0x0890)
@@ -66,6 +68,10 @@
 #define PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER (0x180)
 #define PCIE20_PARF_DEBUG_INT_EN (0x190)
 
+#define PCIE20_PARF_CLKREQ_OVERRIDE (0x2b0)
+#define PCIE20_PARF_CLKREQ_IN_VALUE (BIT(3))
+#define PCIE20_PARF_CLKREQ_IN_ENABLE (BIT(1))
+
 #define PCIE20_ELBI_SYS_CTRL (0x04)
 #define PCIE20_ELBI_SYS_STTS (0x08)
 
@@ -715,6 +721,7 @@ struct msm_pcie_dev_t {
 	uint32_t phy_status_offset;
 	uint32_t phy_status_bit;
 	uint32_t phy_power_down_offset;
+	uint32_t core_preset;
 	uint32_t cpl_timeout;
 	uint32_t current_bdf;
 	uint32_t perst_delay_us_min;
@@ -1372,6 +1379,8 @@ static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
 		dev->phy_status_bit);
 	PCIE_DBG_FS(dev, "phy_power_down_offset: 0x%x\n",
 		dev->phy_power_down_offset);
+	PCIE_DBG_FS(dev, "core_preset: 0x%x\n",
+		dev->core_preset);
 	PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
 		dev->cpl_timeout);
 	PCIE_DBG_FS(dev, "current_bdf: 0x%x\n",
@@ -1452,6 +1461,7 @@ static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
 	static void __iomem *loopback_lbar_vir;
 	int ret, i;
 	u32 base_sel_size = 0;
+	u32 wr_ofst = 0;
 
 	switch (testcase) {
 	case MSM_PCIE_OUTPUT_PCIE_INFO:
@@ -1646,22 +1656,24 @@ static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
 			break;
 		}
 
+		wr_ofst = wr_offset;
+
 		PCIE_DBG_FS(dev,
 			"base: %s: 0x%pK\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
 			dev->res[base_sel - 1].name,
 			dev->res[base_sel - 1].base,
-			wr_offset, wr_mask, wr_value);
+			wr_ofst, wr_mask, wr_value);
 
 		base_sel_size = resource_size(dev->res[base_sel - 1].resource);
 
-		if (wr_offset >  base_sel_size - 4 ||
-			msm_pcie_check_align(dev, wr_offset))
+		if (wr_ofst > base_sel_size - 4 ||
+			msm_pcie_check_align(dev, wr_ofst))
 			PCIE_DBG_FS(dev,
 				"PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
-				dev->rc_idx, wr_offset, base_sel_size - 4);
+				dev->rc_idx, wr_ofst, base_sel_size - 4);
 		else
 			msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
-				wr_offset, wr_mask, wr_value);
+				wr_ofst, wr_mask, wr_value);
 
 		break;
 	case MSM_PCIE_DUMP_PCIE_REGISTER_SPACE:
@@ -4088,7 +4100,7 @@ static int msm_pcie_link_train(struct msm_pcie_dev_t *dev)
 	msm_pcie_write_reg_field(dev->dm_core,
 		PCIE_GEN3_MISC_CONTROL, BIT(0), 1);
 	msm_pcie_write_reg(dev->dm_core,
-		PCIE_GEN3_SPCIE_CAP, 0x77777777);
+		PCIE_GEN3_SPCIE_CAP, dev->core_preset);
 	msm_pcie_write_reg_field(dev->dm_core,
 		PCIE_GEN3_MISC_CONTROL, BIT(0), 0);
 
@@ -5931,12 +5943,22 @@ static int msm_pcie_setup_drv(struct msm_pcie_dev_t *pcie_dev,
 	struct msm_pcie_drv_msg *msg;
 	struct msm_pcie_drv_tre *pkt;
 	struct msm_pcie_drv_header *hdr;
+	u32 drv_l1ss_timeout_us = 0;
+	int ret;
 
 	drv_info = devm_kzalloc(&pcie_dev->pdev->dev, sizeof(*drv_info),
 				GFP_KERNEL);
 	if (!drv_info)
 		return -ENOMEM;
 
+	ret = of_property_read_u32(of_node, "qcom,drv-l1ss-timeout-us",
+					&drv_l1ss_timeout_us);
+	if (ret)
+		drv_l1ss_timeout_us = L1SS_TIMEOUT_US;
+
+	PCIE_DBG(pcie_dev, "PCIe: RC%d: DRV L1ss timeout: %dus\n",
+		pcie_dev->rc_idx, drv_l1ss_timeout_us);
+
 	drv_info->dev_id = pcie_dev->rc_idx;
 
 	/* cache frequent command for communication */
@@ -5951,7 +5973,7 @@ static int msm_pcie_setup_drv(struct msm_pcie_dev_t *pcie_dev,
 
 	pkt->dword[0] = MSM_PCIE_DRV_CMD_ENABLE;
 	pkt->dword[1] = hdr->dev_id;
-	pkt->dword[2] = L1SS_TIMEOUT_US / 1000;
+	pkt->dword[2] = drv_l1ss_timeout_us / 1000;
 
 	msg = &drv_info->drv_disable;
 	pkt = &msg->pkt;
@@ -6129,6 +6151,13 @@ static int msm_pcie_probe(struct platform_device *pdev)
 	PCIE_DBG(pcie_dev, "RC%d: phy-power-down-offset: 0x%x.\n",
 		pcie_dev->rc_idx, pcie_dev->phy_power_down_offset);
 
+	pcie_dev->core_preset = PCIE_GEN3_PRESET_DEFAULT;
+	of_property_read_u32(pdev->dev.of_node,
+				"qcom,core-preset",
+				&pcie_dev->core_preset);
+	PCIE_DBG(pcie_dev, "RC%d: core-preset: 0x%x.\n",
+		pcie_dev->rc_idx, pcie_dev->core_preset);
+
 	of_property_read_u32(of_node, "qcom,cpl-timeout",
 				&pcie_dev->cpl_timeout);
 	PCIE_DBG(pcie_dev, "RC%d: cpl-timeout: 0x%x.\n",
@@ -6531,36 +6560,45 @@ static int msm_pcie_drv_rpmsg_probe(struct rpmsg_device *rpdev)
 	return 0;
 }
 
-static void msm_pcie_drv_rpmsg_remove(struct rpmsg_device *rpdev)
+static void msm_pcie_drv_notify_client(struct pcie_drv_sta *pcie_drv,
+					enum msm_pcie_event event)
 {
-	struct pcie_drv_sta *pcie_drv = dev_get_drvdata(&rpdev->dev);
 	struct msm_pcie_dev_t *pcie_dev = pcie_drv->msm_pcie_dev;
 	int i;
 
-	pcie_drv->rpdev = NULL;
-	flush_work(&pcie_drv->drv_connect);
-
 	for (i = 0; i < MAX_RC_NUM; i++, pcie_dev++) {
 		struct msm_pcie_drv_info *drv_info = pcie_dev->drv_info;
 		struct msm_pcie_register_event *event_reg =
 			pcie_dev->event_reg;
 
+		PCIE_DBG(pcie_dev, "PCIe: RC%d: event %d received\n",
+			pcie_dev->rc_idx, event);
+
 		/* does not support DRV or has not been probed yet */
 		if (!drv_info)
 			continue;
 
-		if (!event_reg ||
-		    !(event_reg->events & MSM_PCIE_EVENT_DRV_DISCONNECT))
+		if (!event_reg || !(event_reg->events & event))
 			continue;
 
 		if (drv_info->ep_connected) {
-			msm_pcie_notify_client(pcie_dev,
-					       MSM_PCIE_EVENT_DRV_DISCONNECT);
-			drv_info->ep_connected = false;
+			msm_pcie_notify_client(pcie_dev, event);
+			if (event & MSM_PCIE_EVENT_DRV_DISCONNECT)
+				drv_info->ep_connected = false;
 		}
 	}
 }
 
+static void msm_pcie_drv_rpmsg_remove(struct rpmsg_device *rpdev)
+{
+	struct pcie_drv_sta *pcie_drv = dev_get_drvdata(&rpdev->dev);
+
+	pcie_drv->rpdev = NULL;
+	flush_work(&pcie_drv->drv_connect);
+
+	msm_pcie_drv_notify_client(pcie_drv, MSM_PCIE_EVENT_DRV_DISCONNECT);
+}
+
 static int msm_pcie_drv_rpmsg_cb(struct rpmsg_device *rpdev, void *data,
 				int len, void *priv, u32 src)
 {
@@ -6667,6 +6705,15 @@ static struct rpmsg_driver msm_pcie_drv_rpmsg_driver = {
 	},
 };
 
+static void msm_pcie_early_notifier(void *data)
+{
+	struct pcie_drv_sta *pcie_drv = data;
+
+	pcie_drv->rpdev = NULL;
+
+	msm_pcie_drv_notify_client(pcie_drv, MSM_PCIE_EVENT_WAKEUP);
+}
+
 static void msm_pcie_drv_connect_worker(struct work_struct *work)
 {
 	struct pcie_drv_sta *pcie_drv = container_of(work, struct pcie_drv_sta,
@@ -6699,6 +6746,9 @@ static void msm_pcie_drv_connect_worker(struct work_struct *work)
 				       MSM_PCIE_EVENT_DRV_CONNECT);
 		drv_info->ep_connected = true;
 	}
+
+	subsys_register_early_notifier("adsp", PCIE_DRV_LAYER_NOTIF,
+				msm_pcie_early_notifier, pcie_drv);
 }
 
 static int __init pcie_init(void)
@@ -7139,6 +7189,14 @@ static int msm_pcie_drv_resume(struct msm_pcie_dev_t *pcie_dev)
 		}
 	}
 
+	/* always ungate clkreq */
+	msm_pcie_write_reg_field(pcie_dev->parf,
+				PCIE20_PARF_CLKREQ_OVERRIDE,
+				PCIE20_PARF_CLKREQ_IN_ENABLE, 0);
+	msm_pcie_write_reg_field(pcie_dev->parf,
+				PCIE20_PARF_CLKREQ_OVERRIDE,
+				PCIE20_PARF_CLKREQ_IN_VALUE, 0);
+
 	pcie_dev->user_suspend = false;
 	spin_lock_irq(&pcie_dev->cfg_lock);
 	pcie_dev->cfg_access = true;
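The qcom,core-preset read added above uses the seed-then-override idiom: initialize the variable to its default and let of_property_read_u32() overwrite it only on success, since it leaves the output untouched on failure. In isolation:

	u32 core_preset = PCIE_GEN3_PRESET_DEFAULT;	/* default first */

	of_property_read_u32(of_node, "qcom,core-preset", &core_preset);
	/* core_preset now holds the DT value when present, else the default */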
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index f4f53d0..976eaa9 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -1975,14 +1975,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
 		err = of_pci_get_devfn(port);
 		if (err < 0) {
 			dev_err(dev, "failed to parse address: %d\n", err);
-			return err;
+			goto err_node_put;
 		}
 
 		index = PCI_SLOT(err);
 
 		if (index < 1 || index > soc->num_ports) {
 			dev_err(dev, "invalid port number: %d\n", index);
-			return -EINVAL;
+			err = -EINVAL;
+			goto err_node_put;
 		}
 
 		index--;
@@ -1991,12 +1992,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
 		if (err < 0) {
 			dev_err(dev, "failed to parse # of lanes: %d\n",
 				err);
-			return err;
+			goto err_node_put;
 		}
 
 		if (value > 16) {
 			dev_err(dev, "invalid # of lanes: %u\n", value);
-			return -EINVAL;
+			err = -EINVAL;
+			goto err_node_put;
 		}
 
 		lanes |= value << (index << 3);
@@ -2010,13 +2012,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
 		lane += value;
 
 		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
-		if (!rp)
-			return -ENOMEM;
+		if (!rp) {
+			err = -ENOMEM;
+			goto err_node_put;
+		}
 
 		err = of_address_to_resource(port, 0, &rp->regs);
 		if (err < 0) {
 			dev_err(dev, "failed to parse address: %d\n", err);
-			return err;
+			goto err_node_put;
 		}
 
 		INIT_LIST_HEAD(&rp->list);
@@ -2043,6 +2047,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
 		return err;
 
 	return 0;
+
+err_node_put:
+	of_node_put(port);
+	return err;
 }
 
 /*
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 1372d27..5ce8e63 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -608,29 +608,29 @@ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
 
 	rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
 	if (IS_ERR(rockchip->vpcie12v)) {
-		if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
+		if (PTR_ERR(rockchip->vpcie12v) != -ENODEV)
+			return PTR_ERR(rockchip->vpcie12v);
 		dev_info(dev, "no vpcie12v regulator found\n");
 	}
 
 	rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
 	if (IS_ERR(rockchip->vpcie3v3)) {
-		if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
+		if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
+			return PTR_ERR(rockchip->vpcie3v3);
 		dev_info(dev, "no vpcie3v3 regulator found\n");
 	}
 
 	rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
 	if (IS_ERR(rockchip->vpcie1v8)) {
-		if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
+		if (PTR_ERR(rockchip->vpcie1v8) != -ENODEV)
+			return PTR_ERR(rockchip->vpcie1v8);
 		dev_info(dev, "no vpcie1v8 regulator found\n");
 	}
 
 	rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
 	if (IS_ERR(rockchip->vpcie0v9)) {
-		if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
+		if (PTR_ERR(rockchip->vpcie0v9) != -ENODEV)
+			return PTR_ERR(rockchip->vpcie0v9);
 		dev_info(dev, "no vpcie0v9 regulator found\n");
 	}
 
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index fd2dbd7..65eaa6b 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -31,6 +31,9 @@
 #define PCI_REG_VMLOCK		0x70
 #define MB2_SHADOW_EN(vmlock)	(vmlock & 0x2)
 
+#define MB2_SHADOW_OFFSET	0x2000
+#define MB2_SHADOW_SIZE		16
+
 enum vmd_features {
 	/*
 	 * Device may contain registers which hint the physical location of the
@@ -94,6 +97,7 @@ struct vmd_dev {
 	struct resource		resources[3];
 	struct irq_domain	*irq_domain;
 	struct pci_bus		*bus;
+	u8			busn_start;
 
 #ifdef CONFIG_X86_DEV_DMA_OPS
 	struct dma_map_ops	dma_ops;
@@ -465,7 +469,8 @@ static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
 				  unsigned int devfn, int reg, int len)
 {
 	char __iomem *addr = vmd->cfgbar +
-			     (bus->number << 20) + (devfn << 12) + reg;
+			     ((bus->number - vmd->busn_start) << 20) +
+			     (devfn << 12) + reg;
 
 	if ((addr - vmd->cfgbar) + len >=
 	    resource_size(&vmd->dev->resource[VMD_CFGBAR]))
@@ -588,7 +593,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 	unsigned long flags;
 	LIST_HEAD(resources);
 	resource_size_t offset[2] = {0};
-	resource_size_t membar2_offset = 0x2000, busn_start = 0;
+	resource_size_t membar2_offset = 0x2000;
 
 	/*
 	 * Shadow registers may exist in certain VMD device ids which allow
@@ -600,7 +605,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 		u32 vmlock;
 		int ret;
 
-		membar2_offset = 0x2018;
+		membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
 		ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
 		if (ret || vmlock == ~0)
 			return -ENODEV;
@@ -612,9 +617,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 			if (!membar2)
 				return -ENOMEM;
 			offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
-						readq(membar2 + 0x2008);
+					readq(membar2 + MB2_SHADOW_OFFSET);
 			offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
-						readq(membar2 + 0x2010);
+					readq(membar2 + MB2_SHADOW_OFFSET + 8);
 			pci_iounmap(vmd->dev, membar2);
 		}
 	}
@@ -630,14 +635,14 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 		pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
 		if (BUS_RESTRICT_CAP(vmcap) &&
 		    (BUS_RESTRICT_CFG(vmconfig) == 0x1))
-			busn_start = 128;
+			vmd->busn_start = 128;
 	}
 
 	res = &vmd->dev->resource[VMD_CFGBAR];
 	vmd->resources[0] = (struct resource) {
 		.name  = "VMD CFGBAR",
-		.start = busn_start,
-		.end   = busn_start + (resource_size(res) >> 20) - 1,
+		.start = vmd->busn_start,
+		.end   = vmd->busn_start + (resource_size(res) >> 20) - 1,
 		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
 	};
 
@@ -705,8 +710,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 	pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
 	pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
 
-	vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops,
-				       sd, &resources);
+	vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
+					&vmd_ops, sd, &resources);
 	if (!vmd->bus) {
 		pci_free_resource_list(&resources);
 		irq_domain_remove(vmd->irq_domain);
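With bus restriction active the root bus starts at 128, so config-space addresses must be computed relative to busn_start or every access lands 128 MiB past the CFGBAR window. A standalone check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t busn_start = 128, bus = 129, devfn = 0x08;
	int reg = 0x40;
	uint32_t off = ((uint32_t)(bus - busn_start) << 20) |
		       ((uint32_t)devfn << 12) | (uint32_t)reg;

	/* bus 129 is the second bus in the window: 1 MiB in, not 129 MiB */
	printf("cfg offset = 0x%x\n", (unsigned int)off);	/* 0x108040 */
	return 0;
}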
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 857c358..cc860c5 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -230,7 +230,7 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
 	struct of_drc_info drc;
 	const __be32 *value;
 	char cell_drc_name[MAX_DRC_NAME_LEN];
-	int j, fndit;
+	int j;
 
 	info = of_find_property(dn->parent, "ibm,drc-info", NULL);
 	if (info == NULL)
@@ -245,17 +245,13 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
 
 		/* Should now know end of current entry */
 
-		if (my_index > drc.last_drc_index)
-			continue;
-
-		fndit = 1;
-		break;
+		/* Found it */
+		if (my_index <= drc.last_drc_index) {
+			sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix,
+				my_index);
+			break;
+		}
 	}
-	/* Found it */
-
-	if (fndit)
-		sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix, 
-			my_index);
 
 	if (((drc_name == NULL) ||
 	     (drc_name && !strcmp(drc_name, cell_drc_name))) &&
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index f06ca58..54a8b23 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -399,7 +399,8 @@ void __weak pcibios_free_irq(struct pci_dev *dev)
 #ifdef CONFIG_PCI_IOV
 static inline bool pci_device_can_probe(struct pci_dev *pdev)
 {
-	return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe);
+	return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe ||
+		pdev->driver_override);
 }
 #else
 static inline bool pci_device_can_probe(struct pci_dev *pdev)
@@ -908,6 +909,9 @@ static int pci_pm_resume(struct device *dev)
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int error = 0;
 
+	if (pci_dev->no_d3hot)
+		goto skip_pci_pm_restore;
+
 	/*
 	 * This is necessary for the suspend error path in which resume is
 	 * called without restoring the standard config registers of the device.
@@ -915,6 +919,7 @@ static int pci_pm_resume(struct device *dev)
 	if (pci_dev->state_saved)
 		pci_restore_standard_config(pci_dev);
 
+skip_pci_pm_restore:
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume(dev);
 
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index c654653..2baf1f8 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -926,19 +926,6 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
 }
 
 /**
- * pci_power_up - Put the given device into D0 forcibly
- * @dev: PCI device to power up
- */
-void pci_power_up(struct pci_dev *dev)
-{
-	if (platform_pci_power_manageable(dev))
-		platform_pci_set_power_state(dev, PCI_D0);
-
-	pci_raw_set_power_state(dev, PCI_D0);
-	pci_update_current_state(dev, PCI_D0);
-}
-
-/**
  * pci_platform_power_transition - Use platform to change device power state
  * @dev: PCI device to handle.
  * @state: State to put the device into.
@@ -1117,6 +1104,17 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 EXPORT_SYMBOL(pci_set_power_state);
 
 /**
+ * pci_power_up - Put the given device into D0 forcibly
+ * @dev: PCI device to power up
+ */
+void pci_power_up(struct pci_dev *dev)
+{
+	__pci_start_power_transition(dev, PCI_D0);
+	pci_raw_set_power_state(dev, PCI_D0);
+	pci_update_current_state(dev, PCI_D0);
+}
+
+/**
  * pci_choose_state - Choose the power state of a PCI device
  * @dev: PCI device to be suspended
  * @state: target sleep state for the whole system. This is the value
@@ -1366,7 +1364,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
 		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
 		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
 		res = pdev->resource + bar_idx;
-		size = order_base_2((resource_size(res) >> 20) | 1) - 1;
+		size = ilog2(resource_size(res)) - 20;
 		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
 		ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
 		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
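The REBAR control field encodes log2 of the BAR size in MiB, so a 1 MiB BAR must encode as 0 and a 256 MiB BAR as 8. The replaced expression, order_base_2((size >> 20) | 1) - 1, evaluated to -1 for the 1 MiB case (order_base_2(1) is 0), which is what ilog2(size) - 20 corrects. A standalone check:

#include <stdint.h>
#include <stdio.h>

static unsigned int ilog2_u64(uint64_t v)	/* floor(log2(v)), v > 0 */
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint64_t sizes[] = { 1ULL << 20, 256ULL << 20 };	/* 1 MiB, 256 MiB */

	for (int i = 0; i < 2; i++)
		printf("%llu MiB -> size field %u\n",
		       (unsigned long long)(sizes[i] >> 20),
		       ilog2_u64(sizes[i]) - 20);	/* prints 0 and 8 */
	return 0;
}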
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 28c64f84..06be529 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5082,59 +5082,95 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
 	pci_iounmap(pdev, mmio);
 	pci_disable_device(pdev);
 }
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8531,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8532,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8533,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8534,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8535,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8536,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8543,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8544,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8545,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8546,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8551,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8552,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8553,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8554,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8555,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8556,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8561,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8562,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8563,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8564,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8565,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8566,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8571,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8572,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8573,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8574,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8575,
-			quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8576,
-			quirk_switchtec_ntb_dma_alias);
+#define SWITCHTEC_QUIRK(vid) \
+	DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, vid, \
+				quirk_switchtec_ntb_dma_alias)
+
+SWITCHTEC_QUIRK(0x8531);  /* PFX 24xG3 */
+SWITCHTEC_QUIRK(0x8532);  /* PFX 32xG3 */
+SWITCHTEC_QUIRK(0x8533);  /* PFX 48xG3 */
+SWITCHTEC_QUIRK(0x8534);  /* PFX 64xG3 */
+SWITCHTEC_QUIRK(0x8535);  /* PFX 80xG3 */
+SWITCHTEC_QUIRK(0x8536);  /* PFX 96xG3 */
+SWITCHTEC_QUIRK(0x8541);  /* PSX 24xG3 */
+SWITCHTEC_QUIRK(0x8542);  /* PSX 32xG3 */
+SWITCHTEC_QUIRK(0x8543);  /* PSX 48xG3 */
+SWITCHTEC_QUIRK(0x8544);  /* PSX 64xG3 */
+SWITCHTEC_QUIRK(0x8545);  /* PSX 80xG3 */
+SWITCHTEC_QUIRK(0x8546);  /* PSX 96xG3 */
+SWITCHTEC_QUIRK(0x8551);  /* PAX 24XG3 */
+SWITCHTEC_QUIRK(0x8552);  /* PAX 32XG3 */
+SWITCHTEC_QUIRK(0x8553);  /* PAX 48XG3 */
+SWITCHTEC_QUIRK(0x8554);  /* PAX 64XG3 */
+SWITCHTEC_QUIRK(0x8555);  /* PAX 80XG3 */
+SWITCHTEC_QUIRK(0x8556);  /* PAX 96XG3 */
+SWITCHTEC_QUIRK(0x8561);  /* PFXL 24XG3 */
+SWITCHTEC_QUIRK(0x8562);  /* PFXL 32XG3 */
+SWITCHTEC_QUIRK(0x8563);  /* PFXL 48XG3 */
+SWITCHTEC_QUIRK(0x8564);  /* PFXL 64XG3 */
+SWITCHTEC_QUIRK(0x8565);  /* PFXL 80XG3 */
+SWITCHTEC_QUIRK(0x8566);  /* PFXL 96XG3 */
+SWITCHTEC_QUIRK(0x8571);  /* PFXI 24XG3 */
+SWITCHTEC_QUIRK(0x8572);  /* PFXI 32XG3 */
+SWITCHTEC_QUIRK(0x8573);  /* PFXI 48XG3 */
+SWITCHTEC_QUIRK(0x8574);  /* PFXI 64XG3 */
+SWITCHTEC_QUIRK(0x8575);  /* PFXI 80XG3 */
+SWITCHTEC_QUIRK(0x8576);  /* PFXI 96XG3 */
+
+/*
+ * On Lenovo Thinkpad P50 SKUs with an Nvidia Quadro M1000M, the BIOS does
+ * not always reset the secondary Nvidia GPU between reboots if the system
+ * is configured to use Hybrid Graphics mode.  This results in the GPU
+ * being left in whatever state it was in during the *previous* boot, which
+ * causes spurious interrupts from the GPU, which in turn causes us to
+ * disable the wrong IRQ and end up breaking the touchpad.  Unsurprisingly,
+ * this also completely breaks nouveau.
+ *
+ * Luckily, it seems a simple reset of the Nvidia GPU brings it back to a
+ * clean state and fixes all these issues.
+ *
+ * When the machine is configured in Dedicated display mode, the issue
+ * doesn't occur.  Fortunately the GPU advertises NoReset+ when in this
+ * mode, so we can detect that and avoid resetting it.
+ */
+static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
+{
+	void __iomem *map;
+	int ret;
+
+	if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
+	    pdev->subsystem_device != 0x222e ||
+	    !pdev->reset_fn)
+		return;
+
+	if (pci_enable_device_mem(pdev))
+		return;
+
+	/*
+	 * Based on nvkm_device_ctor() in
+	 * drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+	 */
+	map = pci_iomap(pdev, 0, 0x23000);
+	if (!map) {
+		pci_err(pdev, "Can't map MMIO space\n");
+		goto out_disable;
+	}
+
+	/*
+	 * Make sure the GPU looks like it's been POSTed before resetting
+	 * it.
+	 */
+	if (ioread32(map + 0x2240c) & 0x2) {
+		pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
+		ret = pci_reset_bus(pdev);
+		if (ret < 0)
+			pci_err(pdev, "Failed to reset GPU: %d\n", ret);
+	}
+
+	iounmap(map);
+out_disable:
+	pci_disable_device(pdev);
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
+			      PCI_CLASS_DISPLAY_VGA, 8,
+			      quirk_reset_lenovo_thinkpad_p50_nvgpu);
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index 62b15b1..9ee110c 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -8,5 +8,6 @@
 obj-$(CONFIG_PHY_QCOM_UFS)		+= phy-qcom-ufs-qrbtc-sdm845.o
 obj-$(CONFIG_PHY_QCOM_UFS) 		+= phy-qcom-ufs-qmp-v4.o
 obj-$(CONFIG_PHY_QCOM_UFS) 		+= phy-qcom-ufs-qmp-v4-lito.o
+obj-$(CONFIG_PHY_QCOM_UFS)		+= phy-qcom-ufs-qmp-v3-660.o
 obj-$(CONFIG_PHY_QCOM_USB_HS) 		+= phy-qcom-usb-hs.o
 obj-$(CONFIG_PHY_QCOM_USB_HSIC) 	+= phy-qcom-usb-hsic.o
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-i.h b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
index 6bb62b3..50502de8 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-i.h
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, 2019 Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,6 +23,8 @@
 #include <linux/platform_device.h>
 #include <linux/io.h>
 #include <linux/delay.h>
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
 
 #define readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) \
 ({ \
@@ -80,6 +82,12 @@ struct ufs_qcom_phy_vreg {
 	bool is_always_on;
 };
 
+struct ufs_qcom_phy_rpmh_rsc {
+	const char *qphy_rsc_name;
+	u32 qphy_rsc_addr;
+	bool enabled;
+};
+
 struct ufs_qcom_phy {
 	struct list_head list;
 	struct device *dev;
@@ -97,6 +105,7 @@ struct ufs_qcom_phy {
 	struct ufs_qcom_phy_vreg vdda_pll;
 	struct ufs_qcom_phy_vreg vdda_phy;
 	struct ufs_qcom_phy_vreg vddp_ref_clk;
+	struct ufs_qcom_phy_rpmh_rsc rpmh_rsc;
 
 	/* Number of lanes available (1 or 2) for Rx/Tx */
 	u32 lanes_per_direction;
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.c
new file mode 100644
index 0000000..b3c4271
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "phy-qcom-ufs-qmp-v3-660.h"
+
+#define UFS_PHY_NAME "ufs_phy_qmp_v3_660"
+
+static
+int ufs_qcom_phy_qmp_v3_660_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+					bool is_rate_B, bool is_g4)
+{
+	int err;
+	int tbl_size_A, tbl_size_B;
+	struct ufs_qcom_phy_calibration *tbl_A, *tbl_B;
+	u8 major = ufs_qcom_phy->host_ctrl_rev_major;
+	u16 minor = ufs_qcom_phy->host_ctrl_rev_minor;
+	u16 step = ufs_qcom_phy->host_ctrl_rev_step;
+
+	tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
+	tbl_B = phy_cal_table_rate_B;
+
+	if ((major == 0x3) && (minor == 0x001) && (step >= 0x001)) {
+		tbl_A = phy_cal_table_rate_A_3_1_1;
+		tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_3_1_1);
+	} else {
+		dev_err(ufs_qcom_phy->dev,
+			"%s: Unknown UFS-PHY version (major 0x%x minor 0x%x step 0x%x), no calibration values\n",
+			__func__, major, minor, step);
+		err = -ENODEV;
+		goto out;
+	}
+
+	err = ufs_qcom_phy_calibrate(ufs_qcom_phy,
+				     tbl_A, tbl_size_A,
+				     tbl_B, tbl_size_B,
+				     is_rate_B);
+
+	if (err)
+		dev_err(ufs_qcom_phy->dev,
+			"%s: ufs_qcom_phy_calibrate() failed %d\n",
+			__func__, err);
+
+out:
+	return err;
+}
+
+static int ufs_qcom_phy_qmp_v3_660_init(struct phy *generic_phy)
+{
+	struct ufs_qcom_phy_qmp_v3_660 *phy = phy_get_drvdata(generic_phy);
+	struct ufs_qcom_phy *phy_common = &phy->common_cfg;
+	int err;
+
+	err = ufs_qcom_phy_init_clks(phy_common);
+	if (err) {
+		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+
+	err = ufs_qcom_phy_init_vregulators(phy_common);
+	if (err) {
+		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+
+out:
+	return err;
+}
+
+static int ufs_qcom_phy_qmp_v3_660_exit(struct phy *generic_phy)
+{
+	return 0;
+}
+
+static
+void ufs_qcom_phy_qmp_v3_660_power_control(struct ufs_qcom_phy *phy,
+					 bool power_ctrl)
+{
+	if (!power_ctrl) {
+		/* apply analog power collapse */
+		writel_relaxed(0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+		/*
+		 * Make sure that PHY knows its analog rail is going to be
+		 * powered OFF.
+		 */
+		mb();
+	} else {
+		/* bring PHY out of analog power collapse */
+		writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+
+		/*
+		 * Before any transactions involving PHY, ensure PHY knows
+		 * that its analog rail is powered ON.
+		 */
+		mb();
+	}
+}
+
+static inline
+void ufs_qcom_phy_qmp_v3_660_set_tx_lane_enable(struct ufs_qcom_phy *phy,
+						   u32 val)
+{
+	/*
+	 * v3 PHY does not have TX_LANE_ENABLE register.
+	 * Implement this function so as not to propagate error to caller.
+	 */
+}
+
+static
+void ufs_qcom_phy_qmp_v3_660_ctrl_rx_linecfg(struct ufs_qcom_phy *phy,
+						bool ctrl)
+{
+	u32 temp;
+
+	temp = readl_relaxed(phy->mmio + UFS_PHY_LINECFG_DISABLE);
+
+	if (ctrl) /* enable RX LineCfg */
+		temp &= ~UFS_PHY_RX_LINECFG_DISABLE_BIT;
+	else /* disable RX LineCfg */
+		temp |= UFS_PHY_RX_LINECFG_DISABLE_BIT;
+
+	writel_relaxed(temp, phy->mmio + UFS_PHY_LINECFG_DISABLE);
+	/* Make sure the RX LineCfg config is applied before we return */
+	mb();
+}
+
+static inline void ufs_qcom_phy_qmp_v3_660_start_serdes(
+					struct ufs_qcom_phy *phy)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
+	tmp &= ~MASK_SERDES_START;
+	tmp |= (1 << OFFSET_SERDES_START);
+	writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
+	/* Ensure register value is committed */
+	mb();
+}
+
+static int ufs_qcom_phy_qmp_v3_660_is_pcs_ready(
+				struct ufs_qcom_phy *phy_common)
+{
+	int err = 0;
+	u32 val;
+
+	err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
+		val, (val & MASK_PCS_READY), 10, 1000000);
+	if (err)
+		dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
+			__func__, err);
+	return err;
+}
+
+static void ufs_qcom_phy_qmp_v3_660_dbg_register_dump(
+					struct ufs_qcom_phy *phy)
+{
+	ufs_qcom_phy_dump_regs(phy, COM_BASE, COM_SIZE,
+					"PHY QSERDES COM Registers ");
+	ufs_qcom_phy_dump_regs(phy, PHY_BASE, PHY_SIZE,
+					"PHY Registers ");
+	ufs_qcom_phy_dump_regs(phy, RX_BASE, RX_SIZE,
+					"PHY RX0 Registers ");
+	ufs_qcom_phy_dump_regs(phy, TX_BASE, TX_SIZE,
+					"PHY TX0 Registers ");
+}
+
+struct phy_ops ufs_qcom_phy_qmp_v3_660_phy_ops = {
+	.init		= ufs_qcom_phy_qmp_v3_660_init,
+	.exit		= ufs_qcom_phy_qmp_v3_660_exit,
+	.power_on	= ufs_qcom_phy_power_on,
+	.power_off	= ufs_qcom_phy_power_off,
+	.owner		= THIS_MODULE,
+};
+
+struct ufs_qcom_phy_specific_ops phy_v3_660_ops = {
+	.calibrate_phy		= ufs_qcom_phy_qmp_v3_660_phy_calibrate,
+	.start_serdes		= ufs_qcom_phy_qmp_v3_660_start_serdes,
+	.is_physical_coding_sublayer_ready =
+				ufs_qcom_phy_qmp_v3_660_is_pcs_ready,
+	.set_tx_lane_enable	= ufs_qcom_phy_qmp_v3_660_set_tx_lane_enable,
+	.ctrl_rx_linecfg	= ufs_qcom_phy_qmp_v3_660_ctrl_rx_linecfg,
+	.power_control		= ufs_qcom_phy_qmp_v3_660_power_control,
+	.dbg_register_dump	= ufs_qcom_phy_qmp_v3_660_dbg_register_dump,
+};
+
+static int ufs_qcom_phy_qmp_v3_660_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy *generic_phy;
+	struct ufs_qcom_phy_qmp_v3_660 *phy;
+	int err = 0;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+				&ufs_qcom_phy_qmp_v3_660_phy_ops,
+				&phy_v3_660_ops);
+
+	if (!generic_phy) {
+		dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
+			__func__);
+		err = -EIO;
+		goto out;
+	}
+
+	phy_set_drvdata(generic_phy, phy);
+
+	strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
+		sizeof(phy->common_cfg.name));
+
+out:
+	return err;
+}
+
+static const struct of_device_id ufs_qcom_phy_qmp_v3_660_of_match[] = {
+	{.compatible = "qcom,ufs-phy-qmp-v3-660"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_v3_660_of_match);
+
+static struct platform_driver ufs_qcom_phy_qmp_v3_660_driver = {
+	.probe = ufs_qcom_phy_qmp_v3_660_probe,
+	.driver = {
+		.of_match_table = ufs_qcom_phy_qmp_v3_660_of_match,
+		.name = "ufs_qcom_phy_qmp_v3_660",
+	},
+};
+
+module_platform_driver(ufs_qcom_phy_qmp_v3_660_driver);
+
+MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP v3 660");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.h b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.h
new file mode 100644
index 0000000..e7ff88b
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2013-2016,2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef UFS_QCOM_PHY_QMP_V3_660_H_
+#define UFS_QCOM_PHY_QMP_V3_660_H_
+
+#include "phy-qcom-ufs-i.h"
+
+/* QCOM UFS PHY control registers */
+#define COM_BASE	0x000
+#define COM_OFF(x)	(COM_BASE + x)
+#define COM_SIZE	0x1C0
+
+#define TX_BASE		0x400
+#define TX_OFF(x)	(TX_BASE + x)
+#define TX_SIZE		0x128
+
+#define RX_BASE		0x600
+#define RX_OFF(x)	(RX_BASE + x)
+#define RX_SIZE		0x1FC
+
+#define PHY_BASE	0xC00
+#define PHY_OFF(x)	(PHY_BASE + x)
+#define PHY_SIZE	0x1B4
+
+/* UFS PHY QSERDES COM registers */
+#define QSERDES_COM_ATB_SEL1			COM_OFF(0x00)
+#define QSERDES_COM_ATB_SEL2			COM_OFF(0x04)
+#define QSERDES_COM_FREQ_UPDATE			COM_OFF(0x08)
+#define QSERDES_COM_BG_TIMER			COM_OFF(0x0C)
+#define QSERDES_COM_SSC_EN_CENTER		COM_OFF(0x10)
+#define QSERDES_COM_SSC_ADJ_PER1		COM_OFF(0x14)
+#define QSERDES_COM_SSC_ADJ_PER2		COM_OFF(0x18)
+#define QSERDES_COM_SSC_PER1			COM_OFF(0x1C)
+#define QSERDES_COM_SSC_PER2			COM_OFF(0x20)
+#define QSERDES_COM_SSC_STEP_SIZE1		COM_OFF(0x24)
+#define QSERDES_COM_SSC_STEP_SIZE2		COM_OFF(0x28)
+#define QSERDES_COM_POST_DIV			COM_OFF(0x2C)
+#define QSERDES_COM_POST_DIV_MUX		COM_OFF(0x30)
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		COM_OFF(0x34)
+#define QSERDES_COM_CLK_ENABLE1			COM_OFF(0x38)
+#define QSERDES_COM_SYS_CLK_CTRL		COM_OFF(0x3C)
+#define QSERDES_COM_SYSCLK_BUF_ENABLE		COM_OFF(0x40)
+#define QSERDES_COM_PLL_EN			COM_OFF(0x44)
+#define QSERDES_COM_PLL_IVCO			COM_OFF(0x48)
+#define QSERDES_COM_LOCK_CMP1_MODE0		COM_OFF(0x4C)
+#define QSERDES_COM_LOCK_CMP2_MODE0		COM_OFF(0x50)
+#define QSERDES_COM_LOCK_CMP3_MODE0		COM_OFF(0x54)
+#define QSERDES_COM_LOCK_CMP1_MODE1		COM_OFF(0x58)
+#define QSERDES_COM_LOCK_CMP2_MODE1		COM_OFF(0x5C)
+#define QSERDES_COM_LOCK_CMP3_MODE1		COM_OFF(0x60)
+#define QSERDES_COM_CMD_RSVD0			COM_OFF(0x64)
+#define QSERDES_COM_EP_CLOCK_DETECT_CTRL	COM_OFF(0x68)
+#define QSERDES_COM_SYSCLK_DET_COMP_STATUS	COM_OFF(0x6C)
+#define QSERDES_COM_BG_TRIM			COM_OFF(0x70)
+#define QSERDES_COM_CLK_EP_DIV			COM_OFF(0x74)
+#define QSERDES_COM_CP_CTRL_MODE0		COM_OFF(0x78)
+#define QSERDES_COM_CP_CTRL_MODE1		COM_OFF(0x7C)
+#define QSERDES_COM_CMN_RSVD1			COM_OFF(0x80)
+#define QSERDES_COM_PLL_RCTRL_MODE0		COM_OFF(0x84)
+#define QSERDES_COM_PLL_RCTRL_MODE1		COM_OFF(0x88)
+#define QSERDES_COM_CMN_RSVD2			COM_OFF(0x8C)
+#define QSERDES_COM_PLL_CCTRL_MODE0		COM_OFF(0x90)
+#define QSERDES_COM_PLL_CCTRL_MODE1		COM_OFF(0x94)
+#define QSERDES_COM_CMN_RSVD3			COM_OFF(0x98)
+#define QSERDES_COM_PLL_CNTRL			COM_OFF(0x9C)
+#define QSERDES_COM_PHASE_SEL_CTRL		COM_OFF(0xA0)
+#define QSERDES_COM_PHASE_SEL_DC		COM_OFF(0xA4)
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM		COM_OFF(0xA8)
+#define QSERDES_COM_SYSCLK_EN_SEL		COM_OFF(0xAC)
+#define QSERDES_COM_CML_SYSCLK_SEL		COM_OFF(0xB0)
+#define QSERDES_COM_RESETSM_CNTRL		COM_OFF(0xB4)
+#define QSERDES_COM_RESETSM_CNTRL2		COM_OFF(0xB8)
+#define QSERDES_COM_RESTRIM_CTRL		COM_OFF(0xBC)
+#define QSERDES_COM_RESTRIM_CTRL2		COM_OFF(0xC0)
+#define QSERDES_COM_LOCK_CMP_EN			COM_OFF(0xC8)
+#define QSERDES_COM_LOCK_CMP_CFG		COM_OFF(0xCC)
+#define QSERDES_COM_DEC_START_MODE0		COM_OFF(0xD0)
+#define QSERDES_COM_DEC_START_MODE1		COM_OFF(0xD4)
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL		COM_OFF(0xD8)
+#define QSERDES_COM_DIV_FRAC_START1_MODE0	COM_OFF(0xDC)
+#define QSERDES_COM_DIV_FRAC_START2_MODE0	COM_OFF(0xE0)
+#define QSERDES_COM_DIV_FRAC_START3_MODE0	COM_OFF(0xE4)
+#define QSERDES_COM_DIV_FRAC_START1_MODE1	COM_OFF(0xE8)
+#define QSERDES_COM_DIV_FRAC_START2_MODE1	COM_OFF(0xEC)
+#define QSERDES_COM_DIV_FRAC_START3_MODE1	COM_OFF(0xF0)
+#define QSERDES_COM_VCO_TUNE_MINVAL1		COM_OFF(0xF4)
+#define QSERDES_COM_VCO_TUNE_MINVAL2		COM_OFF(0xF8)
+#define QSERDES_COM_CMN_RSVD4			COM_OFF(0xFC)
+#define QSERDES_COM_INTEGLOOP_INITVAL		COM_OFF(0x100)
+#define QSERDES_COM_INTEGLOOP_EN		COM_OFF(0x104)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	COM_OFF(0x108)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	COM_OFF(0x10C)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1	COM_OFF(0x110)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1	COM_OFF(0x114)
+#define QSERDES_COM_VCO_TUNE_MAXVAL1		COM_OFF(0x118)
+#define QSERDES_COM_VCO_TUNE_MAXVAL2		COM_OFF(0x11C)
+#define QSERDES_COM_RES_TRIM_CONTROL2		COM_OFF(0x120)
+#define QSERDES_COM_VCO_TUNE_CTRL		COM_OFF(0x124)
+#define QSERDES_COM_VCO_TUNE_MAP		COM_OFF(0x128)
+#define QSERDES_COM_VCO_TUNE1_MODE0		COM_OFF(0x12C)
+#define QSERDES_COM_VCO_TUNE2_MODE0		COM_OFF(0x130)
+#define QSERDES_COM_VCO_TUNE1_MODE1		COM_OFF(0x134)
+#define QSERDES_COM_VCO_TUNE2_MODE1		COM_OFF(0x138)
+#define QSERDES_COM_VCO_TUNE_INITVAL1		COM_OFF(0x13C)
+#define QSERDES_COM_VCO_TUNE_INITVAL2		COM_OFF(0x140)
+#define QSERDES_COM_VCO_TUNE_TIMER1		COM_OFF(0x144)
+#define QSERDES_COM_VCO_TUNE_TIMER2		COM_OFF(0x148)
+#define QSERDES_COM_SAR				COM_OFF(0x14C)
+#define QSERDES_COM_SAR_CLK			COM_OFF(0x150)
+#define QSERDES_COM_SAR_CODE_OUT_STATUS		COM_OFF(0x154)
+#define QSERDES_COM_SAR_CODE_READY_STATUS	COM_OFF(0x158)
+#define QSERDES_COM_CMN_STATUS			COM_OFF(0x15C)
+#define QSERDES_COM_RESET_SM_STATUS		COM_OFF(0x160)
+#define QSERDES_COM_RESTRIM_CODE_STATUS		COM_OFF(0x164)
+#define QSERDES_COM_PLLCAL_CODE1_STATUS		COM_OFF(0x168)
+#define QSERDES_COM_PLLCAL_CODE2_STATUS		COM_OFF(0x16C)
+#define QSERDES_COM_BG_CTRL			COM_OFF(0x170)
+#define QSERDES_COM_CLK_SELECT			COM_OFF(0x174)
+#define QSERDES_COM_HSCLK_SEL			COM_OFF(0x178)
+#define QSERDES_COM_INTEGLOOP_BINCODE_STATUS	COM_OFF(0x17C)
+#define QSERDES_COM_PLL_ANALOG			COM_OFF(0x180)
+#define QSERDES_COM_CORECLK_DIV			COM_OFF(0x184)
+#define QSERDES_COM_SW_RESET			COM_OFF(0x188)
+#define QSERDES_COM_CORE_CLK_EN			COM_OFF(0x18C)
+#define QSERDES_COM_C_READY_STATUS		COM_OFF(0x190)
+#define QSERDES_COM_CMN_CONFIG			COM_OFF(0x194)
+#define QSERDES_COM_CMN_RATE_OVERRIDE		COM_OFF(0x198)
+#define QSERDES_COM_SVS_MODE_CLK_SEL		COM_OFF(0x19C)
+#define QSERDES_COM_DEBUG_BUS0			COM_OFF(0x1A0)
+#define QSERDES_COM_DEBUG_BUS1			COM_OFF(0x1A4)
+#define QSERDES_COM_DEBUG_BUS2			COM_OFF(0x1A8)
+#define QSERDES_COM_DEBUG_BUS3			COM_OFF(0x1AC)
+#define QSERDES_COM_DEBUG_BUS_SEL		COM_OFF(0x1B0)
+#define QSERDES_COM_CMN_MISC1			COM_OFF(0x1B4)
+#define QSERDES_COM_CORECLK_DIV_MODE1		COM_OFF(0x1BC)
+#define QSERDES_COM_CMN_RSVD5			COM_OFF(0x1C0)
+
+/* UFS PHY registers */
+#define UFS_PHY_PHY_START			PHY_OFF(0x00)
+#define UFS_PHY_POWER_DOWN_CONTROL		PHY_OFF(0x04)
+#define UFS_PHY_TX_LARGE_AMP_DRV_LVL		PHY_OFF(0x34)
+#define UFS_PHY_TX_SMALL_AMP_DRV_LVL		PHY_OFF(0x3C)
+#define UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP	PHY_OFF(0xCC)
+#define UFS_PHY_LINECFG_DISABLE			PHY_OFF(0x138)
+#define UFS_PHY_RX_SYM_RESYNC_CTRL		PHY_OFF(0x13C)
+#define UFS_PHY_RX_MIN_HIBERN8_TIME		PHY_OFF(0x140)
+#define UFS_PHY_RX_SIGDET_CTRL2			PHY_OFF(0x148)
+#define UFS_PHY_RX_PWM_GEAR_BAND		PHY_OFF(0x154)
+#define UFS_PHY_PCS_READY_STATUS		PHY_OFF(0x168)
+
+/* UFS PHY TX registers */
+#define QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN	TX_OFF(0x68)
+#define QSERDES_TX_LANE_MODE				TX_OFF(0x94)
+
+/* UFS PHY RX registers */
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_HALF	RX_OFF(0x30)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER	RX_OFF(0x34)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_EIGHTH	RX_OFF(0x38)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN		RX_OFF(0x3C)
+#define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN	RX_OFF(0x40)
+#define QSERDES_RX_UCDR_SO_SATURATION_ENABLE	RX_OFF(0x48)
+#define QSERDES_RX_RX_TERM_BW			RX_OFF(0x90)
+#define QSERDES_RX_RX_EQ_GAIN1_LSB		RX_OFF(0xC4)
+#define QSERDES_RX_RX_EQ_GAIN1_MSB		RX_OFF(0xC8)
+#define QSERDES_RX_RX_EQ_GAIN2_LSB		RX_OFF(0xCC)
+#define QSERDES_RX_RX_EQ_GAIN2_MSB		RX_OFF(0xD0)
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2	RX_OFF(0xD8)
+#define QSERDES_RX_SIGDET_CNTRL			RX_OFF(0x114)
+#define QSERDES_RX_SIGDET_LVL			RX_OFF(0x118)
+#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL	RX_OFF(0x11C)
+#define QSERDES_RX_RX_INTERFACE_MODE		RX_OFF(0x12C)
+
+#define UFS_PHY_RX_LINECFG_DISABLE_BIT		BIT(1)
+
+/*
+ * This structure represents the v3 660 specific phy.
+ * common_cfg MUST remain the first field in this structure
+ * in case extra fields are added. This way, when calling
+ * get_ufs_qcom_phy() of generic phy, we can extract the
+ * common phy structure (struct ufs_qcom_phy) out of it
+ * regardless of the relevant specific phy.
+ */
+struct ufs_qcom_phy_qmp_v3_660 {
+	struct ufs_qcom_phy common_cfg;
+};
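+
+/*
+ * Illustrative sketch (not part of the driver sources): because common_cfg
+ * is the first member, the generic layer can recover the common PHY
+ * structure from the specific one with a plain cast, e.g.:
+ *
+ *	struct ufs_qcom_phy_qmp_v3_660 *p = phy_get_drvdata(generic_phy);
+ *	struct ufs_qcom_phy *common = &p->common_cfg;
+ *
+ * Keeping the member first makes the two pointers interchangeable.
+ */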
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_3_1_1[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x0e),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN, 0x45),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x06),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_LVL, 0x24),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x1E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_TERM_BW, 0x5B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0D),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TRIM, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_PWM_GEAR_BAND, 0x15),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SO_SATURATION_ENABLE, 0x4B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL1, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6c),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_HIBERN8_TIME, 0x9A), /* 8 us */
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x44),
+};
+
+#endif /* UFS_QCOM_PHY_QMP_V3_660_H_ */
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.c
index a11b552..5b50f77 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.c
@@ -76,6 +76,34 @@ static int ufs_qcom_phy_qmp_v4_lito_exit(struct phy *generic_phy)
 	return 0;
 }
 
+static inline
+void ufs_qcom_phy_qmp_v4_tx_pull_down_ctrl(struct ufs_qcom_phy *phy,
+						bool enable)
+{
+	u32 temp;
+
+	temp = readl_relaxed(phy->mmio + QSERDES_RX0_RX_INTERFACE_MODE);
+	if (enable)
+		temp |= QSERDES_RX_INTERFACE_MODE_CLOCK_EDGE_BIT;
+	else
+		temp &= ~QSERDES_RX_INTERFACE_MODE_CLOCK_EDGE_BIT;
+	writel_relaxed(temp, phy->mmio + QSERDES_RX0_RX_INTERFACE_MODE);
+
+	if (phy->lanes_per_direction == 1)
+		goto out;
+
+	temp = readl_relaxed(phy->mmio + QSERDES_RX1_RX_INTERFACE_MODE);
+	if (enable)
+		temp |= QSERDES_RX_INTERFACE_MODE_CLOCK_EDGE_BIT;
+	else
+		temp &= ~QSERDES_RX_INTERFACE_MODE_CLOCK_EDGE_BIT;
+	writel_relaxed(temp, phy->mmio + QSERDES_RX1_RX_INTERFACE_MODE);
+
+out:
+	/* ensure register value is committed */
+	mb();
+}
+
 static
 void ufs_qcom_phy_qmp_v4_lito_power_control(struct ufs_qcom_phy *phy,
 					 bool power_ctrl)
@@ -88,7 +116,9 @@ void ufs_qcom_phy_qmp_v4_lito_power_control(struct ufs_qcom_phy *phy,
 		 * powered OFF.
 		 */
 		mb();
+		ufs_qcom_phy_qmp_v4_tx_pull_down_ctrl(phy, true);
 	} else {
+		ufs_qcom_phy_qmp_v4_tx_pull_down_ctrl(phy, false);
 		/* bring PHY out of analog power collapse */
 		writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
 
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h
index de03667..f29c0f4 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h
@@ -87,6 +87,9 @@
 #define QSERDES_TX0_PWM_GEAR_3_DIVIDER_BAND0_1	TX_OFF(0, 0x170)
 #define QSERDES_TX0_PWM_GEAR_4_DIVIDER_BAND0_1	TX_OFF(0, 0x174)
 #define QSERDES_TX0_LANE_MODE_1			TX_OFF(0, 0x84)
+#define QSERDES_TX0_LANE_MODE_3			TX_OFF(0, 0x8C)
+#define QSERDES_TX0_RES_CODE_LANE_OFFSET_TX	TX_OFF(0, 0x3C)
+#define QSERDES_TX0_RES_CODE_LANE_OFFSET_RX	TX_OFF(0, 0x40)
 #define QSERDES_TX0_TRAN_DRVR_EMP_EN		TX_OFF(0, 0xC0)
 
 #define QSERDES_TX1_PWM_GEAR_1_DIVIDER_BAND0_1	TX_OFF(1, 0x168)
@@ -94,6 +97,9 @@
 #define QSERDES_TX1_PWM_GEAR_3_DIVIDER_BAND0_1	TX_OFF(1, 0x170)
 #define QSERDES_TX1_PWM_GEAR_4_DIVIDER_BAND0_1	TX_OFF(1, 0x174)
 #define QSERDES_TX1_LANE_MODE_1			TX_OFF(1, 0x84)
+#define QSERDES_TX1_LANE_MODE_3			TX_OFF(1, 0x8C)
+#define QSERDES_TX1_RES_CODE_LANE_OFFSET_TX	TX_OFF(1, 0x3C)
+#define QSERDES_TX1_RES_CODE_LANE_OFFSET_RX	TX_OFF(1, 0x40)
 #define QSERDES_TX1_TRAN_DRVR_EMP_EN		TX_OFF(1, 0xC0)
 
 /* UFS PHY RX registers */
@@ -132,10 +138,13 @@
 #define QSERDES_RX0_RX_MODE_10_HIGH3			RX_OFF(0, 0x190)
 #define QSERDES_RX0_RX_MODE_10_HIGH4			RX_OFF(0, 0x194)
 #define QSERDES_RX0_DCC_CTRL1				RX_OFF(0, 0x1A8)
+#define QSERDES_RX0_VGA_CAL_CNTRL2			RX_OFF(0, 0xD8)
+
 #define QSERDES_RX0_GM_CAL				RX_OFF(0, 0xDC)
 #define QSERDES_RX0_AC_JTAG_ENABLE			RX_OFF(0, 0x68)
 #define QSERDES_RX0_UCDR_FO_GAIN			RX_OFF(0, 0x08)
 #define QSERDES_RX0_UCDR_SO_GAIN			RX_OFF(0, 0x14)
+#define QSERDES_RX0_RX_INTERFACE_MODE			RX_OFF(0, 0x134)
 
 #define QSERDES_RX1_SIGDET_LVL				RX_OFF(1, 0x120)
 #define QSERDES_RX1_SIGDET_CNTRL			RX_OFF(1, 0x11C)
@@ -172,12 +181,15 @@
 #define QSERDES_RX1_RX_MODE_10_HIGH3			RX_OFF(1, 0x190)
 #define QSERDES_RX1_RX_MODE_10_HIGH4			RX_OFF(1, 0x194)
 #define QSERDES_RX1_DCC_CTRL1				RX_OFF(1, 0x1A8)
+#define QSERDES_RX1_VGA_CAL_CNTRL2			RX_OFF(1, 0xD8)
 #define QSERDES_RX1_GM_CAL				RX_OFF(1, 0xDC)
 #define QSERDES_RX1_AC_JTAG_ENABLE			RX_OFF(1, 0x68)
 #define QSERDES_RX1_UCDR_FO_GAIN			RX_OFF(1, 0x08)
 #define QSERDES_RX1_UCDR_SO_GAIN			RX_OFF(1, 0x14)
+#define QSERDES_RX1_RX_INTERFACE_MODE			RX_OFF(1, 0x134)
 
 #define UFS_PHY_RX_LINECFG_DISABLE_BIT		BIT(1)
+#define QSERDES_RX_INTERFACE_MODE_CLOCK_EDGE_BIT	BIT(5)
 
 /*
  * This structure represents the v4 lito specific phy.
@@ -221,7 +233,10 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_no_g4[] = {
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_PWM_GEAR_4_DIVIDER_BAND0_1, 0x00),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_LANE_MODE_1, 0x35),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_LANE_MODE_1, 0xF5),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_LANE_MODE_3, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_RES_CODE_LANE_OFFSET_TX, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_RES_CODE_LANE_OFFSET_RX, 0x09),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_TRAN_DRVR_EMP_EN, 0x0C),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_SIGDET_LVL, 0x24),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_SIGDET_CNTRL, 0x0F),
@@ -232,7 +247,7 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_no_g4[] = {
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_PI_CONTROLS, 0xF1),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FASTLOCK_COUNT_LOW, 0x80),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_PI_CTRL2, 0x80),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FO_GAIN, 0x0E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FO_GAIN, 0x0C),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SO_GAIN, 0x04),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_TERM_BW, 0x1B),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL2, 0x06),
@@ -242,11 +257,11 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_no_g4[] = {
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_IDAC_MEASURE_TIME, 0x10),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_IDAC_TSETTLE_LOW, 0xC0),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_IDAC_TSETTLE_HIGH, 0x00),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_LOW, 0x6D),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH, 0x6D),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH2, 0xED),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH3, 0x3B),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH4, 0x3C),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_LOW, 0x64),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH, 0x64),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH2, 0x24),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH3, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH4, 0x1F),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_01_LOW, 0xE0),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_01_HIGH, 0xC8),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_01_HIGH2, 0xC8),
@@ -258,6 +273,7 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_no_g4[] = {
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_10_HIGH3, 0x3B),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_10_HIGH4, 0xB1),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_DCC_CTRL1, 0x0C),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_VGA_CAL_CNTRL2, 0x04),
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6D),
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
@@ -279,7 +295,10 @@ static struct ufs_qcom_phy_calibration phy_cal_table_2nd_lane_no_g4[] = {
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_PWM_GEAR_4_DIVIDER_BAND0_1, 0x00),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_LANE_MODE_1, 0x35),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_LANE_MODE_1, 0xF5),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_LANE_MODE_3, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_RES_CODE_LANE_OFFSET_TX, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_RES_CODE_LANE_OFFSET_RX, 0x09),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_TRAN_DRVR_EMP_EN, 0x0C),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_SIGDET_LVL, 0x24),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_SIGDET_CNTRL, 0x0F),
@@ -290,7 +309,7 @@ static struct ufs_qcom_phy_calibration phy_cal_table_2nd_lane_no_g4[] = {
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_PI_CONTROLS, 0xF1),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FASTLOCK_COUNT_LOW, 0x80),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_PI_CTRL2, 0x80),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FO_GAIN, 0x0E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FO_GAIN, 0x0C),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SO_GAIN, 0x04),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_TERM_BW, 0x1B),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL2, 0x06),
@@ -300,11 +319,11 @@ static struct ufs_qcom_phy_calibration phy_cal_table_2nd_lane_no_g4[] = {
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_IDAC_MEASURE_TIME, 0x10),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_IDAC_TSETTLE_LOW, 0xC0),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_IDAC_TSETTLE_HIGH, 0x00),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_LOW, 0x6D),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH, 0x6D),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH2, 0xED),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH3, 0x3B),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH4, 0x3C),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_LOW, 0x64),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH, 0x64),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH2, 0x24),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH3, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH4, 0x1F),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_01_LOW, 0xE0),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_01_HIGH, 0xC8),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_01_HIGH2, 0xC8),
@@ -316,6 +335,7 @@ static struct ufs_qcom_phy_calibration phy_cal_table_2nd_lane_no_g4[] = {
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_10_HIGH3, 0x3B),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_10_HIGH4, 0xB1),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_DCC_CTRL1, 0x0C),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_VGA_CAL_CNTRL2, 0x04),
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_MULTI_LANE_CTRL1, 0x02),
 };
 
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c
index d7de831..a15d375 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, 2019 Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -146,6 +146,20 @@ struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
 		common_cfg->lanes_per_direction =
 			UFS_PHY_DEFAULT_LANES_PER_DIRECTION;
 
+	if (of_property_read_string(dev->of_node, "qcom,rpmh-resource-name",
+			     &common_cfg->rpmh_rsc.qphy_rsc_name))
+		dev_dbg(dev, "%s rpmh-resource-name missing in DT node or n/a\n",
+				__func__);
+
+	if (common_cfg->rpmh_rsc.qphy_rsc_name) {
+		err = cmd_db_ready();
+		if (err) {
+			dev_err(dev, "%s: Command DB not ready, err: %d\n",
+				__func__, err);
+			goto out;
+		}
+	}
+
 	/*
 	 * UFS PHY power management is managed by its parent (UFS host
 	 * controller) hence set the no runtime PM callbacks flag
@@ -316,6 +330,15 @@ int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common)
 	ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vddp_ref_clk,
 		"vddp-ref-clk");
 
+	if (phy_common->rpmh_rsc.qphy_rsc_name) {
+		phy_common->rpmh_rsc.qphy_rsc_addr =
+			cmd_db_read_addr(phy_common->rpmh_rsc.qphy_rsc_name);
+		if (!phy_common->rpmh_rsc.qphy_rsc_addr) {
+			dev_err(phy_common->dev, "%s: Invalid rpmh resource address\n",
+					__func__);
+			err = -EINVAL;
+		}
+	}
 out:
 	return err;
 }
@@ -526,6 +549,31 @@ static void ufs_qcom_phy_disable_ref_clk(struct ufs_qcom_phy *phy)
 	}
 }
 
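+/*
+ * Vote for (or drop) the PHY's RPMh resource, if one was named in DT.
+ * The vote is issued as a single active-only TCS command; the cached
+ * 'enabled' state avoids re-sending an identical request.
+ */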
+static int ufs_qcom_phy_setup_rpmh_rsc(struct device *dev,
+				struct ufs_qcom_phy_rpmh_rsc *rpmh_rsc,
+				bool on)
+{
+	struct tcs_cmd cmd = {0};
+	int err = 0;
+
+	if (!rpmh_rsc->qphy_rsc_addr)
+		goto out;
+
+	if (rpmh_rsc->enabled == on)
+		goto out;
+
+	cmd.addr = rpmh_rsc->qphy_rsc_addr;
+	cmd.data = on;
+	cmd.wait = true;
+
+	err = rpmh_write_async(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
+	if (!err)
+		rpmh_rsc->enabled = on;
+
+out:
+	return err;
+}
+
 #define UFS_REF_CLK_EN	(1 << 5)
 
 static void ufs_qcom_phy_dev_ref_clk_ctrl(struct phy *generic_phy, bool enable)
@@ -723,6 +771,13 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
 	if (phy_common->is_powered_on)
 		return 0;
 
+	err = ufs_qcom_phy_setup_rpmh_rsc(dev, &phy_common->rpmh_rsc, true);
+	if (err) {
+		dev_err(dev, "%s enable rpmh resource failed, err=%d\n",
+			__func__, err);
+		goto out;
+	}
+
 	err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_phy);
 	if (err) {
 		dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
@@ -798,6 +853,10 @@ int ufs_qcom_phy_power_off(struct phy *generic_phy)
 
 	ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_pll);
 	ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_phy);
+
+	ufs_qcom_phy_setup_rpmh_rsc(phy_common->dev,
+				&phy_common->rpmh_rsc, false);
+
 	phy_common->is_powered_on = false;
 
 	return 0;
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index fb8f05e..6fb2b69 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -66,6 +66,7 @@
 					 USB2_OBINT_IDDIGCHG)
 
 /* VBCTRL */
+#define USB2_VBCTRL_OCCLREN		BIT(16)
 #define USB2_VBCTRL_DRVVBUSSEL		BIT(8)
 
 /* LINECTRL1 */
@@ -289,6 +290,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
 	u32 val;
 
 	val = readl(usb2_base + USB2_VBCTRL);
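+	/* Disable clearing VBUS in over-current via the OCCLREN bit */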
+	val &= ~USB2_VBCTRL_OCCLREN;
 	writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL);
 	writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA);
 	val = readl(usb2_base + USB2_OBINTEN);
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index b7e272d..227646e 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1524,7 +1524,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
 			DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
 		},
 	},
 	{
@@ -1532,7 +1531,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
 		},
 	},
 	{
@@ -1540,7 +1538,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
 		},
 	},
 	{
@@ -1548,7 +1545,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
 		},
 	},
 	{}
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 4edeb4c..c4c70dc 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -198,8 +198,8 @@ static const unsigned int uart_rts_b_pins[]	= { GPIODV_27 };
 
 static const unsigned int uart_tx_c_pins[]	= { GPIOY_13 };
 static const unsigned int uart_rx_c_pins[]	= { GPIOY_14 };
-static const unsigned int uart_cts_c_pins[]	= { GPIOX_11 };
-static const unsigned int uart_rts_c_pins[]	= { GPIOX_12 };
+static const unsigned int uart_cts_c_pins[]	= { GPIOY_11 };
+static const unsigned int uart_rts_c_pins[]	= { GPIOY_12 };
 
 static const unsigned int i2c_sck_a_pins[]	= { GPIODV_25 };
 static const unsigned int i2c_sda_a_pins[]	= { GPIODV_24 };
@@ -445,10 +445,10 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
 	GROUP(pwm_f_x,		3,	18),
 
 	/* Bank Y */
-	GROUP(uart_cts_c,	1,	19),
-	GROUP(uart_rts_c,	1,	18),
-	GROUP(uart_tx_c,	1,	17),
-	GROUP(uart_rx_c,	1,	16),
+	GROUP(uart_cts_c,	1,	17),
+	GROUP(uart_rts_c,	1,	16),
+	GROUP(uart_tx_c,	1,	19),
+	GROUP(uart_rx_c,	1,	18),
 	GROUP(pwm_a_y,		1,	21),
 	GROUP(pwm_f_y,		1,	20),
 	GROUP(i2s_out_ch23_y,	1,	5),
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index aa48b3f..3aac640 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -183,10 +183,10 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
 	PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
 		      BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
 		      18, 2, "gpio", "uart"),
-	PIN_GRP_GPIO("led0_od", 11, 1, BIT(20), "led"),
-	PIN_GRP_GPIO("led1_od", 12, 1, BIT(21), "led"),
-	PIN_GRP_GPIO("led2_od", 13, 1, BIT(22), "led"),
-	PIN_GRP_GPIO("led3_od", 14, 1, BIT(23), "led"),
+	PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
+	PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
+	PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
+	PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
 
 };
 
@@ -218,11 +218,11 @@ static const struct armada_37xx_pin_data armada_37xx_pin_sb = {
 };
 
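+/*
+ * 'offset' is now taken by reference: callers rely on the value wrapped
+ * into the second register when they subsequently compute BIT(offset).
+ */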
 static inline void armada_37xx_update_reg(unsigned int *reg,
-					  unsigned int offset)
+					  unsigned int *offset)
 {
 	/* We never have more than 2 registers */
-	if (offset >= GPIO_PER_REG) {
-		offset -= GPIO_PER_REG;
+	if (*offset >= GPIO_PER_REG) {
+		*offset -= GPIO_PER_REG;
 		*reg += sizeof(u32);
 	}
 }
@@ -373,7 +373,7 @@ static inline void armada_37xx_irq_update_reg(unsigned int *reg,
 {
 	int offset = irqd_to_hwirq(d);
 
-	armada_37xx_update_reg(reg, offset);
+	armada_37xx_update_reg(reg, &offset);
 }
 
 static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
@@ -383,7 +383,7 @@ static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
 	unsigned int reg = OUTPUT_EN;
 	unsigned int mask;
 
-	armada_37xx_update_reg(&reg, offset);
+	armada_37xx_update_reg(&reg, &offset);
 	mask = BIT(offset);
 
 	return regmap_update_bits(info->regmap, reg, mask, 0);
@@ -396,7 +396,7 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip,
 	unsigned int reg = OUTPUT_EN;
 	unsigned int val, mask;
 
-	armada_37xx_update_reg(&reg, offset);
+	armada_37xx_update_reg(&reg, &offset);
 	mask = BIT(offset);
 	regmap_read(info->regmap, reg, &val);
 
@@ -410,7 +410,7 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
 	unsigned int reg = OUTPUT_EN;
 	unsigned int mask, val, ret;
 
-	armada_37xx_update_reg(&reg, offset);
+	armada_37xx_update_reg(&reg, &offset);
 	mask = BIT(offset);
 
 	ret = regmap_update_bits(info->regmap, reg, mask, mask);
@@ -431,7 +431,7 @@ static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
 	unsigned int reg = INPUT_VAL;
 	unsigned int val, mask;
 
-	armada_37xx_update_reg(&reg, offset);
+	armada_37xx_update_reg(&reg, &offset);
 	mask = BIT(offset);
 
 	regmap_read(info->regmap, reg, &val);
@@ -446,7 +446,7 @@ static void armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
 	unsigned int reg = OUTPUT_VAL;
 	unsigned int mask, val;
 
-	armada_37xx_update_reg(&reg, offset);
+	armada_37xx_update_reg(&reg, &offset);
 	mask = BIT(offset);
 	val = value ? mask : 0;
 
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 1425c28..cd7a5d9 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -569,15 +569,25 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
 			    !(regval & BIT(INTERRUPT_MASK_OFF)))
 				continue;
 			irq = irq_find_mapping(gc->irq.domain, irqnr + i);
-			generic_handle_irq(irq);
+			if (irq != 0)
+				generic_handle_irq(irq);
 
 			/* Clear interrupt.
 			 * We must read the pin register again, in case the
 			 * value was changed while executing
 			 * generic_handle_irq() above.
+			 * If we didn't find a mapping for the interrupt,
+			 * disable it in order to avoid a system hang caused
+			 * by an interrupt storm.
 			 */
 			raw_spin_lock_irqsave(&gpio_dev->lock, flags);
 			regval = readl(regs + i);
+			if (irq == 0) {
+				regval &= ~BIT(INTERRUPT_ENABLE_OFF);
+				dev_dbg(&gpio_dev->pdev->dev,
+					"Disabling spurious GPIO IRQ %d\n",
+					irqnr + i);
+			}
 			writel(regval, regs + i);
 			raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 			ret = IRQ_HANDLED;
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 5193808..22eb7fe 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -174,6 +174,15 @@
 	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
 	  Technologies Inc LITO platform.
 
+config PINCTRL_LAGOON
+	tristate "Qualcomm Technologies Inc LAGOON pin controller driver"
+	depends on GPIOLIB && OF
+	select PINCTRL_MSM
+	help
+	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
+	  Technologies Inc LAGOON platform.
+
 config PINCTRL_BENGAL
 	tristate "Qualcomm Technologies Inc BENGAL pin controller driver"
 	depends on GPIOLIB && OF
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 3fb4f48..e6e4de9 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -23,3 +23,4 @@
 obj-$(CONFIG_PINCTRL_KONA) += pinctrl-kona.o
 obj-$(CONFIG_PINCTRL_LITO) += pinctrl-lito.o
 obj-$(CONFIG_PINCTRL_BENGAL) += pinctrl-bengal.o
+obj-$(CONFIG_PINCTRL_LAGOON) += pinctrl-lagoon.o
diff --git a/drivers/pinctrl/qcom/pinctrl-bengal.c b/drivers/pinctrl/qcom/pinctrl-bengal.c
index 7b70942..a93ddca 100644
--- a/drivers/pinctrl/qcom/pinctrl-bengal.c
+++ b/drivers/pinctrl/qcom/pinctrl-bengal.c
@@ -1,15 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2019, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #include <linux/module.h>
@@ -32,7 +23,7 @@
 #define EAST	0x00900000
 #define DUMMY	0x0
 #define REG_SIZE 0x1000
-#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
+#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9, wake_off, bit)	\
 	{						\
 		.name = "gpio" #id,			\
 		.pins = gpio##id##_pins,		\
@@ -71,6 +62,8 @@
 		.intr_polarity_bit = 1,		\
 		.intr_detection_bit = 2,	\
 		.intr_detection_width = 2,	\
+		.wake_reg = base + wake_off,	\
+		.wake_bit = bit,		\
 	}
 
 #define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv)	\
@@ -372,14 +365,14 @@ static const unsigned int sdc2_data_pins[] = { 119 };
 static const unsigned int ufs_reset_pins[] = { 120 };
 
 enum bengal_functions {
-	msm_mux_ddr_bist,
-	msm_mux_m_voc,
-	msm_mux_gpio,
 	msm_mux_qup0,
+	msm_mux_gpio,
+	msm_mux_ddr_bist,
 	msm_mux_phase_flag0,
 	msm_mux_qdss_gpio8,
 	msm_mux_atest_tsens,
 	msm_mux_mpm_pwr,
+	msm_mux_m_voc,
 	msm_mux_phase_flag1,
 	msm_mux_qdss_gpio9,
 	msm_mux_atest_tsens2,
@@ -555,11 +548,8 @@ enum bengal_functions {
 	msm_mux_NA,
 };
 
-static const char * const ddr_bist_groups[] = {
-	"gpio0", "gpio1", "gpio2", "gpio3",
-};
-static const char * const m_voc_groups[] = {
-	"gpio0",
+static const char * const qup0_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3", "gpio82", "gpio86",
 };
 static const char * const gpio_groups[] = {
 	"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
@@ -580,8 +570,8 @@ static const char * const gpio_groups[] = {
 	"gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
 	"gpio111", "gpio112",
 };
-static const char * const qup0_groups[] = {
-	"gpio0", "gpio1", "gpio2", "gpio3", "gpio82", "gpio86",
+static const char * const ddr_bist_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3",
 };
 static const char * const phase_flag0_groups[] = {
 	"gpio0",
@@ -595,6 +585,9 @@ static const char * const atest_tsens_groups[] = {
 static const char * const mpm_pwr_groups[] = {
 	"gpio1",
 };
+static const char * const m_voc_groups[] = {
+	"gpio0",
+};
 static const char * const phase_flag1_groups[] = {
 	"gpio1",
 };
@@ -1114,14 +1107,14 @@ static const char * const dac_calib25_groups[] = {
 };
 
 static const struct msm_function bengal_functions[] = {
-	FUNCTION(ddr_bist),
-	FUNCTION(m_voc),
-	FUNCTION(gpio),
 	FUNCTION(qup0),
+	FUNCTION(gpio),
+	FUNCTION(ddr_bist),
 	FUNCTION(phase_flag0),
 	FUNCTION(qdss_gpio8),
 	FUNCTION(atest_tsens),
 	FUNCTION(mpm_pwr),
+	FUNCTION(m_voc),
 	FUNCTION(phase_flag1),
 	FUNCTION(qdss_gpio9),
 	FUNCTION(atest_tsens2),
@@ -1303,190 +1296,223 @@ static const struct msm_function bengal_functions[] = {
  */
 static const struct msm_pingroup bengal_groups[] = {
 	[0] = PINGROUP(0, WEST, qup0, m_voc, ddr_bist, NA, phase_flag0,
-		       qdss_gpio8, atest_tsens, NA, NA),
+		       qdss_gpio8, atest_tsens, NA, NA, 0x71000, 1),
 	[1] = PINGROUP(1, WEST, qup0, mpm_pwr, ddr_bist, NA, phase_flag1,
-		       qdss_gpio9, atest_tsens2, NA, NA),
+		       qdss_gpio9, atest_tsens2, NA, NA, 0, -1),
 	[2] = PINGROUP(2, WEST, qup0, ddr_bist, NA, phase_flag2, qdss_gpio10,
-		       dac_calib0, atest_usb10, NA, NA),
+		       dac_calib0, atest_usb10, NA, NA, 0, -1),
 	[3] = PINGROUP(3, WEST, qup0, ddr_bist, NA, phase_flag3, qdss_gpio11,
-		       dac_calib1, atest_usb11, NA, NA),
+		       dac_calib1, atest_usb11, NA, NA, 0x71000, 2),
 	[4] = PINGROUP(4, WEST, qup1, CRI_TRNG0, NA, phase_flag4, dac_calib2,
-		       atest_usb12, NA, NA, NA),
+		       atest_usb12, NA, NA, NA, 0x71000, 3),
 	[5] = PINGROUP(5, WEST, qup1, CRI_TRNG1, NA, phase_flag5, dac_calib3,
-		       atest_usb13, NA, NA, NA),
+		       atest_usb13, NA, NA, NA, 0, -1),
 	[6] = PINGROUP(6, WEST, qup2, NA, phase_flag6, dac_calib4, atest_usb1,
-		       NA, NA, NA, NA),
-	[7] = PINGROUP(7, WEST, qup2, NA, NA, NA, NA, NA, NA, NA, NA),
+		       NA, NA, NA, NA, 0x71000, 4),
+	[7] = PINGROUP(7, WEST, qup2, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
 	[8] = PINGROUP(8, EAST, qup3, pbs_out, PLL_BIST, NA, qdss_gpio, NA,
-		       tsense_pwm, NA, NA),
+		       tsense_pwm, NA, NA, 0x71000, 0),
 	[9] = PINGROUP(9, EAST, qup3, pbs_out, PLL_BIST, NA, qdss_gpio, NA, NA,
-		       NA, NA),
+		       NA, NA, 0, -1),
 	[10] = PINGROUP(10, EAST, qup3, AGERA_PLL, NA, pbs0, qdss_gpio0, NA,
-			NA, NA, NA),
+			NA, NA, NA, 0, -1),
 	[11] = PINGROUP(11, EAST, qup3, AGERA_PLL, NA, pbs1, qdss_gpio1, NA,
-			NA, NA, NA),
-	[12] = PINGROUP(12, WEST, qup4, tgu_ch0, NA, NA, NA, NA, NA, NA, NA),
-	[13] = PINGROUP(13, WEST, qup4, tgu_ch1, NA, NA, NA, NA, NA, NA, NA),
+			NA, NA, NA, 0x71000, 1),
+	[12] = PINGROUP(12, WEST, qup4, tgu_ch0, NA, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[13] = PINGROUP(13, WEST, qup4, tgu_ch1, NA, NA, NA, NA, NA, NA, NA,
+			0x71000, 5),
 	[14] = PINGROUP(14, WEST, qup5, tgu_ch2, NA, phase_flag7, qdss_gpio4,
-			dac_calib5, NA, NA, NA),
+			dac_calib5, NA, NA, NA, 0x71000, 6),
 	[15] = PINGROUP(15, WEST, qup5, tgu_ch3, NA, phase_flag8, qdss_gpio5,
-			dac_calib6, NA, NA, NA),
+			dac_calib6, NA, NA, NA, 0, -1),
 	[16] = PINGROUP(16, WEST, qup5, NA, phase_flag9, qdss_gpio6,
-			dac_calib7, NA, NA, NA, NA),
+			dac_calib7, NA, NA, NA, NA, 0, -1),
 	[17] = PINGROUP(17, WEST, qup5, NA, phase_flag10, qdss_gpio7,
-			dac_calib8, NA, NA, NA, NA),
+			dac_calib8, NA, NA, NA, NA, 0x71000, 7),
 	[18] = PINGROUP(18, EAST, SDC2_TB, CRI_TRNG, pbs2, qdss_gpio2, NA, NA,
-			NA, NA, NA),
+			NA, NA, NA, 0x71000, 2),
 	[19] = PINGROUP(19, EAST, SDC1_TB, pbs3, qdss_gpio3, NA, NA, NA, NA,
-			NA, NA),
+			NA, NA, 0x71000, 3),
 	[20] = PINGROUP(20, EAST, cam_mclk, pbs4, qdss_gpio4, NA, NA, NA, NA,
-			NA, NA),
+			NA, NA, 0, -1),
 	[21] = PINGROUP(21, EAST, cam_mclk, adsp_ext, pbs5, qdss_gpio5, NA, NA,
-			NA, NA, NA),
+			NA, NA, NA, 0, -1),
 	[22] = PINGROUP(22, EAST, cci_i2c, prng_rosc, NA, pbs6, phase_flag11,
-			qdss_gpio6, dac_calib9, atest_usb20, NA),
+			qdss_gpio6, dac_calib9, atest_usb20, NA, 0, -1),
 	[23] = PINGROUP(23, EAST, cci_i2c, prng_rosc, NA, pbs7, phase_flag12,
-			qdss_gpio7, dac_calib10, atest_usb21, NA),
+			qdss_gpio7, dac_calib10, atest_usb21, NA, 0, -1),
 	[24] = PINGROUP(24, EAST, CCI_TIMER1, GCC_GP1, NA, pbs8, phase_flag13,
-			qdss_gpio8, dac_calib11, atest_usb22, NA),
+			qdss_gpio8, dac_calib11, atest_usb22, NA, 0x71000, 4),
 	[25] = PINGROUP(25, EAST, cci_async, CCI_TIMER0, NA, pbs9,
-			phase_flag14, qdss_gpio9, dac_calib12, atest_usb23, NA),
+			phase_flag14, qdss_gpio9, dac_calib12, atest_usb23, NA,
+			0x71000, 5),
 	[26] = PINGROUP(26, EAST, NA, pbs10, phase_flag15, qdss_gpio10,
-			dac_calib13, atest_usb2, vsense_trigger, NA, NA),
+			dac_calib13, atest_usb2, vsense_trigger, NA, NA, 0, -1),
 	[27] = PINGROUP(27, EAST, cam_mclk, qdss_cti, NA, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0x71000, 6),
 	[28] = PINGROUP(28, EAST, cam_mclk, CCI_TIMER2, qdss_cti, NA, NA, NA,
-			NA, NA, NA),
+			NA, NA, NA, 0x71000, 7),
 	[29] = PINGROUP(29, EAST, cci_i2c, NA, phase_flag16, dac_calib14,
-			atest_char, NA, NA, NA, NA),
+			atest_char, NA, NA, NA, NA, 0, -1),
 	[30] = PINGROUP(30, EAST, cci_i2c, NA, phase_flag17, dac_calib15,
-			atest_char0, NA, NA, NA, NA),
+			atest_char0, NA, NA, NA, NA, 0, -1),
 	[31] = PINGROUP(31, EAST, GP_PDM0, NA, phase_flag18, dac_calib16,
-			atest_char1, NA, NA, NA, NA),
+			atest_char1, NA, NA, NA, NA, 0x71000, 8),
 	[32] = PINGROUP(32, EAST, CCI_TIMER3, GP_PDM1, NA, phase_flag19,
-			dac_calib17, atest_char2, NA, NA, NA),
+			dac_calib17, atest_char2, NA, NA, NA, 0x71000, 9),
 	[33] = PINGROUP(33, EAST, GP_PDM2, NA, phase_flag20, dac_calib18,
-			atest_char3, NA, NA, NA, NA),
-	[34] = PINGROUP(34, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[35] = PINGROUP(35, EAST, NA, phase_flag21, NA, NA, NA, NA, NA, NA, NA),
-	[36] = PINGROUP(36, EAST, NA, phase_flag22, NA, NA, NA, NA, NA, NA, NA),
-	[37] = PINGROUP(37, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[38] = PINGROUP(38, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[39] = PINGROUP(39, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[40] = PINGROUP(40, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[41] = PINGROUP(41, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[42] = PINGROUP(42, EAST, NA, NAV_GPIO, NA, NA, NA, NA, NA, NA, NA),
-	[43] = PINGROUP(43, EAST, NA, NA, phase_flag23, NA, NA, NA, NA, NA, NA),
-	[44] = PINGROUP(44, EAST, NA, NA, phase_flag24, NA, NA, NA, NA, NA, NA),
-	[45] = PINGROUP(45, EAST, NA, NA, phase_flag25, NA, NA, NA, NA, NA, NA),
-	[46] = PINGROUP(46, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+			atest_char3, NA, NA, NA, NA, 0x71000, 10),
+	[34] = PINGROUP(34, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			0x71000, 11),
+	[35] = PINGROUP(35, EAST, NA, phase_flag21, NA, NA, NA, NA, NA, NA, NA,
+			0x71000, 12),
+	[36] = PINGROUP(36, EAST, NA, phase_flag22, NA, NA, NA, NA, NA, NA, NA,
+			0x71000, 13),
+	[37] = PINGROUP(37, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[38] = PINGROUP(38, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[39] = PINGROUP(39, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			0x71000, 14),
+	[40] = PINGROUP(40, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[41] = PINGROUP(41, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[42] = PINGROUP(42, EAST, NA, NAV_GPIO, NA, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[43] = PINGROUP(43, EAST, NA, NA, phase_flag23, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[44] = PINGROUP(44, EAST, NA, NA, phase_flag24, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[45] = PINGROUP(45, EAST, NA, NA, phase_flag25, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[46] = PINGROUP(46, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			0x71000, 15),
 	[47] = PINGROUP(47, EAST, NA, NAV_GPIO, pbs14, qdss_gpio14, NA, NA, NA,
-			NA, NA),
+			NA, NA, 0, -1),
 	[48] = PINGROUP(48, EAST, NA, vfr_1, NA, pbs15, qdss_gpio15, NA, NA,
-			NA, NA),
-	[49] = PINGROUP(49, EAST, NA, PA_INDICATOR, NA, NA, NA, NA, NA, NA, NA),
-	[50] = PINGROUP(50, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[51] = PINGROUP(51, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+			NA, NA, 0, -1),
+	[49] = PINGROUP(49, EAST, NA, PA_INDICATOR, NA, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[50] = PINGROUP(50, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[51] = PINGROUP(51, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
 	[52] = PINGROUP(52, EAST, NA, NAV_GPIO, pbs_out, NA, NA, NA, NA, NA,
-			NA),
-	[53] = PINGROUP(53, EAST, NA, gsm1_tx, NA, NA, NA, NA, NA, NA, NA),
-	[54] = PINGROUP(54, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[55] = PINGROUP(55, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[56] = PINGROUP(56, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[57] = PINGROUP(57, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[58] = PINGROUP(58, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[59] = PINGROUP(59, EAST, NA, SSBI_WTR1, NA, NA, NA, NA, NA, NA, NA),
-	[60] = PINGROUP(60, EAST, NA, SSBI_WTR1, NA, NA, NA, NA, NA, NA, NA),
-	[61] = PINGROUP(61, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[62] = PINGROUP(62, EAST, NA, pll_bypassnl, NA, NA, NA, NA, NA, NA, NA),
+			NA, 0, -1),
+	[53] = PINGROUP(53, EAST, NA, gsm1_tx, NA, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[54] = PINGROUP(54, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[55] = PINGROUP(55, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[56] = PINGROUP(56, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[57] = PINGROUP(57, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[58] = PINGROUP(58, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[59] = PINGROUP(59, EAST, NA, SSBI_WTR1, NA, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[60] = PINGROUP(60, EAST, NA, SSBI_WTR1, NA, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[61] = PINGROUP(61, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[62] = PINGROUP(62, EAST, NA, pll_bypassnl, NA, NA, NA, NA, NA, NA, NA,
+			0x71000, 16),
 	[63] = PINGROUP(63, EAST, pll_reset, NA, phase_flag26, ddr_pxi0, NA,
-			NA, NA, NA, NA),
+			NA, NA, NA, NA, 0x71000, 17),
 	[64] = PINGROUP(64, EAST, gsm0_tx, NA, phase_flag27, ddr_pxi0, NA, NA,
-			NA, NA, NA),
-	[65] = PINGROUP(65, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[66] = PINGROUP(66, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[67] = PINGROUP(67, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[68] = PINGROUP(68, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+			NA, NA, NA, 0x71000, 18),
+	[65] = PINGROUP(65, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			0x71000, 8),
+	[66] = PINGROUP(66, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			0x71000, 9),
+	[67] = PINGROUP(67, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			0x71000, 10),
+	[68] = PINGROUP(68, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
 	[69] = PINGROUP(69, WEST, qup1, GCC_GP2, qdss_gpio12, ddr_pxi1, NA, NA,
-			NA, NA, NA),
+			NA, NA, NA, 0x71000, 11),
 	[70] = PINGROUP(70, WEST, qup1, GCC_GP3, qdss_gpio13, ddr_pxi1, NA, NA,
-			NA, NA, NA),
-	[71] = PINGROUP(71, WEST, qup2, dbg_out, NA, NA, NA, NA, NA, NA, NA),
+			NA, NA, NA, 0x71000, 12),
+	[71] = PINGROUP(71, WEST, qup2, dbg_out, NA, NA, NA, NA, NA, NA, NA,
+			0, -1),
 	[72] = PINGROUP(72, SOUTH, uim2_data, qdss_cti, NA, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0x71000, 3),
 	[73] = PINGROUP(73, SOUTH, uim2_clk, NA, qdss_cti, NA, NA, NA, NA, NA,
-			NA),
-	[74] = PINGROUP(74, SOUTH, uim2_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+			NA, 0, -1),
+	[74] = PINGROUP(74, SOUTH, uim2_reset, NA, NA, NA, NA, NA, NA, NA, NA,
+			0, -1),
 	[75] = PINGROUP(75, SOUTH, uim2_present, NA, NA, NA, NA, NA, NA, NA,
-			NA),
-	[76] = PINGROUP(76, SOUTH, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA),
-	[77] = PINGROUP(77, SOUTH, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
-	[78] = PINGROUP(78, SOUTH, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+			NA, 0x71000, 4),
+	[76] = PINGROUP(76, SOUTH, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[77] = PINGROUP(77, SOUTH, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[78] = PINGROUP(78, SOUTH, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA,
+			0, -1),
 	[79] = PINGROUP(79, SOUTH, uim1_present, NA, NA, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0x71000, 5),
 	[80] = PINGROUP(80, WEST, qup2, dac_calib19, NA, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0x71000, 13),
 	[81] = PINGROUP(81, WEST, mdp_vsync, mdp_vsync, mdp_vsync, dac_calib20,
-			NA, NA, NA, NA, NA),
+			NA, NA, NA, NA, NA, 0x71000, 14),
 	[82] = PINGROUP(82, WEST, qup0, dac_calib21, NA, NA, NA, NA, NA, NA,
-			NA),
-	[83] = PINGROUP(83, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[84] = PINGROUP(84, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[85] = PINGROUP(85, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+			NA, 0, -1),
+	[83] = PINGROUP(83, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			0x71000, 15),
+	[84] = PINGROUP(84, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			0x71000, 16),
+	[85] = PINGROUP(85, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			0x71000, 17),
 	[86] = PINGROUP(86, WEST, qup0, GCC_GP1, atest_bbrx1, NA, NA, NA, NA,
-			NA, NA),
+			NA, NA, 0x71000, 18),
 	[87] = PINGROUP(87, EAST, pbs11, qdss_gpio11, NA, NA, NA, NA, NA, NA,
-			NA),
-	[88] = PINGROUP(88, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+			NA, 0x71000, 19),
+	[88] = PINGROUP(88, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			0x71000, 20),
 	[89] = PINGROUP(89, WEST, usb_phy, atest_bbrx0, NA, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0x71000, 19),
 	[90] = PINGROUP(90, EAST, mss_lte, pbs12, qdss_gpio12, NA, NA, NA, NA,
-			NA, NA),
+			NA, NA, 0, -1),
 	[91] = PINGROUP(91, EAST, mss_lte, pbs13, qdss_gpio13, NA, NA, NA, NA,
-			NA, NA),
-	[92] = PINGROUP(92, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[93] = PINGROUP(93, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+			NA, NA, 0x71000, 21),
+	[92] = PINGROUP(92, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[93] = PINGROUP(93, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			0x71000, 20),
 	[94] = PINGROUP(94, WEST, NA, qdss_gpio14, wlan1_adc0, NA, NA, NA, NA,
-			NA, NA),
+			NA, NA, 0x71000, 21),
 	[95] = PINGROUP(95, WEST, NAV_GPIO, GP_PDM0, qdss_gpio15, wlan1_adc1,
-			NA, NA, NA, NA, NA),
+			NA, NA, NA, NA, NA, 0x71000, 22),
 	[96] = PINGROUP(96, WEST, qup4, NAV_GPIO, mdp_vsync, GP_PDM1, sd_write,
-			JITTER_BIST, qdss_cti, qdss_cti, NA),
+			JITTER_BIST, qdss_cti, qdss_cti, NA, 0x71000, 23),
 	[97] = PINGROUP(97, WEST, qup4, NAV_GPIO, mdp_vsync, GP_PDM2,
-			JITTER_BIST, qdss_cti, qdss_cti, NA, NA),
-	[98] = PINGROUP(98, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[99] = PINGROUP(99, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+			JITTER_BIST, qdss_cti, qdss_cti, NA, NA, 0x71000, 24),
+	[98] = PINGROUP(98, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[99] = PINGROUP(99, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			0x71000, 6),
 	[100] = PINGROUP(100, SOUTH, atest_gpsadc_dtest0_native, NA, NA, NA,
-			 NA, NA, NA, NA, NA),
+			 NA, NA, NA, NA, NA, 0, -1),
 	[101] = PINGROUP(101, SOUTH, atest_gpsadc_dtest1_native, NA, NA, NA,
-			 NA, NA, NA, NA, NA),
+			 NA, NA, NA, NA, NA, 0, -1),
 	[102] = PINGROUP(102, SOUTH, NA, phase_flag28, dac_calib22, ddr_pxi2,
-			 NA, NA, NA, NA, NA),
+			 NA, NA, NA, NA, NA, 0x71000, 7),
 	[103] = PINGROUP(103, SOUTH, NA, phase_flag29, dac_calib23, ddr_pxi2,
-			 NA, NA, NA, NA, NA),
+			 NA, NA, NA, NA, NA, 0x71000, 8),
 	[104] = PINGROUP(104, SOUTH, NA, phase_flag30, qdss_gpio1, dac_calib24,
-			 ddr_pxi3, NA, NA, NA, NA),
+			 ddr_pxi3, NA, NA, NA, NA, 0x71000, 9),
 	[105] = PINGROUP(105, SOUTH, NA, phase_flag31, qdss_gpio, dac_calib25,
-			 ddr_pxi3, NA, NA, NA, NA),
+			 ddr_pxi3, NA, NA, NA, NA, 0x71000, 10),
 	[106] = PINGROUP(106, SOUTH, NAV_GPIO, GCC_GP3, qdss_gpio, NA, NA, NA,
-			 NA, NA, NA),
+			 NA, NA, NA, 0x71000, 11),
 	[107] = PINGROUP(107, SOUTH, NAV_GPIO, GCC_GP2, qdss_gpio0, NA, NA, NA,
-			 NA, NA, NA),
-	[108] = PINGROUP(108, SOUTH, NAV_GPIO, NA, NA, NA, NA, NA, NA, NA, NA),
+			 NA, NA, NA, 0x71000, 12),
+	[108] = PINGROUP(108, SOUTH, NAV_GPIO, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0, -1),
 	[109] = PINGROUP(109, SOUTH, NA, qdss_gpio2, NA, NA, NA, NA, NA, NA,
-			 NA),
+			 NA, 0x71000, 13),
 	[110] = PINGROUP(110, SOUTH, NA, qdss_gpio3, NA, NA, NA, NA, NA, NA,
-			 NA),
-	[111] = PINGROUP(111, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[112] = PINGROUP(112, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+			 NA, 0, -1),
+	[111] = PINGROUP(111, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[112] = PINGROUP(112, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0x71000, 14),
 	[113] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x175000, 15, 0),
 	[114] = SDC_QDSD_PINGROUP(sdc1_clk, 0x175000, 13, 6),
 	[115] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x175000, 11, 3),
 	[116] = SDC_QDSD_PINGROUP(sdc1_data, 0x175000, 9, 0),
-	[117] = SDC_QDSD_PINGROUP(sdc2_clk, 0x173000, 14, 6),
-	[118] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x173000, 11, 3),
-	[119] = SDC_QDSD_PINGROUP(sdc2_data, 0x173000, 9, 0),
-	[120] = UFS_RESET(ufs_reset, 0x177000),
+	[117] = SDC_QDSD_PINGROUP(sdc2_clk, 0x573000, 14, 6),
+	[118] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x573000, 11, 3),
+	[119] = SDC_QDSD_PINGROUP(sdc2_data, 0x573000, 9, 0),
+	[120] = UFS_RESET(ufs_reset, 0x178000),
 };
 
 static const struct msm_pinctrl_soc_data bengal_pinctrl = {
diff --git a/drivers/pinctrl/qcom/pinctrl-lagoon.c b/drivers/pinctrl/qcom/pinctrl-lagoon.c
new file mode 100644
index 0000000..5e07dcb0
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-lagoon.c
@@ -0,0 +1,1654 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname)			                \
+	[msm_mux_##fname] = {		                \
+		.name = #fname,				\
+		.groups = fname##_groups,               \
+		.ngroups = ARRAY_SIZE(fname##_groups),	\
+	}
+
+#define REG_BASE 0x0
+#define REG_SIZE 0x1000
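+
+/*
+ * Each TLMM GPIO owns a REG_SIZE (0x1000) register window; the ctl/io/intr
+ * offsets in PINGROUP() below are fixed positions inside that window.
+ * wake_reg/wake_bit locate the pin's wakeup-interrupt mux bit; pins with
+ * no wakeup mapping pass (0, -1), following the same convention as the
+ * bengal driver above.
+ */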
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9, wake_off, bit)	\
+	{					        \
+		.name = "gpio" #id,			\
+		.pins = gpio##id##_pins,		\
+		.npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins),	\
+		.funcs = (int[]){			\
+			msm_mux_gpio, /* gpio mode */	\
+			msm_mux_##f1,			\
+			msm_mux_##f2,			\
+			msm_mux_##f3,			\
+			msm_mux_##f4,			\
+			msm_mux_##f5,			\
+			msm_mux_##f6,			\
+			msm_mux_##f7,			\
+			msm_mux_##f8,			\
+			msm_mux_##f9			\
+		},				        \
+		.nfuncs = 10,				\
+		.ctl_reg = REG_BASE + REG_SIZE * id,			\
+		.io_reg = REG_BASE + 0x4 + REG_SIZE * id,		\
+		.intr_cfg_reg = REG_BASE + 0x8 + REG_SIZE * id,		\
+		.intr_status_reg = REG_BASE + 0xc + REG_SIZE * id,	\
+		.intr_target_reg = REG_BASE + 0x8 + REG_SIZE * id,	\
+		.mux_bit = 2,			\
+		.pull_bit = 0,			\
+		.drv_bit = 6,			\
+		.egpio_enable = 12,		\
+		.egpio_present = 11,		\
+		.oe_bit = 9,			\
+		.in_bit = 0,			\
+		.out_bit = 1,			\
+		.intr_enable_bit = 0,		\
+		.intr_status_bit = 0,		\
+		.intr_target_bit = 5,		\
+		.intr_target_kpss_val = 3,	\
+		.intr_raw_status_bit = 4,	\
+		.intr_polarity_bit = 1,		\
+		.intr_detection_bit = 2,	\
+		.intr_detection_width = 2,	\
+		.wake_reg = REG_BASE + wake_off,	\
+		.wake_bit = bit,		\
+	}
+
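+/*
+ * SDC and UFS_RESET pads live outside the per-GPIO tiles at fixed ctl
+ * addresses.  SDC groups are pinconf-only (pull and drive strength);
+ * UFS_RESET additionally maps an IO register and output bit so the reset
+ * line can be driven.  Every other bitfield is -1, i.e. not present.
+ */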
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv)	\
+	{					        \
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned int)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = ctl,				\
+		.io_reg = 0,				\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = pull,			\
+		.drv_bit = drv,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = -1,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
+
+#define UFS_RESET(pg_name, offset)				\
+	{					        \
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned int)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = offset,			\
+		.io_reg = offset + 0x4,			\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = 3,				\
+		.drv_bit = 0,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = 0,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
+
+static const struct pinctrl_pin_desc lagoon_pins[] = {
+	PINCTRL_PIN(0, "GPIO_0"),
+	PINCTRL_PIN(1, "GPIO_1"),
+	PINCTRL_PIN(2, "GPIO_2"),
+	PINCTRL_PIN(3, "GPIO_3"),
+	PINCTRL_PIN(4, "GPIO_4"),
+	PINCTRL_PIN(5, "GPIO_5"),
+	PINCTRL_PIN(6, "GPIO_6"),
+	PINCTRL_PIN(7, "GPIO_7"),
+	PINCTRL_PIN(8, "GPIO_8"),
+	PINCTRL_PIN(9, "GPIO_9"),
+	PINCTRL_PIN(10, "GPIO_10"),
+	PINCTRL_PIN(11, "GPIO_11"),
+	PINCTRL_PIN(12, "GPIO_12"),
+	PINCTRL_PIN(13, "GPIO_13"),
+	PINCTRL_PIN(14, "GPIO_14"),
+	PINCTRL_PIN(15, "GPIO_15"),
+	PINCTRL_PIN(16, "GPIO_16"),
+	PINCTRL_PIN(17, "GPIO_17"),
+	PINCTRL_PIN(18, "GPIO_18"),
+	PINCTRL_PIN(19, "GPIO_19"),
+	PINCTRL_PIN(20, "GPIO_20"),
+	PINCTRL_PIN(21, "GPIO_21"),
+	PINCTRL_PIN(22, "GPIO_22"),
+	PINCTRL_PIN(23, "GPIO_23"),
+	PINCTRL_PIN(24, "GPIO_24"),
+	PINCTRL_PIN(25, "GPIO_25"),
+	PINCTRL_PIN(26, "GPIO_26"),
+	PINCTRL_PIN(27, "GPIO_27"),
+	PINCTRL_PIN(28, "GPIO_28"),
+	PINCTRL_PIN(29, "GPIO_29"),
+	PINCTRL_PIN(30, "GPIO_30"),
+	PINCTRL_PIN(31, "GPIO_31"),
+	PINCTRL_PIN(32, "GPIO_32"),
+	PINCTRL_PIN(33, "GPIO_33"),
+	PINCTRL_PIN(34, "GPIO_34"),
+	PINCTRL_PIN(35, "GPIO_35"),
+	PINCTRL_PIN(36, "GPIO_36"),
+	PINCTRL_PIN(37, "GPIO_37"),
+	PINCTRL_PIN(38, "GPIO_38"),
+	PINCTRL_PIN(39, "GPIO_39"),
+	PINCTRL_PIN(40, "GPIO_40"),
+	PINCTRL_PIN(41, "GPIO_41"),
+	PINCTRL_PIN(42, "GPIO_42"),
+	PINCTRL_PIN(43, "GPIO_43"),
+	PINCTRL_PIN(44, "GPIO_44"),
+	PINCTRL_PIN(45, "GPIO_45"),
+	PINCTRL_PIN(46, "GPIO_46"),
+	PINCTRL_PIN(47, "GPIO_47"),
+	PINCTRL_PIN(48, "GPIO_48"),
+	PINCTRL_PIN(49, "GPIO_49"),
+	PINCTRL_PIN(50, "GPIO_50"),
+	PINCTRL_PIN(51, "GPIO_51"),
+	PINCTRL_PIN(52, "GPIO_52"),
+	PINCTRL_PIN(53, "GPIO_53"),
+	PINCTRL_PIN(54, "GPIO_54"),
+	PINCTRL_PIN(55, "GPIO_55"),
+	PINCTRL_PIN(56, "GPIO_56"),
+	PINCTRL_PIN(57, "GPIO_57"),
+	PINCTRL_PIN(58, "GPIO_58"),
+	PINCTRL_PIN(59, "GPIO_59"),
+	PINCTRL_PIN(60, "GPIO_60"),
+	PINCTRL_PIN(61, "GPIO_61"),
+	PINCTRL_PIN(62, "GPIO_62"),
+	PINCTRL_PIN(63, "GPIO_63"),
+	PINCTRL_PIN(64, "GPIO_64"),
+	PINCTRL_PIN(65, "GPIO_65"),
+	PINCTRL_PIN(66, "GPIO_66"),
+	PINCTRL_PIN(67, "GPIO_67"),
+	PINCTRL_PIN(68, "GPIO_68"),
+	PINCTRL_PIN(69, "GPIO_69"),
+	PINCTRL_PIN(70, "GPIO_70"),
+	PINCTRL_PIN(71, "GPIO_71"),
+	PINCTRL_PIN(72, "GPIO_72"),
+	PINCTRL_PIN(73, "GPIO_73"),
+	PINCTRL_PIN(74, "GPIO_74"),
+	PINCTRL_PIN(75, "GPIO_75"),
+	PINCTRL_PIN(76, "GPIO_76"),
+	PINCTRL_PIN(77, "GPIO_77"),
+	PINCTRL_PIN(78, "GPIO_78"),
+	PINCTRL_PIN(79, "GPIO_79"),
+	PINCTRL_PIN(80, "GPIO_80"),
+	PINCTRL_PIN(81, "GPIO_81"),
+	PINCTRL_PIN(82, "GPIO_82"),
+	PINCTRL_PIN(83, "GPIO_83"),
+	PINCTRL_PIN(84, "GPIO_84"),
+	PINCTRL_PIN(85, "GPIO_85"),
+	PINCTRL_PIN(86, "GPIO_86"),
+	PINCTRL_PIN(87, "GPIO_87"),
+	PINCTRL_PIN(88, "GPIO_88"),
+	PINCTRL_PIN(89, "GPIO_89"),
+	PINCTRL_PIN(90, "GPIO_90"),
+	PINCTRL_PIN(91, "GPIO_91"),
+	PINCTRL_PIN(92, "GPIO_92"),
+	PINCTRL_PIN(93, "GPIO_93"),
+	PINCTRL_PIN(94, "GPIO_94"),
+	PINCTRL_PIN(95, "GPIO_95"),
+	PINCTRL_PIN(96, "GPIO_96"),
+	PINCTRL_PIN(97, "GPIO_97"),
+	PINCTRL_PIN(98, "GPIO_98"),
+	PINCTRL_PIN(99, "GPIO_99"),
+	PINCTRL_PIN(100, "GPIO_100"),
+	PINCTRL_PIN(101, "GPIO_101"),
+	PINCTRL_PIN(102, "GPIO_102"),
+	PINCTRL_PIN(103, "GPIO_103"),
+	PINCTRL_PIN(104, "GPIO_104"),
+	PINCTRL_PIN(105, "GPIO_105"),
+	PINCTRL_PIN(106, "GPIO_106"),
+	PINCTRL_PIN(107, "GPIO_107"),
+	PINCTRL_PIN(108, "GPIO_108"),
+	PINCTRL_PIN(109, "GPIO_109"),
+	PINCTRL_PIN(110, "GPIO_110"),
+	PINCTRL_PIN(111, "GPIO_111"),
+	PINCTRL_PIN(112, "GPIO_112"),
+	PINCTRL_PIN(113, "GPIO_113"),
+	PINCTRL_PIN(114, "GPIO_114"),
+	PINCTRL_PIN(115, "GPIO_115"),
+	PINCTRL_PIN(116, "GPIO_116"),
+	PINCTRL_PIN(117, "GPIO_117"),
+	PINCTRL_PIN(118, "GPIO_118"),
+	PINCTRL_PIN(119, "GPIO_119"),
+	PINCTRL_PIN(120, "GPIO_120"),
+	PINCTRL_PIN(121, "GPIO_121"),
+	PINCTRL_PIN(122, "GPIO_122"),
+	PINCTRL_PIN(123, "GPIO_123"),
+	PINCTRL_PIN(124, "GPIO_124"),
+	PINCTRL_PIN(125, "GPIO_125"),
+	PINCTRL_PIN(126, "GPIO_126"),
+	PINCTRL_PIN(127, "GPIO_127"),
+	PINCTRL_PIN(128, "GPIO_128"),
+	PINCTRL_PIN(129, "GPIO_129"),
+	PINCTRL_PIN(130, "GPIO_130"),
+	PINCTRL_PIN(131, "GPIO_131"),
+	PINCTRL_PIN(132, "GPIO_132"),
+	PINCTRL_PIN(133, "GPIO_133"),
+	PINCTRL_PIN(134, "GPIO_134"),
+	PINCTRL_PIN(135, "GPIO_135"),
+	PINCTRL_PIN(136, "GPIO_136"),
+	PINCTRL_PIN(137, "GPIO_137"),
+	PINCTRL_PIN(138, "GPIO_138"),
+	PINCTRL_PIN(139, "GPIO_139"),
+	PINCTRL_PIN(140, "GPIO_140"),
+	PINCTRL_PIN(141, "GPIO_141"),
+	PINCTRL_PIN(142, "GPIO_142"),
+	PINCTRL_PIN(143, "GPIO_143"),
+	PINCTRL_PIN(144, "GPIO_144"),
+	PINCTRL_PIN(145, "GPIO_145"),
+	PINCTRL_PIN(146, "GPIO_146"),
+	PINCTRL_PIN(147, "GPIO_147"),
+	PINCTRL_PIN(148, "GPIO_148"),
+	PINCTRL_PIN(149, "GPIO_149"),
+	PINCTRL_PIN(150, "GPIO_150"),
+	PINCTRL_PIN(151, "GPIO_151"),
+	PINCTRL_PIN(152, "GPIO_152"),
+	PINCTRL_PIN(153, "GPIO_153"),
+	PINCTRL_PIN(154, "GPIO_154"),
+	PINCTRL_PIN(155, "GPIO_155"),
+	PINCTRL_PIN(156, "SDC1_RCLK"),
+	PINCTRL_PIN(157, "SDC1_CLK"),
+	PINCTRL_PIN(158, "SDC1_CMD"),
+	PINCTRL_PIN(159, "SDC1_DATA"),
+	PINCTRL_PIN(160, "SDC2_CLK"),
+	PINCTRL_PIN(161, "SDC2_CMD"),
+	PINCTRL_PIN(162, "SDC2_DATA"),
+	PINCTRL_PIN(163, "UFS_RESET"),
+};
+
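+/* Each gpioN_pins[] array holds the single pin index backing group "gpioN". */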
+#define DECLARE_MSM_GPIO_PINS(pin) \
+	static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+
+static const unsigned int sdc1_rclk_pins[] = { 156 };
+static const unsigned int sdc1_clk_pins[] = { 157 };
+static const unsigned int sdc1_cmd_pins[] = { 158 };
+static const unsigned int sdc1_data_pins[] = { 159 };
+static const unsigned int sdc2_clk_pins[] = { 160 };
+static const unsigned int sdc2_cmd_pins[] = { 161 };
+static const unsigned int sdc2_data_pins[] = { 162 };
+static const unsigned int ufs_reset_pins[] = { 163 };
+
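+/* msm_mux_NA fills the unused function slots in the PINGROUP() entries. */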
+enum lagoon_functions {
+	msm_mux_ibi_i3c,
+	msm_mux_gpio,
+	msm_mux_cri_trng,
+	msm_mux_qup00,
+	msm_mux_CCI_I2C,
+	msm_mux_qdss_cti,
+	msm_mux_sp_cmu,
+	msm_mux_dbg_out,
+	msm_mux_qup14,
+	msm_mux_sdc1_tb,
+	msm_mux_sdc2_tb,
+	msm_mux_MDP_VSYNC,
+	msm_mux_GP_PDM1,
+	msm_mux_qdss_gpio,
+	msm_mux_m_voc,
+	msm_mux_dp_hot,
+	msm_mux_phase_flag0,
+	msm_mux_qup10,
+	msm_mux_pll_bypassnl,
+	msm_mux_pll_reset,
+	msm_mux_phase_flag1,
+	msm_mux_phase_flag2,
+	msm_mux_qup12,
+	msm_mux_ddr_bist,
+	msm_mux_gcc_gp2,
+	msm_mux_gcc_gp3,
+	msm_mux_edp_lcd,
+	msm_mux_qup13,
+	msm_mux_qup11,
+	msm_mux_PLL_BIST,
+	msm_mux_qdss_gpio14,
+	msm_mux_qdss_gpio15,
+	msm_mux_CAM_MCLK0,
+	msm_mux_CAM_MCLK1,
+	msm_mux_CAM_MCLK2,
+	msm_mux_CAM_MCLK3,
+	msm_mux_CAM_MCLK4,
+	msm_mux_cci_timer0,
+	msm_mux_phase_flag3,
+	msm_mux_qdss_gpio12,
+	msm_mux_cci_timer1,
+	msm_mux_CCI_ASYNC,
+	msm_mux_phase_flag4,
+	msm_mux_qdss_gpio13,
+	msm_mux_cci_timer2,
+	msm_mux_phase_flag5,
+	msm_mux_cci_timer3,
+	msm_mux_GP_PDM0,
+	msm_mux_phase_flag6,
+	msm_mux_cci_timer4,
+	msm_mux_phase_flag7,
+	msm_mux_qdss_gpio2,
+	msm_mux_phase_flag8,
+	msm_mux_qdss_gpio0,
+	msm_mux_phase_flag9,
+	msm_mux_qdss_gpio1,
+	msm_mux_phase_flag10,
+	msm_mux_phase_flag11,
+	msm_mux_qdss_gpio3,
+	msm_mux_phase_flag12,
+	msm_mux_qdss_gpio4,
+	msm_mux_phase_flag13,
+	msm_mux_qdss_gpio5,
+	msm_mux_qup02,
+	msm_mux_phase_flag14,
+	msm_mux_qdss_gpio6,
+	msm_mux_phase_flag15,
+	msm_mux_qdss_gpio7,
+	msm_mux_mdp_vsync0,
+	msm_mux_phase_flag16,
+	msm_mux_mdp_vsync1,
+	msm_mux_gcc_gp1,
+	msm_mux_phase_flag17,
+	msm_mux_qdss_gpio8,
+	msm_mux_vfr_1,
+	msm_mux_phase_flag18,
+	msm_mux_qdss_gpio9,
+	msm_mux_phase_flag19,
+	msm_mux_qdss_gpio10,
+	msm_mux_phase_flag20,
+	msm_mux_qdss_gpio11,
+	msm_mux_phase_flag21,
+	msm_mux_phase_flag22,
+	msm_mux_mdp_vsync2,
+	msm_mux_phase_flag23,
+	msm_mux_mdp_vsync3,
+	msm_mux_GP_PDM2,
+	msm_mux_phase_flag24,
+	msm_mux_audio_ref,
+	msm_mux_lpass_ext,
+	msm_mux_mi2s_2,
+	msm_mux_phase_flag25,
+	msm_mux_qup01,
+	msm_mux_tgu_ch0,
+	msm_mux_phase_flag26,
+	msm_mux_tgu_ch1,
+	msm_mux_phase_flag27,
+	msm_mux_tgu_ch2,
+	msm_mux_phase_flag28,
+	msm_mux_tgu_ch3,
+	msm_mux_phase_flag29,
+	msm_mux_MSS_LTE,
+	msm_mux_btfm_slimbus,
+	msm_mux_mi2s_1,
+	msm_mux_phase_flag30,
+	msm_mux_phase_flag31,
+	msm_mux_UIM2_DATA,
+	msm_mux_UIM2_CLK,
+	msm_mux_UIM2_RESET,
+	msm_mux_UIM2_PRESENT,
+	msm_mux_UIM1_DATA,
+	msm_mux_UIM1_CLK,
+	msm_mux_UIM1_RESET,
+	msm_mux_UIM1_PRESENT,
+	msm_mux_atest_usb1,
+	msm_mux_atest_usb10,
+	msm_mux_sd_write,
+	msm_mux_atest_usb11,
+	msm_mux_atest_usb12,
+	msm_mux_ddr_pxi0,
+	msm_mux_adsp_ext,
+	msm_mux_atest_usb13,
+	msm_mux_ddr_pxi1,
+	msm_mux_mi2s_0,
+	msm_mux_atest_usb2,
+	msm_mux_ddr_pxi2,
+	msm_mux_tsense_pwm1,
+	msm_mux_tsense_pwm2,
+	msm_mux_AGERA_PLL,
+	msm_mux_vsense_trigger,
+	msm_mux_atest_usb20,
+	msm_mux_ddr_pxi3,
+	msm_mux_JITTER_BIST,
+	msm_mux_wlan1_adc0,
+	msm_mux_atest_usb21,
+	msm_mux_wlan2_adc0,
+	msm_mux_atest_usb22,
+	msm_mux_atest_tsens,
+	msm_mux_wlan1_adc1,
+	msm_mux_atest_usb23,
+	msm_mux_MCLK,
+	msm_mux_atest_tsens2,
+	msm_mux_wlan2_adc1,
+	msm_mux_ldo_en,
+	msm_mux_atest_char,
+	msm_mux_ldo_update,
+	msm_mux_atest_char0,
+	msm_mux_prng_rosc,
+	msm_mux_atest_char1,
+	msm_mux_atest_char2,
+	msm_mux_atest_char3,
+	msm_mux_NAV_GPIO,
+	msm_mux_NAV_PPS,
+	msm_mux_GPS_TX,
+	msm_mux_QLINK0_WMSS,
+	msm_mux_QLINK0_REQUEST,
+	msm_mux_QLINK0_ENABLE,
+	msm_mux_QLINK1_WMSS,
+	msm_mux_QLINK1_REQUEST,
+	msm_mux_QLINK1_ENABLE,
+	msm_mux_RFFE0_DATA,
+	msm_mux_RFFE0_CLK,
+	msm_mux_RFFE1_DATA,
+	msm_mux_RFFE1_CLK,
+	msm_mux_RFFE2_DATA,
+	msm_mux_RFFE2_CLK,
+	msm_mux_RFFE3_DATA,
+	msm_mux_RFFE3_CLK,
+	msm_mux_RFFE4_DATA,
+	msm_mux_RFFE4_CLK,
+	msm_mux_pa_indicator,
+	msm_mux_PCIE0_CLK,
+	msm_mux_USB_PHY,
+	msm_mux_NA,
+};
+
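+/*
+ * Each <function>_groups[] array lists every pin group that can mux to
+ * that function; the msm_function table further below ties the function
+ * name to its group list via FUNCTION().
+ */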
+static const char * const ibi_i3c_groups[] = {
+	"gpio0", "gpio1",
+};
+static const char * const gpio_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+	"gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+	"gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+	"gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+	"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+	"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+	"gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+	"gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+	"gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+	"gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+	"gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+	"gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+	"gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+	"gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+	"gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+	"gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+	"gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+	"gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+	"gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+	"gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+	"gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+	"gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+	"gpio147", "gpio148", "gpio149", "gpio150", "gpio151", "gpio152",
+	"gpio153", "gpio154", "gpio155",
+};
+static const char * const cri_trng_groups[] = {
+	"gpio0", "gpio1", "gpio2",
+};
+static const char * const qup00_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const CCI_I2C_groups[] = {
+	"gpio2", "gpio3", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+	"gpio44",
+};
+static const char * const qdss_cti_groups[] = {
+	"gpio2", "gpio3", "gpio6", "gpio7", "gpio61", "gpio62", "gpio86",
+	"gpio87",
+};
+static const char * const sp_cmu_groups[] = {
+	"gpio3",
+};
+static const char * const dbg_out_groups[] = {
+	"gpio3",
+};
+static const char * const qup14_groups[] = {
+	"gpio4", "gpio4", "gpio5", "gpio5",
+};
+static const char * const sdc1_tb_groups[] = {
+	"gpio4",
+};
+static const char * const sdc2_tb_groups[] = {
+	"gpio5",
+};
+static const char * const MDP_VSYNC_groups[] = {
+	"gpio6", "gpio23", "gpio24", "gpio27", "gpio28",
+};
+static const char * const GP_PDM1_groups[] = {
+	"gpio8", "gpio52",
+};
+static const char * const qdss_gpio_groups[] = {
+	"gpio8", "gpio9", "gpio63", "gpio64",
+};
+static const char * const m_voc_groups[] = {
+	"gpio12",
+};
+static const char * const dp_hot_groups[] = {
+	"gpio12", "gpio118",
+};
+static const char * const phase_flag0_groups[] = {
+	"gpio12",
+};
+static const char * const qup10_groups[] = {
+	"gpio13", "gpio14", "gpio15", "gpio16", "gpio17",
+};
+static const char * const pll_bypassnl_groups[] = {
+	"gpio13",
+};
+static const char * const pll_reset_groups[] = {
+	"gpio14",
+};
+static const char * const phase_flag1_groups[] = {
+	"gpio17",
+};
+static const char * const phase_flag2_groups[] = {
+	"gpio18",
+};
+static const char * const qup12_groups[] = {
+	"gpio19", "gpio19", "gpio20", "gpio20",
+};
+static const char * const ddr_bist_groups[] = {
+	"gpio19", "gpio20", "gpio21", "gpio22",
+};
+static const char * const gcc_gp2_groups[] = {
+	"gpio21",
+};
+static const char * const gcc_gp3_groups[] = {
+	"gpio22",
+};
+static const char * const edp_lcd_groups[] = {
+	"gpio23",
+};
+static const char * const qup13_groups[] = {
+	"gpio25", "gpio25", "gpio26", "gpio26",
+};
+static const char * const qup11_groups[] = {
+	"gpio27", "gpio27", "gpio28", "gpio28",
+};
+static const char * const PLL_BIST_groups[] = {
+	"gpio27",
+};
+static const char * const qdss_gpio14_groups[] = {
+	"gpio27", "gpio36",
+};
+static const char * const qdss_gpio15_groups[] = {
+	"gpio28", "gpio37",
+};
+static const char * const CAM_MCLK0_groups[] = {
+	"gpio29",
+};
+static const char * const CAM_MCLK1_groups[] = {
+	"gpio30",
+};
+static const char * const CAM_MCLK2_groups[] = {
+	"gpio31",
+};
+static const char * const CAM_MCLK3_groups[] = {
+	"gpio32",
+};
+static const char * const CAM_MCLK4_groups[] = {
+	"gpio33",
+};
+static const char * const cci_timer0_groups[] = {
+	"gpio34",
+};
+static const char * const phase_flag3_groups[] = {
+	"gpio34",
+};
+static const char * const qdss_gpio12_groups[] = {
+	"gpio34", "gpio52",
+};
+static const char * const cci_timer1_groups[] = {
+	"gpio35",
+};
+static const char * const CCI_ASYNC_groups[] = {
+	"gpio35", "gpio36", "gpio48", "gpio52", "gpio53",
+};
+static const char * const phase_flag4_groups[] = {
+	"gpio35",
+};
+static const char * const qdss_gpio13_groups[] = {
+	"gpio35", "gpio53",
+};
+static const char * const cci_timer2_groups[] = {
+	"gpio36",
+};
+static const char * const phase_flag5_groups[] = {
+	"gpio36",
+};
+static const char * const cci_timer3_groups[] = {
+	"gpio37",
+};
+static const char * const GP_PDM0_groups[] = {
+	"gpio37", "gpio68",
+};
+static const char * const phase_flag6_groups[] = {
+	"gpio37",
+};
+static const char * const cci_timer4_groups[] = {
+	"gpio38",
+};
+static const char * const phase_flag7_groups[] = {
+	"gpio38",
+};
+static const char * const qdss_gpio2_groups[] = {
+	"gpio38", "gpio41",
+};
+static const char * const phase_flag8_groups[] = {
+	"gpio39",
+};
+static const char * const qdss_gpio0_groups[] = {
+	"gpio39", "gpio65",
+};
+static const char * const phase_flag9_groups[] = {
+	"gpio40",
+};
+static const char * const qdss_gpio1_groups[] = {
+	"gpio40", "gpio66",
+};
+static const char * const phase_flag10_groups[] = {
+	"gpio41",
+};
+static const char * const phase_flag11_groups[] = {
+	"gpio42",
+};
+static const char * const qdss_gpio3_groups[] = {
+	"gpio42", "gpio47",
+};
+static const char * const phase_flag12_groups[] = {
+	"gpio43",
+};
+static const char * const qdss_gpio4_groups[] = {
+	"gpio43", "gpio88",
+};
+static const char * const phase_flag13_groups[] = {
+	"gpio44",
+};
+static const char * const qdss_gpio5_groups[] = {
+	"gpio44", "gpio89",
+};
+static const char * const qup02_groups[] = {
+	"gpio45", "gpio46", "gpio48", "gpio56", "gpio57",
+};
+static const char * const phase_flag14_groups[] = {
+	"gpio45",
+};
+static const char * const qdss_gpio6_groups[] = {
+	"gpio45", "gpio90",
+};
+static const char * const phase_flag15_groups[] = {
+	"gpio46",
+};
+static const char * const qdss_gpio7_groups[] = {
+	"gpio46", "gpio91",
+};
+static const char * const mdp_vsync0_groups[] = {
+	"gpio47",
+};
+static const char * const phase_flag16_groups[] = {
+	"gpio47",
+};
+static const char * const mdp_vsync1_groups[] = {
+	"gpio48",
+};
+static const char * const gcc_gp1_groups[] = {
+	"gpio48", "gpio58",
+};
+static const char * const phase_flag17_groups[] = {
+	"gpio48",
+};
+static const char * const qdss_gpio8_groups[] = {
+	"gpio48", "gpio92",
+};
+static const char * const vfr_1_groups[] = {
+	"gpio49",
+};
+static const char * const phase_flag18_groups[] = {
+	"gpio49",
+};
+static const char * const qdss_gpio9_groups[] = {
+	"gpio49", "gpio93",
+};
+static const char * const phase_flag19_groups[] = {
+	"gpio50",
+};
+static const char * const qdss_gpio10_groups[] = {
+	"gpio50", "gpio56",
+};
+static const char * const phase_flag20_groups[] = {
+	"gpio51",
+};
+static const char * const qdss_gpio11_groups[] = {
+	"gpio51", "gpio57",
+};
+static const char * const phase_flag21_groups[] = {
+	"gpio52",
+};
+static const char * const phase_flag22_groups[] = {
+	"gpio53",
+};
+static const char * const mdp_vsync2_groups[] = {
+	"gpio56",
+};
+static const char * const phase_flag23_groups[] = {
+	"gpio56",
+};
+static const char * const mdp_vsync3_groups[] = {
+	"gpio57",
+};
+static const char * const GP_PDM2_groups[] = {
+	"gpio57",
+};
+static const char * const phase_flag24_groups[] = {
+	"gpio57",
+};
+static const char * const audio_ref_groups[] = {
+	"gpio60",
+};
+static const char * const lpass_ext_groups[] = {
+	"gpio60", "gpio93",
+};
+static const char * const mi2s_2_groups[] = {
+	"gpio60", "gpio72", "gpio73", "gpio74",
+};
+static const char * const phase_flag25_groups[] = {
+	"gpio60",
+};
+static const char * const qup01_groups[] = {
+	"gpio61", "gpio62", "gpio63", "gpio64",
+};
+static const char * const tgu_ch0_groups[] = {
+	"gpio61",
+};
+static const char * const phase_flag26_groups[] = {
+	"gpio61",
+};
+static const char * const tgu_ch1_groups[] = {
+	"gpio62",
+};
+static const char * const phase_flag27_groups[] = {
+	"gpio62",
+};
+static const char * const tgu_ch2_groups[] = {
+	"gpio63",
+};
+static const char * const phase_flag28_groups[] = {
+	"gpio63",
+};
+static const char * const tgu_ch3_groups[] = {
+	"gpio64",
+};
+static const char * const phase_flag29_groups[] = {
+	"gpio64",
+};
+static const char * const MSS_LTE_groups[] = {
+	"gpio65", "gpio66",
+};
+static const char * const btfm_slimbus_groups[] = {
+	"gpio67", "gpio68", "gpio86", "gpio87",
+};
+static const char * const mi2s_1_groups[] = {
+	"gpio67", "gpio68", "gpio86", "gpio87",
+};
+static const char * const phase_flag30_groups[] = {
+	"gpio67",
+};
+static const char * const phase_flag31_groups[] = {
+	"gpio68",
+};
+static const char * const UIM2_DATA_groups[] = {
+	"gpio75",
+};
+static const char * const UIM2_CLK_groups[] = {
+	"gpio76",
+};
+static const char * const UIM2_RESET_groups[] = {
+	"gpio77",
+};
+static const char * const UIM2_PRESENT_groups[] = {
+	"gpio78",
+};
+static const char * const UIM1_DATA_groups[] = {
+	"gpio79",
+};
+static const char * const UIM1_CLK_groups[] = {
+	"gpio80",
+};
+static const char * const UIM1_RESET_groups[] = {
+	"gpio81",
+};
+static const char * const UIM1_PRESENT_groups[] = {
+	"gpio82",
+};
+static const char * const atest_usb1_groups[] = {
+	"gpio83",
+};
+static const char * const atest_usb10_groups[] = {
+	"gpio84",
+};
+static const char * const sd_write_groups[] = {
+	"gpio85",
+};
+static const char * const atest_usb11_groups[] = {
+	"gpio85",
+};
+static const char * const atest_usb12_groups[] = {
+	"gpio86",
+};
+static const char * const ddr_pxi0_groups[] = {
+	"gpio86", "gpio90",
+};
+static const char * const adsp_ext_groups[] = {
+	"gpio87",
+};
+static const char * const atest_usb13_groups[] = {
+	"gpio87",
+};
+static const char * const ddr_pxi1_groups[] = {
+	"gpio87", "gpio91",
+};
+static const char * const mi2s_0_groups[] = {
+	"gpio88", "gpio89", "gpio90", "gpio91",
+};
+static const char * const atest_usb2_groups[] = {
+	"gpio88",
+};
+static const char * const ddr_pxi2_groups[] = {
+	"gpio88", "gpio92",
+};
+static const char * const tsense_pwm1_groups[] = {
+	"gpio88",
+};
+static const char * const tsense_pwm2_groups[] = {
+	"gpio88",
+};
+static const char * const AGERA_PLL_groups[] = {
+	"gpio89",
+};
+static const char * const vsense_trigger_groups[] = {
+	"gpio89",
+};
+static const char * const atest_usb20_groups[] = {
+	"gpio89",
+};
+static const char * const ddr_pxi3_groups[] = {
+	"gpio89", "gpio93",
+};
+static const char * const JITTER_BIST_groups[] = {
+	"gpio90",
+};
+static const char * const wlan1_adc0_groups[] = {
+	"gpio90",
+};
+static const char * const atest_usb21_groups[] = {
+	"gpio90",
+};
+static const char * const wlan2_adc0_groups[] = {
+	"gpio91",
+};
+static const char * const atest_usb22_groups[] = {
+	"gpio91",
+};
+static const char * const atest_tsens_groups[] = {
+	"gpio92",
+};
+static const char * const wlan1_adc1_groups[] = {
+	"gpio92",
+};
+static const char * const atest_usb23_groups[] = {
+	"gpio92",
+};
+static const char * const MCLK_groups[] = {
+	"gpio93",
+};
+static const char * const atest_tsens2_groups[] = {
+	"gpio93",
+};
+static const char * const wlan2_adc1_groups[] = {
+	"gpio93",
+};
+static const char * const ldo_en_groups[] = {
+	"gpio95",
+};
+static const char * const atest_char_groups[] = {
+	"gpio95",
+};
+static const char * const ldo_update_groups[] = {
+	"gpio96",
+};
+static const char * const atest_char0_groups[] = {
+	"gpio96",
+};
+static const char * const prng_rosc_groups[] = {
+	"gpio97",
+};
+static const char * const atest_char1_groups[] = {
+	"gpio97",
+};
+static const char * const atest_char2_groups[] = {
+	"gpio98",
+};
+static const char * const atest_char3_groups[] = {
+	"gpio99",
+};
+static const char * const NAV_GPIO_groups[] = {
+	"gpio101", "gpio102",
+};
+static const char * const NAV_PPS_groups[] = {
+	"gpio101", "gpio101", "gpio102", "gpio102",
+};
+static const char * const GPS_TX_groups[] = {
+	"gpio101", "gpio102", "gpio107", "gpio108",
+};
+static const char * const QLINK0_WMSS_groups[] = {
+	"gpio103",
+};
+static const char * const QLINK0_REQUEST_groups[] = {
+	"gpio104",
+};
+static const char * const QLINK0_ENABLE_groups[] = {
+	"gpio105",
+};
+static const char * const QLINK1_WMSS_groups[] = {
+	"gpio106",
+};
+static const char * const QLINK1_REQUEST_groups[] = {
+	"gpio107",
+};
+static const char * const QLINK1_ENABLE_groups[] = {
+	"gpio108",
+};
+static const char * const RFFE0_DATA_groups[] = {
+	"gpio109",
+};
+static const char * const RFFE0_CLK_groups[] = {
+	"gpio110",
+};
+static const char * const RFFE1_DATA_groups[] = {
+	"gpio111",
+};
+static const char * const RFFE1_CLK_groups[] = {
+	"gpio112",
+};
+static const char * const RFFE2_DATA_groups[] = {
+	"gpio113",
+};
+static const char * const RFFE2_CLK_groups[] = {
+	"gpio114",
+};
+static const char * const RFFE3_DATA_groups[] = {
+	"gpio115",
+};
+static const char * const RFFE3_CLK_groups[] = {
+	"gpio116",
+};
+static const char * const RFFE4_DATA_groups[] = {
+	"gpio117",
+};
+static const char * const RFFE4_CLK_groups[] = {
+	"gpio118",
+};
+static const char * const pa_indicator_groups[] = {
+	"gpio118",
+};
+static const char * const PCIE0_CLK_groups[] = {
+	"gpio122",
+};
+static const char * const USB_PHY_groups[] = {
+	"gpio124",
+};
+
+static const struct msm_function lagoon_functions[] = {
+	FUNCTION(ibi_i3c),
+	FUNCTION(gpio),
+	FUNCTION(cri_trng),
+	FUNCTION(qup00),
+	FUNCTION(CCI_I2C),
+	FUNCTION(qdss_cti),
+	FUNCTION(sp_cmu),
+	FUNCTION(dbg_out),
+	FUNCTION(qup14),
+	FUNCTION(sdc1_tb),
+	FUNCTION(sdc2_tb),
+	FUNCTION(MDP_VSYNC),
+	FUNCTION(GP_PDM1),
+	FUNCTION(qdss_gpio),
+	FUNCTION(m_voc),
+	FUNCTION(dp_hot),
+	FUNCTION(phase_flag0),
+	FUNCTION(qup10),
+	FUNCTION(pll_bypassnl),
+	FUNCTION(pll_reset),
+	FUNCTION(phase_flag1),
+	FUNCTION(phase_flag2),
+	FUNCTION(qup12),
+	FUNCTION(ddr_bist),
+	FUNCTION(gcc_gp2),
+	FUNCTION(gcc_gp3),
+	FUNCTION(edp_lcd),
+	FUNCTION(qup13),
+	FUNCTION(qup11),
+	FUNCTION(PLL_BIST),
+	FUNCTION(qdss_gpio14),
+	FUNCTION(qdss_gpio15),
+	FUNCTION(CAM_MCLK0),
+	FUNCTION(CAM_MCLK1),
+	FUNCTION(CAM_MCLK2),
+	FUNCTION(CAM_MCLK3),
+	FUNCTION(CAM_MCLK4),
+	FUNCTION(cci_timer0),
+	FUNCTION(phase_flag3),
+	FUNCTION(qdss_gpio12),
+	FUNCTION(cci_timer1),
+	FUNCTION(CCI_ASYNC),
+	FUNCTION(phase_flag4),
+	FUNCTION(qdss_gpio13),
+	FUNCTION(cci_timer2),
+	FUNCTION(phase_flag5),
+	FUNCTION(cci_timer3),
+	FUNCTION(GP_PDM0),
+	FUNCTION(phase_flag6),
+	FUNCTION(cci_timer4),
+	FUNCTION(phase_flag7),
+	FUNCTION(qdss_gpio2),
+	FUNCTION(phase_flag8),
+	FUNCTION(qdss_gpio0),
+	FUNCTION(phase_flag9),
+	FUNCTION(qdss_gpio1),
+	FUNCTION(phase_flag10),
+	FUNCTION(phase_flag11),
+	FUNCTION(qdss_gpio3),
+	FUNCTION(phase_flag12),
+	FUNCTION(qdss_gpio4),
+	FUNCTION(phase_flag13),
+	FUNCTION(qdss_gpio5),
+	FUNCTION(qup02),
+	FUNCTION(phase_flag14),
+	FUNCTION(qdss_gpio6),
+	FUNCTION(phase_flag15),
+	FUNCTION(qdss_gpio7),
+	FUNCTION(mdp_vsync0),
+	FUNCTION(phase_flag16),
+	FUNCTION(mdp_vsync1),
+	FUNCTION(gcc_gp1),
+	FUNCTION(phase_flag17),
+	FUNCTION(qdss_gpio8),
+	FUNCTION(vfr_1),
+	FUNCTION(phase_flag18),
+	FUNCTION(qdss_gpio9),
+	FUNCTION(phase_flag19),
+	FUNCTION(qdss_gpio10),
+	FUNCTION(phase_flag20),
+	FUNCTION(qdss_gpio11),
+	FUNCTION(phase_flag21),
+	FUNCTION(phase_flag22),
+	FUNCTION(mdp_vsync2),
+	FUNCTION(phase_flag23),
+	FUNCTION(mdp_vsync3),
+	FUNCTION(GP_PDM2),
+	FUNCTION(phase_flag24),
+	FUNCTION(audio_ref),
+	FUNCTION(lpass_ext),
+	FUNCTION(mi2s_2),
+	FUNCTION(phase_flag25),
+	FUNCTION(qup01),
+	FUNCTION(tgu_ch0),
+	FUNCTION(phase_flag26),
+	FUNCTION(tgu_ch1),
+	FUNCTION(phase_flag27),
+	FUNCTION(tgu_ch2),
+	FUNCTION(phase_flag28),
+	FUNCTION(tgu_ch3),
+	FUNCTION(phase_flag29),
+	FUNCTION(MSS_LTE),
+	FUNCTION(btfm_slimbus),
+	FUNCTION(mi2s_1),
+	FUNCTION(phase_flag30),
+	FUNCTION(phase_flag31),
+	FUNCTION(UIM2_DATA),
+	FUNCTION(UIM2_CLK),
+	FUNCTION(UIM2_RESET),
+	FUNCTION(UIM2_PRESENT),
+	FUNCTION(UIM1_DATA),
+	FUNCTION(UIM1_CLK),
+	FUNCTION(UIM1_RESET),
+	FUNCTION(UIM1_PRESENT),
+	FUNCTION(atest_usb1),
+	FUNCTION(atest_usb10),
+	FUNCTION(sd_write),
+	FUNCTION(atest_usb11),
+	FUNCTION(atest_usb12),
+	FUNCTION(ddr_pxi0),
+	FUNCTION(adsp_ext),
+	FUNCTION(atest_usb13),
+	FUNCTION(ddr_pxi1),
+	FUNCTION(mi2s_0),
+	FUNCTION(atest_usb2),
+	FUNCTION(ddr_pxi2),
+	FUNCTION(tsense_pwm1),
+	FUNCTION(tsense_pwm2),
+	FUNCTION(AGERA_PLL),
+	FUNCTION(vsense_trigger),
+	FUNCTION(atest_usb20),
+	FUNCTION(ddr_pxi3),
+	FUNCTION(JITTER_BIST),
+	FUNCTION(wlan1_adc0),
+	FUNCTION(atest_usb21),
+	FUNCTION(wlan2_adc0),
+	FUNCTION(atest_usb22),
+	FUNCTION(atest_tsens),
+	FUNCTION(wlan1_adc1),
+	FUNCTION(atest_usb23),
+	FUNCTION(MCLK),
+	FUNCTION(atest_tsens2),
+	FUNCTION(wlan2_adc1),
+	FUNCTION(ldo_en),
+	FUNCTION(atest_char),
+	FUNCTION(ldo_update),
+	FUNCTION(atest_char0),
+	FUNCTION(prng_rosc),
+	FUNCTION(atest_char1),
+	FUNCTION(atest_char2),
+	FUNCTION(atest_char3),
+	FUNCTION(NAV_GPIO),
+	FUNCTION(NAV_PPS),
+	FUNCTION(GPS_TX),
+	FUNCTION(QLINK0_WMSS),
+	FUNCTION(QLINK0_REQUEST),
+	FUNCTION(QLINK0_ENABLE),
+	FUNCTION(QLINK1_WMSS),
+	FUNCTION(QLINK1_REQUEST),
+	FUNCTION(QLINK1_ENABLE),
+	FUNCTION(RFFE0_DATA),
+	FUNCTION(RFFE0_CLK),
+	FUNCTION(RFFE1_DATA),
+	FUNCTION(RFFE1_CLK),
+	FUNCTION(RFFE2_DATA),
+	FUNCTION(RFFE2_CLK),
+	FUNCTION(RFFE3_DATA),
+	FUNCTION(RFFE3_CLK),
+	FUNCTION(RFFE4_DATA),
+	FUNCTION(RFFE4_CLK),
+	FUNCTION(pa_indicator),
+	FUNCTION(PCIE0_CLK),
+	FUNCTION(USB_PHY),
+};
+
+/*
+ * Every pin is maintained as a single group; any missing or non-existent
+ * pin is kept as a dummy group so that the pin group index stays in sync
+ * with the pin descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
+static const struct msm_pingroup lagoon_groups[] = {
+	[0] = PINGROUP(0, ibi_i3c, qup00, cri_trng, NA, NA, NA, NA, NA, NA,
+		       0x9C014, 0),
+	[1] = PINGROUP(1, ibi_i3c, qup00, cri_trng, NA, NA, NA, NA, NA, NA,
+		       0, -1),
+	[2] = PINGROUP(2, qup00, CCI_I2C, cri_trng, qdss_cti, NA, NA, NA, NA,
+		       NA, 0, -1),
+	[3] = PINGROUP(3, qup00, CCI_I2C, sp_cmu, dbg_out, qdss_cti, NA, NA,
+		       NA, NA, 0x9C014, 1),
+	[4] = PINGROUP(4, qup14, qup14, sdc1_tb, NA, NA, NA, NA, NA, NA,
+		       0x9C008, 3),
+	[5] = PINGROUP(5, qup14, qup14, sdc2_tb, NA, NA, NA, NA, NA, NA, 0, -1),
+	[6] = PINGROUP(6, MDP_VSYNC, qdss_cti, NA, NA, NA, NA, NA, NA, NA,
+		       0, -1),
+	[7] = PINGROUP(7, qdss_cti, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C014, 2),
+	[8] = PINGROUP(8, GP_PDM1, qdss_gpio, NA, NA, NA, NA, NA, NA, NA,
+		       0x9C014, 3),
+	[9] = PINGROUP(9, qdss_gpio, NA, NA, NA, NA, NA, NA, NA, NA,
+		       0x9C014, 4),
+	[10] = PINGROUP(10, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[11] = PINGROUP(11, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C008, 4),
+	[12] = PINGROUP(12, m_voc, dp_hot, NA, phase_flag0, NA, NA, NA, NA, NA,
+			0x9C008, 5),
+	[13] = PINGROUP(13, qup10, pll_bypassnl, NA, NA, NA, NA, NA, NA, NA,
+			0x9C008, 6),
+	[14] = PINGROUP(14, qup10, pll_reset, NA, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[15] = PINGROUP(15, qup10, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[16] = PINGROUP(16, qup10, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C008, 7),
+	[17] = PINGROUP(17, NA, phase_flag1, qup10, NA, NA, NA, NA, NA, NA,
+			0x9C008, 8),
+	[18] = PINGROUP(18, NA, phase_flag2, NA, NA, NA, NA, NA, NA, NA,
+			0x9C008, 9),
+	[19] = PINGROUP(19, qup12, qup12, ddr_bist, NA, NA, NA, NA, NA, NA,
+			0x9C008, 10),
+	[20] = PINGROUP(20, qup12, qup12, ddr_bist, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[21] = PINGROUP(21, gcc_gp2, ddr_bist, NA, NA, NA, NA, NA, NA, NA,
+			0x9C008, 11),
+	[22] = PINGROUP(22, gcc_gp3, ddr_bist, NA, NA, NA, NA, NA, NA, NA,
+			0x9C008, 12),
+	[23] = PINGROUP(23, MDP_VSYNC, edp_lcd, NA, NA, NA, NA, NA, NA, NA,
+			0x9C008, 13),
+	[24] = PINGROUP(24, MDP_VSYNC, NA, NA, NA, NA, NA, NA, NA, NA,
+			0x9C008, 14),
+	[25] = PINGROUP(25, qup13, qup13, NA, NA, NA, NA, NA, NA, NA,
+			0x9C008, 15),
+	[26] = PINGROUP(26, qup13, qup13, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[27] = PINGROUP(27, qup11, qup11, MDP_VSYNC, PLL_BIST, NA, qdss_gpio14,
+			NA, NA, NA, 0x9C00C, 0),
+	[28] = PINGROUP(28, qup11, qup11, MDP_VSYNC, NA, qdss_gpio15, NA, NA,
+			NA, NA, 0x9C00C, 1),
+	[29] = PINGROUP(29, CAM_MCLK0, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[30] = PINGROUP(30, CAM_MCLK1, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[31] = PINGROUP(31, CAM_MCLK2, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[32] = PINGROUP(32, CAM_MCLK3, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[33] = PINGROUP(33, CAM_MCLK4, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[34] = PINGROUP(34, cci_timer0, NA, phase_flag3, qdss_gpio12, NA, NA,
+			NA, NA, NA, 0x9C014, 5),
+	[35] = PINGROUP(35, cci_timer1, CCI_ASYNC, NA, phase_flag4,
+			qdss_gpio13, NA, NA, NA, NA, 0x9C014, 6),
+	[36] = PINGROUP(36, cci_timer2, CCI_ASYNC, NA, phase_flag5,
+			qdss_gpio14, NA, NA, NA, NA, 0x9C014, 7),
+	[37] = PINGROUP(37, cci_timer3, GP_PDM0, NA, phase_flag6, qdss_gpio15,
+			NA, NA, NA, NA, 0x9C014, 8),
+	[38] = PINGROUP(38, cci_timer4, NA, phase_flag7, qdss_gpio2, NA, NA,
+			NA, NA, NA, 0x9C014, 9),
+	[39] = PINGROUP(39, CCI_I2C, NA, phase_flag8, qdss_gpio0, NA, NA, NA,
+			NA, NA, 0, -1),
+	[40] = PINGROUP(40, CCI_I2C, NA, phase_flag9, qdss_gpio1, NA, NA, NA,
+			NA, NA, 0, -1),
+	[41] = PINGROUP(41, CCI_I2C, NA, phase_flag10, qdss_gpio2, NA, NA, NA,
+			NA, NA, 0, -1),
+	[42] = PINGROUP(42, CCI_I2C, NA, phase_flag11, qdss_gpio3, NA, NA, NA,
+			NA, NA, 0, -1),
+	[43] = PINGROUP(43, CCI_I2C, NA, phase_flag12, qdss_gpio4, NA, NA, NA,
+			NA, NA, 0, -1),
+	[44] = PINGROUP(44, CCI_I2C, NA, phase_flag13, qdss_gpio5, NA, NA, NA,
+			NA, NA, 0, -1),
+	[45] = PINGROUP(45, qup02, NA, phase_flag14, qdss_gpio6, NA, NA, NA,
+			NA, NA, 0, -1),
+	[46] = PINGROUP(46, qup02, NA, phase_flag15, qdss_gpio7, NA, NA, NA,
+			NA, NA, 0, -1),
+	[47] = PINGROUP(47, mdp_vsync0, NA, phase_flag16, qdss_gpio3, NA, NA,
+			NA, NA, NA, 0, -1),
+	[48] = PINGROUP(48, CCI_ASYNC, mdp_vsync1, gcc_gp1, NA, phase_flag17,
+			qdss_gpio8, qup02, NA, NA, 0x9C014, 10),
+	[49] = PINGROUP(49, vfr_1, NA, phase_flag18, qdss_gpio9, NA, NA, NA,
+			NA, NA, 0, -1),
+	[50] = PINGROUP(50, NA, phase_flag19, qdss_gpio10, NA, NA, NA, NA, NA,
+			NA, 0x9C00C, 2),
+	[51] = PINGROUP(51, NA, phase_flag20, qdss_gpio11, NA, NA, NA, NA, NA,
+			NA, 0x9C00C, 3),
+	[52] = PINGROUP(52, CCI_ASYNC, GP_PDM1, NA, phase_flag21, qdss_gpio12,
+			NA, NA, NA, NA, 0x9C00C, 4),
+	[53] = PINGROUP(53, CCI_ASYNC, NA, phase_flag22, qdss_gpio13, NA, NA,
+			NA, NA, NA, 0x9C00C, 5),
+	[54] = PINGROUP(54, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C00C, 6),
+	[55] = PINGROUP(55, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C00C, 7),
+	[56] = PINGROUP(56, qup02, mdp_vsync2, NA, phase_flag23, qdss_gpio10,
+			NA, NA, NA, NA, 0, -1),
+	[57] = PINGROUP(57, qup02, mdp_vsync3, GP_PDM2, NA, phase_flag24,
+			qdss_gpio11, NA, NA, NA, 0x9C014, 11),
+	[58] = PINGROUP(58, gcc_gp1, NA, NA, NA, NA, NA, NA, NA, NA,
+			0x9C000, 1),
+	[59] = PINGROUP(59, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C014, 12),
+	[60] = PINGROUP(60, audio_ref, lpass_ext, mi2s_2, NA, phase_flag25, NA,
+			NA, NA, NA, 0x9C000, 2),
+	[61] = PINGROUP(61, qup01, tgu_ch0, NA, phase_flag26, qdss_cti, NA, NA,
+			NA, NA, 0x9C000, 3),
+	[62] = PINGROUP(62, qup01, tgu_ch1, NA, phase_flag27, qdss_cti, NA, NA,
+			NA, NA, 0x9C000, 4),
+	[63] = PINGROUP(63, qup01, tgu_ch2, NA, phase_flag28, qdss_gpio, NA,
+			NA, NA, NA, 0, -1),
+	[64] = PINGROUP(64, qup01, tgu_ch3, NA, phase_flag29, qdss_gpio, NA,
+			NA, NA, NA, 0x9C000, 5),
+	[65] = PINGROUP(65, MSS_LTE, NA, qdss_gpio0, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[66] = PINGROUP(66, MSS_LTE, NA, qdss_gpio1, NA, NA, NA, NA, NA, NA,
+			0x9C000, 6),
+	[67] = PINGROUP(67, btfm_slimbus, mi2s_1, NA, phase_flag30, NA, NA, NA,
+			NA, NA, 0x9C000, 7),
+	[68] = PINGROUP(68, btfm_slimbus, mi2s_1, GP_PDM0, NA, phase_flag31,
+			NA, NA, NA, NA, 0, -1),
+	[69] = PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C00C, 8),
+	[70] = PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[71] = PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[72] = PINGROUP(72, mi2s_2, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[73] = PINGROUP(73, mi2s_2, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C000, 8),
+	[74] = PINGROUP(74, mi2s_2, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[75] = PINGROUP(75, UIM2_DATA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[76] = PINGROUP(76, UIM2_CLK, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[77] = PINGROUP(77, UIM2_RESET, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[78] = PINGROUP(78, UIM2_PRESENT, NA, NA, NA, NA, NA, NA, NA, NA,
+			0x9C00C, 9),
+	[79] = PINGROUP(79, UIM1_DATA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[80] = PINGROUP(80, UIM1_CLK, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[81] = PINGROUP(81, UIM1_RESET, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[82] = PINGROUP(82, UIM1_PRESENT, NA, NA, NA, NA, NA, NA, NA, NA,
+			0x9C00C, 10),
+	[83] = PINGROUP(83, atest_usb1, NA, NA, NA, NA, NA, NA, NA, NA,
+			0x9C00C, 11),
+	[84] = PINGROUP(84, NA, atest_usb10, NA, NA, NA, NA, NA, NA, NA,
+			0x9C00C, 12),
+	[85] = PINGROUP(85, sd_write, NA, atest_usb11, NA, NA, NA, NA, NA, NA,
+			0x9C00C, 13),
+	[86] = PINGROUP(86, btfm_slimbus, mi2s_1, NA, qdss_cti, atest_usb12,
+			ddr_pxi0, NA, NA, NA, 0, -1),
+	[87] = PINGROUP(87, btfm_slimbus, mi2s_1, adsp_ext, NA, qdss_cti,
+			atest_usb13, ddr_pxi1, NA, NA, 0x9C000, 9),
+	[88] = PINGROUP(88, mi2s_0, NA, qdss_gpio4, NA, atest_usb2, ddr_pxi2,
+			tsense_pwm1, tsense_pwm2, NA, 0x9C000, 10),
+	[89] = PINGROUP(89, mi2s_0, AGERA_PLL, NA, qdss_gpio5, NA,
+			vsense_trigger, atest_usb20, ddr_pxi3, NA, 0x9C000, 11),
+	[90] = PINGROUP(90, mi2s_0, JITTER_BIST, NA, qdss_gpio6, NA,
+			wlan1_adc0, atest_usb21, ddr_pxi0, NA, 0x9C000, 12),
+	[91] = PINGROUP(91, mi2s_0, NA, qdss_gpio7, NA, wlan2_adc0,
+			atest_usb22, ddr_pxi1, NA, NA, 0x9C000, 13),
+	[92] = PINGROUP(92, NA, qdss_gpio8, atest_tsens, wlan1_adc1,
+			atest_usb23, ddr_pxi2, NA, NA, NA, 0x9C000, 14),
+	[93] = PINGROUP(93, MCLK, lpass_ext, NA, qdss_gpio9, atest_tsens2,
+			wlan2_adc1, ddr_pxi3, NA, NA, 0x9C000, 15),
+	[94] = PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C00C, 14),
+	[95] = PINGROUP(95, ldo_en, NA, atest_char, NA, NA, NA, NA, NA, NA,
+			0x9C00C, 15),
+	[96] = PINGROUP(96, ldo_update, NA, atest_char0, NA, NA, NA, NA, NA,
+			NA, 0x9C010, 0),
+	[97] = PINGROUP(97, prng_rosc, NA, atest_char1, NA, NA, NA, NA, NA, NA,
+			0x9C010, 1),
+	[98] = PINGROUP(98, NA, atest_char2, NA, NA, NA, NA, NA, NA, NA,
+			0x9C010, 2),
+	[99] = PINGROUP(99, NA, atest_char3, NA, NA, NA, NA, NA, NA, NA,
+			0x9C010, 3),
+	[100] = PINGROUP(100, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C010, 4),
+	[101] = PINGROUP(101, NAV_GPIO, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA,
+			 NA, NA, 0, -1),
+	[102] = PINGROUP(102, NAV_GPIO, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA,
+			 NA, NA, 0, -1),
+	[103] = PINGROUP(103, QLINK0_WMSS, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0, -1),
+	[104] = PINGROUP(104, QLINK0_REQUEST, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0x9C010, 5),
+	[105] = PINGROUP(105, QLINK0_ENABLE, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0, -1),
+	[106] = PINGROUP(106, QLINK1_WMSS, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0, -1),
+	[107] = PINGROUP(107, QLINK1_REQUEST, GPS_TX, NA, NA, NA, NA, NA, NA,
+			 NA, 0x9C010, 6),
+	[108] = PINGROUP(108, QLINK1_ENABLE, GPS_TX, NA, NA, NA, NA, NA, NA,
+			 NA, 0, -1),
+	[109] = PINGROUP(109, RFFE0_DATA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0, -1),
+	[110] = PINGROUP(110, RFFE0_CLK, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0x9C010, 7),
+	[111] = PINGROUP(111, RFFE1_DATA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0, -1),
+	[112] = PINGROUP(112, RFFE1_CLK, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0x9C010, 8),
+	[113] = PINGROUP(113, RFFE2_DATA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0, -1),
+	[114] = PINGROUP(114, RFFE2_CLK, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0x9C010, 9),
+	[115] = PINGROUP(115, RFFE3_DATA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0, -1),
+	[116] = PINGROUP(116, RFFE3_CLK, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0x9C010, 10),
+	[117] = PINGROUP(117, RFFE4_DATA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0, -1),
+	[118] = PINGROUP(118, RFFE4_CLK, NA, pa_indicator, dp_hot, NA, NA, NA,
+			 NA, NA, 0x9C010, 11),
+	[119] = PINGROUP(119, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[120] = PINGROUP(120, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[121] = PINGROUP(121, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[122] = PINGROUP(122, PCIE0_CLK, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0x9C014, 13),
+	[123] = PINGROUP(123, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C014, 14),
+	[124] = PINGROUP(124, USB_PHY, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0x9C010, 12),
+	[125] = PINGROUP(125, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C010, 13),
+	[126] = PINGROUP(126, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[127] = PINGROUP(127, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[128] = PINGROUP(128, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C004, 0),
+	[129] = PINGROUP(129, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C004, 1),
+	[130] = PINGROUP(130, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[131] = PINGROUP(131, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C004, 2),
+	[132] = PINGROUP(132, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[133] = PINGROUP(133, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C004, 3),
+	[134] = PINGROUP(134, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C004, 4),
+	[135] = PINGROUP(135, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[136] = PINGROUP(136, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C004, 5),
+	[137] = PINGROUP(137, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C004, 6),
+	[138] = PINGROUP(138, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C004, 7),
+	[139] = PINGROUP(139, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C004, 8),
+	[140] = PINGROUP(140, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C004, 9),
+	[141] = PINGROUP(141, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[142] = PINGROUP(142, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C004, 10),
+	[143] = PINGROUP(143, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[144] = PINGROUP(144, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C004, 11),
+	[145] = PINGROUP(145, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[146] = PINGROUP(146, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[147] = PINGROUP(147, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C004, 12),
+	[148] = PINGROUP(148, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[149] = PINGROUP(149, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[150] = PINGROUP(150, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[151] = PINGROUP(151, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[152] = PINGROUP(152, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[153] = PINGROUP(153, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C004, 13),
+	[154] = PINGROUP(154, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[155] = PINGROUP(155, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C004, 14),
+	[156] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x1a1000, 15, 0),
+	[157] = SDC_QDSD_PINGROUP(sdc1_clk, 0x1a0000, 13, 6),
+	[158] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x1a0000, 11, 3),
+	[159] = SDC_QDSD_PINGROUP(sdc1_data, 0x1a0000, 9, 0),
+	[160] = SDC_QDSD_PINGROUP(sdc2_clk, 0x1a2000, 14, 6),
+	[161] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x1a2000, 11, 3),
+	[162] = SDC_QDSD_PINGROUP(sdc2_data, 0x1a2000, 9, 0),
+	[163] = UFS_RESET(ufs_reset, 0x1ae000),
+};
+
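+/*
+ * ngpios covers only gpio0..gpio155; entries 156-163 (the SDC and
+ * UFS_RESET pads) are not gpiochip lines and are reached through
+ * pinctrl/pinconf only.
+ */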
+static const struct msm_pinctrl_soc_data lagoon_pinctrl = {
+	.pins = lagoon_pins,
+	.npins = ARRAY_SIZE(lagoon_pins),
+	.functions = lagoon_functions,
+	.nfunctions = ARRAY_SIZE(lagoon_functions),
+	.groups = lagoon_groups,
+	.ngroups = ARRAY_SIZE(lagoon_groups),
+	.ngpios = 156,
+};
+
+static int lagoon_pinctrl_probe(struct platform_device *pdev)
+{
+	return msm_pinctrl_probe(pdev, &lagoon_pinctrl);
+}
+
+static const struct of_device_id lagoon_pinctrl_of_match[] = {
+	{ .compatible = "qcom,lagoon-pinctrl", },
+	{ },
+};
+
+static struct platform_driver lagoon_pinctrl_driver = {
+	.driver = {
+		.name = "lagoon-pinctrl",
+		.of_match_table = lagoon_pinctrl_of_match,
+	},
+	.probe = lagoon_pinctrl_probe,
+	.remove = msm_pinctrl_remove,
+};
+
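+/*
+ * Registered at arch_initcall, presumably so the TLMM pin controller is
+ * available before the drivers that depend on its pins start probing.
+ */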
+static int __init lagoon_pinctrl_init(void)
+{
+	return platform_driver_register(&lagoon_pinctrl_driver);
+}
+arch_initcall(lagoon_pinctrl_init);
+
+static void __exit lagoon_pinctrl_exit(void)
+{
+	platform_driver_unregister(&lagoon_pinctrl_driver);
+}
+module_exit(lagoon_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI lagoon pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, lagoon_pinctrl_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-lito.c b/drivers/pinctrl/qcom/pinctrl-lito.c
index 4938591..055921b 100644
--- a/drivers/pinctrl/qcom/pinctrl-lito.c
+++ b/drivers/pinctrl/qcom/pinctrl-lito.c
@@ -23,7 +23,7 @@
 #define EAST	0x00900000
 #define DUMMY	0x0
 #define REG_SIZE 0x1000
-#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
+#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9, wake_off, bit) \
 	{						\
 		.name = "gpio" #id,			\
 		.pins = gpio##id##_pins,		\
@@ -62,6 +62,8 @@
 		.intr_polarity_bit = 1,		\
 		.intr_detection_bit = 2,	\
 		.intr_detection_width = 2,	\
+		.wake_reg = base + wake_off,	\
+		.wake_bit = bit,		\
 	}
 
 #define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv)	\
@@ -1323,239 +1325,287 @@ static const struct msm_function lito_functions[] = {
  * Clients would not be able to request these dummy pin groups.
  */
 static const struct msm_pingroup lito_groups[] = {
-	[0] = PINGROUP(0, EAST, qup01, NA, NA, NA, NA, NA, NA, NA, NA),
-	[1] = PINGROUP(1, EAST, qup01, NA, phase_flag0, NA, NA, NA, NA, NA, NA),
-	[2] = PINGROUP(2, EAST, qup01, NA, NA, NA, NA, NA, NA, NA, NA),
-	[3] = PINGROUP(3, EAST, qup01, NA, NA, NA, NA, NA, NA, NA, NA),
-	[4] = PINGROUP(4, EAST, NA, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
-	[5] = PINGROUP(5, EAST, NA, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
-	[6] = PINGROUP(6, WEST, qup11, NA, phase_flag1, NA, NA, NA, NA, NA, NA),
+	[0] = PINGROUP(0, EAST, qup01, NA, NA, NA, NA, NA, NA, NA, NA,
+		       0xB4000, 0),
+	[1] = PINGROUP(1, EAST, qup01, NA, phase_flag0, NA, NA, NA, NA, NA, NA,
+		       0, -1),
+	[2] = PINGROUP(2, EAST, qup01, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[3] = PINGROUP(3, EAST, qup01, NA, NA, NA, NA, NA, NA, NA, NA,
+		       0xB4000, 1),
+	[4] = PINGROUP(4, EAST, NA, qdss_cti, NA, NA, NA, NA, NA, NA, NA,
+		       0xB4000, 2),
+	[5] = PINGROUP(5, EAST, NA, qdss_cti, NA, NA, NA, NA, NA, NA, NA,
+		       0xB4000, 3),
+	[6] = PINGROUP(6, WEST, qup11, NA, phase_flag1, NA, NA, NA, NA, NA, NA,
+		       0xB4000, 0),
 	[7] = PINGROUP(7, WEST, qup11, ddr_bist, NA, phase_flag2, NA, NA, NA,
-		       NA, NA),
+		       NA, NA, 0, -1),
 	[8] = PINGROUP(8, WEST, qup11, GP_PDM1, ddr_bist, NA, NA, NA, NA, NA,
-		       NA),
-	[9] = PINGROUP(9, WEST, qup11, ddr_bist, NA, NA, NA, NA, NA, NA, NA),
+		       NA, 0, -1),
+	[9] = PINGROUP(9, WEST, qup11, ddr_bist, NA, NA, NA, NA, NA, NA, NA,
+		       0xB4000, 1),
 	[10] = PINGROUP(10, WEST, mdp_vsync, ddr_bist, NA, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0xB4000, 2),
 	[11] = PINGROUP(11, EAST, mdp_vsync, edp_lcd, NA, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0xB4000, 4),
 	[12] = PINGROUP(12, EAST, mdp_vsync, m_voc, qup01, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0, -1),
 	[13] = PINGROUP(13, EAST, cam_mclk, pll_bypassnl, NA, qdss_gpio0, NA,
-			NA, NA, NA, NA),
+			NA, NA, NA, NA, 0, -1),
 	[14] = PINGROUP(14, EAST, cam_mclk, pll_reset, NA, qdss_gpio1, NA, NA,
-			NA, NA, NA),
+			NA, NA, NA, 0, -1),
 	[15] = PINGROUP(15, EAST, cam_mclk, NA, qdss_gpio2, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0, -1),
 	[16] = PINGROUP(16, EAST, cam_mclk, NA, qdss_gpio3, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0, -1),
 	[17] = PINGROUP(17, EAST, cci_i2c, NA, qdss_gpio4, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0, -1),
 	[18] = PINGROUP(18, EAST, cci_i2c, NA, qdss_gpio5, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0, -1),
 	[19] = PINGROUP(19, EAST, cci_i2c, NA, qdss_gpio6, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0, -1),
 	[20] = PINGROUP(20, EAST, cci_i2c, qdss_gpio7, NA, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0, -1),
 	[21] = PINGROUP(21, EAST, cci_timer0, gcc_gp2, NA, qdss_gpio8, NA, NA,
-			NA, NA, NA),
+			NA, NA, NA, 0, -1),
 	[22] = PINGROUP(22, EAST, cci_timer1, gcc_gp3, NA, qdss_gpio, NA, NA,
-			NA, NA, NA),
+			NA, NA, NA, 0xB4000, 5),
 	[23] = PINGROUP(23, EAST, cci_timer2, NA, qdss_gpio9, NA, NA, NA, NA,
-			NA, NA),
+			NA, NA, 0, -1),
 	[24] = PINGROUP(24, EAST, cci_timer3, cci_async, qdss_gpio10, NA, NA,
-			NA, NA, NA, NA),
+			NA, NA, NA, NA, 0xB4000, 6),
 	[25] = PINGROUP(25, EAST, cci_timer4, cci_async, cam_mclk, NA,
-			qdss_gpio11, NA, NA, NA, NA),
+			qdss_gpio11, NA, NA, NA, NA, 0, -1),
 	[26] = PINGROUP(26, EAST, cci_async, NA, qdss_gpio12, NA, NA, NA, NA,
-			NA, NA),
+			NA, NA, 0xB4000, 7),
 	[27] = PINGROUP(27, EAST, cci_i2c, qdss_gpio13, NA, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0, -1),
 	[28] = PINGROUP(28, EAST, cci_i2c, qdss_gpio14, NA, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0, -1),
 	[29] = PINGROUP(29, EAST, qup04, NA, NA, phase_flag6, qdss_gpio15, NA,
-			NA, NA, NA),
-	[30] = PINGROUP(30, EAST, qup04, qdss_gpio, NA, NA, NA, NA, NA, NA, NA),
+			NA, NA, NA, 0, -1),
+	[30] = PINGROUP(30, EAST, qup04, qdss_gpio, NA, NA, NA, NA, NA, NA, NA,
+			0xB4000, 8),
 	[31] = PINGROUP(31, EAST, qup04, NA, qdss_gpio12, NA, NA, NA, NA, NA,
-			NA),
-	[32] = PINGROUP(32, EAST, qup04, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
-	[33] = PINGROUP(33, WEST, sd_write, NA, NA, NA, NA, NA, NA, NA, NA),
+			NA, 0xB4000, 9),
+	[32] = PINGROUP(32, EAST, qup04, qdss_cti, NA, NA, NA, NA, NA, NA, NA,
+			0xB4000, 10),
+	[33] = PINGROUP(33, WEST, sd_write, NA, NA, NA, NA, NA, NA, NA, NA,
+			0xB4000, 3),
 	[34] = PINGROUP(34, EAST, qup02, qdss_gpio6, NA, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0xB4000, 11),
 	[35] = PINGROUP(35, EAST, qup02, qdss_gpio7, NA, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0, -1),
 	[36] = PINGROUP(36, EAST, qup02, qdss_gpio14, NA, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0xB4000, 12),
 	[37] = PINGROUP(37, EAST, qup02, qup01, GP_PDM0, qdss_gpio15, NA, NA,
-			NA, NA, NA),
-	[38] = PINGROUP(38, EAST, qup05, NA, NA, NA, NA, NA, NA, NA, NA),
-	[39] = PINGROUP(39, EAST, qup05, NA, NA, NA, NA, NA, NA, NA, NA),
-	[40] = PINGROUP(40, EAST, qup05, NA, NA, NA, NA, NA, NA, NA, NA),
-	[41] = PINGROUP(41, EAST, qup05, NA, NA, NA, NA, NA, NA, NA, NA),
-	[42] = PINGROUP(42, EAST, qup00, ibi_i3c, NA, NA, NA, NA, NA, NA, NA),
+			NA, NA, NA, 0xB4000, 13),
+	[38] = PINGROUP(38, EAST, qup05, NA, NA, NA, NA, NA, NA, NA, NA,
+			0xB4000, 14),
+	[39] = PINGROUP(39, EAST, qup05, NA, NA, NA, NA, NA, NA, NA, NA,
+			0xB4000, 15),
+	[40] = PINGROUP(40, EAST, qup05, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[41] = PINGROUP(41, EAST, qup05, NA, NA, NA, NA, NA, NA, NA, NA,
+			0xB4000, 16),
+	[42] = PINGROUP(42, EAST, qup00, ibi_i3c, NA, NA, NA, NA, NA, NA, NA,
+			0xB4000, 17),
 	[43] = PINGROUP(43, EAST, qup00, ibi_i3c, NA, phase_flag3, ddr_pxi0,
-			NA, NA, NA, NA),
+			NA, NA, NA, NA, 0xB4000, 18),
 	[44] = PINGROUP(44, EAST, qup00, NA, phase_flag4, atest_tsens2,
-			vsense_trigger, atest_usb1, ddr_pxi0, NA, NA),
-	[45] = PINGROUP(45, EAST, qup00, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
-	[46] = PINGROUP(46, WEST, qup13, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
-	[47] = PINGROUP(47, WEST, qup13, NA, NA, NA, NA, NA, NA, NA, NA),
+			vsense_trigger, atest_usb1, ddr_pxi0, NA, NA, 0, -1),
+	[45] = PINGROUP(45, EAST, qup00, qdss_cti, NA, NA, NA, NA, NA, NA, NA,
+			0xB4000, 19),
+	[46] = PINGROUP(46, WEST, qup13, qdss_cti, NA, NA, NA, NA, NA, NA, NA,
+			0xB4000, 4),
+	[47] = PINGROUP(47, WEST, qup13, NA, NA, NA, NA, NA, NA, NA, NA,
+			0xB4000, 5),
 	[48] = PINGROUP(48, WEST, qup13, gcc_gp1, NA, phase_flag5, NA, NA, NA,
-			NA, NA),
+			NA, NA, 0xB4000, 6),
 	[49] = PINGROUP(49, WEST, pri_mi2s, qup12, qdss_gpio8, NA, NA, NA, NA,
-			NA, NA),
+			NA, NA, 0xB4000, 7),
 	[50] = PINGROUP(50, WEST, pri_mi2s_ws, qup12, GP_PDM1, qdss_gpio9, NA,
-			NA, NA, NA, NA),
+			NA, NA, NA, NA, 0xB4000, 8),
 	[51] = PINGROUP(51, WEST, pri_mi2s, qup12, qdss_gpio10, NA, NA, NA, NA,
-			NA, NA),
+			NA, NA, 0, -1),
 	[52] = PINGROUP(52, WEST, pri_mi2s, qup12, qdss_gpio13, NA, NA, NA, NA,
-			NA, NA),
+			NA, NA, 0xB4000, 9),
 	[53] = PINGROUP(53, WEST, ter_mi2s, qup14, lpass_slimbus, NA, NA, NA,
-			NA, NA, NA),
+			NA, NA, NA, 0xB4000, 10),
 	[54] = PINGROUP(54, WEST, ter_mi2s, qup14, lpass_slimbus, NA,
-			phase_flag7, NA, NA, NA, NA),
+			phase_flag7, NA, NA, NA, NA, 0, -1),
 	[55] = PINGROUP(55, WEST, ter_mi2s, qup14, lpass_slimbus, NA, NA, NA,
-			NA, NA, NA),
+			NA, NA, NA, 0xB4000, 11),
 	[56] = PINGROUP(56, WEST, ter_mi2s, qup14, lpass_slimbus, gcc_gp1, NA,
-			NA, NA, NA, NA),
+			NA, NA, NA, NA, 0xB4000, 12),
 	[57] = PINGROUP(57, WEST, sec_mi2s, qup12, GP_PDM2, qdss_gpio11, NA,
-			NA, NA, NA, NA),
-	[58] = PINGROUP(58, WEST, qua_mi2s, qup12, NA, NA, NA, NA, NA, NA, NA),
+			NA, NA, NA, NA, 0xB4000, 13),
+	[58] = PINGROUP(58, WEST, qua_mi2s, qup12, NA, NA, NA, NA, NA, NA, NA,
+			0xB4000, 14),
 	[59] = PINGROUP(59, WEST, qup10, ibi_i3c, NA, phase_flag21, atest_usb2,
-			NA, NA, NA, NA),
+			NA, NA, NA, NA, 0xB4000, 15),
 	[60] = PINGROUP(60, WEST, qup10, ibi_i3c, NA, phase_flag8, atest_usb22,
-			NA, NA, NA, NA),
-	[61] = PINGROUP(61, WEST, qup10, NA, NA, NA, NA, NA, NA, NA, NA),
-	[62] = PINGROUP(62, WEST, qup10, tgu_ch3, NA, NA, NA, NA, NA, NA, NA),
+			NA, NA, NA, NA, 0, -1),
+	[61] = PINGROUP(61, WEST, qup10, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[62] = PINGROUP(62, WEST, qup10, tgu_ch3, NA, NA, NA, NA, NA, NA, NA,
+			0xB4000, 16),
 	[63] = PINGROUP(63, WEST, qup13, mdp_vsync0, qup10, mdp_vsync1,
-			mdp_vsync2, mdp_vsync3, tgu_ch0, qdss_cti, NA),
+			mdp_vsync2, mdp_vsync3, tgu_ch0, qdss_cti, NA, 0, -1),
 	[64] = PINGROUP(64, EAST, sdc4_cmd, tgu_ch1, NA, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0xB4000, 20),
 	[65] = PINGROUP(65, EAST, sdc43, vfr_1, tgu_ch2, NA, NA, NA, NA, NA,
-			NA),
-	[66] = PINGROUP(66, EAST, sdc4_clk, NA, NA, NA, NA, NA, NA, NA, NA),
-	[67] = PINGROUP(67, EAST, sdc42, NA, NA, NA, NA, NA, NA, NA, NA),
-	[68] = PINGROUP(68, EAST, sdc41, GP_PDM0, NA, NA, NA, NA, NA, NA, NA),
-	[69] = PINGROUP(69, EAST, sdc40, NA, NA, NA, NA, NA, NA, NA, NA),
+			NA, 0xB4000, 21),
+	[66] = PINGROUP(66, EAST, sdc4_clk, NA, NA, NA, NA, NA, NA, NA, NA,
+			0xB4000, 22),
+	[67] = PINGROUP(67, EAST, sdc42, NA, NA, NA, NA, NA, NA, NA, NA,
+			0xB4000, 23),
+	[68] = PINGROUP(68, EAST, sdc41, GP_PDM0, NA, NA, NA, NA, NA, NA, NA,
+			0xB4000, 24),
+	[69] = PINGROUP(69, EAST, sdc40, NA, NA, NA, NA, NA, NA, NA, NA,
+			0xB4000, 25),
 	[70] = PINGROUP(70, WEST, sec_mi2s, qup10, mdp_vsync, ldo_en, NA, NA,
-			NA, NA, NA),
+			NA, NA, NA, 0xB4000, 17),
 	[71] = PINGROUP(71, WEST, sec_mi2s, qup10, mdp_vsync, ldo_update, NA,
-			NA, phase_flag7, NA, NA),
+			NA, phase_flag7, NA, NA, 0, -1),
 	[72] = PINGROUP(72, WEST, sec_mi2s, prng_rosc, NA, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0xB4000, 18),
 	[73] = PINGROUP(73, EAST, NA, phase_flag9, atest_usb20, NA, NA, NA, NA,
-			NA, NA),
-	[74] = PINGROUP(74, WEST, sec_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+			NA, NA, 0xB4000, 26),
+	[74] = PINGROUP(74, WEST, sec_mi2s, NA, NA, NA, NA, NA, NA, NA, NA,
+			0xB4000, 19),
 	[75] = PINGROUP(75, SOUTH, uim2_data, JITTER_BIST, NA, phase_flag10,
-			NA, NA, NA, NA, NA),
+			NA, NA, NA, NA, NA, 0, -1),
 	[76] = PINGROUP(76, SOUTH, uim2_clk, PLL_BIST, NA, phase_flag11, NA,
-			NA, NA, NA, NA),
+			NA, NA, NA, NA, 0, -1),
 	[77] = PINGROUP(77, SOUTH, uim2_reset, PLL_CLK, NA, phase_flag12, NA,
-			NA, NA, NA, NA),
+			NA, NA, NA, NA, 0, -1),
 	[78] = PINGROUP(78, SOUTH, uim2_present, NA, phase_flag13, wlan2_adc1,
-			atest_usb11, ddr_pxi2, NA, NA, NA),
+			atest_usb11, ddr_pxi2, NA, NA, NA, 0xB4000, 3),
 	[79] = PINGROUP(79, SOUTH, uim1_data, NA, phase_flag14, wlan2_adc0,
-			atest_usb10, ddr_pxi2, NA, NA, NA),
+			atest_usb10, ddr_pxi2, NA, NA, NA, 0, -1),
 	[80] = PINGROUP(80, SOUTH, uim1_clk, NA, phase_flag15, ddr_pxi3, NA,
-			NA, NA, NA, NA),
+			NA, NA, NA, NA, 0, -1),
 	[81] = PINGROUP(81, SOUTH, uim1_reset, NA, phase_flag16, ddr_pxi3, NA,
-			NA, NA, NA, NA),
+			NA, NA, NA, NA, 0, -1),
 	[82] = PINGROUP(82, SOUTH, uim1_present, NA, phase_flag17, NA, NA, NA,
-			NA, NA, NA),
+			NA, NA, NA, 0xB4000, 4),
 	[83] = PINGROUP(83, WEST, aud_ref, NA, usb2phy_ac, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0xB4000, 20),
 	[84] = PINGROUP(84, SOUTH, qlink1_wmss, uim_batt, aoss_cti, NA,
-			phase_flag18, atest_tsens, NA, NA, NA),
+			phase_flag18, atest_tsens, NA, NA, NA, 0xB4000, 5),
 	[85] = PINGROUP(85, SOUTH, qlink1_request, edp_hot, NA, phase_flag19,
-			NA, NA, NA, NA, NA),
+			NA, NA, NA, NA, NA, 0xB4000, 6),
 	[86] = PINGROUP(86, SOUTH, qlink1_enable, NA, phase_flag20, NA,
-			wlan1_adc0, atest_usb12, ddr_pxi1, NA, NA),
-	[87] = PINGROUP(87, EAST, qlink0_wmss, NA, NA, NA, NA, NA, NA, NA, NA),
+			wlan1_adc0, atest_usb12, ddr_pxi1, NA, NA, 0xB4000, 7),
+	[87] = PINGROUP(87, EAST, qlink0_wmss, NA, NA, NA, NA, NA, NA, NA, NA,
+			0xB4000, 27),
 	[88] = PINGROUP(88, EAST, qlink0_request, NA, NA, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0xB4000, 28),
 	[89] = PINGROUP(89, EAST, qlink0_enable, NA, NA, NA, NA, NA, NA, NA,
-			NA),
+			NA, 0, -1),
 	[90] = PINGROUP(90, SOUTH, PON_BEAMER, NA, NAV_PPS_IN, NAV_PPS_OUT,
-			GPS_TX, NA, NA, NA, NA),
+			GPS_TX, NA, NA, NA, NA, 0xB4000, 8),
 	[91] = PINGROUP(91, SOUTH, NA, pa_indicator, dbg_out, NA, NA, NA, NA,
-			NA, NA),
+			NA, NA, 0, -1),
 	[92] = PINGROUP(92, SOUTH, nav_gpio, NAV_PPS_IN, NAV_PPS_OUT, GPS_TX,
-			NA, wlan1_adc1, atest_usb13, ddr_pxi1, NA),
+			NA, wlan1_adc1, atest_usb13, ddr_pxi1, NA, 0, -1),
 	[93] = PINGROUP(93, SOUTH, GPS_TX, nav_gpio, NAV_PPS_IN, NAV_PPS_OUT,
-			GPS_TX_MIRD, NA, NA, NA, NA),
+			GPS_TX_MIRD, NA, NA, NA, NA, 0, -1),
 	[94] = PINGROUP(94, SOUTH, NA, NA, NA, phase_flag22, NA, NA, NA, NA,
-			NA),
+			NA, 0, -1),
 	[95] = PINGROUP(95, SOUTH, NA, NA, NA, phase_flag23, atest_usb23, NA,
-			NA, NA, NA),
+			NA, NA, NA, 0, -1),
 	[96] = PINGROUP(96, SOUTH, NA, NA, NA, phase_flag24, NA, NA, NA, NA,
-			NA),
+			NA, 0, -1),
 	[97] = PINGROUP(97, SOUTH, NA, NA, NA, phase_flag25, atest_usb21, NA,
-			NA, NA, NA),
+			NA, NA, NA, 0xB4000, 9),
 	[98] = PINGROUP(98, SOUTH, NA, NA, NA, phase_flag26, NA, NA, NA, NA,
-			NA),
+			NA, 0xB4000, 10),
 	[99] = PINGROUP(99, SOUTH, NA, NA, NA, phase_flag27, NA, NA, NA, NA,
-			NA),
+			NA, 0, -1),
 	[100] = PINGROUP(100, SOUTH, NA, NA, NA, phase_flag28, NA, NA, NA, NA,
-			 NA),
+			 NA, 0xB4000, 11),
 	[101] = PINGROUP(101, SOUTH, NA, NA, NA, phase_flag29, NA, NA, NA, NA,
-			 NA),
+			 NA, 0, -1),
 	[102] = PINGROUP(102, SOUTH, NA, NA, NA, phase_flag30, NA, NA, NA, NA,
-			 NA),
+			 NA, 0, -1),
 	[103] = PINGROUP(103, SOUTH, NA, NA, NA, phase_flag31, NA, NA, NA, NA,
-			 NA),
+			 NA, 0xB4000, 12),
 	[104] = PINGROUP(104, EAST, mss_lte, NA, GPS_TX, NA, NA, NA, NA, NA,
-			 NA),
+			 NA, 0, -1),
 	[105] = PINGROUP(105, EAST, mss_lte, NA, nav_gpio, NA, NA, NA, NA, NA,
-			 NA),
-	[106] = PINGROUP(106, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+			 NA, 0xB4000, 29),
+	[106] = PINGROUP(106, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
 	[107] = PINGROUP(107, EAST, qdss_gpio, atest_char, NA, NA, NA, NA, NA,
-			 NA, NA),
+			 NA, NA, 0xB4000, 30),
 	[108] = PINGROUP(108, WEST, qup15, qdss_gpio0, atest_char3, NA, NA, NA,
-			 NA, NA, NA),
+			 NA, NA, NA, 0xB4000, 21),
 	[109] = PINGROUP(109, WEST, qup15, adsp_ext, NA, qdss_gpio1,
-			 atest_char2, NA, NA, NA, NA),
+			 atest_char2, NA, NA, NA, NA, 0xB4000, 22),
 	[110] = PINGROUP(110, EAST, qdss_gpio2, atest_char1, NA, NA, NA, NA,
-			 NA, NA, NA),
+			 NA, NA, NA, 0xB4000, 31),
 	[111] = PINGROUP(111, EAST, qdss_gpio3, atest_char0, NA, NA, NA, NA,
-			 NA, NA, NA),
+			 NA, NA, NA, 0xB4004, 0),
 	[112] = PINGROUP(112, WEST, qup15, qdss_gpio4, NA, NA, NA, NA, NA, NA,
-			 NA),
+			 NA, 0xB4000, 23),
 	[113] = PINGROUP(113, WEST, qup15, qdss_gpio5, NA, NA, NA, NA, NA, NA,
-			 NA),
+			 NA, 0xB4000, 24),
 	[114] = PINGROUP(114, WEST, usb_phy, NA, qdss_gpio, NA, NA, NA, NA, NA,
-			 NA),
-	[115] = PINGROUP(115, WEST, cri_trng0, NA, NA, NA, NA, NA, NA, NA, NA),
-	[116] = PINGROUP(116, WEST, cri_trng1, NA, NA, NA, NA, NA, NA, NA, NA),
-	[117] = PINGROUP(117, WEST, cri_trng, NA, NA, NA, NA, NA, NA, NA, NA),
-	[118] = PINGROUP(118, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[119] = PINGROUP(119, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[120] = PINGROUP(120, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[121] = PINGROUP(121, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[122] = PINGROUP(122, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[123] = PINGROUP(123, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[124] = PINGROUP(124, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[125] = PINGROUP(125, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[126] = PINGROUP(126, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[127] = PINGROUP(127, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[128] = PINGROUP(128, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[129] = PINGROUP(129, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[130] = PINGROUP(130, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[131] = PINGROUP(131, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[132] = PINGROUP(132, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[133] = PINGROUP(133, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[134] = PINGROUP(134, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+			 NA, 0xB4000, 25),
+	[115] = PINGROUP(115, WEST, cri_trng0, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4000, 26),
+	[116] = PINGROUP(116, WEST, cri_trng1, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4000, 27),
+	[117] = PINGROUP(117, WEST, cri_trng, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4000, 28),
+	[118] = PINGROUP(118, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[119] = PINGROUP(119, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4000, 29),
+	[120] = PINGROUP(120, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[121] = PINGROUP(121, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4000, 30),
+	[122] = PINGROUP(122, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4000, 31),
+	[123] = PINGROUP(123, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[124] = PINGROUP(124, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4004, 0),
+	[125] = PINGROUP(125, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[126] = PINGROUP(126, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4004, 1),
+	[127] = PINGROUP(127, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4004, 2),
+	[128] = PINGROUP(128, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4004, 3),
+	[129] = PINGROUP(129, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[130] = PINGROUP(130, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4004, 4),
+	[131] = PINGROUP(131, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[132] = PINGROUP(132, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4004, 5),
+	[133] = PINGROUP(133, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4004, 6),
+	[134] = PINGROUP(134, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4004, 7),
 	[135] = PINGROUP(135, WEST, tsense_pwm1, tsense_pwm2, NA, NA, NA, NA,
-			 NA, NA, NA),
-	[136] = PINGROUP(136, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[137] = PINGROUP(137, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[138] = PINGROUP(138, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[139] = PINGROUP(139, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[140] = PINGROUP(140, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[141] = PINGROUP(141, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[142] = PINGROUP(142, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[143] = PINGROUP(143, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[144] = PINGROUP(144, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[145] = PINGROUP(145, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+			 NA, NA, NA, 0xB4004, 8),
+	[136] = PINGROUP(136, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[137] = PINGROUP(137, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4004, 9),
+	[138] = PINGROUP(138, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4004, 10),
+	[139] = PINGROUP(139, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4004, 11),
+	[140] = PINGROUP(140, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4004, 12),
+	[141] = PINGROUP(141, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4004, 13),
+	[142] = PINGROUP(142, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4004, 14),
+	[143] = PINGROUP(143, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4004, 15),
+	[144] = PINGROUP(144, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4004, 16),
+	[145] = PINGROUP(145, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+			 0xB4004, 17),
 	[146] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x5b8000, 15, 0),
 	[147] = SDC_QDSD_PINGROUP(sdc1_clk, 0x5b8000, 13, 6),
 	[148] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x5b8000, 11, 3),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index e16bd41..0a8ca53 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -925,6 +925,29 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
 	return 0;
 }
 
+static int msm_gpio_irq_set_affinity(struct irq_data *d,
+				const struct cpumask *dest, bool force)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+	if (d->parent_data && test_bit(d->hwirq, pctrl->wakeup_masked_irqs))
+		return irq_chip_set_affinity_parent(d, dest, force);
+
+	return 0;
+}
+
+static int msm_gpio_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+	if (d->parent_data && test_bit(d->hwirq, pctrl->wakeup_masked_irqs))
+		return irq_chip_set_vcpu_affinity_parent(d, vcpu_info);
+
+	return 0;
+}
+
 static void msm_gpio_irq_handler(struct irq_desc *desc)
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -1110,6 +1133,8 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
 	pctrl->irq_chip.irq_ack = msm_gpio_irq_ack;
 	pctrl->irq_chip.irq_set_type = msm_gpio_irq_set_type;
 	pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake;
+	pctrl->irq_chip.irq_set_affinity = msm_gpio_irq_set_affinity;
+	pctrl->irq_chip.irq_set_vcpu_affinity = msm_gpio_irq_set_vcpu_affinity;
 
 	chip->irq.chip = &pctrl->irq_chip;
 	chip->irq.handler = handle_edge_irq;
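
The two callbacks above forward to the parent interrupt domain only when the pin's interrupt is actually allocated in a hierarchy (parent_data set) and routed to the wakeup parent; summary-routed GPIOs have no per-pin affinity of their own, so returning 0 is a deliberate no-op. A minimal sketch of the same delegation pattern, with a hypothetical controller struct and bitmap name:

	#include <linux/irq.h>

	struct demo_ctrl {
		unsigned long *routed_to_parent; /* hypothetical per-hwirq bitmap */
	};

	static int demo_irq_set_affinity(struct irq_data *d,
					 const struct cpumask *dest, bool force)
	{
		struct demo_ctrl *ctrl = irq_data_get_irq_chip_data(d);

		/* Forward only hierarchy-allocated, parent-routed irqs. */
		if (d->parent_data && test_bit(d->hwirq, ctrl->routed_to_parent))
			return irq_chip_set_affinity_parent(d, dest, force);

		/* Summary-routed pins follow the summary line's affinity. */
		return 0;
	}
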
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.c b/drivers/pinctrl/sprd/pinctrl-sprd.c
index 78c2f54..8f3468d 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd.c
+++ b/drivers/pinctrl/sprd/pinctrl-sprd.c
@@ -159,10 +159,8 @@ struct sprd_pinctrl {
 	struct sprd_pinctrl_soc_info *info;
 };
 
-enum sprd_pinconf_params {
-	SPRD_PIN_CONFIG_CONTROL = PIN_CONFIG_END + 1,
-	SPRD_PIN_CONFIG_SLEEP_MODE = PIN_CONFIG_END + 2,
-};
+#define SPRD_PIN_CONFIG_CONTROL		(PIN_CONFIG_END + 1)
+#define SPRD_PIN_CONFIG_SLEEP_MODE	(PIN_CONFIG_END + 2)
 
 static int sprd_pinctrl_get_id_by_name(struct sprd_pinctrl *sprd_pctl,
 				       const char *name)
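
Replacing the enum with defines matters because these driver-private values extend the generic enum pin_config_param numbering past PIN_CONFIG_END; keeping them in a separate enum type provokes mixed-enum-type warnings wherever they are passed to generic pinconf helpers. A short sketch, assuming only the defines from the hunk above, of how such a private parameter rides the generic packed encoding:

	#include <linux/pinctrl/pinconf-generic.h>

	/* Sketch: private parameters above PIN_CONFIG_END travel through the
	 * same packed (param, argument) words as the generic ones.
	 */
	static void sprd_pinconf_pack_demo(void)
	{
		unsigned long cfg =
			pinconf_to_config_packed(SPRD_PIN_CONFIG_SLEEP_MODE, 1);
		enum pin_config_param param = pinconf_to_config_param(cfg);
		u32 arg = pinconf_to_config_argument(cfg);

		(void)param;	/* == SPRD_PIN_CONFIG_SLEEP_MODE */
		(void)arg;	/* == 1 */
	}
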
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 1aba758..26a3f1e 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -40,7 +40,9 @@ static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg)
 
 static inline void pmx_writel(struct tegra_pmx *pmx, u32 val, u32 bank, u32 reg)
 {
-	writel(val, pmx->regs[bank] + reg);
+	writel_relaxed(val, pmx->regs[bank] + reg);
+	/* read back to make sure the pinmux register write completed */
+	pmx_readl(pmx, bank, reg);
 }
 
 static int tegra_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
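
The pmx_writel() change trades the ordered writel() for writel_relaxed() followed by a read-back of the same register: MMIO writes can be posted in bus bridges, and a read from the same device cannot complete until the preceding write has landed, so the read-back guarantees the pinmux update has taken effect before execution continues. A minimal sketch of the idiom, assuming a device for which this same-device ordering holds:

	#include <linux/io.h>

	/* Sketch: flush a posted MMIO write by reading the register back. */
	static inline void mmio_write_and_flush(void __iomem *reg, u32 val)
	{
		writel_relaxed(val, reg);
		(void)readl_relaxed(reg);	/* drains the posted write */
	}
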
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index cac0034..cc6bf43 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -168,7 +168,8 @@ static void gsi_channel_state_change_wait(unsigned long chan_hdl,
 		}
 
 		if (op == GSI_CH_START) {
-			if (curr_state == GSI_CHAN_STATE_STARTED) {
+			if (curr_state == GSI_CHAN_STATE_STARTED ||
+				curr_state == GSI_CHAN_STATE_FLOW_CONTROL) {
 				ctx->state = curr_state;
 				return;
 			}
@@ -2422,6 +2423,11 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
 	ctx->stats.dp.last_timestamp = jiffies_to_msecs(jiffies);
 	atomic_inc(&gsi_ctx->num_chan);
 
+	if (props->prot == GSI_CHAN_PROT_GCI) {
+		gsi_ctx->coal_info.ch_id = props->ch_id;
+		gsi_ctx->coal_info.evchid = props->evt_ring_hdl;
+	}
+
 	return GSI_STATUS_SUCCESS;
 }
 EXPORT_SYMBOL(gsi_alloc_channel);
@@ -2819,7 +2825,8 @@ int gsi_start_channel(unsigned long chan_hdl)
 		ctx,
 		GSI_START_CMD_TIMEOUT_MS, op);
 
-	if (ctx->state != GSI_CHAN_STATE_STARTED) {
+	if (ctx->state != GSI_CHAN_STATE_STARTED &&
+		ctx->state != GSI_CHAN_STATE_FLOW_CONTROL) {
 		/*
 		 * Hardware returned unexpected status, unexpected
 		 * hardware state.
@@ -3153,6 +3160,10 @@ int gsi_dealloc_channel(unsigned long chan_hdl)
 		atomic_dec(&ctx->evtr->chan_ref_cnt);
 	atomic_dec(&gsi_ctx->num_chan);
 
+	if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
+		gsi_ctx->coal_info.ch_id = GSI_CHAN_MAX;
+		gsi_ctx->coal_info.evchid = GSI_EVT_RING_MAX;
+	}
 	return GSI_STATUS_SUCCESS;
 }
 EXPORT_SYMBOL(gsi_dealloc_channel);
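
With this series, GSI_CHAN_STATE_FLOW_CONTROL is accepted wherever a successful start used to require GSI_CHAN_STATE_STARTED (both the state-change wait helper and gsi_start_channel() above). If more call sites appear, a predicate keeps the pair of states in one place; a minimal sketch:

	/* Sketch: a channel that came up in FLOW_CONTROL is running as far
	 * as the start path is concerned.
	 */
	static bool gsi_chan_started(enum gsi_chan_state state)
	{
		return state == GSI_CHAN_STATE_STARTED ||
		       state == GSI_CHAN_STATE_FLOW_CONTROL;
	}
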
@@ -3705,7 +3716,7 @@ EXPORT_SYMBOL(gsi_poll_n_channel);
 
 int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 {
-	struct gsi_chan_ctx *ctx;
+	struct gsi_chan_ctx *ctx, *coal_ctx;
 	enum gsi_chan_mode curr;
 	unsigned long flags;
 	enum gsi_chan_mode chan_mode;
@@ -3751,8 +3762,14 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 		gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
 			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(gsi_ctx->per.ee));
 		atomic_set(&ctx->poll_mode, mode);
-		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan)
+		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan) {
 			atomic_set(&ctx->evtr->chan->poll_mode, mode);
+		} else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
+			coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
+			if (coal_ctx != NULL)
+				atomic_set(&coal_ctx->poll_mode, mode);
+		}
+
 		GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
 			ctx->evtr->id, mode);
 		ctx->stats.callback_to_poll++;
@@ -3761,8 +3778,13 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 	if (curr == GSI_CHAN_MODE_POLL &&
 			mode == GSI_CHAN_MODE_CALLBACK) {
 		atomic_set(&ctx->poll_mode, mode);
-		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan)
+		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan) {
 			atomic_set(&ctx->evtr->chan->poll_mode, mode);
+		} else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
+			coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
+			if (coal_ctx != NULL)
+				atomic_set(&coal_ctx->poll_mode, mode);
+		}
 		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0);
 		GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
 			ctx->evtr->id, mode);
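
Both mode-switch paths in gsi_config_channel_mode() now repeat the same lookup: if the event ring being reconfigured is the one recorded for the coalescing channel, the new poll mode is mirrored onto that channel's context too. A hypothetical helper expressing the lookup once:

	/* Hypothetical helper: resolve the coalescing channel served by an
	 * event ring, or NULL when coalescing is not active on that ring.
	 */
	static struct gsi_chan_ctx *gsi_coal_ctx_for_evt(uint8_t evt_id)
	{
		if (gsi_ctx->coal_info.evchid != evt_id)
			return NULL;
		return &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
	}
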
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index 4ff09cc..3b6c648 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -87,6 +87,7 @@ enum gsi_chan_state {
 	GSI_CHAN_STATE_STARTED = 0x2,
 	GSI_CHAN_STATE_STOPPED = 0x3,
 	GSI_CHAN_STATE_STOP_IN_PROC = 0x4,
+	GSI_CHAN_STATE_FLOW_CONTROL = 0x5,
 	GSI_CHAN_STATE_ERROR = 0xf
 };
 
@@ -199,6 +200,11 @@ struct gsi_generic_ee_cmd_debug_stats {
 	unsigned long halt_channel;
 };
 
+struct gsi_coal_chan_info {
+	uint8_t ch_id;
+	uint8_t evchid;
+};
+
 struct gsi_ctx {
 	void __iomem *base;
 	struct device *dev;
@@ -222,6 +228,7 @@ struct gsi_ctx {
 	struct completion gen_ee_cmd_compl;
 	void *ipc_logbuf;
 	void *ipc_logbuf_low;
+	struct gsi_coal_chan_info coal_info;
 	/*
 	 * The following used only on emulation systems.
 	 */
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 28a2fc6..7cbf21c 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -207,6 +207,8 @@ const char *ipa_clients_strings[IPA_CLIENT_MAX] = {
 	__stringify(IPA_CLIENT_MHI_PRIME_TETH_PROD),
 	__stringify(IPA_CLIENT_MHI_PRIME_TETH_CONS),
 	__stringify(IPA_CLIENT_MHI_PRIME_DPL_PROD),
+	__stringify(IPA_CLIENT_AQC_ETHERNET_PROD),
+	__stringify(IPA_CLIENT_AQC_ETHERNET_CONS),
 };
 
 /**
@@ -1997,6 +1999,43 @@ int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
 EXPORT_SYMBOL(ipa_get_wdi_stats);
 
 /**
+ * ipa_uc_bw_monitor() - start uc bw monitoring
+ * @info:	[inout] set info populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_uc_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_bw_monitor, info);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_bw_monitor);
+
+/**
+ * ipa_set_wlan_tx_info() - set WDI statistics from uc
+ * @info:	[inout] set info populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_set_wlan_tx_info(struct ipa_wdi_tx_info *info)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_set_wlan_tx_info, info);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_set_wlan_tx_info);
+/**
  * ipa_get_smem_restr_bytes()- Return IPA smem restricted bytes
  *
  * Return value: u16 - number of IPA smem restricted bytes
@@ -3620,6 +3659,19 @@ int ipa_disable_wigig_pipe_i(enum ipa_client_type client)
 EXPORT_SYMBOL(ipa_disable_wigig_pipe_i);
 
 /**
+ * ipa_get_lan_rx_napi() - returns whether NAPI is enabled on the LAN RX pipe
+ */
+bool ipa_get_lan_rx_napi(void)
+{
+	bool ret;
+
+	IPA_API_DISPATCH_RETURN_BOOL(ipa_get_lan_rx_napi);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_lan_rx_napi);
+
+/**
  * ipa_tz_unlock_reg() - Allow AP access to memory regions controlled by TZ
  */
 int ipa_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs)
@@ -3645,6 +3697,45 @@ void ipa_deregister_client_callback(enum ipa_client_type client)
 		client);
 }
 
+int ipa_uc_debug_stats_alloc(
+	struct IpaHwOffloadStatsAllocCmdData_t cmdinfo)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_debug_stats_alloc, cmdinfo);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_debug_stats_alloc);
+
+int ipa_uc_debug_stats_dealloc(uint32_t prot_id)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_debug_stats_dealloc, prot_id);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_debug_stats_dealloc);
+
+void ipa_get_gsi_stats(int prot_id,
+	struct ipa_uc_dbg_ring_stats *stats)
+{
+	IPA_API_DISPATCH(ipa_get_gsi_stats,
+		prot_id, stats);
+}
+EXPORT_SYMBOL(ipa_get_gsi_stats);
+
+int ipa_get_prot_id(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_prot_id,
+		client);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_prot_id);
 
 static const struct dev_pm_ops ipa_pm_ops = {
 	.suspend_noirq = ipa_ap_suspend,
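
Each new export above is a thin trampoline: IPA_API_DISPATCH_RETURN() (defined earlier in ipa_api.c, not shown in this diff) routes the call through the controller registered for the active IPA hardware generation. A hedged reconstruction of that idiom; the real macro may differ, e.g. in its error logging:

	/* Sketch of the dispatch idiom (hypothetical reconstruction): fail
	 * when no controller is registered or the hook is unimplemented,
	 * otherwise call through the function-pointer table.
	 */
	#define DEMO_API_DISPATCH_RETURN(api, ...)			\
		do {							\
			if (!ipa_api_ctrl || !ipa_api_ctrl->api)	\
				ret = -EPERM;				\
			else						\
				ret = ipa_api_ctrl->api(__VA_ARGS__);	\
		} while (0)
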
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index 62457e2..216b052 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -212,6 +212,10 @@ struct ipa_api_controller {
 
 	int (*ipa_get_wdi_stats)(struct IpaHwStatsWDIInfoData_t *stats);
 
+	int (*ipa_uc_bw_monitor)(struct ipa_wdi_bw_info *info);
+
+	int (*ipa_set_wlan_tx_info)(struct ipa_wdi_tx_info *info);
+
 	u16 (*ipa_get_smem_restr_bytes)(void);
 
 	int (*ipa_broadcast_wdi_quota_reach_ind)(uint32_t fid,
@@ -466,6 +470,18 @@ struct ipa_api_controller {
 		bool (*teth_port_state)(void), enum ipa_client_type client);
 
 	void (*ipa_deregister_client_callback)(enum ipa_client_type client);
+
+	int (*ipa_uc_debug_stats_alloc)(
+		struct IpaHwOffloadStatsAllocCmdData_t cmdinfo);
+
+	int (*ipa_uc_debug_stats_dealloc)(uint32_t prot_id);
+
+	bool (*ipa_get_lan_rx_napi)(void);
+
+	void (*ipa_get_gsi_stats)(int prot_id,
+		struct ipa_uc_dbg_ring_stats *stats);
+
+	int (*ipa_get_prot_id)(enum ipa_client_type client);
 };
 
 #ifdef CONFIG_IPA3
diff --git a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
index f6fc8c7..0b62ef3 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
@@ -149,6 +149,7 @@ enum ecm_ipa_operation {
  * @usb_to_ipa_client: producer client
  * @pm_hdl: handle for IPA PM
  * @is_vlan_mode: does the driver need to work in VLAN mode?
+ * @netif_rx_function: holds the correct network stack API, needed for NAPI
  */
 struct ecm_ipa_dev {
 	struct net_device *net;
@@ -166,6 +167,7 @@ struct ecm_ipa_dev {
 	enum ipa_client_type usb_to_ipa_client;
 	u32 pm_hdl;
 	bool is_vlan_mode;
+	int (*netif_rx_function)(struct sk_buff *skb);
 };
 
 static int ecm_ipa_open(struct net_device *net);
@@ -286,6 +288,13 @@ int ecm_ipa_init(struct ecm_ipa_params *params)
 	snprintf(net->name, sizeof(net->name), "%s%%d", "ecm");
 	net->netdev_ops = &ecm_ipa_netdev_ops;
 	net->watchdog_timeo = TX_TIMEOUT;
+	if (ipa_get_lan_rx_napi()) {
+		ecm_ipa_ctx->netif_rx_function = netif_receive_skb;
+		ECM_IPA_DEBUG("LAN RX NAPI enabled = True");
+	} else {
+		ecm_ipa_ctx->netif_rx_function = netif_rx_ni;
+		ECM_IPA_DEBUG("LAN RX NAPI enabled = False");
+	}
 	ECM_IPA_DEBUG("internal data structures were initialized\n");
 
 	if (!params->device_ready_notify)
@@ -655,7 +664,7 @@ static void ecm_ipa_packet_receive_notify
 		return;
 	}
 
-	if (unlikely(evt != IPA_RECEIVE))	{
+	if (unlikely(evt != IPA_RECEIVE)) {
 		ECM_IPA_ERROR("A none IPA_RECEIVE event in ecm_ipa_receive\n");
 		return;
 	}
@@ -663,9 +672,9 @@ static void ecm_ipa_packet_receive_notify
 	skb->dev = ecm_ipa_ctx->net;
 	skb->protocol = eth_type_trans(skb, ecm_ipa_ctx->net);
 
-	result = netif_rx(skb);
+	result = ecm_ipa_ctx->netif_rx_function(skb);
 	if (unlikely(result))
-		ECM_IPA_ERROR("fail on netif_rx\n");
+		ECM_IPA_ERROR("fail on netif_rx_function\n");
 	ecm_ipa_ctx->net->stats.rx_packets++;
 	ecm_ipa_ctx->net->stats.rx_bytes += packet_len;
 }
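
The receive path now resolves its network-stack entry point once at init instead of branching per packet: with LAN RX NAPI the callback runs from the NAPI poll loop in softirq context, where netif_receive_skb() is appropriate, while the non-NAPI path delivers from process context and needs netif_rx_ni(). Both share the int (*)(struct sk_buff *) signature, which is what makes the stored pointer work; a minimal sketch:

	#include <linux/netdevice.h>

	typedef int (*rx_fn_t)(struct sk_buff *skb);

	/* Sketch: choose the RX entry point once, based on the NAPI mode. */
	static rx_fn_t demo_pick_rx_fn(void)
	{
		return ipa_get_lan_rx_napi() ? netif_receive_skb : netif_rx_ni;
	}
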
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index ab096de..192bd26 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -2641,7 +2641,7 @@ static void ipa3_usb_exit(void)
  * @note Cannot be called from atomic context
  *
  */
-int ipa3_get_usb_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats)
+int ipa3_get_usb_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
 {
 	int i;
 
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c b/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c
index 90082fc..0c8bb24 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c
@@ -760,3 +760,15 @@ int ipa_wdi_get_stats(struct IpaHwStatsWDIInfoData_t *stats)
 	return ipa_get_wdi_stats(stats);
 }
 EXPORT_SYMBOL(ipa_wdi_get_stats);
+
+int ipa_wdi_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+	return ipa_uc_bw_monitor(info);
+}
+EXPORT_SYMBOL(ipa_wdi_bw_monitor);
+
+int ipa_wdi_sw_stats(struct ipa_wdi_tx_info *info)
+{
+	return ipa_set_wlan_tx_info(info);
+}
+EXPORT_SYMBOL(ipa_wdi_sw_stats);
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_wigig.c b/drivers/platform/msm/ipa/ipa_clients/ipa_wigig.c
index e23e5ff..500ab5b 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_wigig.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_wigig.c
@@ -173,6 +173,10 @@ int ipa_wigig_init(struct ipa_wigig_init_in_params *in,
 
 	out->is_uc_ready = inout.is_uC_ready;
 
+	out->lan_rx_napi_enable = ipa_get_lan_rx_napi();
+	IPA_WIGIG_DBG("LAN RX NAPI enabled = %s\n",
+				out->lan_rx_napi_enable ? "True" : "False");
+
 	IPA_WIGIG_DBG("exit\n");
 
 	return 0;
diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
index 3c2f3acf..5ef6ee4 100644
--- a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
@@ -183,6 +183,7 @@ enum rndis_ipa_operation {
  * @state_lock: used to protect the state variable.
  * @pm_hdl: handle for IPA PM framework
  * @is_vlan_mode: should driver work in vlan mode?
+ * @netif_rx_function: holds the correct network stack API, needed for NAPI
  */
 struct rndis_ipa_dev {
 	struct net_device *net;
@@ -212,6 +213,7 @@ struct rndis_ipa_dev {
 	spinlock_t state_lock; /* Spinlock for the state variable.*/
 	u32 pm_hdl;
 	bool is_vlan_mode;
+	int (*netif_rx_function)(struct sk_buff *skb);
 };
 
 /**
@@ -623,6 +625,14 @@ int rndis_ipa_init(struct ipa_usb_init_params *params)
 		("netdev:%s registration succeeded, index=%d\n",
 		net->name, net->ifindex);
 
+	if (ipa_get_lan_rx_napi()) {
+		rndis_ipa_ctx->netif_rx_function = netif_receive_skb;
+		RNDIS_IPA_DEBUG("LAN RX NAPI enabled = True");
+	} else {
+		rndis_ipa_ctx->netif_rx_function = netif_rx_ni;
+		RNDIS_IPA_DEBUG("LAN RX NAPI enabled = False");
+	}
+
 	rndis_ipa = rndis_ipa_ctx;
 	params->ipa_rx_notify = rndis_ipa_packet_receive_notify;
 	params->ipa_tx_notify = rndis_ipa_tx_complete_notify;
@@ -1139,9 +1149,9 @@ static void rndis_ipa_packet_receive_notify(
 	}
 
 	trace_rndis_netif_ni(skb->protocol);
-	result = netif_rx_ni(skb);
+	result = rndis_ipa_ctx->netif_rx_function(skb);
 	if (unlikely(result))
-		RNDIS_IPA_ERROR("fail on netif_rx_ni\n");
+		RNDIS_IPA_ERROR("fail on netif_rx_function\n");
 	rndis_ipa_ctx->net->stats.rx_packets++;
 	rndis_ipa_ctx->net->stats.rx_bytes += packet_len;
 }
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 7bc80ac..0d9a2c7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -34,6 +34,7 @@
 #include <asm/cacheflush.h>
 #include <linux/soc/qcom/smem_state.h>
 #include <linux/of_irq.h>
+#include <linux/ctype.h>
 
 #ifdef CONFIG_ARM64
 
@@ -132,6 +133,7 @@ static int ipa3_ioctl_add_flt_rule_after_v2(unsigned long arg);
 static int ipa3_ioctl_mdfy_flt_rule_v2(unsigned long arg);
 static int ipa3_ioctl_fnr_counter_alloc(unsigned long arg);
 static int ipa3_ioctl_fnr_counter_query(unsigned long arg);
+static int ipa3_ioctl_fnr_counter_set(unsigned long arg);
 
 static struct ipa3_plat_drv_res ipa3_res = {0, };
 
@@ -1308,7 +1310,7 @@ static int ipa3_ioctl_mdfy_flt_rule_v2(unsigned long arg)
 			((struct ipa_ioc_mdfy_flt_rule_v2 *)
 			header)->rule_mdfy_size);
 	/* modify the rule pointer to the kernel pointer */
-	((struct ipa_ioc_add_flt_rule_after_v2 *)header)->rules =
+	((struct ipa_ioc_mdfy_flt_rule_v2 *)header)->rules =
 		(u64)kptr;
 	if (ipa3_mdfy_flt_rule_v2
 		((struct ipa_ioc_mdfy_flt_rule_v2 *)header)) {
@@ -1480,6 +1482,43 @@ static int ipa3_ioctl_fnr_counter_query(unsigned long arg)
 	return retval;
 }
 
+static int ipa3_ioctl_fnr_counter_set(unsigned long arg)
+{
+	u8 header[128] = { 0 };
+	uint8_t value;
+
+	if (copy_from_user(header, (const void __user *)arg,
+		sizeof(struct ipa_ioc_fnr_index_info))) {
+		IPAERR_RL("copy_from_user fails\n");
+		return -EFAULT;
+	}
+
+	value = ((struct ipa_ioc_fnr_index_info *)
+		header)->hw_counter_offset;
+	if (value <= 0 || value > IPA_MAX_FLT_RT_CNT_INDEX) {
+		IPAERR("hw_counter_offset failed: num %d\n",
+			value);
+		return -EPERM;
+	}
+
+	ipa3_ctx->fnr_info.hw_counter_offset = value;
+
+	value = ((struct ipa_ioc_fnr_index_info *)
+		header)->sw_counter_offset;
+	if (value <= 0 || value > IPA_MAX_FLT_RT_CNT_INDEX) {
+		IPAERR("sw_counter_offset failed: num %d\n",
+			value);
+		return -EPERM;
+	}
+	ipa3_ctx->fnr_info.sw_counter_offset = value;
+	/* reset when ipacm-cleanup */
+	ipa3_ctx->fnr_info.valid = true;
+	IPADBG("fnr_info hw=%d, hw=%d\n",
+		ipa3_ctx->fnr_info.hw_counter_offset,
+		ipa3_ctx->fnr_info.sw_counter_offset);
+	return 0;
+}
+
 static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	int retval = 0;
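
ipa3_ioctl_fnr_counter_set() above copies the user struct into an oversized byte buffer and re-casts it for every field; note also that both offsets are uint8_t, so the `value <= 0` half of the range check can only ever catch zero. A hedged sketch of the same validation with a typed stack copy (field names taken from the hunk above; zero is rejected to match the original check):

	#include <linux/uaccess.h>

	static int fnr_counter_set_sketch(unsigned long arg)
	{
		struct ipa_ioc_fnr_index_info info;

		if (copy_from_user(&info, (const void __user *)arg, sizeof(info)))
			return -EFAULT;

		/* Validate both offsets before committing either one. */
		if (info.hw_counter_offset == 0 ||
		    info.hw_counter_offset > IPA_MAX_FLT_RT_CNT_INDEX)
			return -EPERM;
		if (info.sw_counter_offset == 0 ||
		    info.sw_counter_offset > IPA_MAX_FLT_RT_CNT_INDEX)
			return -EPERM;

		ipa3_ctx->fnr_info.hw_counter_offset = info.hw_counter_offset;
		ipa3_ctx->fnr_info.sw_counter_offset = info.sw_counter_offset;
		ipa3_ctx->fnr_info.valid = true;
		return 0;
	}
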
@@ -2630,10 +2669,22 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		break;
 
 	case IPA_IOC_FNR_COUNTER_ALLOC:
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+			IPAERR("FNR stats not supported on IPA ver %d",
+				ipa3_ctx->ipa_hw_type);
+			retval = -EFAULT;
+			break;
+		}
 		retval = ipa3_ioctl_fnr_counter_alloc(arg);
 		break;
 
 	case IPA_IOC_FNR_COUNTER_DEALLOC:
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+			IPAERR("FNR stats not supported on IPA ver %d",
+				 ipa3_ctx->ipa_hw_type);
+			retval = -EFAULT;
+			break;
+		}
 		hdl = (int)arg;
 		if (hdl < 0) {
 			IPAERR("IPA_FNR_COUNTER_DEALLOC failed: hdl %d\n",
@@ -2645,9 +2696,25 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		break;
 
 	case IPA_IOC_FNR_COUNTER_QUERY:
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+			IPAERR("FNR stats not supported on IPA ver %d",
+				ipa3_ctx->ipa_hw_type);
+			retval = -EFAULT;
+			break;
+		}
 		retval = ipa3_ioctl_fnr_counter_query(arg);
 		break;
 
+	case IPA_IOC_SET_FNR_COUNTER_INFO:
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+			IPAERR("FNR stats not supported on IPA ver %d",
+				ipa3_ctx->ipa_hw_type);
+			retval = -EFAULT;
+			break;
+		}
+		retval = ipa3_ioctl_fnr_counter_set(arg);
+		break;
+
 	case IPA_IOC_WIGIG_FST_SWITCH:
 		IPADBG("Got IPA_IOCTL_WIGIG_FST_SWITCH\n");
 		if (copy_from_user(&fst_switch, (const void __user *)arg,
@@ -3033,11 +3100,14 @@ static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
 	int retval = 0;
 	int pipe_idx;
 	int flt_idx = 0;
-	int num_cmds = 0;
+	int num_cmds = 0, count = 0;
 	int index;
 	u32 lcl_addr_mem_part;
 	u32 lcl_hdr_sz;
 	struct ipa_mem_buffer mem;
+	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+	int coal_ep = IPA_EP_NOT_ALLOCATED;
 
 	IPADBG("Entry\n");
 
@@ -3055,13 +3125,13 @@ static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
 		return retval;
 	}
 
-	/* Up to filtering pipes we have filtering tables */
-	desc = kcalloc(ipa3_ctx->ep_flt_num, sizeof(struct ipa3_desc),
+	/* One filtering table per filtering pipe, plus 1 desc for coal close */
+	desc = kcalloc(ipa3_ctx->ep_flt_num + 1, sizeof(struct ipa3_desc),
 		GFP_KERNEL);
 	if (!desc)
 		return -ENOMEM;
 
-	cmd_pyld = kcalloc(ipa3_ctx->ep_flt_num,
+	cmd_pyld = kcalloc(ipa3_ctx->ep_flt_num + 1,
 		sizeof(struct ipahal_imm_cmd_pyld *), GFP_KERNEL);
 	if (!cmd_pyld) {
 		retval = -ENOMEM;
@@ -3093,6 +3163,28 @@ static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
 		goto free_cmd_pyld;
 	}
 
+	coal_ep = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (coal_ep != IPA_EP_NOT_ALLOCATED) {
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(coal_ep, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld[num_cmds]) {
+			IPAERR("failed to construct coal close IC\n");
+			retval = -ENOMEM;
+			goto free_empty_img;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmds], cmd_pyld[num_cmds]);
+		++num_cmds;
+	}
+
 	for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes; pipe_idx++) {
 		if (!ipa_is_ep_support_flt(pipe_idx))
 			continue;
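
The same "close the coalescing frame before HPS clear" immediate command is built by hand in several functions in this file (filter/route table cleanup, the hash flush, the exception-path setup and header init below). A hypothetical helper consolidating it; every identifier comes from the hunks themselves:

	/* Hypothetical consolidation: build the REGISTER_WRITE immediate
	 * command that force-closes the WAN coalescing frame.  Returns NULL
	 * when the coal pipe is not allocated or construction fails.
	 */
	static struct ipahal_imm_cmd_pyld *ipa3_coal_close_cmd(void)
	{
		struct ipahal_imm_cmd_register_write cmd = { 0 };
		struct ipahal_reg_valmask valmask;
		int ep = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);

		if (ep == IPA_EP_NOT_ALLOCATED)
			return NULL;

		cmd.skip_pipeline_clear = false;
		cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		cmd.offset = ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE);
		ipahal_get_aggr_force_close_valmask(ep, &valmask);
		cmd.value = valmask.val;
		cmd.value_mask = valmask.mask;

		return ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
						&cmd, false);
	}
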
@@ -3104,7 +3196,12 @@ static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
 		if (!ipa3_ctx->ep[pipe_idx].valid ||
 		    ipa3_ctx->ep[pipe_idx].skip_ep_cfg) {
 
-			if (num_cmds >= ipa3_ctx->ep_flt_num) {
+			/*
+			 * When the coal pipe is valid, one extra command
+			 * (the coalescing frame close) was queued, so allow
+			 * one command beyond ep_flt_num.
+			 */
+			count = (coal_ep != IPA_EP_NOT_ALLOCATED) ? 1 : 0;
+			if (num_cmds >= (ipa3_ctx->ep_flt_num + count)) {
 				IPAERR("number of commands is out of range\n");
 				retval = -ENOBUFS;
 				goto free_empty_img;
@@ -3158,13 +3255,17 @@ static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
 {
 	struct ipa3_desc *desc;
 	struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
-	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+	struct ipahal_imm_cmd_pyld **cmd_pyld;
 	int retval = 0;
+	int num_cmds = 0;
 	u32 modem_rt_index_lo;
 	u32 modem_rt_index_hi;
 	u32 lcl_addr_mem_part;
 	u32 lcl_hdr_sz;
 	struct ipa_mem_buffer mem;
+	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+	int i;
 
 	IPADBG("Entry\n");
 
@@ -3212,12 +3313,40 @@ static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
 		return -ENOMEM;
 	}
 
-	desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
+	desc = kcalloc(2, sizeof(struct ipa3_desc), GFP_KERNEL);
 	if (!desc) {
-		IPAERR("failed to allocate memory\n");
+		retval = -ENOMEM;
 		goto free_empty_img;
 	}
 
+	cmd_pyld = kcalloc(2, sizeof(struct ipahal_imm_cmd_pyld *), GFP_KERNEL);
+	if (!cmd_pyld) {
+		retval = -ENOMEM;
+		goto free_desc;
+	}
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld[num_cmds]) {
+			IPAERR("failed to construct coal close IC\n");
+			retval = -ENOMEM;
+			goto free_cmd_pyld;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmds], cmd_pyld[num_cmds]);
+		++num_cmds;
+	}
+
 	cmd.is_read = false;
 	cmd.skip_pipeline_clear = false;
 	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
@@ -3226,23 +3355,27 @@ static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
 	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
 		lcl_addr_mem_part +
 		modem_rt_index_lo * ipahal_get_hw_tbl_hdr_width();
-	cmd_pyld = ipahal_construct_imm_cmd(
+	cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
 			IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
-	if (!cmd_pyld) {
+	if (!cmd_pyld[num_cmds]) {
 		IPAERR("failed to construct dma_shared_mem imm cmd\n");
 		retval = -ENOMEM;
-		goto free_desc;
+		goto free_cmd_pyld;
 	}
-	ipa3_init_imm_cmd_desc(desc, cmd_pyld);
+	ipa3_init_imm_cmd_desc(&desc[num_cmds], cmd_pyld[num_cmds]);
+	++num_cmds;
 
 	IPADBG("Sending 1 descriptor for rt tbl clearing\n");
-	retval = ipa3_send_cmd(1, desc);
+	retval = ipa3_send_cmd(num_cmds, desc);
 	if (retval) {
 		IPAERR("failed to send immediate command (err %d)\n", retval);
 		retval = -EFAULT;
 	}
 
-	ipahal_destroy_imm_cmd(cmd_pyld);
+free_cmd_pyld:
+	for (i = 0; i < num_cmds; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
+	kfree(cmd_pyld);
 free_desc:
 	kfree(desc);
 free_empty_img:
@@ -3253,11 +3386,14 @@ static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
 static int ipa3_q6_clean_q6_tables(void)
 {
 	struct ipa3_desc *desc;
-	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+	struct ipahal_imm_cmd_pyld **cmd_pyld;
 	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
 	int retval = 0;
+	int num_cmds = 0;
 	struct ipahal_reg_fltrt_hash_flush flush;
 	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+	int i;
 
 	IPADBG("Entry\n");
 
@@ -3303,10 +3439,38 @@ static int ipa3_q6_clean_q6_tables(void)
 	if (ipa3_ctx->ipa_fltrt_not_hashable)
 		return retval;
 	/* Flush rules cache */
-	desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
+	desc = kcalloc(2, sizeof(struct ipa3_desc), GFP_KERNEL);
 	if (!desc)
 		return -ENOMEM;
 
+	cmd_pyld = kcalloc(2, sizeof(struct ipahal_imm_cmd_pyld *), GFP_KERNEL);
+	if (!cmd_pyld) {
+		retval = -ENOMEM;
+		goto bail_desc;
+	}
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld[num_cmds]) {
+			IPAERR("failed to construct coal close IC\n");
+			retval = -ENOMEM;
+			goto free_cmd_pyld;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmds], cmd_pyld[num_cmds]);
+		++num_cmds;
+	}
+
 	flush.v4_flt = true;
 	flush.v4_rt = true;
 	flush.v6_flt = true;
@@ -3317,24 +3481,27 @@ static int ipa3_q6_clean_q6_tables(void)
 	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
 	reg_write_cmd.value = valmask.val;
 	reg_write_cmd.value_mask = valmask.mask;
-	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
-		&reg_write_cmd, false);
-	if (!cmd_pyld) {
+	cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd, false);
+	if (!cmd_pyld[num_cmds]) {
 		IPAERR("fail construct register_write imm cmd\n");
 		retval = -EFAULT;
-		goto bail_desc;
+		goto free_cmd_pyld;
 	}
-	ipa3_init_imm_cmd_desc(desc, cmd_pyld);
+	ipa3_init_imm_cmd_desc(&desc[num_cmds], cmd_pyld[num_cmds]);
+	++num_cmds;
 
 	IPADBG("Sending 1 descriptor for tbls flush\n");
-	retval = ipa3_send_cmd(1, desc);
+	retval = ipa3_send_cmd(num_cmds, desc);
 	if (retval) {
 		IPAERR("failed to send immediate command (err %d)\n", retval);
 		retval = -EFAULT;
 	}
 
-	ipahal_destroy_imm_cmd(cmd_pyld);
-
+free_cmd_pyld:
+	for (i = 0; i < num_cmds; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
+	kfree(cmd_pyld);
 bail_desc:
 	kfree(desc);
 	IPADBG("Done - retval = %d\n", retval);
@@ -3351,12 +3518,39 @@ static int ipa3_q6_set_ex_path_to_apps(void)
 	struct ipahal_imm_cmd_register_write reg_write;
 	struct ipahal_imm_cmd_pyld *cmd_pyld;
 	int retval;
+	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+	int i;
 
-	desc = kcalloc(ipa3_ctx->ipa_num_pipes, sizeof(struct ipa3_desc),
+	desc = kcalloc(ipa3_ctx->ipa_num_pipes + 1, sizeof(struct ipa3_desc),
 			GFP_KERNEL);
 	if (!desc)
 		return -ENOMEM;
 
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld) {
+			IPAERR("failed to construct coal close IC\n");
+			ipa_assert();
+			return -ENOMEM;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_descs], cmd_pyld);
+		desc[num_descs].callback = ipa3_destroy_imm;
+		desc[num_descs].user1 = cmd_pyld;
+		++num_descs;
+	}
+
 	/* Set the exception path to AP */
 	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
 		ep_idx = ipa3_get_ep_mapping(client_idx);
@@ -3684,11 +3878,17 @@ int _ipa_init_sram_v3(void)
  */
 int _ipa_init_hdr_v3_0(void)
 {
-	struct ipa3_desc desc;
+	struct ipa3_desc hdr_init_desc;
+	struct ipa3_desc dma_cmd_desc[2];
 	struct ipa_mem_buffer mem;
 	struct ipahal_imm_cmd_hdr_init_local cmd = {0};
-	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_pyld *hdr_init_cmd_payload;
+	struct ipahal_imm_cmd_pyld *cmd_pyld[2];
 	struct ipahal_imm_cmd_dma_shared_mem dma_cmd = { 0 };
+	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+	int num_cmds = 0;
+	int i;
 
 	mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
 	mem.base = dma_zalloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
@@ -3702,30 +3902,52 @@ int _ipa_init_hdr_v3_0(void)
 	cmd.size_hdr_table = mem.size;
 	cmd.hdr_addr = ipa3_ctx->smem_restricted_bytes +
 		IPA_MEM_PART(modem_hdr_ofst);
-	cmd_pyld = ipahal_construct_imm_cmd(
+	hdr_init_cmd_payload = ipahal_construct_imm_cmd(
 		IPA_IMM_CMD_HDR_INIT_LOCAL, &cmd, false);
-	if (!cmd_pyld) {
+	if (!hdr_init_cmd_payload) {
 		IPAERR("fail to construct hdr_init_local imm cmd\n");
 		dma_free_coherent(ipa3_ctx->pdev,
 			mem.size, mem.base,
 			mem.phys_base);
 		return -EFAULT;
 	}
-	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
+	ipa3_init_imm_cmd_desc(&hdr_init_desc, hdr_init_cmd_payload);
 	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
 
-	if (ipa3_send_cmd(1, &desc)) {
+	if (ipa3_send_cmd(1, &hdr_init_desc)) {
 		IPAERR("fail to send immediate command\n");
-		ipahal_destroy_imm_cmd(cmd_pyld);
+		ipahal_destroy_imm_cmd(hdr_init_cmd_payload);
 		dma_free_coherent(ipa3_ctx->pdev,
 			mem.size, mem.base,
 			mem.phys_base);
 		return -EFAULT;
 	}
 
-	ipahal_destroy_imm_cmd(cmd_pyld);
+	ipahal_destroy_imm_cmd(hdr_init_cmd_payload);
 	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
 
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld[num_cmds]) {
+			IPAERR("failed to construct coal close IC\n");
+			return -ENOMEM;
+		}
+		ipa3_init_imm_cmd_desc(&dma_cmd_desc[num_cmds],
+			cmd_pyld[num_cmds]);
+		++num_cmds;
+	}
+
 	mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) +
 		IPA_MEM_PART(apps_hdr_proc_ctx_size);
 	mem.base = dma_zalloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
@@ -3742,28 +3964,31 @@ int _ipa_init_hdr_v3_0(void)
 	dma_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
 		IPA_MEM_PART(modem_hdr_proc_ctx_ofst);
 	dma_cmd.size = mem.size;
-	cmd_pyld = ipahal_construct_imm_cmd(
+	cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
 		IPA_IMM_CMD_DMA_SHARED_MEM, &dma_cmd, false);
-	if (!cmd_pyld) {
+	if (!cmd_pyld[num_cmds]) {
 		IPAERR("fail to construct dma_shared_mem imm\n");
 		dma_free_coherent(ipa3_ctx->pdev,
 			mem.size, mem.base,
 			mem.phys_base);
 		return -ENOMEM;
 	}
-	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
+	ipa3_init_imm_cmd_desc(&dma_cmd_desc[num_cmds], cmd_pyld[num_cmds]);
+	++num_cmds;
 	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
 
-	if (ipa3_send_cmd(1, &desc)) {
+	if (ipa3_send_cmd(num_cmds, dma_cmd_desc)) {
 		IPAERR("fail to send immediate command\n");
-		ipahal_destroy_imm_cmd(cmd_pyld);
+		for (i = 0; i < num_cmds; i++)
+			ipahal_destroy_imm_cmd(cmd_pyld[i]);
 		dma_free_coherent(ipa3_ctx->pdev,
 			mem.size,
 			mem.base,
 			mem.phys_base);
 		return -EBUSY;
 	}
-	ipahal_destroy_imm_cmd(cmd_pyld);
+	for (i = 0; i < num_cmds; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
 
 	ipahal_write_reg(IPA_LOCAL_PKT_PROC_CNTXT_BASE, dma_cmd.local_addr);
 
@@ -4202,6 +4427,8 @@ static int ipa3_setup_apps_pipes(void)
 	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
 	sys_in.notify = ipa3_lan_rx_cb;
 	sys_in.priv = NULL;
+	if (ipa3_ctx->lan_rx_napi_enable)
+		sys_in.napi_obj = &ipa3_ctx->napi_lan_rx;
 	sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
 	sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
 	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
@@ -4796,6 +5023,18 @@ static void __ipa3_dec_client_disable_clks(void)
 	if (ret)
 		goto bail;
 
+	/* Send the force-close coalescing frame command in LPM mode before
+	 * taking the mutex lock; doing it after would race.
+	 */
+	if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) == 1 &&
+		!ipa3_ctx->tag_process_before_gating) {
+		ipa3_force_close_coal();
+		/* Sending the force-close command flips
+		 * tag_process_before_gating; set it back to false
+		 * to restore the original state.
+		 */
+		ipa3_ctx->tag_process_before_gating = false;
+	}
 	/* seems like this is the only client holding the clocks */
 	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
 	if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) == 1 &&
@@ -5729,7 +5968,8 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 	mutex_lock(&ipa3_ctx->lock);
 	ipa3_ctx->ipa_initialization_complete = true;
 	mutex_unlock(&ipa3_ctx->lock);
-
+	if (ipa3_ctx->lan_rx_napi_enable)
+		napi_enable(&ipa3_ctx->napi_lan_rx);
 	ipa3_trigger_ipa_ready_cbs();
 	complete_all(&ipa3_ctx->init_completion_obj);
 	pr_info("IPA driver initialization was successful.\n");
@@ -5902,6 +6142,7 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf,
 	unsigned long missing;
 
 	char dbg_buff[32] = { 0 };
+	int i = 0;
 
 	if (count >= sizeof(dbg_buff))
 		return -EFAULT;
@@ -5922,6 +6163,17 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf,
 	if (ipa3_is_ready())
 		return count;
 
+	/* Ignore empty ipa_config file */
+	for (i = 0; i < count; ++i) {
+		if (!isspace(dbg_buff[i]))
+			break;
+	}
+
+	if (i == count) {
+		IPADBG("Empty ipa_config file\n");
+		return count;
+	}
+
 	/* Check MHI configuration on MDM devices */
 	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) {
 
@@ -6110,6 +6362,15 @@ static bool ipa_is_mem_dump_allowed(void)
 	return (desc.ret[0] == 1);
 }
 
+static int ipa3_lan_poll(struct napi_struct *napi, int budget)
+{
+	int rcvd_pkts = 0;
+
+	rcvd_pkts = ipa3_lan_rx_poll(ipa3_ctx->clnt_hdl_data_in,
+							NAPI_WEIGHT);
+	return rcvd_pkts;
+}
+
 /**
  * ipa3_pre_init() - Initialize the IPA Driver.
  * This part contains all initialization which doesn't require IPA HW, such
@@ -6172,7 +6433,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 	for (i = 0; i < IPA_HW_PROTOCOL_MAX; i++) {
 		ipa3_ctx->gsi_info[i].protocol = i;
 		/* initialize all to be not started */
-		for (j = 0; j < MAX_CH_STATS_SUPPORTED; j++)
+		for (j = 0; j < IPA_MAX_CH_STATS_SUPPORTED; j++)
 			ipa3_ctx->gsi_info[i].ch_id_info[j].ch_id =
 				0xFF;
 	}
@@ -6192,6 +6453,10 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 	ipa3_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
 	ipa3_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
 	ipa3_ctx->ipa_wan_skb_page = resource_p->ipa_wan_skb_page;
+	ipa3_ctx->stats.page_recycle_stats[0].total_replenished = 0;
+	ipa3_ctx->stats.page_recycle_stats[0].tmp_alloc = 0;
+	ipa3_ctx->stats.page_recycle_stats[1].total_replenished = 0;
+	ipa3_ctx->stats.page_recycle_stats[1].tmp_alloc = 0;
 	ipa3_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
 	ipa3_ctx->tethered_flow_control = resource_p->tethered_flow_control;
 	ipa3_ctx->ee = resource_p->ee;
@@ -6212,6 +6477,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 		resource_p->secure_debug_check_action;
 	ipa3_ctx->do_ram_collection_on_crash =
 		resource_p->do_ram_collection_on_crash;
+	ipa3_ctx->lan_rx_napi_enable = resource_p->lan_rx_napi_enable;
 
 	if (ipa3_ctx->secure_debug_check_action == USE_SCM) {
 		if (ipa_is_mem_dump_allowed())
@@ -6620,6 +6886,14 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 	/* proxy vote for modem is added in ipa3_post_init() phase */
 	if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0)
 		ipa3_proxy_clk_unvote();
+
+	/* Create the dummy netdev for LAN RX NAPI */
+	if (ipa3_ctx->lan_rx_napi_enable) {
+		init_dummy_netdev(&ipa3_ctx->lan_ndev);
+		netif_napi_add(&ipa3_ctx->lan_ndev, &ipa3_ctx->napi_lan_rx,
+			ipa3_lan_poll, NAPI_WEIGHT);
+	}
+
 	return 0;
 fail_cdev_add:
 fail_gsi_pre_fw_load_init:
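
NAPI instances must hang off a struct net_device, and the LAN RX pipe has no real netdev, so init_dummy_netdev() creates an anchor that is never registered. The poll function drains up to the budget; returning less than the budget tells NAPI the ring is empty so it can complete the poll and re-arm the interrupt path. A minimal self-contained sketch (using the generic NAPI_POLL_WEIGHT rather than the driver's local NAPI_WEIGHT, and a hypothetical drain helper):

	#include <linux/netdevice.h>

	static struct net_device demo_ndev;	/* anchor only, never registered */
	static struct napi_struct demo_napi;

	static int demo_rx_drain(int budget)
	{
		return 0;	/* stub: real code pulls packets off the pipe */
	}

	static int demo_poll(struct napi_struct *napi, int budget)
	{
		int done = demo_rx_drain(budget);

		if (done < budget)
			napi_complete(napi);	/* under budget: poll is done */
		return done;
	}

	static void demo_napi_setup(void)
	{
		init_dummy_netdev(&demo_ndev);
		netif_napi_add(&demo_ndev, &demo_napi, demo_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&demo_napi);
	}
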
@@ -6965,6 +7239,13 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 		ipa_drv_res->tethered_flow_control
 		? "True" : "False");
 
+	ipa_drv_res->lan_rx_napi_enable =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,lan-rx-napi");
+	IPADBG(": Enable LAN rx NAPI = %s\n",
+		ipa_drv_res->lan_rx_napi_enable
+		? "True" : "False");
+
 	/* Get IPA wrapper address */
 	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 			"ipa-base");
@@ -7877,6 +8158,11 @@ struct ipa3_context *ipa3_get_ctx(void)
 	return ipa3_ctx;
 }
 
+bool ipa3_get_lan_rx_napi(void)
+{
+	return ipa3_ctx->lan_rx_napi_enable;
+}
+
 static void ipa_gsi_notify_cb(struct gsi_per_notify *notify)
 {
 	/*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 6b56ab6..f773bfb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -1056,7 +1056,8 @@ static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id,
 	if (!stop_in_proc)
 		goto exit;
 
-	if (remove_delay && ep->ep_delay_set) {
+	/* Remove delay only if stopping the channel succeeded */
+	if (remove_delay && ep->ep_delay_set == true && !stop_in_proc) {
 		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 		ep_cfg_ctrl.ipa_ep_delay = false;
 		result = ipa3_cfg_ep_ctrl(clnt_hdl,
@@ -1137,7 +1138,7 @@ static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id,
 	if (should_force_clear)
 		ipa3_disable_force_clear(qmi_req_id);
 exit:
-	if (remove_delay && ep->ep_delay_set) {
+	if (remove_delay && ep->ep_delay_set == true && !stop_in_proc) {
 		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 		ep_cfg_ctrl.ipa_ep_delay = false;
 		result = ipa3_cfg_ep_ctrl(clnt_hdl,
@@ -1742,3 +1743,49 @@ int ipa3_clear_endpoint_delay(u32 clnt_hdl)
 
 	return 0;
 }
+
+/**
+ * ipa3_get_aqc_gsi_stats() - Query AQC gsi stats from uc
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_aqc_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
+{
+	int i;
+
+	if (!ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio) {
+		IPAERR("bad parms NULL aqc_gsi_stats_mmio\n");
+		return -EINVAL;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	for (i = 0; i < MAX_AQC_CHANNELS; i++) {
+		stats->ring[i].ringFull = ioread32(
+			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
+		stats->ring[i].ringEmpty = ioread32(
+			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
+		stats->ring[i].ringUsageHigh = ioread32(
+			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
+		stats->ring[i].ringUsageLow = ioread32(
+			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
+		stats->ring[i].RingUtilCount = ioread32(
+			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index adb0c99..0bc52f3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -83,6 +83,7 @@ const char *ipa3_hdr_l2_type_name[] = {
 	__stringify(IPA_HDR_L2_NONE),
 	__stringify(IPA_HDR_L2_ETHERNET_II),
 	__stringify(IPA_HDR_L2_802_3),
+	__stringify(IPA_HDR_L2_802_1Q),
 };
 
 const char *ipa3_hdr_proc_type_name[] = {
@@ -93,10 +94,11 @@ const char *ipa3_hdr_proc_type_name[] = {
 	__stringify(IPA_HDR_PROC_802_3_TO_802_3),
 	__stringify(IPA_HDR_PROC_L2TP_HEADER_ADD),
 	__stringify(IPA_HDR_PROC_L2TP_HEADER_REMOVE),
+	__stringify(IPA_HDR_PROC_ETHII_TO_ETHII_EX),
 };
 
 static struct dentry *dent;
-static char dbg_buff[IPA_MAX_MSG_LEN];
+static char dbg_buff[IPA_MAX_MSG_LEN + 1];
 static char *active_clients_buf;
 
 static s8 ep_reg_idx;
@@ -440,24 +442,24 @@ static int ipa3_attrib_dump(struct ipa_rule_attrib *attrib,
 	int i;
 
 	if (attrib->attrib_mask & IPA_FLT_IS_PURE_ACK)
-		pr_err("is_pure_ack ");
+		pr_cont("is_pure_ack ");
 
 	if (attrib->attrib_mask & IPA_FLT_TOS)
-		pr_err("tos:%d ", attrib->u.v4.tos);
+		pr_cont("tos:%d ", attrib->u.v4.tos);
 
 	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
-		pr_err("tos_value:%d ", attrib->tos_value);
-		pr_err("tos_mask:%d ", attrib->tos_mask);
+		pr_cont("tos_value:%d ", attrib->tos_value);
+		pr_cont("tos_mask:%d ", attrib->tos_mask);
 	}
 
 	if (attrib->attrib_mask & IPA_FLT_PROTOCOL)
-		pr_err("protocol:%d ", attrib->u.v4.protocol);
+		pr_cont("protocol:%d ", attrib->u.v4.protocol);
 
 	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
 		if (ip == IPA_IP_v4) {
 			addr[0] = htonl(attrib->u.v4.src_addr);
 			mask[0] = htonl(attrib->u.v4.src_addr_mask);
-			pr_err(
+			pr_cont(
 					"src_addr:%pI4 src_addr_mask:%pI4 ",
 					addr + 0, mask + 0);
 		} else if (ip == IPA_IP_v6) {
@@ -465,7 +467,7 @@ static int ipa3_attrib_dump(struct ipa_rule_attrib *attrib,
 				addr[i] = htonl(attrib->u.v6.src_addr[i]);
 				mask[i] = htonl(attrib->u.v6.src_addr_mask[i]);
 			}
-			pr_err(
+			pr_cont(
 					   "src_addr:%pI6 src_addr_mask:%pI6 ",
 					   addr + 0, mask + 0);
 		}
@@ -474,7 +476,7 @@ static int ipa3_attrib_dump(struct ipa_rule_attrib *attrib,
 		if (ip == IPA_IP_v4) {
 			addr[0] = htonl(attrib->u.v4.dst_addr);
 			mask[0] = htonl(attrib->u.v4.dst_addr_mask);
-			pr_err(
+			pr_cont(
 					   "dst_addr:%pI4 dst_addr_mask:%pI4 ",
 					   addr + 0, mask + 0);
 		} else if (ip == IPA_IP_v6) {
@@ -482,81 +484,84 @@ static int ipa3_attrib_dump(struct ipa_rule_attrib *attrib,
 				addr[i] = htonl(attrib->u.v6.dst_addr[i]);
 				mask[i] = htonl(attrib->u.v6.dst_addr_mask[i]);
 			}
-			pr_err(
+			pr_cont(
 					   "dst_addr:%pI6 dst_addr_mask:%pI6 ",
 					   addr + 0, mask + 0);
 		}
 	}
 	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
-		pr_err("src_port_range:%u %u ",
+		pr_cont("src_port_range:%u %u ",
 				   attrib->src_port_lo,
 			     attrib->src_port_hi);
 	}
 	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
-		pr_err("dst_port_range:%u %u ",
+		pr_cont("dst_port_range:%u %u ",
 				   attrib->dst_port_lo,
 			     attrib->dst_port_hi);
 	}
 	if (attrib->attrib_mask & IPA_FLT_TYPE)
-		pr_err("type:%d ", attrib->type);
+		pr_cont("type:%d ", attrib->type);
 
 	if (attrib->attrib_mask & IPA_FLT_CODE)
-		pr_err("code:%d ", attrib->code);
+		pr_cont("code:%d ", attrib->code);
 
 	if (attrib->attrib_mask & IPA_FLT_SPI)
-		pr_err("spi:%x ", attrib->spi);
+		pr_cont("spi:%x ", attrib->spi);
 
 	if (attrib->attrib_mask & IPA_FLT_SRC_PORT)
-		pr_err("src_port:%u ", attrib->src_port);
+		pr_cont("src_port:%u ", attrib->src_port);
 
 	if (attrib->attrib_mask & IPA_FLT_DST_PORT)
-		pr_err("dst_port:%u ", attrib->dst_port);
+		pr_cont("dst_port:%u ", attrib->dst_port);
 
 	if (attrib->attrib_mask & IPA_FLT_TC)
-		pr_err("tc:%d ", attrib->u.v6.tc);
+		pr_cont("tc:%d ", attrib->u.v6.tc);
 
 	if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL)
-		pr_err("flow_label:%x ", attrib->u.v6.flow_label);
+		pr_cont("flow_label:%x ", attrib->u.v6.flow_label);
 
 	if (attrib->attrib_mask & IPA_FLT_NEXT_HDR)
-		pr_err("next_hdr:%d ", attrib->u.v6.next_hdr);
+		pr_cont("next_hdr:%d ", attrib->u.v6.next_hdr);
 
 	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
-		pr_err(
+		pr_cont(
 				   "metadata:%x metadata_mask:%x ",
 				   attrib->meta_data, attrib->meta_data_mask);
 	}
 
 	if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
-		pr_err("frg ");
+		pr_cont("frg ");
 
 	if ((attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) ||
 		(attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3)) {
-		pr_err("src_mac_addr:%pM ", attrib->src_mac_addr);
+		pr_cont("src_mac_addr:%pM ", attrib->src_mac_addr);
 	}
 
 	if ((attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) ||
 		(attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) ||
 		(attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP)) {
-		pr_err("dst_mac_addr:%pM ", attrib->dst_mac_addr);
+		pr_cont("dst_mac_addr:%pM ", attrib->dst_mac_addr);
 	}
 
 	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE)
-		pr_err("ether_type:%x ", attrib->ether_type);
+		pr_cont("ether_type:%x ", attrib->ether_type);
+
+	if (attrib->attrib_mask & IPA_FLT_VLAN_ID)
+		pr_cont("vlan_id:%x ", attrib->vlan_id);
 
 	if (attrib->attrib_mask & IPA_FLT_TCP_SYN)
-		pr_err("tcp syn ");
+		pr_cont("tcp syn ");
 
 	if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP)
-		pr_err("tcp syn l2tp ");
+		pr_cont("tcp syn l2tp ");
 
 	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE)
-		pr_err("l2tp inner ip type: %d ", attrib->type);
+		pr_cont("l2tp inner ip type: %d ", attrib->type);
 
 	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
 		addr[0] = htonl(attrib->u.v4.dst_addr);
 		mask[0] = htonl(attrib->u.v4.dst_addr_mask);
-		pr_err("dst_addr:%pI4 dst_addr_mask:%pI4 ", addr, mask);
+		pr_cont("dst_addr:%pI4 dst_addr_mask:%pI4 ", addr, mask);
 	}
 
 	pr_err("\n");
@@ -1189,6 +1194,26 @@ static ssize_t ipa3_read_odlstats(struct file *file, char __user *ubuf,
 	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
 }
 
+static ssize_t ipa3_read_page_recycle_stats(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int cnt = 0;
+
+	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"COAL : Total number of packets replenished =%llu\n"
+			"COAL : Number of tmp alloc packets  =%llu\n"
+			"DEF  : Total number of packets replenished =%llu\n"
+			"DEF  : Number of tmp alloc packets  =%llu\n",
+			ipa3_ctx->stats.page_recycle_stats[0].total_replenished,
+			ipa3_ctx->stats.page_recycle_stats[0].tmp_alloc,
+			ipa3_ctx->stats.page_recycle_stats[1].total_replenished,
+			ipa3_ctx->stats.page_recycle_stats[1].tmp_alloc);
+
+	cnt += nbytes;
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
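ipa3_read_page_recycle_stats() follows this file's standard debugfs read shape: format into the static dbg_buff with scnprintf(), then let simple_read_from_buffer() handle the user copy, *ppos bookkeeping and EOF. Reduced to a sketch (handler name illustrative, assuming the surrounding dbg_buff/IPA_MAX_MSG_LEN definitions):

	static ssize_t dbg_read_example(struct file *file, char __user *ubuf,
			size_t count, loff_t *ppos)
	{
		int cnt;

		/* scnprintf() bounds the write and returns bytes stored */
		cnt = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, "value=%d\n", 42);
		return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
	}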
 static ssize_t ipa3_read_wstats(struct file *file, char __user *ubuf,
 		size_t count, loff_t *ppos)
 {
@@ -1923,7 +1948,7 @@ static ssize_t ipa3_read_ipahal_regs(struct file *file, char __user *ubuf,
 static ssize_t ipa3_read_wdi_gsi_stats(struct file *file,
 		char __user *ubuf, size_t count, loff_t *ppos)
 {
-	struct ipa3_uc_dbg_ring_stats stats;
+	struct ipa_uc_dbg_ring_stats stats;
 	int nbytes;
 	int cnt = 0;
 
@@ -1971,7 +1996,7 @@ static ssize_t ipa3_read_wdi_gsi_stats(struct file *file,
 static ssize_t ipa3_read_wdi3_gsi_stats(struct file *file,
 		char __user *ubuf, size_t count, loff_t *ppos)
 {
-	struct ipa3_uc_dbg_ring_stats stats;
+	struct ipa_uc_dbg_ring_stats stats;
 	int nbytes;
 	int cnt = 0;
 
@@ -2036,6 +2061,7 @@ static ssize_t ipa3_read_11ad_gsi_stats(struct file *file,
 static ssize_t ipa3_read_aqc_gsi_stats(struct file *file,
 		char __user *ubuf, size_t count, loff_t *ppos)
 {
+	struct ipa_uc_dbg_ring_stats stats;
 	int nbytes;
 	int cnt = 0;
 
@@ -2045,7 +2071,36 @@ static ssize_t ipa3_read_aqc_gsi_stats(struct file *file,
 		cnt += nbytes;
 		goto done;
 	}
-	return 0;
+	if (!ipa3_get_aqc_gsi_stats(&stats)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"TX ringFull=%u\n"
+			"TX ringEmpty=%u\n"
+			"TX ringUsageHigh=%u\n"
+			"TX ringUsageLow=%u\n"
+			"TX RingUtilCount=%u\n",
+			stats.ring[1].ringFull,
+			stats.ring[1].ringEmpty,
+			stats.ring[1].ringUsageHigh,
+			stats.ring[1].ringUsageLow,
+			stats.ring[1].RingUtilCount);
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"RX ringFull=%u\n"
+			"RX ringEmpty=%u\n"
+			"RX ringUsageHigh=%u\n"
+			"RX ringUsageLow=%u\n"
+			"RX RingUtilCount=%u\n",
+			stats.ring[0].ringFull,
+			stats.ring[0].ringEmpty,
+			stats.ring[0].ringUsageHigh,
+			stats.ring[0].ringUsageLow,
+			stats.ring[0].RingUtilCount);
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"Fail to read AQC GSI stats\n");
+		cnt += nbytes;
+	}
 done:
 	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
 }
@@ -2053,7 +2108,7 @@ static ssize_t ipa3_read_aqc_gsi_stats(struct file *file,
 static ssize_t ipa3_read_mhip_gsi_stats(struct file *file,
 	char __user *ubuf, size_t count, loff_t *ppos)
 {
-	struct ipa3_uc_dbg_ring_stats stats;
+	struct ipa_uc_dbg_ring_stats stats;
 	int nbytes;
 	int cnt = 0;
 
@@ -2125,7 +2180,7 @@ static ssize_t ipa3_read_mhip_gsi_stats(struct file *file,
 static ssize_t ipa3_read_usb_gsi_stats(struct file *file,
 	char __user *ubuf, size_t count, loff_t *ppos)
 {
-	struct ipa3_uc_dbg_ring_stats stats;
+	struct ipa_uc_dbg_ring_stats stats;
 	int nbytes;
 	int cnt = 0;
 
@@ -2375,6 +2430,10 @@ static const struct ipa3_debugfs_file debugfs_files[] = {
 			.read = ipa3_read_odlstats,
 		}
 	}, {
+		"page_recycle_stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_page_recycle_stats,
+		}
+	}, {
 		"wdi", IPA_READ_ONLY_MODE, NULL, {
 			.read = ipa3_read_wdi,
 		}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index d0ee749..e342024 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -18,6 +18,8 @@
 #define IPA_WAN_AGGR_PKT_CNT 5
 #define IPA_WAN_NAPI_MAX_FRAMES (NAPI_WEIGHT / IPA_WAN_AGGR_PKT_CNT)
 #define IPA_WAN_PAGE_ORDER 3
+#define IPA_LAN_AGGR_PKT_CNT 5
+#define IPA_LAN_NAPI_MAX_FRAMES (NAPI_WEIGHT / IPA_LAN_AGGR_PKT_CNT)
 #define IPA_LAST_DESC_CNT 0xFFFF
 #define POLLING_INACTIVITY_RX 40
 #define POLLING_MIN_SLEEP_RX 1010
@@ -91,6 +93,7 @@ static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
 static void ipa3_replenish_rx_work_func(struct work_struct *work);
 static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys);
 static void ipa3_replenish_rx_page_cache(struct ipa3_sys_context *sys);
+static void ipa3_wq_page_repl(struct work_struct *work);
 static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys);
 static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(gfp_t flag,
 	bool is_tmp_alloc);
@@ -715,8 +718,10 @@ int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout)
 
 	completed = wait_for_completion_timeout(
 		&comp->comp, msecs_to_jiffies(timeout));
-	if (!completed)
+	if (!completed) {
 		IPADBG("timeout waiting for imm-cmd ACK\n");
+		result = -EBUSY;
+	}
 
 	if (atomic_dec_return(&comp->cnt) == 0)
 		kfree(comp);
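The -EBUSY change above leans on the wait_for_completion_timeout() contract: it returns 0 when the timeout elapsed and the number of jiffies remaining otherwise, so 0 is the only case that needs an error code. Isolated as a sketch (helper name illustrative):

	static int wait_ack_example(struct completion *done, u32 timeout_ms)
	{
		unsigned long left;

		left = wait_for_completion_timeout(done,
				msecs_to_jiffies(timeout_ms));
		/* 0 == timed out; otherwise jiffies left on the clock */
		return left ? 0 : -EBUSY;
	}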
@@ -768,6 +773,26 @@ static int ipa3_handle_rx_core(struct ipa3_sys_context *sys, bool process_all,
 }
 
 /**
+ * __ipa3_update_curr_poll_state() - update the polling state of the
+ *                                   paired default WAN/coalescing pipe.
+ * When RSC/RSB is enabled the two pipes share a common event ring, so
+ * their polling states must be kept in sync.
+ */
+void __ipa3_update_curr_poll_state(enum ipa_client_type client, int state)
+{
+	int ep_idx = IPA_EP_NOT_ALLOCATED;
+
+	if (client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+	if (client == IPA_CLIENT_APPS_WAN_CONS)
+		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+
+	if (ep_idx != IPA_EP_NOT_ALLOCATED && ipa3_ctx->ep[ep_idx].sys)
+		atomic_set(&ipa3_ctx->ep[ep_idx].sys->curr_polling_state,
+									state);
+}
+
+/**
  * ipa3_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
  */
 static int ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys)
@@ -775,6 +800,8 @@ static int ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys)
 	int ret;
 
 	atomic_set(&sys->curr_polling_state, 0);
+	__ipa3_update_curr_poll_state(sys->ep->client, 0);
+
 	ipa3_dec_release_wakelock();
 	ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
 		GSI_CHAN_MODE_CALLBACK);
@@ -783,8 +810,10 @@ static int ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys)
 		if (ret == -GSI_STATUS_PENDING_IRQ) {
 			ipa3_inc_acquire_wakelock();
 			atomic_set(&sys->curr_polling_state, 1);
+			__ipa3_update_curr_poll_state(sys->ep->client, 1);
 		} else {
-			IPAERR("Failed to switch to intr mode.\n");
+			IPAERR("Failed to switch to intr mode %d ch_id %d\n",
+			 sys->curr_polling_state, sys->ep->gsi_chan_hdl);
 		}
 	}
 
@@ -893,6 +922,11 @@ static void ipa_pm_sys_pipe_cb(void *p, enum ipa_pm_cb_event event)
 			usleep_range(SUSPEND_MIN_SLEEP_RX,
 				SUSPEND_MAX_SLEEP_RX);
 			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_ODL");
+		} else if (sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
+			IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_COAL");
+			usleep_range(SUSPEND_MIN_SLEEP_RX,
+				SUSPEND_MAX_SLEEP_RX);
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_COAL");
 		} else
 			IPAERR("Unexpected event %d\n for client %d\n",
 				event, sys->ep->client);
@@ -921,7 +955,7 @@ static void ipa_pm_sys_pipe_cb(void *p, enum ipa_pm_cb_event event)
 int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 {
 	struct ipa3_ep_context *ep;
-	int i, ipa_ep_idx, wan_handle;
+	int i, ipa_ep_idx, wan_handle, coal_ep_id;
 	int result = -EINVAL;
 	struct ipahal_reg_coal_qmap_cfg qmap_cfg;
 	struct ipahal_reg_coal_evict_lru evict_lru;
@@ -951,6 +985,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 		goto fail_gen;
 	}
 
+	coal_ep_id = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
 	/* save the input config parameters */
 	if (sys_in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
 		ep_cfg_copy = sys_in->ipa_ep_cfg;
@@ -1002,7 +1037,12 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 		ep->sys->db_timer.function = ipa3_ring_doorbell_timer_fn;
 
 		/* create IPA PM resources for handling polling mode */
-		if (IPA_CLIENT_IS_CONS(sys_in->client)) {
+		if (sys_in->client == IPA_CLIENT_APPS_WAN_CONS &&
+			coal_ep_id != IPA_EP_NOT_ALLOCATED &&
+			ipa3_ctx->ep[coal_ep_id].valid == 1) {
+			/* Reuse coalescing pipe PM handle for default pipe */
+			ep->sys->pm_hdl = ipa3_ctx->ep[coal_ep_id].sys->pm_hdl;
+		} else if (IPA_CLIENT_IS_CONS(sys_in->client)) {
 			pm_reg.name = ipa_clients_strings[sys_in->client];
 			pm_reg.callback = ipa_pm_sys_pipe_cb;
 			pm_reg.user_data = ep->sys;
@@ -1108,21 +1148,40 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 	}
 
 	if (ep->sys->repl_hdlr == ipa3_replenish_rx_page_recycle) {
-		ep->sys->repl = kzalloc(sizeof(*ep->sys->repl), GFP_KERNEL);
-		if (!ep->sys->repl) {
+		ep->sys->page_recycle_repl = kzalloc(
+			sizeof(*ep->sys->page_recycle_repl), GFP_KERNEL);
+		if (!ep->sys->page_recycle_repl) {
 			IPAERR("failed to alloc repl for client %d\n",
 					sys_in->client);
 			result = -ENOMEM;
 			goto fail_gen2;
 		}
-		atomic_set(&ep->sys->repl->pending, 0);
-		ep->sys->repl->capacity = (ep->sys->rx_pool_sz + 1) * 2;
+		atomic_set(&ep->sys->page_recycle_repl->pending, 0);
+		ep->sys->page_recycle_repl->capacity =
+				(ep->sys->rx_pool_sz + 1) * 2;
 
+		ep->sys->page_recycle_repl->cache =
+				kcalloc(ep->sys->page_recycle_repl->capacity,
+				sizeof(void *), GFP_KERNEL);
+		atomic_set(&ep->sys->page_recycle_repl->head_idx, 0);
+		atomic_set(&ep->sys->page_recycle_repl->tail_idx, 0);
+		ep->sys->repl = kzalloc(sizeof(*ep->sys->repl), GFP_KERNEL);
+		if (!ep->sys->repl) {
+			IPAERR("failed to alloc repl for client %d\n",
+				   sys_in->client);
+			result = -ENOMEM;
+			goto fail_page_recycle_repl;
+		}
+		ep->sys->repl->capacity = (ep->sys->rx_pool_sz + 1);
+
+		atomic_set(&ep->sys->repl->pending, 0);
 		ep->sys->repl->cache = kcalloc(ep->sys->repl->capacity,
 				sizeof(void *), GFP_KERNEL);
 		atomic_set(&ep->sys->repl->head_idx, 0);
 		atomic_set(&ep->sys->repl->tail_idx, 0);
+
 		ipa3_replenish_rx_page_cache(ep->sys);
+		ipa3_wq_page_repl(&ep->sys->repl_work);
 	}
 
 	if (IPA_CLIENT_IS_CONS(sys_in->client)) {
@@ -1195,6 +1254,11 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 	ep->sys->repl_hdlr = ipa3_replenish_rx_cache;
 	ep->sys->repl->capacity = 0;
 	kfree(ep->sys->repl);
+fail_page_recycle_repl:
+	if (ep->sys->page_recycle_repl) {
+		ep->sys->page_recycle_repl->capacity = 0;
+		kfree(ep->sys->page_recycle_repl);
+	}
 fail_gen2:
 	ipa_pm_deregister(ep->sys->pm_hdl);
 fail_pm:
@@ -1801,13 +1865,12 @@ static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
 	flag |= __GFP_NOMEMALLOC;
 	rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
 		flag);
-	if (!rx_pkt)
+	if (unlikely(!rx_pkt))
 		return NULL;
 	rx_pkt->len = PAGE_SIZE << IPA_WAN_PAGE_ORDER;
 	rx_pkt->page_data.page = __dev_alloc_pages(flag,
 		IPA_WAN_PAGE_ORDER);
-
-	if (!rx_pkt->page_data.page)
+	if (unlikely(!rx_pkt->page_data.page))
 		goto fail_page_alloc;
 
 	rx_pkt->page_data.dma_addr = dma_map_page(ipa3_ctx->pdev,
@@ -1838,7 +1901,7 @@ static void ipa3_replenish_rx_page_cache(struct ipa3_sys_context *sys)
 	struct ipa3_rx_pkt_wrapper *rx_pkt;
 	u32 curr;
 
-	for (curr = 0; curr < sys->repl->capacity; curr++) {
+	for (curr = 0; curr < sys->page_recycle_repl->capacity; curr++) {
 		rx_pkt = ipa3_alloc_rx_pkt_page(GFP_KERNEL, false);
 		if (unlikely(!rx_pkt)) {
 			IPAERR("ipa3_alloc_rx_pkt_page fails\n");
@@ -1846,13 +1909,75 @@ static void ipa3_replenish_rx_page_cache(struct ipa3_sys_context *sys)
 			break;
 		}
 		rx_pkt->sys = sys;
-		sys->repl->cache[curr] = rx_pkt;
+		sys->page_recycle_repl->cache[curr] = rx_pkt;
 	}
 
 	return;
 
 }
 
+static void ipa3_wq_page_repl(struct work_struct *work)
+{
+	struct ipa3_sys_context *sys;
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	u32 next;
+	u32 curr;
+
+	sys = container_of(work, struct ipa3_sys_context, repl_work);
+	atomic_set(&sys->repl->pending, 0);
+	curr = atomic_read(&sys->repl->tail_idx);
+
+begin:
+	while (1) {
+		next = (curr + 1) % sys->repl->capacity;
+		if (unlikely(next == atomic_read(&sys->repl->head_idx)))
+			goto fail_kmem_cache_alloc;
+		rx_pkt = ipa3_alloc_rx_pkt_page(GFP_KERNEL, true);
+		if (unlikely(!rx_pkt)) {
+			IPAERR("ipa3_alloc_rx_pkt_page fails\n");
+			break;
+		}
+		rx_pkt->sys = sys;
+		sys->repl->cache[curr] = rx_pkt;
+		curr = next;
+		/* ensure write is done before setting tail index */
+		mb();
+		atomic_set(&sys->repl->tail_idx, next);
+	}
+
+	return;
+
+fail_kmem_cache_alloc:
+	if (atomic_read(&sys->repl->tail_idx) ==
+			atomic_read(&sys->repl->head_idx)) {
+		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS ||
+			sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_repl_rx_empty);
+		pr_err_ratelimited("%s sys=%pK wq_repl ring empty\n",
+				__func__, sys);
+		goto begin;
+	}
+}
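ipa3_wq_page_repl() is the producer half of a single-producer/single-consumer ring: reserve the next slot, bail out if it would collide with the consumer's head, write the entry, and only then publish the new tail behind a barrier. The core of that loop as a standalone sketch (not driver code):

	static int ring_push_example(void **cache, atomic_t *head,
			atomic_t *tail, u32 capacity, void *item)
	{
		u32 curr = atomic_read(tail);
		u32 next = (curr + 1) % capacity;

		if (next == atomic_read(head))
			return -ENOSPC;		/* ring full */
		cache[curr] = item;
		mb();				/* publish entry first */
		atomic_set(tail, next);		/* then advance tail */
		return 0;
	}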
+
+static inline void __trigger_repl_work(struct ipa3_sys_context *sys)
+{
+	int tail, head, avail;
+
+	if (atomic_read(&sys->repl->pending))
+		return;
+
+	tail = atomic_read(&sys->repl->tail_idx);
+	head = atomic_read(&sys->repl->head_idx);
+	avail = (tail - head) % sys->repl->capacity;
+
+	if (avail < sys->repl->capacity / 4) {
+		atomic_set(&sys->repl->pending, 1);
+		queue_work(sys->repl_wq, &sys->repl_work);
+	}
+}
+
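One subtlety in the refill trigger above: avail is computed as (tail - head) % sys->repl->capacity with int operands, and in C the result of % takes the sign of the dividend, so a wrapped ring yields a negative value (which here merely makes the < capacity/4 check fire spuriously and schedule an extra, harmless refill). A defensive form biases by the capacity first; shown as a sketch, not as the driver's code:

	static int ring_avail_example(int tail, int head, int capacity)
	{
		/* bias keeps the dividend non-negative across wrap */
		return (tail - head + capacity) % capacity;
	}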
 static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
 {
 	struct ipa3_rx_pkt_wrapper *rx_pkt;
@@ -1860,37 +1985,41 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
 	int rx_len_cached = 0;
 	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
 	u32 curr;
+	u32 curr_wq;
 	int idx = 0;
 	struct page *cur_page;
-	gfp_t flag;
+	u32 stats_i = 0;
 
 	/* start replenish only when buffers go lower than the threshold */
 	if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH)
 		return;
+	stats_i = (sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) ? 0 : 1;
 
-	flag = gfp_any();
 	spin_lock_bh(&sys->spinlock);
 	rx_len_cached = sys->len;
-	curr = atomic_read(&sys->repl->head_idx);
+	curr = atomic_read(&sys->page_recycle_repl->head_idx);
+	curr_wq = atomic_read(&sys->repl->head_idx);
 
 	while (rx_len_cached < sys->rx_pool_sz) {
-		cur_page = sys->repl->cache[curr]->page_data.page;
+		cur_page = sys->page_recycle_repl->cache[curr]->page_data.page;
 		/* Found an idle page that can be used */
 		if (page_ref_count(cur_page) == 1) {
 			page_ref_inc(cur_page);
-			rx_pkt = sys->repl->cache[curr];
-			curr = (++curr == sys->repl->capacity) ? 0 : curr;
+			rx_pkt = sys->page_recycle_repl->cache[curr];
+			curr = (++curr == sys->page_recycle_repl->capacity) ?
+								0 : curr;
 		} else {
 			/*
 			 * Could not find idle page at curr index.
 			 * Allocate a new one.
 			 */
-			rx_pkt = ipa3_alloc_rx_pkt_page(flag, true);
-			if (!rx_pkt && flag == GFP_ATOMIC)
+			if (curr_wq == atomic_read(&sys->repl->tail_idx))
 				break;
-			else if (unlikely(!rx_pkt))
-				goto fail_kmem_cache_alloc;
-			rx_pkt->sys = sys;
+			ipa3_ctx->stats.page_recycle_stats[stats_i].tmp_alloc++;
+			rx_pkt = sys->repl->cache[curr_wq];
+			curr_wq = (++curr_wq == sys->repl->capacity) ?
+								 0 : curr_wq;
+			atomic_set(&sys->repl->head_idx, curr_wq);
 		}
 
 		dma_sync_single_for_device(ipa3_ctx->pdev,
@@ -1905,6 +2034,7 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
 		gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt;
 		rx_len_cached++;
 		idx++;
+		ipa3_ctx->stats.page_recycle_stats[stats_i].total_replenished++;
 		/*
 		 * gsi_xfer_elem_buffer has a size of IPA_REPL_XFER_THRESH.
 		 * If this size is reached we need to queue the xfers.
@@ -1927,7 +2057,7 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
 	if (likely(ret == GSI_STATUS_SUCCESS)) {
 		/* ensure write is done before setting head index */
 		mb();
-		atomic_set(&sys->repl->head_idx, curr);
+		atomic_set(&sys->page_recycle_repl->head_idx, curr);
 		sys->len = rx_len_cached;
 	} else {
 		/* we don't expect this will happen */
@@ -1935,15 +2065,21 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
 		ipa_assert();
 	}
 	spin_unlock_bh(&sys->spinlock);
+	__trigger_repl_work(sys);
 
-	if (rx_len_cached < sys->rx_pool_sz) {
+	if (rx_len_cached <= IPA_DEFAULT_SYS_YELLOW_WM) {
+		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS ||
+			sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty);
+		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty);
+		else
+			WARN_ON(1);
 		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
-			msecs_to_jiffies(1));
+				msecs_to_jiffies(1));
 	}
+
 	return;
-fail_kmem_cache_alloc:
-	ipa_assert();
-	spin_unlock_bh(&sys->spinlock);
 }
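The recycling decision in ipa3_replenish_rx_page_recycle() hinges on the page refcount: a pool page whose count has dropped back to 1 is no longer referenced by the network stack and can be reposted to the GSI ring; otherwise a pre-allocated temporary buffer is pulled from the worker-filled ring instead. The predicate isolated as a sketch (helper name illustrative):

	static bool pool_page_is_idle(struct page *page)
	{
		/* count == 1 -> only the recycle pool holds it */
		return page_ref_count(page) == 1;
	}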
 
 static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys)
@@ -2307,23 +2443,6 @@ static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
 	}
 }
 
-static inline void __trigger_repl_work(struct ipa3_sys_context *sys)
-{
-	int tail, head, avail;
-
-	if (atomic_read(&sys->repl->pending))
-		return;
-
-	tail = atomic_read(&sys->repl->tail_idx);
-	head = atomic_read(&sys->repl->head_idx);
-	avail = (tail - head) % sys->repl->capacity;
-
-	if (avail < sys->repl->capacity / 4) {
-		atomic_set(&sys->repl->pending, 1);
-		queue_work(sys->repl_wq, &sys->repl_work);
-	}
-}
-
 static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
 {
 	struct ipa3_rx_pkt_wrapper *rx_pkt;
@@ -2449,16 +2568,18 @@ static void free_rx_page(void *chan_user_data, void *xfer_user_data)
 	struct ipa3_sys_context *sys = rx_pkt->sys;
 	int i;
 
-	for (i = 0; i < sys->repl->capacity; i++)
-		if (sys->repl->cache[i] == rx_pkt)
+	for (i = 0; i < sys->page_recycle_repl->capacity; i++)
+		if (sys->page_recycle_repl->cache[i] == rx_pkt)
 			break;
+	if (i < sys->page_recycle_repl->capacity) {
+		page_ref_dec(rx_pkt->page_data.page);
+		sys->page_recycle_repl->cache[i] = NULL;
+	}
 	dma_unmap_page(ipa3_ctx->pdev, rx_pkt->page_data.dma_addr,
 		rx_pkt->len, DMA_FROM_DEVICE);
 	__free_pages(rx_pkt->page_data.page,
 		IPA_WAN_PAGE_ORDER);
 	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
-	if (i < sys->repl->capacity)
-		sys->repl->cache[i] = NULL;
 }
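The reshuffle in free_rx_page() above is an ordering fix: the recycle reference is dropped and the pool slot cleared while rx_pkt is still valid, and the wrapper is freed last; the pre-patch code wrote to the cache slot after kmem_cache_free(). The intended order as an illustrative helper (the parameter list is an assumption, not driver API):

	static void free_recycled_rx_example(struct device *dev, void **slot,
			struct page *page, dma_addr_t daddr, size_t len,
			unsigned int order, struct kmem_cache *cachep,
			void *wrapper)
	{
		page_ref_dec(page);	/* drop the recycle pool's ref */
		*slot = NULL;		/* clear the slot while valid */
		dma_unmap_page(dev, daddr, len, DMA_FROM_DEVICE);
		__free_pages(page, order);
		kmem_cache_free(cachep, wrapper);	/* wrapper last */
	}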
 
 /**
@@ -2482,54 +2603,74 @@ static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
 	list_for_each_entry_safe(rx_pkt, r,
 				 &sys->rcycl_list, link) {
 		list_del(&rx_pkt->link);
-		dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
-			sys->rx_buff_sz, DMA_FROM_DEVICE);
+		if (rx_pkt->data.dma_addr)
+			dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+				sys->rx_buff_sz, DMA_FROM_DEVICE);
+		else
+			IPADBG("DMA address already freed\n");
 		sys->free_skb(rx_pkt->data.skb);
 		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
 	}
 	spin_unlock_bh(&sys->spinlock);
 
 	if (sys->repl) {
-		if (!ipa3_ctx->ipa_wan_skb_page) {
-			head = atomic_read(&sys->repl->head_idx);
-			tail = atomic_read(&sys->repl->tail_idx);
-			while (head != tail) {
-				rx_pkt = sys->repl->cache[head];
+		head = atomic_read(&sys->repl->head_idx);
+		tail = atomic_read(&sys->repl->tail_idx);
+		while (head != tail) {
+			rx_pkt = sys->repl->cache[head];
+			if (!ipa3_ctx->ipa_wan_skb_page) {
 				dma_unmap_single(ipa3_ctx->pdev,
 					rx_pkt->data.dma_addr,
 					sys->rx_buff_sz,
 					DMA_FROM_DEVICE);
 				sys->free_skb(rx_pkt->data.skb);
-				kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
-					rx_pkt);
-				head = (head + 1) % sys->repl->capacity;
+			} else {
+				dma_unmap_page(ipa3_ctx->pdev,
+					rx_pkt->page_data.dma_addr,
+					rx_pkt->len,
+					DMA_FROM_DEVICE);
+				__free_pages(rx_pkt->page_data.page,
+					IPA_WAN_PAGE_ORDER);
 			}
-		} else {
-			for (i = 0; i < sys->repl->capacity; i++) {
-				rx_pkt = sys->repl->cache[i];
-				if (rx_pkt) {
-					dma_unmap_page(ipa3_ctx->pdev,
-						rx_pkt->page_data.dma_addr,
-						rx_pkt->len,
-						DMA_FROM_DEVICE);
-					__free_pages(rx_pkt->page_data.page,
-						IPA_WAN_PAGE_ORDER);
-					kmem_cache_free(
-						ipa3_ctx->rx_pkt_wrapper_cache,
-						rx_pkt);
-				}
-			}
+			kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
+				rx_pkt);
+			head = (head + 1) % sys->repl->capacity;
 		}
+
 		kfree(sys->repl->cache);
 		kfree(sys->repl);
 	}
+	if (sys->page_recycle_repl) {
+		for (i = 0; i < sys->page_recycle_repl->capacity; i++) {
+			rx_pkt = sys->page_recycle_repl->cache[i];
+			if (rx_pkt) {
+				dma_unmap_page(ipa3_ctx->pdev,
+					rx_pkt->page_data.dma_addr,
+					rx_pkt->len,
+					DMA_FROM_DEVICE);
+				__free_pages(rx_pkt->page_data.page,
+					IPA_WAN_PAGE_ORDER);
+				kmem_cache_free(
+					ipa3_ctx->rx_pkt_wrapper_cache,
+					rx_pkt);
+			}
+		}
+		kfree(sys->page_recycle_repl->cache);
+		kfree(sys->page_recycle_repl);
+	}
 }
 
 static struct sk_buff *ipa3_skb_copy_for_client(struct sk_buff *skb, int len)
 {
 	struct sk_buff *skb2 = NULL;
 
-	skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM, GFP_KERNEL);
+	if (!ipa3_ctx->lan_rx_napi_enable)
+		skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM,
+					GFP_KERNEL);
+	else
+		skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM,
+					GFP_ATOMIC);
+
 	if (likely(skb2)) {
 		/* Set the data pointer */
 		skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM);
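The GFP_KERNEL/GFP_ATOMIC branches added throughout this family of copy helpers all encode one rule: with LAN NAPI enabled these copies run in softirq context, where sleeping allocations are forbidden. Condensed to a sketch (the in_napi flag stands in for ipa3_ctx->lan_rx_napi_enable):

	static struct sk_buff *skb_copy_ctx_example(struct sk_buff *skb,
			bool in_napi)
	{
		gfp_t flags = in_napi ? GFP_ATOMIC : GFP_KERNEL;

		/* GFP_ATOMIC never sleeps but may fail; callers check */
		return skb_copy(skb, flags);
	}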
@@ -2583,8 +2724,12 @@ static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
 				sys->len_pad);
 		if (sys->len_rem <= skb->len) {
 			if (sys->prev_skb) {
-				skb2 = skb_copy_expand(sys->prev_skb, 0,
-						sys->len_rem, GFP_KERNEL);
+				if (!ipa3_ctx->lan_rx_napi_enable)
+					skb2 = skb_copy_expand(sys->prev_skb,
+						0, sys->len_rem, GFP_KERNEL);
+				else
+					skb2 = skb_copy_expand(sys->prev_skb,
+						0, sys->len_rem, GFP_ATOMIC);
 				if (likely(skb2)) {
 					memcpy(skb_put(skb2, sys->len_rem),
 						skb->data, sys->len_rem);
@@ -2610,8 +2755,12 @@ static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
 			sys->len_pad = 0;
 		} else {
 			if (sys->prev_skb) {
-				skb2 = skb_copy_expand(sys->prev_skb, 0,
-					skb->len, GFP_KERNEL);
+				if (!ipa3_ctx->lan_rx_napi_enable)
+					skb2 = skb_copy_expand(sys->prev_skb, 0,
+						skb->len, GFP_KERNEL);
+				else
+					skb2 = skb_copy_expand(sys->prev_skb, 0,
+						skb->len, GFP_ATOMIC);
 				if (likely(skb2)) {
 					memcpy(skb_put(skb2, skb->len),
 						skb->data, skb->len);
@@ -2635,7 +2784,10 @@ static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
 		if (skb->len < pkt_status_sz) {
 			WARN_ON(sys->prev_skb != NULL);
 			IPADBG_LOW("status straddles buffer\n");
-			sys->prev_skb = skb_copy(skb, GFP_KERNEL);
+			if (!ipa3_ctx->lan_rx_napi_enable)
+				sys->prev_skb = skb_copy(skb, GFP_KERNEL);
+			else
+				sys->prev_skb = skb_copy(skb, GFP_ATOMIC);
 			sys->len_partial = skb->len;
 			goto out;
 		}
@@ -2652,25 +2804,26 @@ static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
 				sys->status_stat->curr = 0;
 		}
 
-		if ((status.status_opcode !=
-			IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) &&
-			(status.status_opcode !=
-			IPAHAL_PKT_STATUS_OPCODE_PACKET) &&
-			(status.status_opcode !=
-			IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET) &&
-			(status.status_opcode !=
-			IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) {
-			IPAERR("unsupported opcode(%d)\n",
+		switch (status.status_opcode) {
+		case IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET:
+		case IPAHAL_PKT_STATUS_OPCODE_PACKET:
+		case IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET:
+		case IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS:
+		case IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE:
+			break;
+		default:
+			IPAERR_RL("unsupported opcode(%d)\n",
 				status.status_opcode);
 			skb_pull(skb, pkt_status_sz);
 			continue;
 		}
+
 		IPA_STATS_EXCP_CNT(status.exception,
 				ipa3_ctx->stats.rx_excp_pkts);
 		if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
 			status.endp_src_idx >= ipa3_ctx->ipa_num_pipes) {
-			IPAERR("status fields invalid\n");
-			IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n",
+			IPAERR_RL("status fields invalid\n");
+			IPAERR_RL("STATUS opcode=%d src=%d dst=%d len=%d\n",
 				status.status_opcode, status.endp_src_idx,
 				status.endp_dest_idx, status.pkt_len);
 			WARN_ON(1);
@@ -2727,14 +2880,18 @@ static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
 				IPAHAL_PKT_STATUS_EXCEPTION_NONE) {
 				WARN_ON(sys->prev_skb != NULL);
 				IPADBG_LOW("Ins header in next buffer\n");
-				sys->prev_skb = skb_copy(skb, GFP_KERNEL);
+				if (!ipa3_ctx->lan_rx_napi_enable)
+					sys->prev_skb = skb_copy(skb,
+						GFP_KERNEL);
+				else
+					sys->prev_skb = skb_copy(skb,
+						GFP_ATOMIC);
 				sys->len_partial = skb->len;
 				goto out;
 			}
 
 			pad_len_byte = ((status.pkt_len + 3) & ~3) -
 					status.pkt_len;
-
 			len = status.pkt_len + pad_len_byte;
 			IPADBG_LOW("pad %d pkt_len %d len %d\n", pad_len_byte,
 					status.pkt_len, len);
@@ -3053,13 +3210,13 @@ static void ipa3_free_skb_rx(struct sk_buff *skb)
 void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
 {
 	struct sk_buff *rx_skb = (struct sk_buff *)data;
-	struct ipahal_pkt_status status;
+	struct ipahal_pkt_status_thin status;
 	struct ipa3_ep_context *ep;
 	unsigned int src_pipe;
 	u32 metadata;
 	u8 ucp;
 
-	ipahal_pkt_status_parse(rx_skb->data, &status);
+	ipahal_pkt_status_parse_thin(rx_skb->data, &status);
 	src_pipe = status.endp_src_idx;
 	metadata = status.metadata;
 	ucp = status.ucp;
@@ -3568,7 +3725,6 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 				ipa3_switch_to_intr_rx_work_func);
 			INIT_DELAYED_WORK(&sys->replenish_rx_work,
 					ipa3_replenish_rx_work_func);
-			INIT_WORK(&sys->repl_work, ipa3_wq_repl_rx);
 			atomic_set(&sys->curr_polling_state, 0);
 			sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
 				IPA_GENERIC_RX_BUFF_BASE_SZ);
@@ -3582,6 +3738,7 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 			in->ipa_ep_cfg.aggr.aggr_time_limit =
 				IPA_GENERIC_AGGR_TIME_LIMIT;
 			if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
+				INIT_WORK(&sys->repl_work, ipa3_wq_repl_rx);
 				sys->pyld_hdlr = ipa3_lan_rx_pyld_hdlr;
 				sys->repl_hdlr =
 					ipa3_replenish_rx_cache_recycle;
@@ -3597,6 +3754,8 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 				in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
 				if (ipa3_ctx->ipa_wan_skb_page
 					&& in->napi_obj) {
+					INIT_WORK(&sys->repl_work,
+							ipa3_wq_page_repl);
 					sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
 					sys->free_rx_wrapper =
 						ipa3_recycle_rx_page_wrapper;
@@ -3605,6 +3764,8 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 					sys->rx_pool_sz =
 						ipa3_ctx->wan_rx_ring_size;
 				} else {
+					INIT_WORK(&sys->repl_work,
+						ipa3_wq_repl_rx);
 					sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
 					sys->free_rx_wrapper =
 						ipa3_free_rx_wrapper;
@@ -3707,7 +3868,16 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 		}  else if (in->client == IPA_CLIENT_ODL_DPL_CONS) {
 			IPADBG("assigning policy to ODL client:%d\n",
 				in->client);
-			sys->ep->status.status_en = true;
+			/* Status enabling is needed for DPLv2 with
+			 * IPA versions < 4.5.
+			 * Don't enable ipa_status for APQ, since MDM IPA
+			 * has IPA >= 4.5 with DPLv3.
+			 */
+			if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ &&
+				ipa3_is_mhip_offload_enabled())
+				sys->ep->status.status_en = false;
+			else
+				sys->ep->status.status_en = true;
 			sys->policy = IPA_POLICY_INTR_POLL_MODE;
 			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
 			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
@@ -4157,6 +4327,8 @@ void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys)
 	bool clk_off;
 
 	atomic_set(&sys->curr_polling_state, 1);
+	__ipa3_update_curr_poll_state(sys->ep->client, 1);
+
 	ipa3_inc_acquire_wakelock();
 
 	/*
@@ -4317,6 +4489,12 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
 		}
 		ipa3_ctx->gsi_evt_comm_ring_rem -= (ring_size);
 		ep->gsi_evt_ring_hdl = ipa3_ctx->gsi_evt_comm_hdl;
+	} else if (in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
+		result = ipa_gsi_setup_event_ring(ep,
+				IPA_COMMON_EVENT_RING_SIZE, mem_flag);
+		if (result)
+			goto fail_setup_event_ring;
+
 	} else if (in->client == IPA_CLIENT_APPS_WAN_CONS &&
 			coale_ep_idx != IPA_EP_NOT_ALLOCATED &&
 			ipa3_ctx->ep[coale_ep_idx].valid == 1) {
@@ -4665,9 +4843,75 @@ static int ipa_poll_gsi_n_pkt(struct ipa3_sys_context *sys,
 	*actual_num = idx + poll_num;
 	return ret;
 }
+
+/**
+ * ipa3_lan_rx_poll() - Poll LAN rx packets from the IPA HW.
+ * This function is executed in softirq context.
+ *
+ * If the input budget is zero, the driver switches back to
+ * interrupt mode.
+ *
+ * Return: number of polled packets; 0 on error
+ */
+int ipa3_lan_rx_poll(u32 clnt_hdl, int weight)
+{
+	struct ipa3_ep_context *ep;
+	int ret;
+	int cnt = 0;
+	int remain_aggr_weight;
+	struct gsi_chan_xfer_notify notify;
+
+	if (unlikely(clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0)) {
+		IPAERR("bad param 0x%x\n", clnt_hdl);
+		return cnt;
+	}
+	remain_aggr_weight = weight / IPA_LAN_AGGR_PKT_CNT;
+	if (unlikely(remain_aggr_weight > IPA_LAN_NAPI_MAX_FRAMES)) {
+		IPAERR("NAPI weight is higher than expected\n");
+		IPAERR("expected %d got %d\n",
+			IPA_LAN_NAPI_MAX_FRAMES, remain_aggr_weight);
+		return cnt;
+	}
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+start_poll:
+	while (remain_aggr_weight > 0 &&
+			atomic_read(&ep->sys->curr_polling_state)) {
+		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		ret = ipa_poll_gsi_pkt(ep->sys, &notify);
+		if (ret)
+			break;
+
+		if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(ep->client))
+			ipa3_dma_memcpy_notify(ep->sys);
+		else if (IPA_CLIENT_IS_WLAN_CONS(ep->client))
+			ipa3_wlan_wq_rx_common(ep->sys, &notify);
+		else
+			ipa3_wq_rx_common(ep->sys, &notify);
+
+		remain_aggr_weight--;
+		if (ep->sys->len == 0) {
+			if (remain_aggr_weight == 0)
+				cnt--;
+			break;
+		}
+	}
+	cnt += weight - remain_aggr_weight * IPA_LAN_AGGR_PKT_CNT;
+	if (cnt < weight) {
+		napi_complete(ep->sys->napi_obj);
+		ret = ipa3_rx_switch_to_intr_mode(ep->sys);
+		if (ret == -GSI_STATUS_PENDING_IRQ &&
+				napi_reschedule(ep->sys->napi_obj))
+			goto start_poll;
+
+		ipa_pm_deferred_deactivate(ep->sys->pm_hdl);
+	}
+
+	return cnt;
+}
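ipa3_lan_rx_poll() follows the standard NAPI contract: consume up to the budget, and when less than the budget was used, complete NAPI and re-arm the interrupt; if an event already fired while re-arming, reschedule and keep polling. Skeleton form, with hypothetical helpers standing in for the GSI calls:

	static int napi_poll_skeleton(struct napi_struct *napi, int budget)
	{
		int done;

	poll:
		done = process_rx(napi, budget);	/* hypothetical */
		if (done < budget) {
			napi_complete(napi);
			/* mirrors ipa3_rx_switch_to_intr_mode() above */
			if (irq_fired_while_rearming(napi) &&	/* hypothetical */
					napi_reschedule(napi))
				goto poll;
		}
		return done;
	}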
 
 /**
- * ipa3_rx_poll() - Poll the rx packets from IPA HW. This
+ * ipa3_rx_poll() - Poll the WAN rx packets from IPA HW. This
  * function is executed in the softirq context
  *
  * if input budget is zero, the driver switches back to
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index 2b21b85..d8b7bf1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -64,7 +64,7 @@ static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip,
 	}
 
 	gen_params.ipt = ip;
-	if (entry->rt_tbl)
+	if (entry->rt_tbl && (!ipa3_check_idr_if_freed(entry->rt_tbl)))
 		gen_params.rt_tbl_idx = entry->rt_tbl->idx;
 	else
 		gen_params.rt_tbl_idx = entry->rule.rt_tbl_idx;
@@ -491,6 +491,7 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
 	u32 tbl_hdr_width;
 	struct ipa3_flt_tbl *tbl;
 	u16 entries;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
 
 	tbl_hdr_width = ipahal_get_hw_tbl_hdr_width();
 	memset(&alloc_params, 0, sizeof(alloc_params));
@@ -567,14 +568,38 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
 		goto fail_size_valid;
 	}
 
-	/* +3: 2 for bodies (hashable and non-hashable) and 1 for flushing */
-	entries = (ipa3_ctx->ep_flt_num) * 2 + 3;
+	/* +4: 2 for bodies (hashable and non-hashable), 1 for flushing and 1
+	 * for closing the coalescing frame
+	 */
+	entries = (ipa3_ctx->ep_flt_num) * 2 + 4;
 
 	if (ipa_flt_alloc_cmd_buffers(ip, entries, &desc, &cmd_pyld)) {
 		rc = -ENOMEM;
 		goto fail_size_valid;
 	}
 
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("failed to construct coal close IC\n");
+			rc = -ENOMEM;
+			goto fail_reg_write_construct;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
 	/*
 	 * SRAM memory not allocated to hash tables. Sending
 	 * command to hash tables(filer/routing) operation not supported.
@@ -593,14 +618,14 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
 					IPA_FILT_ROUT_HASH_FLUSH);
 		reg_write_cmd.value = valmask.val;
 		reg_write_cmd.value_mask = valmask.mask;
-		cmd_pyld[0] = ipahal_construct_imm_cmd(
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
 				IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd,
 							false);
-		if (!cmd_pyld[0]) {
+		if (!cmd_pyld[num_cmd]) {
 			IPAERR(
 			"fail construct register_write imm cmd: IP %d\n", ip);
 			rc = -EFAULT;
-			goto fail_reg_write_construct;
+			goto fail_imm_cmd_construct;
 		}
 		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
 		++num_cmd;
@@ -1815,7 +1840,9 @@ int ipa3_reset_flt(enum ipa_ip_type ip, bool user_only)
 					entry->ipacm_installed) {
 				list_del(&entry->link);
 				entry->tbl->rule_cnt--;
-				if (entry->rt_tbl)
+				if (entry->rt_tbl &&
+					(!ipa3_check_idr_if_freed(
+						entry->rt_tbl)))
 					entry->rt_tbl->ref_cnt--;
 				/* if rule id was allocated from idr, remove */
 				rule_id = entry->rule_id;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index d21b42b..d64a4bf 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -98,7 +98,8 @@ static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
 				entry->hdr->phys_base,
 				hdr_base_addr,
 				entry->hdr->offset_entry,
-				entry->l2tp_params,
+				&entry->l2tp_params,
+				&entry->generic_params,
 				ipa3_ctx->use_64_bit_dma_mask);
 		if (ret)
 			return ret;
@@ -120,6 +121,7 @@ static int ipa3_generate_hdr_proc_ctx_hw_tbl(u64 hdr_sys_addr,
 	struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
 {
 	u64 hdr_base_addr;
+	gfp_t flag = GFP_KERNEL;
 
 	mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4;
 
@@ -128,9 +130,14 @@ static int ipa3_generate_hdr_proc_ctx_hw_tbl(u64 hdr_sys_addr,
 
 	IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);
 
+alloc:
 	mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
-			&mem->phys_base, GFP_KERNEL);
+			&mem->phys_base, flag);
 	if (!mem->base) {
+		if (flag == GFP_KERNEL) {
+			flag = GFP_ATOMIC;
+			goto alloc;
+		}
 		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
 		return -ENOMEM;
 	}
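The alloc/goto pair above implements a one-shot fallback: try a sleeping GFP_KERNEL allocation first, and retry from the atomic reserves only if it fails, since GFP_ATOMIC cannot block but draws down emergency pools. The same pattern as a self-contained sketch (helper name illustrative):

	static void *dma_alloc_retry_example(struct device *dev, size_t size,
			dma_addr_t *handle)
	{
		void *buf = dma_alloc_coherent(dev, size, handle, GFP_KERNEL);

		if (!buf)	/* fall back to the atomic reserves once */
			buf = dma_alloc_coherent(dev, size, handle, GFP_ATOMIC);
		return buf;
	}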
@@ -153,7 +160,7 @@ static int ipa3_generate_hdr_proc_ctx_hw_tbl(u64 hdr_sys_addr,
  */
 int __ipa_commit_hdr_v3_0(void)
 {
-	struct ipa3_desc desc[2];
+	struct ipa3_desc desc[3];
 	struct ipa_mem_buffer hdr_mem;
 	struct ipa_mem_buffer ctx_mem;
 	struct ipa_mem_buffer aligned_ctx_mem;
@@ -163,12 +170,17 @@ int __ipa_commit_hdr_v3_0(void)
 	struct ipahal_imm_cmd_hdr_init_system hdr_init_cmd = {0};
 	struct ipahal_imm_cmd_pyld *hdr_cmd_pyld = NULL;
 	struct ipahal_imm_cmd_pyld *ctx_cmd_pyld = NULL;
+	struct ipahal_imm_cmd_pyld *coal_cmd_pyld = NULL;
 	int rc = -EFAULT;
+	int i;
+	int num_cmd = 0;
 	u32 proc_ctx_size;
 	u32 proc_ctx_ofst;
 	u32 proc_ctx_size_ddr;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+	struct ipahal_reg_valmask valmask;
 
-	memset(desc, 0, 2 * sizeof(struct ipa3_desc));
+	memset(desc, 0, 3 * sizeof(struct ipa3_desc));
 
 	if (ipa3_generate_hdr_hw_tbl(&hdr_mem)) {
 		IPAERR("fail to generate HDR HW TBL\n");
@@ -181,6 +193,27 @@ int __ipa_commit_hdr_v3_0(void)
 		goto end;
 	}
 
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		coal_cmd_pyld = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!coal_cmd_pyld) {
+			IPAERR("failed to construct coal close IC\n");
+			goto end;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], coal_cmd_pyld);
+		++num_cmd;
+	}
+
 	if (ipa3_ctx->hdr_tbl_lcl) {
 		if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) {
 			IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
@@ -219,7 +252,8 @@ int __ipa_commit_hdr_v3_0(void)
 			}
 		}
 	}
-	ipa3_init_imm_cmd_desc(&desc[0], hdr_cmd_pyld);
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], hdr_cmd_pyld);
+	++num_cmd;
 	IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);
 
 	proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size);
@@ -273,10 +307,11 @@ int __ipa_commit_hdr_v3_0(void)
 			}
 		}
 	}
-	ipa3_init_imm_cmd_desc(&desc[1], ctx_cmd_pyld);
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], ctx_cmd_pyld);
+	++num_cmd;
 	IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size);
 
-	if (ipa3_send_cmd(2, desc))
+	if (ipa3_send_cmd(num_cmd, desc))
 		IPAERR("fail to send immediate command\n");
 	else
 		rc = 0;
@@ -310,6 +345,9 @@ int __ipa_commit_hdr_v3_0(void)
 	}
 
 end:
+	if (coal_cmd_pyld)
+		ipahal_destroy_imm_cmd(coal_cmd_pyld);
+
 	if (ctx_cmd_pyld)
 		ipahal_destroy_imm_cmd(ctx_cmd_pyld);
 
@@ -363,24 +401,28 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
 	entry->type = proc_ctx->type;
 	entry->hdr = hdr_entry;
 	entry->l2tp_params = proc_ctx->l2tp_params;
+	entry->generic_params = proc_ctx->generic_params;
 	if (add_ref_hdr)
 		hdr_entry->ref_cnt++;
 	entry->cookie = IPA_PROC_HDR_COOKIE;
 	entry->ipacm_installed = user_only;
 
 	needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);
-
-	if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0]) {
-		bin = IPA_HDR_PROC_CTX_BIN0;
-	} else if (needed_len <=
-			ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
-		bin = IPA_HDR_PROC_CTX_BIN1;
-	} else {
+	if ((needed_len < 0) ||
+		((needed_len > ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0])
+			&&
+			(needed_len >
+			ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]))) {
 		IPAERR_RL("unexpected needed len %d\n", needed_len);
 		WARN_ON_RATELIMIT_IPA(1);
 		goto bad_len;
 	}
 
+	if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0])
+		bin = IPA_HDR_PROC_CTX_BIN0;
+	else
+		bin = IPA_HDR_PROC_CTX_BIN1;
+
 	mem_size = (ipa3_ctx->hdr_proc_ctx_tbl_lcl) ?
 		IPA_MEM_PART(apps_hdr_proc_ctx_size) :
 		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
@@ -869,7 +911,7 @@ int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs,
 	for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
 		if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i],
 				true, user_only)) {
-			IPAERR_RL("failed to add hdr pric ctx %d\n", i);
+			IPAERR_RL("failed to add hdr proc ctx %d\n", i);
 			proc_ctxs->proc_ctx[i].status = -1;
 		} else {
 			proc_ctxs->proc_ctx[i].status = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
index b9501b0..5e03e1c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
@@ -36,7 +36,7 @@ int ipa_hw_stats_init(void)
 		teth_stats_init->prod_mask = (
 			IPA_CLIENT_BIT_32(IPA_CLIENT_MHI_PRIME_TETH_PROD) |
 			IPA_CLIENT_BIT_32(IPA_CLIENT_USB_PROD));
-		if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
+		if (ipa3_ctx->ipa_wdi3_over_gsi)
 			teth_stats_init->prod_mask |=
 			IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_PROD);
 		else
@@ -57,7 +57,7 @@ int ipa_hw_stats_init(void)
 			teth_stats_init->dst_ep_mask[ep_index] =
 				IPA_CLIENT_BIT_32(IPA_CLIENT_USB_CONS);
 
-			if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
+			if (ipa3_ctx->ipa_wdi3_over_gsi)
 				teth_stats_init->dst_ep_mask[ep_index] |=
 				IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_CONS);
 			else
@@ -78,7 +78,7 @@ int ipa_hw_stats_init(void)
 			IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_PROD) |
 			IPA_CLIENT_BIT_32(IPA_CLIENT_USB_PROD));
 
-		if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
+		if (ipa3_ctx->ipa_wdi3_over_gsi)
 			teth_stats_init->prod_mask |=
 			IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_PROD);
 		else
@@ -88,6 +88,10 @@ int ipa_hw_stats_init(void)
 		teth_stats_init->prod_mask |=
 			IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG_PROD);
 
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+			teth_stats_init->prod_mask |=
+			IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_DL_NLO_DATA_PROD);
+
 		if (IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_PROD)) {
 			ep_index = ipa3_get_ep_mapping(IPA_CLIENT_Q6_WAN_PROD);
 			if (ep_index == -1) {
@@ -98,7 +102,36 @@ int ipa_hw_stats_init(void)
 			teth_stats_init->dst_ep_mask[ep_index] =
 			IPA_CLIENT_BIT_32(IPA_CLIENT_USB_CONS);
 
-			if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
+			if (ipa3_ctx->ipa_wdi3_over_gsi)
+				teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_CONS);
+			else
+				teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_CONS);
+
+			teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG1_CONS);
+			teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG2_CONS);
+			teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG3_CONS);
+			teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG4_CONS);
+		}
+
+		if (IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_DL_NLO_DATA_PROD) &&
+			(ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)) {
+			ep_index = ipa3_get_ep_mapping(
+					IPA_CLIENT_Q6_DL_NLO_DATA_PROD);
+			if (ep_index == -1) {
+				IPAERR("Invalid client.\n");
+				kfree(teth_stats_init);
+				return -EINVAL;
+			}
+			teth_stats_init->dst_ep_mask[ep_index] =
+				IPA_CLIENT_BIT_32(IPA_CLIENT_USB_CONS);
+
+			if (ipa3_ctx->ipa_wdi3_over_gsi)
 				teth_stats_init->dst_ep_mask[ep_index] |=
 				IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_CONS);
 			else
@@ -130,6 +163,10 @@ int ipa_hw_stats_init(void)
 					IPA_CLIENT_Q6_WAN_CONS) |
 				IPA_CLIENT_BIT_32(
 					IPA_CLIENT_MHI_PRIME_TETH_CONS));
+		else if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+			teth_stats_init->dst_ep_mask[ep_index] =
+				(IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS) |
+			IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_UL_NLO_DATA_CONS));
 		else
 			teth_stats_init->dst_ep_mask[ep_index] =
 				IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS);
@@ -148,6 +185,10 @@ int ipa_hw_stats_init(void)
 				(IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS) |
 				IPA_CLIENT_BIT_32(
 					IPA_CLIENT_MHI_PRIME_TETH_CONS));
+		else if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+			teth_stats_init->dst_ep_mask[ep_index] =
+				(IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS) |
+			IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_UL_NLO_DATA_CONS));
 		else
 			teth_stats_init->dst_ep_mask[ep_index] =
 				IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS);
@@ -166,6 +207,10 @@ int ipa_hw_stats_init(void)
 				(IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS) |
 				IPA_CLIENT_BIT_32(
 					IPA_CLIENT_MHI_PRIME_TETH_CONS));
+		else if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+			teth_stats_init->dst_ep_mask[ep_index] =
+				(IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS) |
+			IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_UL_NLO_DATA_CONS));
 		else
 			teth_stats_init->dst_ep_mask[ep_index] =
 				IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS);
@@ -185,6 +230,10 @@ int ipa_hw_stats_init(void)
 				IPA_CLIENT_Q6_WAN_CONS) |
 				IPA_CLIENT_BIT_32(
 					IPA_CLIENT_MHI_PRIME_TETH_CONS));
+		else if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+			teth_stats_init->dst_ep_mask[ep_index] =
+			(IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS) |
+			IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_UL_NLO_DATA_CONS));
 		else
 			teth_stats_init->dst_ep_mask[ep_index] =
 			IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS);
@@ -203,6 +252,25 @@ int ipa_hw_stats_init(void)
 	return ret;
 }
 
+static void ipa_close_coal_frame(struct ipahal_imm_cmd_pyld **coal_cmd_pyld)
+{
+	int i;
+	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+
+	i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	reg_write_coal_close.skip_pipeline_clear = false;
+	reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	reg_write_coal_close.offset = ipahal_get_reg_ofst(
+		IPA_AGGR_FORCE_CLOSE);
+	ipahal_get_aggr_force_close_valmask(i, &valmask);
+	reg_write_coal_close.value = valmask.val;
+	reg_write_coal_close.value_mask = valmask.mask;
+	*coal_cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_REGISTER_WRITE,
+		&reg_write_coal_close, false);
+}
+
 int ipa_init_quota_stats(u32 pipe_bitmask)
 {
 	struct ipahal_stats_init_pyld *pyld;
@@ -212,9 +280,11 @@ int ipa_init_quota_stats(u32 pipe_bitmask)
 	struct ipahal_imm_cmd_pyld *quota_base_pyld;
 	struct ipahal_imm_cmd_register_write quota_mask = {0};
 	struct ipahal_imm_cmd_pyld *quota_mask_pyld;
-	struct ipa3_desc desc[3] = { {0} };
+	struct ipahal_imm_cmd_pyld *coal_cmd_pyld = NULL;
+	struct ipa3_desc desc[4] = { {0} };
 	dma_addr_t dma_address;
 	int ret;
+	int num_cmd = 0;
 
 	if (!ipa3_ctx->hw_stats.enabled)
 		return 0;
@@ -248,6 +318,19 @@ int ipa_init_quota_stats(u32 pipe_bitmask)
 		goto destroy_init_pyld;
 	}
 
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+		IPA_EP_NOT_ALLOCATED) {
+		ipa_close_coal_frame(&coal_cmd_pyld);
+		if (!coal_cmd_pyld) {
+			IPAERR("failed to construct coal close IC\n");
+			ret = -ENOMEM;
+			goto unmap;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], coal_cmd_pyld);
+		++num_cmd;
+	}
+
 	/* setting the registers and init the stats pyld are done atomically */
 	quota_mask.skip_pipeline_clear = false;
 	quota_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
@@ -260,12 +343,13 @@ int ipa_init_quota_stats(u32 pipe_bitmask)
 	if (!quota_mask_pyld) {
 		IPAERR("failed to construct register_write imm cmd\n");
 		ret = -ENOMEM;
-		goto unmap;
+		goto destroy_coal_cmd;
 	}
-	desc[0].opcode = quota_mask_pyld->opcode;
-	desc[0].pyld = quota_mask_pyld->data;
-	desc[0].len = quota_mask_pyld->len;
-	desc[0].type = IPA_IMM_CMD_DESC;
+	desc[num_cmd].opcode = quota_mask_pyld->opcode;
+	desc[num_cmd].pyld = quota_mask_pyld->data;
+	desc[num_cmd].len = quota_mask_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	num_cmd++;
 
 	quota_base.skip_pipeline_clear = false;
 	quota_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
@@ -281,10 +365,11 @@ int ipa_init_quota_stats(u32 pipe_bitmask)
 		ret = -ENOMEM;
 		goto destroy_quota_mask;
 	}
-	desc[1].opcode = quota_base_pyld->opcode;
-	desc[1].pyld = quota_base_pyld->data;
-	desc[1].len = quota_base_pyld->len;
-	desc[1].type = IPA_IMM_CMD_DESC;
+	desc[num_cmd].opcode = quota_base_pyld->opcode;
+	desc[num_cmd].pyld = quota_base_pyld->data;
+	desc[num_cmd].len = quota_base_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	num_cmd++;
 
 	cmd.is_read = false;
 	cmd.skip_pipeline_clear = false;
@@ -300,12 +385,13 @@ int ipa_init_quota_stats(u32 pipe_bitmask)
 		ret = -ENOMEM;
 		goto destroy_quota_base;
 	}
-	desc[2].opcode = cmd_pyld->opcode;
-	desc[2].pyld = cmd_pyld->data;
-	desc[2].len = cmd_pyld->len;
-	desc[2].type = IPA_IMM_CMD_DESC;
+	desc[num_cmd].opcode = cmd_pyld->opcode;
+	desc[num_cmd].pyld = cmd_pyld->data;
+	desc[num_cmd].len = cmd_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	num_cmd++;
 
-	ret = ipa3_send_cmd(3, desc);
+	ret = ipa3_send_cmd(num_cmd, desc);
 	if (ret) {
 		IPAERR("failed to send immediate command (error %d)\n", ret);
 		goto destroy_imm;
@@ -319,6 +405,8 @@ int ipa_init_quota_stats(u32 pipe_bitmask)
 	ipahal_destroy_imm_cmd(quota_base_pyld);
 destroy_quota_mask:
 	ipahal_destroy_imm_cmd(quota_mask_pyld);
+destroy_coal_cmd:
+	if (coal_cmd_pyld)
+		ipahal_destroy_imm_cmd(coal_cmd_pyld);
 unmap:
 	dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
 destroy_init_pyld:
@@ -333,14 +421,18 @@ int ipa_get_quota_stats(struct ipa_quota_stats_all *out)
 	struct ipahal_stats_get_offset_quota get_offset = { { 0 } };
 	struct ipahal_stats_offset offset = { 0 };
 	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
-	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_pyld *cmd_pyld[2];
 	struct ipa_mem_buffer mem;
-	struct ipa3_desc desc = { 0 };
+	struct ipa3_desc desc[2];
 	struct ipahal_stats_quota_all *stats;
+	int num_cmd = 0;
 
 	if (!ipa3_ctx->hw_stats.enabled)
 		return 0;
 
+	memset(desc, 0, sizeof(desc));
+	memset(cmd_pyld, 0, sizeof(cmd_pyld));
+
 	get_offset.init = ipa3_ctx->hw_stats.quota.init;
 	ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_QUOTA, &get_offset,
 		&offset);
@@ -364,6 +456,19 @@ int ipa_get_quota_stats(struct ipa_quota_stats_all *out)
 		return ret;
 	}
 
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+		IPA_EP_NOT_ALLOCATED) {
+		ipa_close_coal_frame(&cmd_pyld[num_cmd]);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("failed to construct coal close IC\n");
+			ret = -ENOMEM;
+			goto free_dma_mem;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
 	cmd.is_read = true;
 	cmd.clear_after_read = true;
 	cmd.skip_pipeline_clear = false;
@@ -372,19 +477,17 @@ int ipa_get_quota_stats(struct ipa_quota_stats_all *out)
 	cmd.system_addr = mem.phys_base;
 	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
 		IPA_MEM_PART(stats_quota_ofst) + offset.offset;
-	cmd_pyld = ipahal_construct_imm_cmd(
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
 		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
-	if (!cmd_pyld) {
+	if (!cmd_pyld[num_cmd]) {
 		IPAERR("failed to construct dma_shared_mem imm cmd\n");
 		ret = -ENOMEM;
 		goto free_dma_mem;
 	}
-	desc.opcode = cmd_pyld->opcode;
-	desc.pyld = cmd_pyld->data;
-	desc.len = cmd_pyld->len;
-	desc.type = IPA_IMM_CMD_DESC;
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+	++num_cmd;
 
-	ret = ipa3_send_cmd(1, &desc);
+	ret = ipa3_send_cmd(num_cmd, desc);
 	if (ret) {
 		IPAERR("failed to send immediate command (error %d)\n", ret);
 		goto destroy_imm;
@@ -434,7 +537,8 @@ int ipa_get_quota_stats(struct ipa_quota_stats_all *out)
 free_stats:
 	kfree(stats);
 destroy_imm:
-	ipahal_destroy_imm_cmd(cmd_pyld);
+	for (i = 0; i < num_cmd; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
 free_dma_mem:
 	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
 	return ret;
@@ -497,10 +601,13 @@ int ipa_init_teth_stats(struct ipa_teth_stats_endpoints *in)
 	struct ipahal_imm_cmd_pyld *teth_base_pyld;
 	struct ipahal_imm_cmd_register_write teth_mask = { 0 };
 	struct ipahal_imm_cmd_pyld *teth_mask_pyld;
-	struct ipa3_desc desc[3] = { {0} };
+	struct ipahal_imm_cmd_pyld *coal_cmd_pyld = NULL;
+	struct ipa3_desc desc[4] = { {0} };
 	dma_addr_t dma_address;
 	int ret;
 	int i;
+	int num_cmd = 0;
 	if (!ipa3_ctx->hw_stats.enabled)
 		return 0;
@@ -556,6 +663,19 @@ int ipa_init_teth_stats(struct ipa_teth_stats_endpoints *in)
 		goto destroy_init_pyld;
 	}
 
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+		IPA_EP_NOT_ALLOCATED) {
+		ipa_close_coal_frame(&coal_cmd_pyld);
+		if (!coal_cmd_pyld) {
+			IPAERR("failed to construct coal close IC\n");
+			ret = -ENOMEM;
+			goto unmap;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], coal_cmd_pyld);
+		++num_cmd;
+	}
+
 	/* setting the registers and init the stats pyld are done atomically */
 	teth_mask.skip_pipeline_clear = false;
 	teth_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
@@ -568,12 +688,13 @@ int ipa_init_teth_stats(struct ipa_teth_stats_endpoints *in)
 	if (!teth_mask_pyld) {
 		IPAERR("failed to construct register_write imm cmd\n");
 		ret = -ENOMEM;
-		goto unmap;
+		goto destroy_coal_cmd;
 	}
-	desc[0].opcode = teth_mask_pyld->opcode;
-	desc[0].pyld = teth_mask_pyld->data;
-	desc[0].len = teth_mask_pyld->len;
-	desc[0].type = IPA_IMM_CMD_DESC;
+	desc[num_cmd].opcode = teth_mask_pyld->opcode;
+	desc[num_cmd].pyld = teth_mask_pyld->data;
+	desc[num_cmd].len = teth_mask_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
 
 	teth_base.skip_pipeline_clear = false;
 	teth_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
@@ -589,10 +710,11 @@ int ipa_init_teth_stats(struct ipa_teth_stats_endpoints *in)
 		ret = -ENOMEM;
 		goto destroy_teth_mask;
 	}
-	desc[1].opcode = teth_base_pyld->opcode;
-	desc[1].pyld = teth_base_pyld->data;
-	desc[1].len = teth_base_pyld->len;
-	desc[1].type = IPA_IMM_CMD_DESC;
+	desc[num_cmd].opcode = teth_base_pyld->opcode;
+	desc[num_cmd].pyld = teth_base_pyld->data;
+	desc[num_cmd].len = teth_base_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
 
 	cmd.is_read = false;
 	cmd.skip_pipeline_clear = false;
@@ -608,12 +730,13 @@ int ipa_init_teth_stats(struct ipa_teth_stats_endpoints *in)
 		ret = -ENOMEM;
 		goto destroy_teth_base;
 	}
-	desc[2].opcode = cmd_pyld->opcode;
-	desc[2].pyld = cmd_pyld->data;
-	desc[2].len = cmd_pyld->len;
-	desc[2].type = IPA_IMM_CMD_DESC;
+	desc[num_cmd].opcode = cmd_pyld->opcode;
+	desc[num_cmd].pyld = cmd_pyld->data;
+	desc[num_cmd].len = cmd_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
 
-	ret = ipa3_send_cmd(3, desc);
+	ret = ipa3_send_cmd(num_cmd, desc);
 	if (ret) {
 		IPAERR("failed to send immediate command (error %d)\n", ret);
 		goto destroy_imm;
@@ -627,6 +750,9 @@ int ipa_init_teth_stats(struct ipa_teth_stats_endpoints *in)
 	ipahal_destroy_imm_cmd(teth_base_pyld);
 destroy_teth_mask:
 	ipahal_destroy_imm_cmd(teth_mask_pyld);
+destroy_coal_cmd:
+	if (coal_cmd_pyld)
+		ipahal_destroy_imm_cmd(coal_cmd_pyld);
 unmap:
 	dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
 destroy_init_pyld:
@@ -641,9 +767,9 @@ int ipa_get_teth_stats(void)
 	struct ipahal_stats_get_offset_tethering get_offset = { { 0 } };
 	struct ipahal_stats_offset offset = {0};
 	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
-	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_pyld *cmd_pyld[2];
 	struct ipa_mem_buffer mem;
-	struct ipa3_desc desc = { 0 };
+	struct ipa3_desc desc[2];
 	struct ipahal_stats_tethering_all *stats_all;
 	struct ipa_hw_stats_teth *sw_stats = &ipa3_ctx->hw_stats.teth;
 	struct ipahal_stats_tethering *stats;
@@ -651,10 +777,14 @@ int ipa_get_teth_stats(void)
 	struct ipahal_stats_init_tethering *init =
 		(struct ipahal_stats_init_tethering *)
 			&ipa3_ctx->hw_stats.teth.init;
+	int num_cmd = 0;
 
 	if (!ipa3_ctx->hw_stats.enabled)
 		return 0;
 
+	memset(desc, 0, sizeof(desc));
+	memset(cmd_pyld, 0, sizeof(cmd_pyld));
+
 	get_offset.init = ipa3_ctx->hw_stats.teth.init;
 	ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_TETHERING, &get_offset,
 		&offset);
@@ -678,6 +808,19 @@ int ipa_get_teth_stats(void)
 		return ret;
 	}
 
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+		IPA_EP_NOT_ALLOCATED) {
+		ipa_close_coal_frame(&cmd_pyld[num_cmd]);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("failed to construct coal close IC\n");
+			ret = -ENOMEM;
+			goto free_dma_mem;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
 	cmd.is_read = true;
 	cmd.clear_after_read = true;
 	cmd.skip_pipeline_clear = false;
@@ -686,19 +829,17 @@ int ipa_get_teth_stats(void)
 	cmd.system_addr = mem.phys_base;
 	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
 		IPA_MEM_PART(stats_tethering_ofst) + offset.offset;
-	cmd_pyld = ipahal_construct_imm_cmd(
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
 		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
-	if (!cmd_pyld) {
+	if (!cmd_pyld[num_cmd]) {
 		IPAERR("failed to construct dma_shared_mem imm cmd\n");
 		ret = -ENOMEM;
-		goto free_dma_mem;
+		goto destroy_imm;
 	}
-	desc.opcode = cmd_pyld->opcode;
-	desc.pyld = cmd_pyld->data;
-	desc.len = cmd_pyld->len;
-	desc.type = IPA_IMM_CMD_DESC;
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+	++num_cmd;
 
-	ret = ipa3_send_cmd(1, &desc);
+	ret = ipa3_send_cmd(num_cmd, desc);
 	if (ret) {
 		IPAERR("failed to send immediate command (error %d)\n", ret);
 		goto destroy_imm;
@@ -788,7 +929,8 @@ int ipa_get_teth_stats(void)
 	kfree(stats_all);
 	stats = NULL;
 destroy_imm:
-	ipahal_destroy_imm_cmd(cmd_pyld);
+	for (i = 0; i < num_cmd; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
 free_dma_mem:
 	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
 	return ret;
@@ -914,9 +1056,11 @@ int ipa_init_flt_rt_stats(void)
 	struct ipahal_imm_cmd_pyld *rt_v4_base_pyld;
 	struct ipahal_imm_cmd_register_write rt_v6_base = {0};
 	struct ipahal_imm_cmd_pyld *rt_v6_base_pyld;
-	struct ipa3_desc desc[5] = { {0} };
+	struct ipahal_imm_cmd_pyld *coal_cmd_pyld = NULL;
+	struct ipa3_desc desc[6] = { {0} };
 	dma_addr_t dma_address;
 	int ret;
+	int num_cmd = 0;
 
 	if (!ipa3_ctx->hw_stats.enabled)
 		return 0;
@@ -948,6 +1092,19 @@ int ipa_init_flt_rt_stats(void)
 		goto destroy_init_pyld;
 	}
 
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+		IPA_EP_NOT_ALLOCATED) {
+		ipa_close_coal_frame(&coal_cmd_pyld);
+		if (!coal_cmd_pyld) {
+			IPAERR("failed to construct coal close IC\n");
+			ret = -ENOMEM;
+			goto unmap;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], coal_cmd_pyld);
+		++num_cmd;
+	}
+
 	stats_base_flt_v4 = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV4_BASE);
 	stats_base_flt_v6 = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV6_BASE);
 	stats_base_rt_v4 = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV4_BASE);
@@ -966,12 +1123,13 @@ int ipa_init_flt_rt_stats(void)
 	if (!flt_v4_base_pyld) {
 		IPAERR("failed to construct register_write imm cmd\n");
 		ret = -ENOMEM;
-		goto unmap;
+		goto destroy_coal_cmd;
 	}
-	desc[0].opcode = flt_v4_base_pyld->opcode;
-	desc[0].pyld = flt_v4_base_pyld->data;
-	desc[0].len = flt_v4_base_pyld->len;
-	desc[0].type = IPA_IMM_CMD_DESC;
+	desc[num_cmd].opcode = flt_v4_base_pyld->opcode;
+	desc[num_cmd].pyld = flt_v4_base_pyld->data;
+	desc[num_cmd].len = flt_v4_base_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
 
 	/* set IPA_STAT_FILTER_IPV6_BASE */
 	flt_v6_base.skip_pipeline_clear = false;
@@ -987,11 +1145,11 @@ int ipa_init_flt_rt_stats(void)
 		ret = -ENOMEM;
 		goto destroy_flt_v4_base;
 	}
-	desc[1].opcode = flt_v6_base_pyld->opcode;
-	desc[1].pyld = flt_v6_base_pyld->data;
-	desc[1].len = flt_v6_base_pyld->len;
-	desc[1].type = IPA_IMM_CMD_DESC;
-
+	desc[num_cmd].opcode = flt_v6_base_pyld->opcode;
+	desc[num_cmd].pyld = flt_v6_base_pyld->data;
+	desc[num_cmd].len = flt_v6_base_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
 
 	/* set IPA_STAT_ROUTER_IPV4_BASE */
 	rt_v4_base.skip_pipeline_clear = false;
@@ -1007,10 +1165,11 @@ int ipa_init_flt_rt_stats(void)
 		ret = -ENOMEM;
 		goto destroy_flt_v6_base;
 	}
-	desc[2].opcode = rt_v4_base_pyld->opcode;
-	desc[2].pyld = rt_v4_base_pyld->data;
-	desc[2].len = rt_v4_base_pyld->len;
-	desc[2].type = IPA_IMM_CMD_DESC;
+	desc[num_cmd].opcode = rt_v4_base_pyld->opcode;
+	desc[num_cmd].pyld = rt_v4_base_pyld->data;
+	desc[num_cmd].len = rt_v4_base_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
 
 	/* set IPA_STAT_ROUTER_IPV6_BASE */
 	rt_v6_base.skip_pipeline_clear = false;
@@ -1026,10 +1185,11 @@ int ipa_init_flt_rt_stats(void)
 		ret = -ENOMEM;
 		goto destroy_rt_v4_base;
 	}
-	desc[3].opcode = rt_v6_base_pyld->opcode;
-	desc[3].pyld = rt_v6_base_pyld->data;
-	desc[3].len = rt_v6_base_pyld->len;
-	desc[3].type = IPA_IMM_CMD_DESC;
+	desc[num_cmd].opcode = rt_v6_base_pyld->opcode;
+	desc[num_cmd].pyld = rt_v6_base_pyld->data;
+	desc[num_cmd].len = rt_v6_base_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
 
 	cmd.is_read = false;
 	cmd.skip_pipeline_clear = false;
@@ -1045,12 +1205,13 @@ int ipa_init_flt_rt_stats(void)
 		ret = -ENOMEM;
 		goto destroy_rt_v6_base;
 	}
-	desc[4].opcode = cmd_pyld->opcode;
-	desc[4].pyld = cmd_pyld->data;
-	desc[4].len = cmd_pyld->len;
-	desc[4].type = IPA_IMM_CMD_DESC;
+	desc[num_cmd].opcode = cmd_pyld->opcode;
+	desc[num_cmd].pyld = cmd_pyld->data;
+	desc[num_cmd].len = cmd_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
 
-	ret = ipa3_send_cmd(5, desc);
+	ret = ipa3_send_cmd(num_cmd, desc);
 	if (ret) {
 		IPAERR("failed to send immediate command (error %d)\n", ret);
 		goto destroy_imm;
@@ -1068,6 +1229,9 @@ int ipa_init_flt_rt_stats(void)
 	ipahal_destroy_imm_cmd(flt_v6_base_pyld);
 destroy_flt_v4_base:
 	ipahal_destroy_imm_cmd(flt_v4_base_pyld);
+destroy_coal_cmd:
+	if (coal_cmd_pyld)
+		ipahal_destroy_imm_cmd(coal_cmd_pyld);
 unmap:
 	dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
 destroy_init_pyld:
@@ -1083,9 +1247,14 @@ static int __ipa_get_flt_rt_stats(struct ipa_ioc_flt_rt_query *query)
 	struct ipahal_stats_get_offset_flt_rt_v4_5 *get_offset;
 	struct ipahal_stats_offset offset = { 0 };
 	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
-	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_pyld *cmd_pyld[2];
 	struct ipa_mem_buffer mem;
-	struct ipa3_desc desc = { 0 };
+	struct ipa3_desc desc[2];
+	int num_cmd = 0;
+	int i;
+
+	memset(desc, 0, sizeof(desc));
+	memset(cmd_pyld, 0, sizeof(cmd_pyld));
 
 	get_offset = kzalloc(sizeof(*get_offset), GFP_KERNEL);
 	if (!get_offset) {
@@ -1122,6 +1291,19 @@ static int __ipa_get_flt_rt_stats(struct ipa_ioc_flt_rt_query *query)
 		goto free_offset;
 	}
 
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+		IPA_EP_NOT_ALLOCATED) {
+		ipa_close_coal_frame(&cmd_pyld[num_cmd]);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("failed to construct coal close IC\n");
+			ret = -ENOMEM;
+			goto free_dma_mem;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
 	cmd.is_read = true;
 	cmd.clear_after_read = clear;
 	cmd.skip_pipeline_clear = false;
@@ -1130,19 +1312,17 @@ static int __ipa_get_flt_rt_stats(struct ipa_ioc_flt_rt_query *query)
 	cmd.system_addr = mem.phys_base;
 	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
 		smem_ofst + offset.offset;
-	cmd_pyld = ipahal_construct_imm_cmd(
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
 		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
-	if (!cmd_pyld) {
+	if (!cmd_pyld[num_cmd]) {
 		IPAERR("failed to construct dma_shared_mem imm cmd\n");
 		ret = -ENOMEM;
-		goto free_dma_mem;
+		goto destroy_imm;
 	}
-	desc.opcode = cmd_pyld->opcode;
-	desc.pyld = cmd_pyld->data;
-	desc.len = cmd_pyld->len;
-	desc.type = IPA_IMM_CMD_DESC;
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+	++num_cmd;
 
-	ret = ipa3_send_cmd(1, &desc);
+	ret = ipa3_send_cmd(num_cmd, desc);
 	if (ret) {
 		IPAERR("failed to send immediate command (error %d)\n", ret);
 		goto destroy_imm;
@@ -1157,7 +1337,8 @@ static int __ipa_get_flt_rt_stats(struct ipa_ioc_flt_rt_query *query)
 	ret = 0;
 
 destroy_imm:
-	ipahal_destroy_imm_cmd(cmd_pyld);
+	for (i = 0; i < num_cmd; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
 free_dma_mem:
 	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
 free_offset:
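
Note the changed failure target in this hunk: once the optional coal-close payload has been constructed, a later construction failure must jump to destroy_imm — whose loop frees cmd_pyld[0..num_cmd-1] — rather than straight to free_dma_mem, or the coal payload leaks. Zeroing cmd_pyld[] up front is what makes the loop safe. In sketch form:

	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
	if (!cmd_pyld[num_cmd]) {
		ret = -ENOMEM;
		goto destroy_imm;	/* frees any coal-close payload too */
	}
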
@@ -1330,9 +1511,11 @@ int ipa_init_drop_stats(u32 pipe_bitmask)
 	struct ipahal_imm_cmd_pyld *drop_base_pyld;
 	struct ipahal_imm_cmd_register_write drop_mask = {0};
 	struct ipahal_imm_cmd_pyld *drop_mask_pyld;
-	struct ipa3_desc desc[3] = { {0} };
+	struct ipahal_imm_cmd_pyld *coal_cmd_pyld = NULL;
+	struct ipa3_desc desc[4] = { {0} };
 	dma_addr_t dma_address;
 	int ret;
+	int num_cmd = 0;
 
 	if (!ipa3_ctx->hw_stats.enabled)
 		return 0;
@@ -1366,6 +1549,19 @@ int ipa_init_drop_stats(u32 pipe_bitmask)
 		goto destroy_init_pyld;
 	}
 
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+		IPA_EP_NOT_ALLOCATED) {
+		ipa_close_coal_frame(&coal_cmd_pyld);
+		if (!coal_cmd_pyld) {
+			IPAERR("failed to construct coal close IC\n");
+			ret = -ENOMEM;
+			goto unmap;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], coal_cmd_pyld);
+		++num_cmd;
+	}
+
 	/* setting the registers and init the stats pyld are done atomically */
 	drop_mask.skip_pipeline_clear = false;
 	drop_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
@@ -1378,12 +1574,13 @@ int ipa_init_drop_stats(u32 pipe_bitmask)
 	if (!drop_mask_pyld) {
 		IPAERR("failed to construct register_write imm cmd\n");
 		ret = -ENOMEM;
-		goto unmap;
+		goto destroy_coal_cmd;
 	}
-	desc[0].opcode = drop_mask_pyld->opcode;
-	desc[0].pyld = drop_mask_pyld->data;
-	desc[0].len = drop_mask_pyld->len;
-	desc[0].type = IPA_IMM_CMD_DESC;
+	desc[num_cmd].opcode = drop_mask_pyld->opcode;
+	desc[num_cmd].pyld = drop_mask_pyld->data;
+	desc[num_cmd].len = drop_mask_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
 
 	drop_base.skip_pipeline_clear = false;
 	drop_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
@@ -1399,10 +1596,11 @@ int ipa_init_drop_stats(u32 pipe_bitmask)
 		ret = -ENOMEM;
 		goto destroy_drop_mask;
 	}
-	desc[1].opcode = drop_base_pyld->opcode;
-	desc[1].pyld = drop_base_pyld->data;
-	desc[1].len = drop_base_pyld->len;
-	desc[1].type = IPA_IMM_CMD_DESC;
+	desc[num_cmd].opcode = drop_base_pyld->opcode;
+	desc[num_cmd].pyld = drop_base_pyld->data;
+	desc[num_cmd].len = drop_base_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
 
 	cmd.is_read = false;
 	cmd.skip_pipeline_clear = false;
@@ -1418,12 +1616,13 @@ int ipa_init_drop_stats(u32 pipe_bitmask)
 		ret = -ENOMEM;
 		goto destroy_drop_base;
 	}
-	desc[2].opcode = cmd_pyld->opcode;
-	desc[2].pyld = cmd_pyld->data;
-	desc[2].len = cmd_pyld->len;
-	desc[2].type = IPA_IMM_CMD_DESC;
+	desc[num_cmd].opcode = cmd_pyld->opcode;
+	desc[num_cmd].pyld = cmd_pyld->data;
+	desc[num_cmd].len = cmd_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
 
-	ret = ipa3_send_cmd(3, desc);
+	ret = ipa3_send_cmd(num_cmd, desc);
 	if (ret) {
 		IPAERR("failed to send immediate command (error %d)\n", ret);
 		goto destroy_imm;
@@ -1437,6 +1636,9 @@ int ipa_init_drop_stats(u32 pipe_bitmask)
 	ipahal_destroy_imm_cmd(drop_base_pyld);
 destroy_drop_mask:
 	ipahal_destroy_imm_cmd(drop_mask_pyld);
+destroy_coal_cmd:
+	if (coal_cmd_pyld)
+		ipahal_destroy_imm_cmd(coal_cmd_pyld);
 unmap:
 	dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
 destroy_init_pyld:
@@ -1451,14 +1653,18 @@ int ipa_get_drop_stats(struct ipa_drop_stats_all *out)
 	struct ipahal_stats_get_offset_drop get_offset = { { 0 } };
 	struct ipahal_stats_offset offset = { 0 };
 	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
-	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_pyld *cmd_pyld[2];
 	struct ipa_mem_buffer mem;
-	struct ipa3_desc desc = { 0 };
+	struct ipa3_desc desc[2];
 	struct ipahal_stats_drop_all *stats;
+	int num_cmd = 0;
 
 	if (!ipa3_ctx->hw_stats.enabled)
 		return 0;
 
+	memset(desc, 0, sizeof(desc));
+	memset(cmd_pyld, 0, sizeof(cmd_pyld));
+
 	get_offset.init = ipa3_ctx->hw_stats.drop.init;
 	ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_DROP, &get_offset,
 		&offset);
@@ -1482,6 +1688,19 @@ int ipa_get_drop_stats(struct ipa_drop_stats_all *out)
 		return ret;
 	}
 
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+		IPA_EP_NOT_ALLOCATED) {
+		ipa_close_coal_frame(&cmd_pyld[num_cmd]);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("failed to construct coal close IC\n");
+			ret = -ENOMEM;
+			goto free_dma_mem;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
 	cmd.is_read = true;
 	cmd.clear_after_read = true;
 	cmd.skip_pipeline_clear = false;
@@ -1490,19 +1709,17 @@ int ipa_get_drop_stats(struct ipa_drop_stats_all *out)
 	cmd.system_addr = mem.phys_base;
 	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
 		IPA_MEM_PART(stats_drop_ofst) + offset.offset;
-	cmd_pyld = ipahal_construct_imm_cmd(
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
 		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
-	if (!cmd_pyld) {
+	if (!cmd_pyld[num_cmd]) {
 		IPAERR("failed to construct dma_shared_mem imm cmd\n");
 		ret = -ENOMEM;
-		goto free_dma_mem;
+		goto destroy_imm;
 	}
-	desc.opcode = cmd_pyld->opcode;
-	desc.pyld = cmd_pyld->data;
-	desc.len = cmd_pyld->len;
-	desc.type = IPA_IMM_CMD_DESC;
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+	++num_cmd;
 
-	ret = ipa3_send_cmd(1, &desc);
+	ret = ipa3_send_cmd(num_cmd, desc);
 	if (ret) {
 		IPAERR("failed to send immediate command (error %d)\n", ret);
 		goto destroy_imm;
@@ -1554,7 +1771,8 @@ int ipa_get_drop_stats(struct ipa_drop_stats_all *out)
 free_stats:
 	kfree(stats);
 destroy_imm:
-	ipahal_destroy_imm_cmd(cmd_pyld);
+	for (i = 0; i < num_cmd; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
 free_dma_mem:
 	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
 	return ret;
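
Each "get" function now follows a single shape: an optional coal-close IC, then a DMA_SHARED_MEM read with clear_after_read set, so the SRAM counters are snapshotted and reset in one pass. A condensed sketch under those assumptions (error handling trimmed, names taken from the hunks above):

	static int ipa_read_stats_sketch(struct ipa_mem_buffer *mem,
		u32 local_ofst)
	{
		struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
		struct ipahal_imm_cmd_pyld *cmd_pyld[2] = { NULL };
		struct ipa3_desc desc[2];
		int i, num_cmd = 0, ret;

		memset(desc, 0, sizeof(desc));

		/* Optional: close the coalescing frame first. */
		if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
			IPA_EP_NOT_ALLOCATED) {
			ipa_close_coal_frame(&cmd_pyld[num_cmd]);
			if (!cmd_pyld[num_cmd])
				return -ENOMEM;
			ipa3_init_imm_cmd_desc(&desc[num_cmd],
				cmd_pyld[num_cmd]);
			++num_cmd;
		}

		/* Snapshot-and-reset read of the SRAM stats region. */
		cmd.is_read = true;
		cmd.clear_after_read = true;
		cmd.skip_pipeline_clear = false;
		cmd.size = mem->size;
		cmd.system_addr = mem->phys_base;
		cmd.local_addr = local_ofst;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
		if (!cmd_pyld[num_cmd]) {
			ret = -ENOMEM;
			goto destroy_imm;
		}
		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
		++num_cmd;

		ret = ipa3_send_cmd(num_cmd, desc);
	destroy_imm:
		for (i = 0; i < num_cmd; i++)
			ipahal_destroy_imm_cmd(cmd_pyld[i]);
		return ret;
	}
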
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 58c924f..5f319f2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/notifier.h>
 #include <linux/interrupt.h>
+#include <linux/netdevice.h>
 #include <linux/ipa.h>
 #include <linux/ipa_usb.h>
 #include <asm/dma-iommu.h>
@@ -692,6 +693,7 @@ struct ipa3_hdr_proc_ctx_offset_entry {
  * @link: entry's link in global header table entries list
  * @type: header processing context type
  * @l2tp_params: L2TP parameters
+ * @generic_params: generic proc_ctx params
  * @offset_entry: entry's offset
  * @hdr: the header
  * @cookie: cookie used for validity check
@@ -705,6 +707,7 @@ struct ipa3_hdr_proc_ctx_entry {
 	u32 cookie;
 	enum ipa_hdr_proc_type type;
 	struct ipa_l2tp_hdr_proc_ctx_params l2tp_params;
+	struct ipa_eth_II_to_eth_II_ex_procparams generic_params;
 	struct ipa3_hdr_proc_ctx_offset_entry *offset_entry;
 	struct ipa3_hdr_entry *hdr;
 	u32 ref_cnt;
@@ -996,6 +999,7 @@ struct ipa3_sys_context {
 	struct work_struct repl_work;
 	void (*repl_hdlr)(struct ipa3_sys_context *sys);
 	struct ipa3_repl_ctx *repl;
+	struct ipa3_repl_ctx *page_recycle_repl;
 	u32 pkt_sent;
 	struct napi_struct *napi_obj;
 	struct list_head pending_pkts[GSI_VEID_MAX];
@@ -1258,6 +1262,10 @@ enum ipa3_config_this_ep {
 	IPA_DO_NOT_CONFIGURE_THIS_EP,
 };
 
+struct ipa3_page_recycle_stats {
+	u64 total_replenished;
+	u64 tmp_alloc;
+};
 struct ipa3_stats {
 	u32 tx_sw_pkts;
 	u32 tx_hw_pkts;
@@ -1278,6 +1286,7 @@ struct ipa3_stats {
 	u32 flow_enable;
 	u32 flow_disable;
 	u32 tx_non_linear;
+	struct ipa3_page_recycle_stats page_recycle_stats[2];
 };
 
 /* offset for each stats */
@@ -1289,15 +1298,6 @@ struct ipa3_stats {
 #define IPA3_UC_DEBUG_STATS_OFF (20)
 
 /**
- * struct ipa3_uc_dbg_gsi_stats - uC dbg stats info for each
- * offloading protocol
- * @ring: ring stats for each channel
- */
-struct ipa3_uc_dbg_ring_stats {
-	struct IpaHwRingStats_t ring[MAX_CH_STATS_SUPPORTED];
-};
-
-/**
  * struct ipa3_uc_dbg_stats - uC dbg stats for offloading
  * protocols
  * @uc_dbg_stats_ofst: offset to SRAM base
@@ -1412,6 +1412,14 @@ struct ipa3_uc_ctx {
 	u32 rdy_comp_ring_size;
 	u32 *rdy_ring_rp_va;
 	u32 *rdy_comp_ring_wp_va;
+	bool uc_event_ring_valid;
+	struct ipa_mem_buffer event_ring;
+	u32 ering_wp_local;
+	u32 ering_rp_local;
+	u32 ering_wp;
+	u32 ering_rp;
+	struct ipa_wdi_bw_info info;
+	uint64_t bw_info_max;
 };
 
 /**
@@ -1482,6 +1490,14 @@ struct ipa3_mhip_ctx {
 };
 
 /**
+ * struct ipa3_aqc_ctx - IPA aqc context
+ */
+struct ipa3_aqc_ctx {
+	struct ipa3_uc_dbg_stats dbg_stats;
+};
+
+
+/**
  * struct ipa3_transport_pm - transport power management related members
  * @transport_pm_mutex: Mutex to protect the transport_pm functionality.
  */
@@ -1501,6 +1517,17 @@ struct ipa3cm_client_info {
 	bool uplink;
 };
 
+/**
+ * struct ipacm_fnr_info - the fnr-info indicated from IPACM
+ * @ipacm_client_enum: the enum to indicate tether-client
+ * @ipacm_client_uplink: the bool to indicate pipe for uplink
+ */
+struct ipacm_fnr_info {
+	bool valid;
+	uint8_t hw_counter_offset;
+	uint8_t sw_counter_offset;
+};
+
 struct ipa3_smp2p_info {
 	u32 out_base_id;
 	u32 in_base_id;
@@ -1728,6 +1755,9 @@ struct ipa3_pc_mbox_data {
  * @wdi3_ctx: IPA wdi3 context
  * @gsi_info: channel/protocol info for GSI offloading uC stats
  * IPA context - holds all relevant info about IPA driver and its state
+ * @lan_rx_napi_enable: whether NAPI is enabled on the LAN data path
+ * @lan_ndev: dummy netdev for LAN rx NAPI
+ * @napi_lan_rx: NAPI object for LAN rx
  */
 struct ipa3_context {
 	struct ipa3_char_device_context cdev;
@@ -1885,6 +1915,7 @@ struct ipa3_context {
 	struct ipa3_wdi3_ctx wdi3_ctx;
 	struct ipa3_usb_ctx usb_ctx;
 	struct ipa3_mhip_ctx mhip_ctx;
+	struct ipa3_aqc_ctx aqc_ctx;
 	atomic_t ipa_clk_vote;
 	int (*client_lock_unlock[IPA_MAX_CLNT])(bool is_lock);
 	bool fw_loaded;
@@ -1893,6 +1924,11 @@ struct ipa3_context {
 	struct IpaHwOffloadStatsAllocCmdData_t
 		gsi_info[IPA_HW_PROTOCOL_MAX];
 	bool ipa_wan_skb_page;
+	struct ipacm_fnr_info fnr_info;
+	/* dummy netdev for lan RX NAPI */
+	bool lan_rx_napi_enable;
+	struct net_device lan_ndev;
+	struct napi_struct napi_lan_rx;
 };
 
 struct ipa3_plat_drv_res {
@@ -1925,6 +1961,7 @@ struct ipa3_plat_drv_res {
 	bool apply_rg10_wa;
 	bool gsi_ch20_wa;
 	bool tethered_flow_control;
+	bool lan_rx_napi_enable;
 	u32 mhi_evid_limits[2]; /* start and end values */
 	bool ipa_mhi_dynamic_config;
 	u32 ipa_tz_unlock_reg_num;
@@ -2476,10 +2513,14 @@ int ipa3_disconnect_gsi_wdi_pipe(u32 clnt_hdl);
 int ipa3_resume_wdi_pipe(u32 clnt_hdl);
 int ipa3_resume_gsi_wdi_pipe(u32 clnt_hdl);
 int ipa3_suspend_wdi_pipe(u32 clnt_hdl);
-int ipa3_get_wdi_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats);
-int ipa3_get_wdi3_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats);
-int ipa3_get_usb_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats);
+void ipa3_get_gsi_stats(int prot_id,
+	struct ipa_uc_dbg_ring_stats *stats);
+int ipa3_get_wdi_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
+int ipa3_get_wdi3_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
+int ipa3_get_usb_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
+int ipa3_get_aqc_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
 int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
+int ipa3_get_prot_id(enum ipa_client_type client);
 u16 ipa3_get_smem_restr_bytes(void);
 int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid, uint64_t num_bytes);
 int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
@@ -2572,6 +2613,9 @@ bool ipa3_get_client_uplink(int pipe_idx);
 int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats);
 
 int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota);
+
+int ipa3_inform_wlan_bw(struct ipa_inform_wlan_bw *wdi_bw);
+
 /*
  * IPADMA
  */
@@ -2657,6 +2701,8 @@ u8 ipa3_get_qmb_master_sel(enum ipa_client_type client);
 int ipa3_get_smmu_params(struct ipa_smmu_in_params *in,
 	struct ipa_smmu_out_params *out);
 
+bool ipa3_get_lan_rx_napi(void);
+
 /* internal functions */
 
 int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
@@ -2767,6 +2813,7 @@ int ipa3_alloc_counter_id(struct ipa_ioc_flt_rt_counter_alloc *counter);
 void ipa3_counter_remove_hdl(int hdl);
 void ipa3_counter_id_remove_all(void);
 int ipa3_id_alloc(void *ptr);
+bool ipa3_check_idr_if_freed(void *ptr);
 void *ipa3_id_find(u32 id);
 void ipa3_id_remove(u32 id);
 int ipa3_enable_force_clear(u32 request_id, bool throttle_source,
@@ -2839,6 +2886,11 @@ int ipa3_uc_send_remote_ipa_info(u32 remote_addr, uint32_t mbox_n);
 int ipa3_uc_debug_stats_alloc(
 	struct IpaHwOffloadStatsAllocCmdData_t cmdinfo);
 int ipa3_uc_debug_stats_dealloc(uint32_t protocol);
+int ipa3_uc_quota_monitor(uint64_t quota);
+int ipa3_uc_bw_monitor(struct ipa_wdi_bw_info *info);
+int ipa3_uc_setup_event_ring(void);
+int ipa3_set_wlan_tx_info(struct ipa_wdi_tx_info *info);
+int ipa3_uc_debug_stats_dealloc(uint32_t prot_id);
 void ipa3_tag_destroy_imm(void *user1, int user2);
 const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info
 	(enum ipa_client_type client);
@@ -2897,6 +2949,8 @@ int ipa_get_flt_rt_stats(struct ipa_ioc_flt_rt_query *query);
 
 int ipa_set_flt_rt_stats(int index, struct ipa_flt_rt_stats stats);
 
+bool ipa_get_fnr_info(struct ipacm_fnr_info *fnr_info);
+
 u32 ipa3_get_num_pipes(void);
 struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(enum ipa_smmu_cb_type);
 struct iommu_domain *ipa3_get_smmu_domain(void);
@@ -2916,6 +2970,7 @@ int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple);
 int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple);
 void ipa3_set_resorce_groups_min_max_limits(void);
 int ipa3_suspend_apps_pipes(bool suspend);
+void ipa3_force_close_coal(void);
 int ipa3_flt_read_tbl_from_hw(u32 pipe_idx,
 	enum ipa_ip_type ip_type,
 	bool hashable,
@@ -2943,6 +2998,7 @@ int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data);
 const char *ipa_hw_error_str(enum ipa3_hw_errors err_type);
 int ipa_gsi_ch20_wa(void);
 int ipa3_rx_poll(u32 clnt_hdl, int budget);
+int ipa3_lan_rx_poll(u32 clnt_hdl, int weight);
 int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map,
 	enum ipa_smmu_cb_type cb_type);
 int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt,
@@ -2997,8 +3053,8 @@ int ipa3_is_mhip_offload_enabled(void);
 int ipa_mpm_reset_dma_mode(enum ipa_client_type src_pipe,
 	enum ipa_client_type dst_pipe);
 int ipa_mpm_panic_handler(char *buf, int size);
-int ipa3_get_mhip_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats);
 int ipa3_mpm_enable_adpl_over_odl(bool enable);
+int ipa3_get_mhip_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
 #else
 static inline int ipa_mpm_mhip_xdci_pipe_enable(
 	enum ipa_usb_teth_prot prot)
@@ -3029,7 +3085,7 @@ static inline int ipa_mpm_panic_handler(char *buf, int size)
 	return 0;
 }
 
-static inline int ipa3_get_mhip_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats)
+static inline int ipa3_get_mhip_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
 {
 	return 0;
 }
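
ipa_i.h now carries a dummy netdev plus a NAPI instance for the LAN consumer pipe and declares ipa3_lan_rx_poll(). The usual wiring for NAPI without a real device looks roughly like the following; the weight constant, the stored client handle, and the poll glue are illustrative assumptions, not taken from this patch:

	#include <linux/netdevice.h>

	#define IPA_LAN_RX_NAPI_WEIGHT 64	/* assumed value */

	static u32 ipa_lan_rx_clnt_hdl;	/* assumed saved at pipe setup */

	static int ipa_lan_rx_napi_poll(struct napi_struct *napi, int budget)
	{
		int work = ipa3_lan_rx_poll(ipa_lan_rx_clnt_hdl, budget);

		if (work < budget)
			napi_complete(napi);
		return work;
	}

	static void ipa_lan_rx_napi_setup(struct ipa3_context *ctx)
	{
		init_dummy_netdev(&ctx->lan_ndev);
		netif_napi_add(&ctx->lan_ndev, &ctx->napi_lan_rx,
			       ipa_lan_rx_napi_poll, IPA_LAN_RX_NAPI_WEIGHT);
		napi_enable(&ctx->napi_lan_rx);
	}
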
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c
index b768750..18cc101 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c
@@ -641,7 +641,8 @@ struct ipa_mhi_clk_vote_resp_msg_v01
 	 * executed from mhi context.
 	 */
 	if (vote) {
-		ret = mhi_device_get_sync(imp_ctx->md.mhi_dev, MHI_VOTE_BUS);
+		ret = mhi_device_get_sync(imp_ctx->md.mhi_dev,
+			MHI_VOTE_BUS | MHI_VOTE_DEVICE);
 		if (ret) {
 			IMP_ERR("mhi_sync_get failed %d\n", ret);
 			resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
@@ -651,7 +652,8 @@ struct ipa_mhi_clk_vote_resp_msg_v01
 			return resp;
 		}
 	} else {
-		mhi_device_put(imp_ctx->md.mhi_dev, MHI_VOTE_BUS);
+		mhi_device_put(imp_ctx->md.mhi_dev,
+			MHI_VOTE_BUS | MHI_VOTE_DEVICE);
 	}
 
 	mutex_lock(&imp_ctx->mutex);
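
The proxy now pairs a device vote with the bus vote, keeping the modem from entering low power while a QMI client holds the clock. The contract to preserve is symmetry: release exactly the mask that was acquired. A minimal sketch:

	static int imp_mhi_vote(struct mhi_device *mdev, bool vote)
	{
		const int mask = MHI_VOTE_BUS | MHI_VOTE_DEVICE;

		if (vote)
			return mhi_device_get_sync(mdev, mask);
		mhi_device_put(mdev, mask);
		return 0;
	}
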
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
index e5896ec..79ca342 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
@@ -71,6 +71,7 @@
 #define IPA_CHANNEL_STOP_IN_PROC_TO_MSEC 5
 #define IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC 200
 #define IPA_MHIP_HOLB_TMO 31 /* value to match granularity on ipa HW 4.5 */
+
 enum mhip_re_type {
 	MHIP_RE_XFER = 0x2,
 	MHIP_RE_NOP = 0x4,
@@ -376,12 +377,6 @@ struct ipa_mpm_mhi_driver {
 	/* General MPM mutex to protect concurrent update of MPM GSI states */
 	struct mutex mutex;
 	/*
-	 * Mutex to protect IPA clock vote/unvote to make sure IPA isn't double
-	 * devoted for concurrency scenarios such as SSR and LPM mode CB
-	 * concurrency.
-	 */
-	struct mutex lpm_mutex;
-	/*
 	 * Mutex to protect mhi_dev update/ access, for concurrency such as
 	 * 5G SSR and USB disconnect/connect.
 	 */
@@ -426,7 +421,7 @@ static void ipa_mpm_change_gsi_state(int probe_id,
 	enum ipa_mpm_gsi_state next_state);
 static int ipa_mpm_probe(struct platform_device *pdev);
 static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
-	int probe_id);
+	int probe_id, bool is_force, bool *is_acted);
 static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote,
 	int probe_id);
 static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
@@ -1357,7 +1352,6 @@ static void ipa_mpm_mhip_shutdown(int mhip_idx)
 
 	ipa_mpm_clean_mhip_chan(mhip_idx, dl_cons_chan);
 
-	mutex_lock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
 	if (!ipa_mpm_ctx->md[mhip_idx].in_lpm) {
 		ipa_mpm_vote_unvote_ipa_clk(CLK_OFF, mhip_idx);
 		/* while in modem shutdown scenarios such as SSR, no explicit
@@ -1365,20 +1359,26 @@ static void ipa_mpm_mhip_shutdown(int mhip_idx)
 		 */
 		ipa_mpm_ctx->md[mhip_idx].in_lpm = true;
 	}
-	mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
 	mutex_lock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
 	ipa_mpm_ctx->md[mhip_idx].mhi_dev = NULL;
-	ipa_mpm_ctx->md[mhip_idx].init_complete = false;
 	mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
 	IPA_MPM_FUNC_EXIT();
 }
 
-/*
- * Turning on/OFF PCIE Clock is done once for all clients.
- * Always vote for Probe_ID 0 as a standard.
+/**
+ * @ipa_mpm_vote_unvote_pcie_clk - Vote/Unvote PCIe Clock per probe_id
+ *                                 Returns success or failure.
+ * @vote - Vote or Unvote for the PCIe clock.
+ * @probe_id - MHI probe_id per client.
+ * @is_force - Forcibly casts the vote - should be true only in probe.
+ * @is_acted - Output param - indicates whether the clock was actually
+ *             voted or devoted; checked only when voting for clocks.
+ * Return value: 0 on success, error code on failure.
  */
 static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
-	int probe_id)
+	int probe_id,
+	bool is_force,
+	bool *is_acted)
 {
 	int result = 0;
 
@@ -1392,14 +1392,34 @@ static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
 		return -EINVAL;
 	}
 
+	if (!is_acted) {
+		IPA_MPM_ERR("Invalid clk_vote ptr\n");
+		return -EFAULT;
+	}
+
 	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 	if (ipa_mpm_ctx->md[probe_id].mhi_dev == NULL) {
 		IPA_MPM_ERR("MHI not initialized yet\n");
+		*is_acted = false;
 		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 		return 0;
 	}
 
-	IPA_MPM_ERR("PCIe clock vote/unvote = %d probe_id = %d clk_cnt = %d\n",
+	if (!ipa_mpm_ctx->md[probe_id].init_complete &&
+		!is_force) {
+		/*
+		 * SSR might be in progress; no need to vote/unvote for
+		 * IPA clocks as it will be taken care of in remove_cb or a
+		 * subsequent probe.
+		 */
+		IPA_MPM_DBG("SSR in progress, return\n");
+		*is_acted = false;
+		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+		return 0;
+	}
+	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+
+	IPA_MPM_DBG("PCIe clock vote/unvote = %d probe_id = %d clk_cnt = %d\n",
 		vote, probe_id,
 		atomic_read(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt));
 
@@ -1409,7 +1429,7 @@ static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
 		if (result) {
 			IPA_MPM_ERR("mhi_sync_get failed for probe_id %d\n",
 				result, probe_id);
-			mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+			*is_acted = false;
 			return result;
 		}
 
@@ -1423,7 +1443,7 @@ static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
 			IPA_MPM_DBG("probe_id %d PCIE clock already devoted\n",
 				probe_id);
 			WARN_ON(1);
-			mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+			*is_acted = true;
 			return 0;
 		}
 		mhi_device_put(ipa_mpm_ctx->md[probe_id].mhi_dev, MHI_VOTE_BUS);
@@ -1431,8 +1451,7 @@ static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
 		atomic_dec(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt);
 		atomic_dec(&ipa_mpm_ctx->pcie_clk_total_cnt);
 	}
-
-	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+	*is_acted = true;
 	return result;
 }
 
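
The is_acted out-parameter exists because the vote can be silently skipped (MHI not yet initialized, or SSR in progress without is_force); callers may roll back only votes that actually landed. A sketch of the caller-side contract (start_channels() is a hypothetical placeholder):

	static int example_vote_then_start(int probe_id)
	{
		bool is_acted = true;
		int ret;

		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id,
						   false, &is_acted);
		if (ret)
			return ret;	/* vote failed, nothing to undo */

		ret = start_channels(probe_id);	/* hypothetical next step */
		if (ret && is_acted)	/* undo only a vote that landed */
			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
						     false, &is_acted);
		return ret;
	}
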
@@ -1445,7 +1464,7 @@ static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote,
 	if (vote > CLK_OFF)
 		return;
 
-	IPA_MPM_ERR("IPA clock vote/unvote = %d probe_id = %d clk_cnt = %d\n",
+	IPA_MPM_DBG("IPA clock vote/unvote = %d probe_id = %d clk_cnt = %d\n",
 		vote, probe_id,
 		atomic_read(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt));
 
@@ -1470,25 +1489,42 @@ static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote,
 	}
 }
 
+/**
+ * @ipa_mpm_start_stop_remote_mhip_chan - Start/Stop Remote device side MHIP
+ *                                        channels.
+ *
+ * @probe_id - MHI probe_id per client.
+ * @start_stop - Start/Stop remote channels.
+ * @is_force - Forcibly starts/stops the remote channels;
+ *             should be true only in probe.
+ * Return value: 0 on success, error code on failure.
+ */
 static int ipa_mpm_start_stop_remote_mhip_chan(
 	int probe_id,
-	enum ipa_mpm_start_stop_type start_stop)
+	enum ipa_mpm_start_stop_type start_stop,
+	bool is_force)
 {
 	int ret = 0;
 	struct mhi_device *mhi_dev = ipa_mpm_ctx->md[probe_id].mhi_dev;
 
-	if (!mhi_dev) {
-		IPA_MPM_ERR("MHI not initialized yet\n");
+	/* Sanity check to make sure remote channels can be started.
+	 * If a probe is in progress, mhi_prepare_for_transfer will start
+	 * the remote channels, so there is no need to start them here.
+	 */
+	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+	if (!ipa_mpm_ctx->md[probe_id].init_complete && !is_force) {
+		IPA_MPM_ERR("MHI not initialized yet, probe in progress\n");
+		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 		return ret;
 	}
 
-	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 	/* For error state, expect modem SSR to recover from error */
 	if (ipa_mpm_ctx->md[probe_id].remote_state == MPM_MHIP_REMOTE_ERR) {
 		IPA_MPM_ERR("Remote channels in err state for %d\n", probe_id);
 		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 		return -EFAULT;
 	}
+	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 
 	if (start_stop == MPM_MHIP_START) {
 		if (ipa_mpm_ctx->md[probe_id].remote_state ==
@@ -1497,12 +1533,14 @@ static int ipa_mpm_start_stop_remote_mhip_chan(
 				probe_id);
 		} else {
 			ret = mhi_resume_transfer(mhi_dev);
+			mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 			if (ret)
 				ipa_mpm_ctx->md[probe_id].remote_state =
 							MPM_MHIP_REMOTE_ERR;
 			else
 				ipa_mpm_ctx->md[probe_id].remote_state =
 							MPM_MHIP_REMOTE_START;
+			mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 		}
 	} else {
 		if (ipa_mpm_ctx->md[probe_id].remote_state ==
@@ -1511,15 +1549,16 @@ static int ipa_mpm_start_stop_remote_mhip_chan(
 					probe_id);
 		} else {
 			ret = mhi_pause_transfer(mhi_dev);
+			mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 			if (ret)
 				ipa_mpm_ctx->md[probe_id].remote_state =
 							MPM_MHIP_REMOTE_ERR;
 			else
 				ipa_mpm_ctx->md[probe_id].remote_state =
 							MPM_MHIP_REMOTE_STOP;
+			mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 		}
 	}
-	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 	return ret;
 }
 
@@ -1563,6 +1602,15 @@ static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
 		IPA_MPM_ERR("fail to get EP# for idx %d\n", ipa_ep_idx);
 		return MHIP_STATUS_EP_NOT_FOUND;
 	}
+
+	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+	if (!ipa_mpm_ctx->md[probe_id].init_complete) {
+		IPA_MPM_ERR("MHIP probe %d not initialized\n", probe_id);
+		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+		return MHIP_STATUS_EP_NOT_READY;
+	}
+	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+
 	ep = &ipa3_ctx->ep[ipa_ep_idx];
 
 	if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
@@ -1681,6 +1729,7 @@ int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
 	static enum mhip_status_type status;
 	int ret = 0;
 	enum ipa_mpm_mhip_client_type mhip_client = IPA_MPM_MHIP_TETH;
+	bool is_acted = true;
 
 	if (!state)
 		return -EPERM;
@@ -1704,10 +1753,10 @@ int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
 
 	if (state->up) {
 		/* Start UL MHIP channel for offloading tethering connection */
-		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
+		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id,
+			false, &is_acted);
 		if (ret) {
-			IPA_MPM_ERR("Error cloking on PCIe clk, err = %d\n",
-				ret);
+			IPA_MPM_ERR("Err %d clocking on PCIe clk\n", ret);
 			return ret;
 		}
 
@@ -1718,7 +1767,8 @@ int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
 		 * Host IPA gets voted.
 		 */
 		ret = ipa_mpm_start_stop_remote_mhip_chan(probe_id,
-							MPM_MHIP_START);
+							MPM_MHIP_START,
+							false);
 		if (ret) {
 			/*
 			 * This can fail only when modem is in SSR state.
@@ -1726,6 +1776,13 @@ int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
 			 * so return a failure.
 			 */
 			IPA_MPM_ERR("MHIP remote chan start fail = %d\n", ret);
+
+			if (is_acted)
+				ipa_mpm_vote_unvote_pcie_clk(CLK_OFF,
+					probe_id,
+					false,
+					&is_acted);
+
 			return ret;
 		}
 		IPA_MPM_DBG("MHIP remote channels are started\n");
@@ -1741,18 +1798,27 @@ int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
 		case MHIP_STATUS_NO_OP:
 			IPA_MPM_DBG("UL chan already start, status = %d\n",
 					status);
-			ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
-			return ret;
+			if (is_acted) {
+				return ipa_mpm_vote_unvote_pcie_clk(CLK_OFF,
+						probe_id,
+						false,
+						&is_acted);
+			}
+			break;
 		case MHIP_STATUS_FAIL:
 		case MHIP_STATUS_BAD_STATE:
 		case MHIP_STATUS_EP_NOT_FOUND:
 			IPA_MPM_ERR("UL chan start err =%d\n", status);
-			ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+			if (is_acted)
+				ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+					false, &is_acted);
 			ipa_assert();
 			return -EFAULT;
 		default:
 			IPA_MPM_ERR("Err not found\n");
-			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+			if (is_acted)
+				ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+					false, &is_acted);
 			ret = -EFAULT;
 			break;
 		}
@@ -1765,7 +1831,8 @@ int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
 		 * Host IPA gets devoted.
 		 */
 		ret = ipa_mpm_start_stop_remote_mhip_chan(probe_id,
-						MPM_MHIP_STOP);
+						MPM_MHIP_STOP,
+						false);
 		if (ret) {
 			/*
 			 * This can fail only when modem is in SSR state.
@@ -1801,7 +1868,8 @@ int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
 			return -EFAULT;
 		}
 		/* Stop UL MHIP channel for offloading tethering connection */
-		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+					false, &is_acted);
 
 		if (ret) {
 			IPA_MPM_ERR("Error cloking off PCIe clk, err = %d\n",
@@ -1887,7 +1955,7 @@ static void ipa_mpm_read_channel(enum ipa_client_type chan)
 
 	ep = &ipa3_ctx->ep[ipa_ep_idx];
 
-	IPA_MPM_ERR("Reading channel for chan %d, ep = %d, gsi_chan_hdl = %d\n",
+	IPA_MPM_DBG("Reading channel for chan %d, ep = %d, gsi_chan_hdl = %d\n",
 		chan, ep, ep->gsi_chan_hdl);
 
 	res = ipa3_get_gsi_chan_info(&chan_info, ep->gsi_chan_hdl);
@@ -1912,6 +1980,7 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 	u32 evt_ring_db_addr_low, evt_ring_db_addr_high;
 	u32 wp_addr;
 	int pipe_idx;
+	bool is_acted = true;
 
 	IPA_MPM_FUNC_ENTRY();
 
@@ -1950,11 +2019,14 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 	ipa_mpm_ctx->md[probe_id].remote_state = MPM_MHIP_REMOTE_STOP;
 	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
-	ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
-	mutex_lock(&ipa_mpm_ctx->md[probe_id].lpm_mutex);
+	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id, true, &is_acted);
+	if (ret) {
+		IPA_MPM_ERR("Err %d voting PCIe clocks\n", ret);
+		return -EPERM;
+	}
+
 	ipa_mpm_vote_unvote_ipa_clk(CLK_ON, probe_id);
 	ipa_mpm_ctx->md[probe_id].in_lpm = false;
-	mutex_unlock(&ipa_mpm_ctx->md[probe_id].lpm_mutex);
 	IPA_MPM_DBG("ul chan = %d, dl_chan = %d\n", ul_prod, dl_cons);
 
 	/*
@@ -2089,8 +2161,10 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 		 * to MHI driver. remove_cb will be called eventually when
 		 * Device side comes from where pending cleanup happens.
 		 */
+		mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 		atomic_inc(&ipa_mpm_ctx->probe_cnt);
-		ipa_mpm_ctx->md[probe_id].init_complete = true;
+		ipa_mpm_ctx->md[probe_id].init_complete = false;
+		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 		IPA_MPM_FUNC_EXIT();
 		return 0;
 	}
@@ -2236,7 +2310,7 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 		 * Host IPA gets unvoted.
 		 */
 		ret = ipa_mpm_start_stop_remote_mhip_chan(probe_id,
-						MPM_MHIP_STOP);
+						MPM_MHIP_STOP, true);
 		if (ret) {
 			/*
 			 * This can fail only when modem is in SSR.
@@ -2248,10 +2322,24 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 		}
 		if (ul_prod != IPA_CLIENT_MAX) {
 			/* No teth started yet, disable UL channel */
-			ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
-						probe_id, MPM_MHIP_STOP);
+			ipa_ep_idx = ipa3_get_ep_mapping(ul_prod);
+			if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+				IPA_MPM_ERR("fail to get EP mapping\n");
+				goto fail_stop_channel;
+			}
+			ret = ipa3_stop_gsi_channel(ipa_ep_idx);
+			if (ret) {
+				IPA_MPM_ERR("MHIP Stop channel err = %d\n",
+					ret);
+				goto fail_stop_channel;
+			}
+			ipa_mpm_change_gsi_state(probe_id,
+				IPA_MPM_MHIP_CHAN_UL,
+				GSI_STOPPED);
 		}
-		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+		if (is_acted)
+			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+							true, &is_acted);
 		break;
 	case IPA_MPM_TETH_INPROGRESS:
 	case IPA_MPM_TETH_CONNECTED:
@@ -2271,25 +2359,28 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 	}
 
 	atomic_inc(&ipa_mpm_ctx->probe_cnt);
-	ipa_mpm_ctx->md[probe_id].init_complete = true;
-
 	/* Check if ODL pipe is connected to MHIP DPL pipe before probe */
 	if (probe_id == IPA_MPM_MHIP_CH_ID_2 &&
 		ipa3_is_odl_connected()) {
-		IPA_MPM_ERR("setting DPL DMA to ODL\n");
+		IPA_MPM_DBG("setting DPL DMA to ODL\n");
 		ret = ipa_mpm_set_dma_mode(IPA_CLIENT_MHI_PRIME_DPL_PROD,
 			IPA_CLIENT_USB_DPL_CONS, false);
 	}
-
+	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+	ipa_mpm_ctx->md[probe_id].init_complete = true;
+	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 	IPA_MPM_FUNC_EXIT();
 	return 0;
 
 fail_gsi_setup:
 fail_start_channel:
+fail_stop_channel:
 fail_smmu:
 	if (ipa_mpm_ctx->dev_info.ipa_smmu_enabled)
 		IPA_MPM_DBG("SMMU failed\n");
-	ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+	if (is_acted)
+		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id, true,
+					&is_acted);
 	ipa_mpm_vote_unvote_ipa_clk(CLK_OFF, probe_id);
 	ipa_assert();
 	return ret;
@@ -2349,6 +2440,11 @@ static void ipa_mpm_mhi_remove_cb(struct mhi_device *mhi_dev)
 	}
 
 	IPA_MPM_DBG("remove_cb for mhip_idx = %d", mhip_idx);
+
+	mutex_lock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+	ipa_mpm_ctx->md[mhip_idx].init_complete = false;
+	mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+
 	ipa_mpm_mhip_shutdown(mhip_idx);
 
 	atomic_dec(&ipa_mpm_ctx->probe_cnt);
@@ -2387,7 +2483,19 @@ static void ipa_mpm_mhi_status_cb(struct mhi_device *mhi_dev,
 		return;
 	}
 
-	mutex_lock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
+	mutex_lock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+	if (!ipa_mpm_ctx->md[mhip_idx].init_complete) {
+		/*
+		 * SSR might be in progress; no need to vote/unvote for
+		 * IPA clocks as it will be taken care of in remove_cb or a
+		 * subsequent probe.
+		 */
+		IPA_MPM_DBG("SSR in progress, return\n");
+		mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+		return;
+	}
+	mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+
 	switch (mhi_cb) {
 	case MHI_CB_IDLE:
 		break;
@@ -2424,7 +2532,6 @@ static void ipa_mpm_mhi_status_cb(struct mhi_device *mhi_dev,
 		IPA_MPM_ERR("unexpected event %d\n", mhi_cb);
 		break;
 	}
-	mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
 }
 
 static void ipa_mpm_mhip_map_prot(enum ipa_usb_teth_prot prot,
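
With lpm_mutex gone, mhi_mutex plus init_complete act as the single SSR gate: remove_cb clears the flag before shutdown, and every later entry point re-checks it under the mutex before touching clocks or channels. The recurring check reduces to this sketch:

	static bool ipa_mpm_probe_ready(int mhip_idx)
	{
		bool ready;

		mutex_lock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
		ready = ipa_mpm_ctx->md[mhip_idx].init_complete;
		mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
		return ready;
	}
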
@@ -2454,8 +2561,9 @@ int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot xdci_teth_prot)
 	int i;
 	enum ipa_mpm_mhip_client_type mhip_client;
 	enum mhip_status_type status;
-	int ret = 0;
 	int pipe_idx;
+	bool is_acted = true;
+	int ret = 0;
 
 	if (ipa_mpm_ctx == NULL) {
 		IPA_MPM_ERR("MPM not platform probed yet, returning ..\n");
@@ -2471,20 +2579,28 @@ int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot xdci_teth_prot)
 		}
 	}
 
-	if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
+	if ((probe_id < IPA_MPM_MHIP_CH_ID_0) ||
+		(probe_id >= IPA_MPM_MHIP_CH_ID_MAX)) {
 		IPA_MPM_ERR("Unknown probe_id\n");
 		return 0;
 	}
 
+	if (probe_id == IPA_MPM_MHIP_CH_ID_0) {
+		/* For rndis, the MPM processing happens in WAN State IOCTL */
+		IPA_MPM_DBG("MPM Xdci connect for rndis, no-op\n");
+		return 0;
+	}
+
 	IPA_MPM_DBG("Connect xdci prot %d -> mhip_client = %d probe_id = %d\n",
 			xdci_teth_prot, mhip_client, probe_id);
 
 	ipa_mpm_ctx->md[probe_id].mhip_client = mhip_client;
 
-	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
+	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id,
+		false, &is_acted);
 	if (ret) {
 		IPA_MPM_ERR("Error cloking on PCIe clk, err = %d\n", ret);
-		return ret;
+		return ret;
 	}
 
 	/*
@@ -2494,12 +2610,12 @@ int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot xdci_teth_prot)
 	 * Host IPA gets voted.
 	 */
 	ret = ipa_mpm_start_stop_remote_mhip_chan(probe_id,
-						MPM_MHIP_START);
+						MPM_MHIP_START, false);
 	if (ret) {
 		/*
 		 * This can fail only when modem is in SSR state.
 		 * Eventually there would be a remove callback,
-		 * so return a failure.
+		 * so return a failure. No need to unvote PCIe here.
 		 */
 		IPA_MPM_ERR("MHIP remote chan start fail = %d\n",
 				ret);
@@ -2519,14 +2635,18 @@ int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot xdci_teth_prot)
 		atomic_set(&ipa_mpm_ctx->adpl_over_usb_available, 1);
 		return 0;
 	default:
-		IPA_MPM_DBG("mhip_client = %d not processed\n", mhip_client);
-		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
-		if (ret) {
-			IPA_MPM_ERR("Error unvoting on PCIe clk, err = %d\n",
+		IPA_MPM_ERR("mhip_client = %d not processed\n", mhip_client);
+		if (is_acted) {
+			ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+				false, &is_acted);
+			if (ret) {
+				IPA_MPM_ERR("Err unvoting PCIe clk, err = %d\n",
 					ret);
-			return ret;
+				return ret;
+			}
 		}
-		return 0;
+		ipa_assert();
+		return -EINVAL;
 	}
 
 	if (mhip_client != IPA_MPM_MHIP_USB_DPL)
@@ -2543,26 +2663,33 @@ int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot xdci_teth_prot)
 
 		/* Lift the delay for rmnet USB prod pipe */
 		ipa3_xdci_ep_delay_rm(pipe_idx);
-		if (status == MHIP_STATUS_NO_OP) {
+		if (status == MHIP_STATUS_NO_OP && is_acted) {
 			/* Channels already have been started,
 			 * we can devote for pcie clocks
 			 */
-			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+				false, &is_acted);
 		}
 		break;
 	case MHIP_STATUS_EP_NOT_READY:
-		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+		if (is_acted)
+			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+				false, &is_acted);
 		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INPROGRESS);
 		break;
 	case MHIP_STATUS_FAIL:
 	case MHIP_STATUS_BAD_STATE:
 	case MHIP_STATUS_EP_NOT_FOUND:
 		IPA_MPM_ERR("UL chan cant be started err =%d\n", status);
-		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+		if (is_acted)
+			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+				false, &is_acted);
 		ret = -EFAULT;
 		break;
 	default:
-		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+		if (is_acted)
+			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+				false, &is_acted);
 		IPA_MPM_ERR("Err not found\n");
 		break;
 	}
@@ -2576,6 +2703,7 @@ int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
 	enum ipa_mpm_mhip_client_type mhip_client;
 	enum mhip_status_type status;
 	int ret = 0;
+	bool is_acted = true;
 
 	if (ipa_mpm_ctx == NULL) {
 		IPA_MPM_ERR("MPM not platform probed, returning ..\n");
@@ -2591,12 +2719,19 @@ int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
 		}
 	}
 
-	if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
-		IPA_MPM_ERR("Invalid probe_id\n");
+	if ((probe_id < IPA_MPM_MHIP_CH_ID_0) ||
+		(probe_id >= IPA_MPM_MHIP_CH_ID_MAX)) {
+		IPA_MPM_ERR("Unknown probe_id\n");
 		return 0;
 	}
 
-	IPA_MPM_ERR("xdci disconnect prot %d mhip_client = %d probe_id = %d\n",
+	if (probe_id == IPA_MPM_MHIP_CH_ID_0) {
+		/* For rndis, the MPM processing happens in WAN State IOCTL */
+		IPA_MPM_DBG("MPM Xdci disconnect for rndis, no-op\n");
+		return 0;
+	}
+
+	IPA_MPM_DBG("xdci disconnect prot %d mhip_client = %d probe_id = %d\n",
 			xdci_teth_prot, mhip_client, probe_id);
 	/*
 	 * Make sure to stop Device side channels before
@@ -2605,7 +2740,7 @@ int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
 	 * Host IPA gets unvoted.
 	 */
 	ret = ipa_mpm_start_stop_remote_mhip_chan(probe_id,
-						MPM_MHIP_STOP);
+						MPM_MHIP_STOP, false);
 	if (ret) {
 		/*
 		 * This can fail only when modem is in SSR state.
@@ -2639,7 +2774,8 @@ int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
 			ipa_mpm_ctx->md[probe_id].mhip_client =
 				IPA_MPM_MHIP_NONE;
 		}
-		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+			false, &is_acted);
 		if (ret)
 			IPA_MPM_ERR("Error clking off PCIe clk err%d\n", ret);
 		atomic_set(&ipa_mpm_ctx->adpl_over_usb_available, 0);
@@ -2662,7 +2798,8 @@ int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
 	case MHIP_STATUS_BAD_STATE:
 	case MHIP_STATUS_EP_NOT_FOUND:
 		IPA_MPM_ERR("UL chan cant be started err =%d\n", status);
-		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+			false, &is_acted);
 		return -EFAULT;
 		break;
 	default:
@@ -2670,7 +2807,8 @@ int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
 		break;
 	}
 
-	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+		false, &is_acted);
 
 	if (ret) {
 		IPA_MPM_ERR("Error cloking off PCIe clk, err = %d\n", ret);
@@ -2778,7 +2916,6 @@ static int ipa_mpm_probe(struct platform_device *pdev)
 	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
 		mutex_init(&ipa_mpm_ctx->md[i].mutex);
 		mutex_init(&ipa_mpm_ctx->md[i].mhi_mutex);
-		mutex_init(&ipa_mpm_ctx->md[i].lpm_mutex);
 	}
 
 	ipa_mpm_ctx->dev_info.pdev = pdev;
@@ -2916,7 +3053,7 @@ int ipa_mpm_panic_handler(char *buf, int size)
  * @note Cannot be called from atomic context
  *
  */
-int ipa3_get_mhip_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats)
+int ipa3_get_mhip_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
 {
 	int i;
 
@@ -2963,6 +3100,7 @@ int ipa3_get_mhip_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats)
 int ipa3_mpm_enable_adpl_over_odl(bool enable)
 {
 	int ret;
+	bool is_acted = true;
 
 	IPA_MPM_FUNC_ENTRY();
 
@@ -2976,19 +3114,21 @@ int ipa3_mpm_enable_adpl_over_odl(bool enable)
 		IPA_MPM_DBG("mpm enabling ADPL over ODL\n");
 
 		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON,
-			IPA_MPM_MHIP_CH_ID_2);
+			IPA_MPM_MHIP_CH_ID_2, false, &is_acted);
 		if (ret) {
-			IPA_MPM_ERR("Error cloking on PCIe clk, err = %d\n",
-				ret);
-			return ret;
+			IPA_MPM_ERR("Err %d clocking on PCIe clk\n", ret);
+			return ret;
 		}
 
 		ret = ipa_mpm_set_dma_mode(IPA_CLIENT_MHI_PRIME_DPL_PROD,
 			IPA_CLIENT_ODL_DPL_CONS, false);
 		if (ret) {
 			IPA_MPM_ERR("MPM failed to set dma mode to ODL\n");
-			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF,
-				IPA_MPM_MHIP_CH_ID_2);
+			if (is_acted)
+				ipa_mpm_vote_unvote_pcie_clk(CLK_OFF,
+					IPA_MPM_MHIP_CH_ID_2,
+					false,
+					&is_acted);
 			return ret;
 		}
 
@@ -2997,21 +3137,25 @@ int ipa3_mpm_enable_adpl_over_odl(bool enable)
 	} else {
 		/* dec clk count and set DMA to USB */
 		IPA_MPM_DBG("mpm disabling ADPL over ODL\n");
-
 		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF,
-			IPA_MPM_MHIP_CH_ID_2);
+						IPA_MPM_MHIP_CH_ID_2,
+						false,
+						&is_acted);
 		if (ret) {
-			IPA_MPM_ERR("Error cloking off PCIe clk, err = %d\n",
+			IPA_MPM_ERR("Err %d clocking off PCIe clk\n",
 				ret);
-				return ret;
+			return ret;
 		}
 
 		ret = ipa_mpm_set_dma_mode(IPA_CLIENT_MHI_PRIME_DPL_PROD,
 			IPA_CLIENT_USB_DPL_CONS, false);
 		if (ret) {
 			IPA_MPM_ERR("MPM failed to set dma mode to USB\n");
-			ipa_mpm_vote_unvote_pcie_clk(CLK_ON,
-				IPA_MPM_MHIP_CH_ID_2);
+			if (ipa_mpm_vote_unvote_pcie_clk(CLK_ON,
+							IPA_MPM_MHIP_CH_ID_2,
+							false,
+							&is_acted))
+				IPA_MPM_ERR("Err clocking on PCIe\n");
 			return ret;
 		}
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index 0c5f7e0..9631182 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -27,9 +27,9 @@
 
 #define IPA_NAT_IPV6CT_TEMP_MEM_SIZE 128
 
-#define IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC 3
-#define IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC 2
-#define IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC 4
+#define IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC 4
+#define IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC 3
+#define IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC 5
 
 /*
  * The base table max entries is limited by index into table 13 bits number.
@@ -728,15 +728,43 @@ static int ipa3_nat_send_init_cmd(struct ipahal_imm_cmd_ip_v4_nat_init *cmd,
 	struct ipa3_desc desc[IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC];
 	struct ipahal_imm_cmd_pyld *cmd_pyld[IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC];
 	int i, num_cmd = 0, result;
+	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
 
 	IPADBG("\n");
 
+	memset(desc, 0, sizeof(desc));
+	memset(cmd_pyld, 0, sizeof(cmd_pyld));
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("failed to construct coal close IC\n");
+			result = -ENOMEM;
+			goto destroy_imm_cmd;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
 	/* NO-OP IC for ensuring that IPA pipeline is empty */
 	cmd_pyld[num_cmd] =
 		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
 	if (!cmd_pyld[num_cmd]) {
 		IPAERR("failed to construct NOP imm cmd\n");
-		return -ENOMEM;
+		result = -ENOMEM;
+		goto destroy_imm_cmd;
 	}
 
 	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
@@ -798,15 +826,43 @@ static int ipa3_ipv6ct_send_init_cmd(struct ipahal_imm_cmd_ip_v6_ct_init *cmd)
 	struct ipahal_imm_cmd_pyld
 		*cmd_pyld[IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC];
 	int i, num_cmd = 0, result;
+	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
 
 	IPADBG("\n");
 
+	memset(desc, 0, sizeof(desc));
+	memset(cmd_pyld, 0, sizeof(cmd_pyld));
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("failed to construct coal close IC\n");
+			result = -ENOMEM;
+			goto destroy_imm_cmd;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
 	/* NO-OP IC for ensuring that IPA pipeline is empty */
 	cmd_pyld[num_cmd] =
 		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
 	if (!cmd_pyld[num_cmd]) {
 		IPAERR("failed to construct NOP imm cmd\n");
-		return -ENOMEM;
+		result = -ENOMEM;
+		goto destroy_imm_cmd;
 	}
 
 	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
@@ -1306,10 +1362,27 @@ int ipa3_table_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
 	struct ipa3_desc desc[IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC];
 	uint8_t cnt, num_cmd = 0;
 	int result = 0;
+	int i;
+	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+	int max_dma_table_cmds = IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC;
 
 	IPADBG("\n");
+
+	memset(desc, 0, sizeof(desc));
+	memset(cmd_pyld, 0, sizeof(cmd_pyld));
+
+	/*
+	 * One descriptor is reserved for the immediate command that
+	 * closes the coalescing endpoint, so the number of DMA entries
+	 * must not exceed IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC - 1 or the
+	 * ipa3_desc array would overflow.
+	 */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1)
+		max_dma_table_cmds -= 1;
+
 	if (!dma->entries ||
-		dma->entries >= IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC) {
+		dma->entries > (max_dma_table_cmds - 1)) {
 		IPAERR_RL("Invalid number of entries %d\n",
 			dma->entries);
 		result = -EPERM;
@@ -1330,6 +1403,28 @@ int ipa3_table_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
 		}
 	}
 
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("failed to construct coal close IC\n");
+			result = -ENOMEM;
+			goto destroy_imm_cmd;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
 	/* NO-OP IC for ensuring that IPA pipeline is empty */
 	cmd_pyld[num_cmd] =
 		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c
index 82a1954..69d35a3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c
@@ -287,10 +287,6 @@ int ipa_setup_odl_pipe(void)
 
 	ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
 	ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_hard_byte_limit_en = 1;
-	if (ipa3_is_mhip_offload_enabled()) {
-		IPADBG("MHIP is enabled, disable aggregation for ODL pipe");
-		ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
-	}
 	ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
 	ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_byte_limit =
 						IPA_ODL_AGGR_BYTE_LIMIT;
@@ -316,6 +312,19 @@ int ipa_setup_odl_pipe(void)
 	ipa_odl_ep_cfg->desc_fifo_sz = IPA_ODL_RX_RING_SIZE *
 						IPA_FIFO_ELEMENT_SIZE;
 	ipa3_odl_ctx->odl_client_hdl = -1;
+
+	/* For MHIP, the ODL path is pure DMA, so bypass aggregation and
+	 * checksum offload and clear hdr_len.
+	 */
+	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ &&
+		ipa3_is_mhip_offload_enabled()) {
+		IPADBG("MHIP enabled: bypass aggr + csum offload for ODL\n");
+		ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+		ipa_odl_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en =
+			IPA_DISABLE_CS_OFFLOAD;
+		ipa_odl_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 0;
+	}
+
 	ret = ipa3_setup_sys_pipe(ipa_odl_ep_cfg,
 			&ipa3_odl_ctx->odl_client_hdl);
 	return ret;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 9ba8fb9..4b1ca88 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -385,6 +385,9 @@ static void ipa3_handle_mhi_vote_req(struct qmi_handle *qmi_handle,
 					IPA_QMI_ERR_NOT_SUPPORTED_V01;
 			}
 			resp = &resp2;
+		} else {
+			IPAWANERR("clk_rate_valid is false\n");
+			return;
 		}
 	} else {
 		resp = imp_handle_vote_req(vote_req->mhi_vote);
@@ -1037,7 +1040,7 @@ int ipa3_qmi_rmv_offload_request_send(
 	ipa3_qmi_ctx->num_ipa_offload_connection);
 
 	/*  max as num_ipa_offload_connection */
-	if (req->filter_handle_list_len >=
+	if (req->filter_handle_list_len >
 		ipa3_qmi_ctx->num_ipa_offload_connection) {
 		IPAWANDBG(
 		"cur(%d), req_rmv(%d)\n",
@@ -1465,6 +1468,41 @@ static void ipa3_q6_clnt_install_firewall_rules_ind_cb(
 	}
 }
 
+static void ipa3_q6_clnt_bw_vhang_ind_cb(struct qmi_handle *handle,
+	struct sockaddr_qrtr *sq,
+	struct qmi_txn *txn,
+	const void *data)
+{
+	const struct ipa_bw_change_ind_msg_v01 *qmi_ind;
+	uint32_t bw_mbps = 0;
+
+	if (handle != ipa_q6_clnt) {
+		IPAWANERR("Wrong client\n");
+		return;
+	}
+
+	qmi_ind = (const struct ipa_bw_change_ind_msg_v01 *) data;
+
+	IPAWANDBG("Q6 BW change UL valid(%d):(%d)Kbps\n",
+		qmi_ind->peak_bw_ul_valid,
+		qmi_ind->peak_bw_ul);
+
+	IPAWANDBG("Q6 BW change DL valid(%d):(%d)Kbps\n",
+		qmi_ind->peak_bw_dl_valid,
+		qmi_ind->peak_bw_dl);
+
+	if (qmi_ind->peak_bw_ul_valid)
+		bw_mbps += qmi_ind->peak_bw_ul/1000;
+
+	if (qmi_ind->peak_bw_dl_valid)
+		bw_mbps += qmi_ind->peak_bw_dl/1000;
+
+	IPAWANDBG("vote modem BW (%u)\n", bw_mbps);
+	if (ipa3_vote_for_bus_bw(&bw_mbps))
+		IPAWANERR("Failed to vote BW (%u)\n", bw_mbps);
+}
+
 static void ipa3_q6_clnt_svc_arrive(struct work_struct *work)
 {
 	int rc;
@@ -1692,6 +1730,14 @@ static struct qmi_msg_handler client_handlers[] = {
 			QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_MAX_MSG_LEN_V01,
 		.fn = ipa3_q6_clnt_install_firewall_rules_ind_cb,
 	},
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_IPA_BW_CHANGE_INDICATION_V01,
+		.ei = ipa_bw_change_ind_msg_v01_ei,
+		.decoded_size =
+			IPA_BW_CHANGE_IND_MSG_V01_MAX_MSG_LEN,
+		.fn = ipa3_q6_clnt_bw_vhang_ind_cb,
+	},
 };
 
 
@@ -2058,6 +2104,51 @@ int ipa3_qmi_set_aggr_info(enum ipa_aggr_enum_type_v01 aggr_enum_type)
 		resp.resp.error, "ipa_mhi_prime_aggr_info_req_msg_v01");
 }
 
+int ipa3_qmi_req_ind(void)
+{
+	struct ipa_indication_reg_req_msg_v01 req;
+	struct ipa_indication_reg_resp_msg_v01 resp;
+	struct ipa_msg_desc req_desc, resp_desc;
+	int rc;
+
+	memset(&req, 0, sizeof(struct ipa_indication_reg_req_msg_v01));
+	memset(&resp, 0, sizeof(struct ipa_indication_reg_resp_msg_v01));
+
+	req.bw_change_ind_valid = true;
+	req.bw_change_ind = true;
+
+	req_desc.max_msg_len =
+		QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_INDICATION_REGISTER_REQ_V01;
+	req_desc.ei_array = ipa3_indication_reg_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len =
+		QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_INDICATION_REGISTER_RESP_V01;
+	resp_desc.ei_array = ipa3_indication_reg_resp_msg_data_v01_ei;
+
+	IPAWANDBG_LOW("Sending QMI_IPA_INDICATION_REGISTER_REQ_V01\n");
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
+		&req_desc, &req,
+		&resp_desc, &resp,
+		QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	if (rc < 0) {
+		IPAWANERR("QMI send Req %d failed, rc= %d\n",
+			QMI_IPA_INDICATION_REGISTER_REQ_V01,
+			rc);
+		return rc;
+	}
+
+	IPAWANDBG_LOW("QMI_IPA_INDICATION_REGISTER_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_INDICATION_REGISTER_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_indication_reg_req_msg_v01");
+}
+
 int ipa3_qmi_stop_data_qouta(void)
 {
 	struct ipa_stop_data_usage_quota_req_msg_v01 req;
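
For reference, the arithmetic in the bandwidth-change callback above (ipa3_q6_clnt_bw_vhang_ind_cb): the QMI indication reports peak UL/DL rates in Kbps while the bus vote takes one aggregate Mbps figure, so each valid leg is converted and summed. A worked example with illustrative numbers:

	uint32_t bw_mbps = 0;

	/* e.g. UL reported valid at 150000 Kbps, DL at 600000 Kbps */
	if (qmi_ind->peak_bw_ul_valid)
		bw_mbps += qmi_ind->peak_bw_ul / 1000;	/* += 150 */
	if (qmi_ind->peak_bw_dl_valid)
		bw_mbps += qmi_ind->peak_bw_dl / 1000;	/* += 600 */
	/* bw_mbps == 750 is then handed to ipa3_vote_for_bus_bw() */
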
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index c1fb534..9973ec6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -213,6 +213,7 @@ extern struct qmi_elem_info ipa_add_offload_connection_req_msg_v01_ei[];
 extern struct qmi_elem_info ipa_add_offload_connection_resp_msg_v01_ei[];
 extern struct qmi_elem_info ipa_remove_offload_connection_req_msg_v01_ei[];
 extern struct qmi_elem_info ipa_remove_offload_connection_resp_msg_v01_ei[];
+extern struct qmi_elem_info ipa_bw_change_ind_msg_v01_ei[];
 
 /**
  * struct ipa3_rmnet_context - IPA rmnet context
@@ -318,6 +319,8 @@ int ipa3_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req);
 int ipa3_qmi_set_aggr_info(
 	enum ipa_aggr_enum_type_v01 aggr_enum_type);
 
+int ipa3_qmi_req_ind(void);
+
 int ipa3_qmi_stop_data_qouta(void);
 
 void ipa3_q6_handshake_complete(bool ssr_bootup);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
index bf4fffe..2c85dc7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
@@ -1350,6 +1350,16 @@ struct qmi_elem_info ipa3_indication_reg_req_msg_data_v01_ei[] = {
 				ipa_mhi_ready_ind_valid),
 	},
 	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+				ipa_mhi_ready_ind),
+	},
+	{
 		.data_type	= QMI_OPT_FLAG,
 		.elem_len	= 1,
 		.elem_size	= sizeof(u8),
@@ -1370,14 +1380,24 @@ struct qmi_elem_info ipa3_indication_reg_req_msg_data_v01_ei[] = {
 				endpoint_desc_ind),
 	},
 	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+				bw_change_ind_valid),
+	},
+	{
 		.data_type	= QMI_UNSIGNED_1_BYTE,
 		.elem_len	= 1,
 		.elem_size	= sizeof(u8),
 		.array_type	= NO_ARRAY,
-		.tlv_type	= 0x12,
+		.tlv_type	= 0x14,
 		.offset		= offsetof(
 			struct ipa_indication_reg_req_msg_v01,
-				ipa_mhi_ready_ind),
+				bw_change_ind),
 	},
 	{
 		.data_type	= QMI_EOTI,
@@ -5118,3 +5138,47 @@ struct qmi_elem_info ipa_remove_offload_connection_resp_msg_v01_ei[] = {
 		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
+
+struct qmi_elem_info ipa_bw_change_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct ipa_bw_change_ind_msg_v01,
+					   peak_bw_ul_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct ipa_bw_change_ind_msg_v01,
+					   peak_bw_ul),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct ipa_bw_change_ind_msg_v01,
+					   peak_bw_dl_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct ipa_bw_change_ind_msg_v01,
+					   peak_bw_dl),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
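
The TLV renumbering above follows the QMI encoding convention that an optional field contributes two qmi_elem_info entries sharing one tlv_type: a QMI_OPT_FLAG entry for the <field>_valid flag, then the payload entry (the earlier table had reused tlv_type 0x12 for bw_change_ind). A sketch of the pairing for a hypothetical optional u32 field; the message and field names are invented for illustration:

	struct example_req_msg_v01 {	/* hypothetical message */
		u8  rate_valid;		/* presence flag */
		u32 rate;		/* payload, sent only if flag set */
	};

	static struct qmi_elem_info example_req_msg_v01_ei[] = {
		{
			.data_type	= QMI_OPT_FLAG,
			.elem_len	= 1,
			.elem_size	= sizeof(u8),
			.array_type	= NO_ARRAY,
			.tlv_type	= 0x10,	/* same id as payload below */
			.offset		= offsetof(struct example_req_msg_v01,
						   rate_valid),
		},
		{
			.data_type	= QMI_UNSIGNED_4_BYTE,
			.elem_len	= 1,
			.elem_size	= sizeof(u32),
			.array_type	= NO_ARRAY,
			.tlv_type	= 0x10,	/* pairs with the flag above */
			.offset		= offsetof(struct example_req_msg_v01,
						   rate),
		},
		{
			.data_type	= QMI_EOTI,
			.array_type	= NO_ARRAY,
			.tlv_type	= QMI_COMMON_TLV_TYPE,
		},
	};
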
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index b39b5a5..7c6f123 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -14,7 +14,7 @@
 #define IPA_RT_STATUS_OF_DEL_FAILED	(-1)
 #define IPA_RT_STATUS_OF_MDFY_FAILED (-1)
 
-#define IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC 5
+#define IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC 6
 
 #define IPA_RT_GET_RULE_TYPE(__entry) \
 	( \
@@ -97,6 +97,7 @@ static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
 
 		proc_ctx = (entry->proc_ctx) ? : entry->hdr->proc_ctx;
 		if ((proc_ctx == NULL) ||
+			ipa3_check_idr_if_freed(proc_ctx) ||
 			(proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) {
 			gen_params.hdr_type = IPAHAL_RT_RULE_HDR_NONE;
 			gen_params.hdr_ofst = 0;
@@ -474,6 +475,7 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
 	struct ipa3_rt_tbl_set *set;
 	struct ipa3_rt_tbl *tbl;
 	u32 tbl_hdr_width;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
 
 	tbl_hdr_width = ipahal_get_hw_tbl_hdr_width();
 	memset(desc, 0, sizeof(desc));
@@ -564,6 +566,27 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
 		goto fail_size_valid;
 	}
 
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("failed to construct coal close IC\n");
+			goto fail_size_valid;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
 	/*
 	 * SRAM memory not allocated to hash tables. Sending
 	 * command to hash tables(filer/routing) operation not supported.
@@ -588,7 +611,7 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
 		if (!cmd_pyld[num_cmd]) {
 			IPAERR(
 			"fail construct register_write imm cmd. IP %d\n", ip);
-			goto fail_size_valid;
+			goto fail_imm_cmd_construct;
 		}
 		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
 		num_cmd++;
@@ -747,7 +770,8 @@ struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name)
 
 	set = &ipa3_ctx->rt_tbl_set[ip];
 	list_for_each_entry(entry, &set->head_rt_tbl_list, link) {
-		if (!strcmp(name, entry->name))
+		if (!ipa3_check_idr_if_freed(entry) &&
+			!strcmp(name, entry->name))
 			return entry;
 	}
 
@@ -1747,7 +1771,8 @@ int __ipa3_del_rt_rule(u32 rule_hdl)
 
 	if (entry->hdr)
 		__ipa3_release_hdr(entry->hdr->id);
-	else if (entry->proc_ctx)
+	else if (entry->proc_ctx &&
+		(!ipa3_check_idr_if_freed(entry->proc_ctx)))
 		__ipa3_release_hdr_proc_ctx(entry->proc_ctx->id);
 	list_del(&entry->link);
 	entry->tbl->rule_cnt--;
@@ -1948,7 +1973,9 @@ int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only)
 				tbl->rule_cnt--;
 				if (rule->hdr)
 					__ipa3_release_hdr(rule->hdr->id);
-				else if (rule->proc_ctx)
+				else if (rule->proc_ctx &&
+					(!ipa3_check_idr_if_freed(
+						rule->proc_ctx)))
 					__ipa3_release_hdr_proc_ctx(
 						rule->proc_ctx->id);
 				rule->cookie = 0;
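
The ipa3_check_idr_if_freed() guard used in the three hunks above is defined later in this patch (ipa_utils.c); it confirms a cached proc_ctx or table pointer is still registered in the driver IDR before it is dereferenced. Note it is a linear scan under the idr_lock spinlock, a cost accepted on these infrequent teardown paths. The usage pattern, as in __ipa3_del_rt_rule():

	/* dereference the cached pointer only if it is still registered */
	if (entry->proc_ctx && !ipa3_check_idr_if_freed(entry->proc_ctx))
		__ipa3_release_hdr_proc_ctx(entry->proc_ctx->id);
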
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
index 5df8dcb..d110961 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -14,7 +14,7 @@
 
 #define IPA_UC_DBG_STATS_GET_PROT_ID(x) (0xff & ((x) >> 24))
 #define IPA_UC_DBG_STATS_GET_OFFSET(x) (0x00ffffff & (x))
-
+#define IPA_UC_EVENT_RING_SIZE 10
+
 /**
  * Mailbox register to Interrupt HWP for CPU cmd
  * Usage of IPA_UC_MAILBOX_m_n doorbell instead of IPA_IRQ_EE_UC_0
@@ -24,6 +24,11 @@
 #define IPA_CPU_2_HW_CMD_MBOX_m          0
 #define IPA_CPU_2_HW_CMD_MBOX_n         23
 
+#define IPA_UC_ERING_m 0
+#define IPA_UC_ERING_n_r 1
+#define IPA_UC_ERING_n_w 0
+#define IPA_UC_MON_INTERVAL 5
+
 /**
  * enum ipa3_cpu_2_hw_commands - Values that represent the commands from the CPU
  * IPA_CPU_2_HW_CMD_NO_OP : No operation is required.
@@ -39,6 +44,7 @@
  * IPA_CPU_2_HW_CMD_RESET_PIPE : Command to reset a pipe - SW WA for a HW bug.
  * IPA_CPU_2_HW_CMD_GSI_CH_EMPTY : Command to check for GSI channel emptiness.
  * IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO: Command to store remote IPA Info
+ * IPA_CPU_2_HW_CMD_SETUP_EVENT_RING:  Command to setup the event ring
  */
 enum ipa3_cpu_2_hw_commands {
 	IPA_CPU_2_HW_CMD_NO_OP                     =
@@ -65,6 +71,8 @@ enum ipa3_cpu_2_hw_commands {
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 10),
 	IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO           =
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 11),
+	IPA_CPU_2_HW_CMD_SETUP_EVENT_RING          =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 12),
 };
 
 /**
@@ -161,6 +169,23 @@ union IpaHwChkChEmptyCmdData_t {
 	u32 raw32b;
 } __packed;
 
+struct IpaSetupEventRingCmdParams_t {
+	u32 ring_base_pa;
+	u32 ring_base_pa_hi;
+	u32 ring_size; /* in elements; set to IPA_UC_EVENT_RING_SIZE (10) */
+} __packed;
+
+/**
+ * Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_SETUP_EVENT_RING command. Parameters are
+ * sent as 32b immediate parameters.
+ */
+union IpaSetupEventRingCmdData_t {
+	struct IpaSetupEventRingCmdParams_t event;
+	u32 raw32b[6]; /* uC-internal representation */
+} __packed;
 
 /**
  * Structure holding the parameters for IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO
@@ -219,11 +244,11 @@ const char *ipa_hw_error_str(enum ipa3_hw_errors err_type)
 
 static void ipa3_uc_save_dbg_stats(u32 size)
 {
-	u8 protocol_id;
+	u8 prot_id;
 	u32 addr_offset;
 	void __iomem *mmio;
 
-	protocol_id = IPA_UC_DBG_STATS_GET_PROT_ID(
+	prot_id = IPA_UC_DBG_STATS_GET_PROT_ID(
 		ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams_1);
 	addr_offset = IPA_UC_DBG_STATS_GET_OFFSET(
 		ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams_1);
@@ -234,36 +259,72 @@ static void ipa3_uc_save_dbg_stats(u32 size)
 		IPAERR("unexpected NULL mmio\n");
 		return;
 	}
-	switch (protocol_id) {
+	switch (prot_id) {
 	case IPA_HW_PROTOCOL_AQC:
+		if (!ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio) {
+			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_size =
+				size;
+			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_ofst =
+				addr_offset;
+			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio =
+				mmio;
+		} else
+			goto unmap;
 		break;
 	case IPA_HW_PROTOCOL_11ad:
 		break;
 	case IPA_HW_PROTOCOL_WDI:
-		ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_size = size;
-		ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_ofst = addr_offset;
-		ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio = mmio;
+		if (!ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio) {
+			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_size =
+				size;
+			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_ofst =
+				addr_offset;
+			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio =
+				mmio;
+		} else
+			goto unmap;
 		break;
 	case IPA_HW_PROTOCOL_WDI3:
-		ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_size = size;
-		ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_ofst = addr_offset;
-		ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio = mmio;
+		if (!ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio) {
+			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_size =
+				size;
+			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_ofst =
+				addr_offset;
+			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio =
+				mmio;
+		} else
+			goto unmap;
 		break;
 	case IPA_HW_PROTOCOL_ETH:
 		break;
 	case IPA_HW_PROTOCOL_MHIP:
-		ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_size = size;
-		ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_ofst = addr_offset;
-		ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio = mmio;
+		if (!ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio) {
+			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_size =
+				size;
+			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_ofst =
+				addr_offset;
+			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio =
+				mmio;
+		} else
+			goto unmap;
 		break;
 	case IPA_HW_PROTOCOL_USB:
-		ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_size = size;
-		ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_ofst = addr_offset;
-		ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio = mmio;
+		if (!ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio) {
+			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_size =
+				size;
+			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_ofst =
+				addr_offset;
+			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio =
+				mmio;
+		} else
+			goto unmap;
 		break;
 	default:
-		IPAERR("unknown protocols %d\n", protocol_id);
+		IPAERR("unknown protocol %d\n", prot_id);
 	}
+	return;
+unmap:
+	iounmap(mmio);
 }
 
 static void ipa3_log_evt_hdlr(void)
@@ -314,6 +375,68 @@ static void ipa3_log_evt_hdlr(void)
 	ipa3_ctx->uc_ctx.uc_event_top_ofst = 0;
 }
 
+static void ipa3_event_ring_hdlr(void)
+{
+	u32 ering_rp, offset;
+	void *rp_va;
+	struct ipa_inform_wlan_bw bw_info;
+	struct eventElement_t *e_b = NULL, *e_q = NULL;
+	int mul = 0;
+
+	ering_rp = ipahal_read_reg_mn(IPA_UC_MAILBOX_m_n,
+		IPA_UC_ERING_m, IPA_UC_ERING_n_r);
+	offset = sizeof(struct eventElement_t);
+	ipa3_ctx->uc_ctx.ering_rp = ering_rp;
+
+	while (ipa3_ctx->uc_ctx.ering_rp_local != ering_rp) {
+		rp_va = ipa3_ctx->uc_ctx.event_ring.base +
+			ipa3_ctx->uc_ctx.ering_rp_local;
+
+		if (((struct eventElement_t *) rp_va)->Opcode == BW_NOTIFY) {
+			e_b = ((struct eventElement_t *) rp_va);
+			IPADBG("prot(%d), index (%d) throughput (%llu)\n",
+			e_b->Protocol,
+			e_b->Value.bw_param.ThresholdIndex,
+			e_b->Value.bw_param.throughput);
+			/* check values */
+			mul = 1000 * IPA_UC_MON_INTERVAL;
+			if (e_b->Value.bw_param.throughput <
+				ipa3_ctx->uc_ctx.bw_info_max*mul) {
+				memset(&bw_info, 0,
+					sizeof(struct ipa_inform_wlan_bw));
+				bw_info.index =
+					e_b->Value.bw_param.ThresholdIndex;
+				mul = 1000 / IPA_UC_MON_INTERVAL;
+				bw_info.throughput =
+					e_b->Value.bw_param.throughput*mul;
+				if (ipa3_inform_wlan_bw(&bw_info))
+					IPAERR_RL("failed to inform wlan, index %d\n",
+						bw_info.index);
+			}
+		} else if (((struct eventElement_t *) rp_va)->Opcode
+			== QUOTA_NOTIFY) {
+			e_q = ((struct eventElement_t *) rp_va);
+			IPADBG("got quota-notify %d reach(%d) usage (%llu)\n",
+			e_q->Protocol,
+			e_q->Value.quota_param.ThreasholdReached,
+			e_q->Value.quota_param.usage);
+			if (ipa3_broadcast_wdi_quota_reach_ind(0,
+				e_q->Value.quota_param.usage))
+				IPAERR_RL("failed on quota_reach for %d\n",
+				e_q->Protocol);
+		}
+		ipa3_ctx->uc_ctx.ering_rp_local += offset;
+		ipa3_ctx->uc_ctx.ering_rp_local %=
+			ipa3_ctx->uc_ctx.event_ring.size;
+		/* update wp */
+		ipa3_ctx->uc_ctx.ering_wp_local += offset;
+		ipa3_ctx->uc_ctx.ering_wp_local %=
+			ipa3_ctx->uc_ctx.event_ring.size;
+		ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n, IPA_UC_ERING_m,
+			IPA_UC_ERING_n_w, ipa3_ctx->uc_ctx.ering_wp_local);
+	}
+}
+
 /**
  * ipa3_uc_state_check() - Check the status of the uC interface
  *
@@ -441,6 +564,11 @@ static void ipa3_uc_event_handler(enum ipa_irq_type interrupt,
 		IPADBG("uC evt log info ofst=0x%x\n",
 			ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams);
 		ipa3_log_evt_hdlr();
+	} else if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+		IPA_HW_2_CPU_EVNT_RING_NOTIFY) {
+		IPADBG("uC event ring notify, params=0x%x\n",
+			ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams);
+		ipa3_event_ring_hdlr();
 	} else {
 		IPADBG("unsupported uC evt opcode=%u\n",
 				ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
@@ -1044,7 +1172,7 @@ int ipa3_uc_debug_stats_alloc(
 	return result;
 }
 
-int ipa3_uc_debug_stats_dealloc(uint32_t protocol)
+int ipa3_uc_debug_stats_dealloc(uint32_t prot_id)
 {
 	int result;
 	struct ipa_mem_buffer cmd;
@@ -1060,7 +1188,7 @@ int ipa3_uc_debug_stats_dealloc(uint32_t protocol)
 	}
 	cmd_data = (struct IpaHwOffloadStatsDeAllocCmdData_t *)
 		cmd.base;
-	cmd_data->protocol = protocol;
+	cmd_data->protocol = prot_id;
 	command = IPA_CPU_2_HW_CMD_OFFLOAD_STATS_DEALLOC;
 
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
@@ -1073,8 +1201,10 @@ int ipa3_uc_debug_stats_dealloc(uint32_t protocol)
 		IPAERR("fail to dealloc offload stats\n");
 		goto cleanup;
 	}
-	switch (protocol) {
+	switch (prot_id) {
 	case IPA_HW_PROTOCOL_AQC:
+		iounmap(ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio);
+		ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;
 		break;
 	case IPA_HW_PROTOCOL_11ad:
 		break;
@@ -1089,7 +1219,7 @@ int ipa3_uc_debug_stats_dealloc(uint32_t protocol)
 	case IPA_HW_PROTOCOL_ETH:
 		break;
 	default:
-		IPAERR("unknown protocols %d\n", protocol);
+		IPAERR("unknown protocol %d\n", prot_id);
 	}
 	result = 0;
 cleanup:
@@ -1099,3 +1229,287 @@ int ipa3_uc_debug_stats_dealloc(uint32_t protocol)
 	IPADBG("exit\n");
 	return result;
 }
+
+int ipa3_uc_setup_event_ring(void)
+{
+	int res = 0;
+	struct ipa_mem_buffer cmd, *ring;
+	union IpaSetupEventRingCmdData_t *ring_info;
+
+	ring = &ipa3_ctx->uc_ctx.event_ring;
+	/* Allocate event ring */
+	ring->size = sizeof(struct eventElement_t) * IPA_UC_EVENT_RING_SIZE;
+	ring->base = dma_alloc_coherent(ipa3_ctx->uc_pdev, ring->size,
+		&ring->phys_base, GFP_KERNEL);
+	if (ring->base == NULL)
+		return -ENOMEM;
+
+	cmd.size = sizeof(*ring_info);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		dma_free_coherent(ipa3_ctx->uc_pdev, ring->size,
+			ring->base, ring->phys_base);
+		return -ENOMEM;
+	}
+
+	ring_info = (union IpaSetupEventRingCmdData_t *) cmd.base;
+	ring_info->event.ring_base_pa = (u32) (ring->phys_base & 0xFFFFFFFF);
+	ring_info->event.ring_base_pa_hi =
+		(u32) ((ring->phys_base & 0xFFFFFFFF00000000) >> 32);
+	ring_info->event.ring_size = IPA_UC_EVENT_RING_SIZE;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	res = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+		IPA_CPU_2_HW_CMD_SETUP_EVENT_RING, 0,
+		false, 10 * HZ);
+
+	if (res) {
+		IPAERR("failed to setup event ring 0x%x 0x%x, size %d\n",
+			ring_info->event.ring_base_pa,
+			ring_info->event.ring_base_pa_hi,
+			ring_info->event.ring_size);
+		goto free_cmd;
+	}
+
+	ipa3_ctx->uc_ctx.uc_event_ring_valid = true;
+	/* write wp/rp values */
+	ipa3_ctx->uc_ctx.ering_rp_local = 0;
+	ipa3_ctx->uc_ctx.ering_wp_local =
+		ring->size - sizeof(struct eventElement_t);
+	ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n,
+		IPA_UC_ERING_m, IPA_UC_ERING_n_r, 0);
+	ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n,
+		IPA_UC_ERING_m, IPA_UC_ERING_n_w,
+			ipa3_ctx->uc_ctx.ering_wp_local);
+	ipa3_ctx->uc_ctx.ering_wp =
+		ipa3_ctx->uc_ctx.ering_wp_local;
+	ipa3_ctx->uc_ctx.ering_rp = 0;
+
+free_cmd:
+	dma_free_coherent(ipa3_ctx->uc_pdev,
+		cmd.size, cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_quota_monitor(uint64_t quota)
+{
+	int ind, res = 0;
+	struct ipa_mem_buffer cmd;
+	struct IpaQuotaMonitoring_t *quota_info;
+
+	/* check uc-event-ring setup */
+	if (!ipa3_ctx->uc_ctx.uc_event_ring_valid) {
+		IPAERR("uc_event_ring_valid %d\n",
+		ipa3_ctx->uc_ctx.uc_event_ring_valid);
+		return -EINVAL;
+	}
+
+	cmd.size = sizeof(*quota_info);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL)
+		return -ENOMEM;
+
+	quota_info = (struct IpaQuotaMonitoring_t *)cmd.base;
+	quota_info->protocol = IPA_HW_PROTOCOL_WDI3;
+	quota_info->params.WdiQM.Quota = quota;
+	quota_info->params.WdiQM.info.Num = 4;
+	ind = ipa3_ctx->fnr_info.hw_counter_offset +
+		UL_HW - 1;
+	quota_info->params.WdiQM.info.Offset[0] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+		sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.hw_counter_offset +
+		DL_ALL - 1;
+	quota_info->params.WdiQM.info.Offset[1] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+		sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.sw_counter_offset +
+		UL_HW_CACHE - 1;
+	quota_info->params.WdiQM.info.Offset[2] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+		sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.sw_counter_offset +
+		UL_WLAN_TX - 1;
+	quota_info->params.WdiQM.info.Offset[3] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+		sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	quota_info->params.WdiQM.info.Interval =
+		IPA_UC_MON_INTERVAL;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	res = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+		IPA_CPU_2_HW_CMD_QUOTA_MONITORING,
+		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+		false, 10 * HZ);
+
+	if (res) {
+		IPAERR("failed to set quota %llu, number of offsets %d\n",
+			quota_info->params.WdiQM.Quota,
+			quota_info->params.WdiQM.info.Num);
+		goto free_cmd;
+	}
+
+	IPADBG(" offset1 %d offset2 %d offset3 %d offset4 %d\n",
+			quota_info->params.WdiQM.info.Offset[0],
+			quota_info->params.WdiQM.info.Offset[1],
+			quota_info->params.WdiQM.info.Offset[2],
+			quota_info->params.WdiQM.info.Offset[3]);
+
+free_cmd:
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return res;
+}
+
+int ipa3_uc_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+	int i, ind, res = 0;
+	struct ipa_mem_buffer cmd;
+	struct IpaBwMonitoring_t *bw_info;
+
+	if (!info)
+		return -EINVAL;
+
+	/* check uc-event-ring setup */
+	if (!ipa3_ctx->uc_ctx.uc_event_ring_valid) {
+		IPAERR("uc_event_ring_valid %d\n",
+		ipa3_ctx->uc_ctx.uc_event_ring_valid);
+		return -EINVAL;
+	}
+
+	/* check max entry */
+	if (info->num > BW_MONITORING_MAX_THRESHOLD) {
+		IPAERR("%d, support max %d bw monitor\n", info->num,
+		BW_MONITORING_MAX_THRESHOLD);
+		return -EINVAL;
+	}
+
+	cmd.size = sizeof(*bw_info);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL)
+		return -ENOMEM;
+
+	bw_info = (struct IpaBwMonitoring_t *)cmd.base;
+	bw_info->protocol = IPA_HW_PROTOCOL_WDI3;
+	bw_info->params.WdiBw.NumThresh = info->num;
+	bw_info->params.WdiBw.Stop = info->stop;
+	IPADBG("stop bw-monitor? %d\n", bw_info->params.WdiBw.Stop);
+
+	/* cache the bw info */
+	ipa3_ctx->uc_ctx.info.num = info->num;
+	ipa3_ctx->uc_ctx.info.stop = info->stop;
+	ipa3_ctx->uc_ctx.bw_info_max = 0;
+
+	for (i = 0; i < info->num; i++) {
+		bw_info->params.WdiBw.BwThreshold[i] = info->threshold[i];
+		IPADBG("threshold %d: %llu\n", i,
+			bw_info->params.WdiBw.BwThreshold[i]);
+		ipa3_ctx->uc_ctx.info.threshold[i] = info->threshold[i];
+		if (info->threshold[i] > ipa3_ctx->uc_ctx.bw_info_max)
+			ipa3_ctx->uc_ctx.bw_info_max = info->threshold[i];
+	}
+	/* set max to both UL+DL */
+	ipa3_ctx->uc_ctx.bw_info_max *= 2;
+	IPADBG("bw-monitor max %llu\n", ipa3_ctx->uc_ctx.bw_info_max);
+
+	bw_info->params.WdiBw.info.Num = 8;
+	ind = ipa3_ctx->fnr_info.hw_counter_offset +
+		UL_HW - 1;
+	bw_info->params.WdiBw.info.Offset[0] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.hw_counter_offset +
+		DL_HW - 1;
+	bw_info->params.WdiBw.info.Offset[1] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.hw_counter_offset +
+		DL_ALL - 1;
+	bw_info->params.WdiBw.info.Offset[2] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.hw_counter_offset +
+		UL_ALL - 1;
+	bw_info->params.WdiBw.info.Offset[3] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.sw_counter_offset +
+		UL_HW_CACHE - 1;
+	bw_info->params.WdiBw.info.Offset[4] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.sw_counter_offset +
+		DL_HW_CACHE - 1;
+	bw_info->params.WdiBw.info.Offset[5] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.sw_counter_offset +
+		UL_WLAN_TX - 1;
+	bw_info->params.WdiBw.info.Offset[6] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.sw_counter_offset +
+		DL_WLAN_TX - 1;
+	bw_info->params.WdiBw.info.Offset[7] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	bw_info->params.WdiBw.info.Interval =
+		IPA_UC_MON_INTERVAL;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	res = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+		IPA_CPU_2_HW_CMD_BW_MONITORING,
+			IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+			false, 10 * HZ);
+
+	if (res) {
+		IPAERR("failed to set %d bw levels with %d counters\n",
+			bw_info->params.WdiBw.NumThresh,
+			bw_info->params.WdiBw.info.Num);
+		goto free_cmd;
+	}
+
+free_cmd:
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return res;
+}
+
+int ipa3_set_wlan_tx_info(struct ipa_wdi_tx_info *info)
+{
+	struct ipa_flt_rt_stats stats;
+	struct ipacm_fnr_info fnr_info;
+
+	memset(&fnr_info, 0, sizeof(struct ipacm_fnr_info));
+	if (!ipa_get_fnr_info(&fnr_info)) {
+		IPAERR("FNR counters have not been configured\n");
+		return -EINVAL;
+	}
+
+	/* update sw counters */
+	memset(&stats, 0, sizeof(struct ipa_flt_rt_stats));
+	stats.num_bytes = info->sta_tx;
+	if (ipa_set_flt_rt_stats(fnr_info.sw_counter_offset +
+		UL_WLAN_TX, stats)) {
+		IPAERR("Failed to set stats to ul_wlan_tx %d\n",
+			fnr_info.sw_counter_offset + UL_WLAN_TX);
+		return -EINVAL;
+	}
+
+	stats.num_bytes = info->ap_tx;
+	if (ipa_set_flt_rt_stats(fnr_info.sw_counter_offset +
+		DL_WLAN_TX, stats)) {
+		IPAERR("Failed to set stats to dl_wlan_tx %d\n",
+			fnr_info.sw_counter_offset + DL_WLAN_TX);
+		return -EINVAL;
+	}
+
+	return 0;
+}
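
ipa3_uc_quota_monitor() and ipa3_uc_bw_monitor() above recompute the same FnR counter address twelve times between them. A sketch of that computation as one helper; the reading that the trailing "+ 8" selects the byte counter inside struct ipa_flt_rt_stats is an assumption, and the helper name is illustrative:

	/* sketch: SRAM offset of the byte counter for one FnR counter id */
	static u16 fnr_stats_ofst(u32 counter_base, u32 counter_id)
	{
		u32 ind = counter_base + counter_id - 1; /* ids are 1-based */

		return IPA_MEM_PART(stats_fnr_ofst) +
			sizeof(struct ipa_flt_rt_stats) * ind + 8;
	}

With it, the quota case's first offset would read fnr_stats_ofst(ipa3_ctx->fnr_info.hw_counter_offset, UL_HW).
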
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
index 4d296ec..4865664 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
@@ -30,6 +30,8 @@
 #define MAX_MHIP_CHANNELS 4
 #define MAX_USB_CHANNELS 2
 
+#define BW_QUOTA_MONITORING_MAX_ADDR_OFFSET 8
+#define BW_MONITORING_MAX_THRESHOLD 3
+
 /**
  *  @brief   Enum value determined based on the feature it
  *           corresponds to
@@ -98,6 +100,7 @@ enum ipa4_hw_protocol {
  * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the
  *  device
  * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
+ * @IPA_HW_2_CPU_EVNT_RING_NOTIFY : Event to notify APPS of new event ring entries
  */
 enum ipa3_hw_2_cpu_events {
 	IPA_HW_2_CPU_EVENT_NO_OP     =
@@ -106,6 +109,8 @@ enum ipa3_hw_2_cpu_events {
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
 	IPA_HW_2_CPU_EVENT_LOG_INFO  =
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+	IPA_HW_2_CPU_EVNT_RING_NOTIFY  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
 };
 
 /**
@@ -447,6 +452,8 @@ struct Ipa3HwStatsNTNInfoData_t {
  * uC stats calculation for a particular protocol
  * @IPA_CPU_2_HW_CMD_OFFLOAD_STATS_DEALLOC: Command to stop the
  * uC stats calculation for a particular protocol
+ * @IPA_CPU_2_HW_CMD_QUOTA_MONITORING : Command to start the Quota monitoring
+ * @IPA_CPU_2_HW_CMD_BW_MONITORING : Command to start the BW monitoring
  */
 enum ipa_cpu_2_hw_offload_commands {
 	IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP  =
@@ -461,36 +468,13 @@ enum ipa_cpu_2_hw_offload_commands {
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
 	IPA_CPU_2_HW_CMD_OFFLOAD_STATS_DEALLOC =
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
+	IPA_CPU_2_HW_CMD_QUOTA_MONITORING =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7),
+	IPA_CPU_2_HW_CMD_BW_MONITORING =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8),
 };
 
 /**
- * struct IpaOffloadStatschannel_info - channel info for uC
- * stats
- * @dir: Director of the channel ID DIR_CONSUMER =0,
- * DIR_PRODUCER = 1
- * @ch_id: Channel id of the IPA endpoint for which stats need
- * to be calculated, 0xFF means invalid channel or disable stats
- * on already stats enabled channel
- */
-struct IpaOffloadStatschannel_info {
-	uint8_t dir;
-	uint8_t ch_id;
-} __packed;
-
-/**
- * struct IpaHwOffloadStatsAllocCmdData_t - protocol info for uC
- * stats start
- * @protocol: Enum that indicates the protocol type
- * @ch_id_info: Channel id of the IPA endpoint for which stats
- * need to be calculated
- */
-struct IpaHwOffloadStatsAllocCmdData_t {
-	uint32_t protocol;
-	struct IpaOffloadStatschannel_info
-		ch_id_info[MAX_CH_STATS_SUPPORTED];
-} __packed;
-
-/**
  * struct IpaHwOffloadStatsDeAllocCmdData_t - protocol info for
  * uC stats stop
  * @protocol: Enum that indicates the protocol type
@@ -623,6 +607,48 @@ struct IpaHwOffloadSetUpCmdData_t {
 	union IpaHwSetUpCmd SetupCh_params;
 } __packed;
 
+struct IpaCommonMonitoringParams_t {
+	/* number of valid entries in Offset[], at most 8 */
+	uint8_t  Num;
+	/* sampling interval in ms */
+	uint8_t  Interval;
+	uint16_t Offset[BW_QUOTA_MONITORING_MAX_ADDR_OFFSET];
+} __packed; /* 18 bytes */
+
+struct IpaWdiQuotaMonitoringParams_t {
+	uint64_t Quota;
+	struct IpaCommonMonitoringParams_t info;
+} __packed;
+
+struct IpaWdiBwMonitoringParams_t {
+	uint64_t BwThreshold[BW_MONITORING_MAX_THRESHOLD];
+	struct IpaCommonMonitoringParams_t info;
+	uint8_t NumThresh;
+	/* non-zero to stop BW monitoring, zero to start it */
+	uint8_t Stop;
+} __packed;
+
+union IpaQuotaMonitoringParams_t {
+	struct IpaWdiQuotaMonitoringParams_t WdiQM;
+} __packed;
+
+union IpaBwMonitoringParams_t {
+	struct IpaWdiBwMonitoringParams_t WdiBw;
+} __packed;
+
+struct IpaQuotaMonitoring_t {
+	/* selects how the union below is interpreted */
+	uint32_t protocol;
+	union IpaQuotaMonitoringParams_t  params;
+} __packed;
+
+struct IpaBwMonitoring_t {
+	/* selects how the union below is interpreted */
+	uint32_t protocol;
+	union IpaBwMonitoringParams_t   params;
+} __packed;
+
 struct IpaHwOffloadSetUpCmdData_t_v4_0 {
 	u32 protocol;
 	union IpaHwSetUpCmd SetupCh_params;
@@ -644,6 +670,46 @@ struct IpaHwOffloadCommonChCmdData_t {
 	union IpaHwCommonChCmd CommonCh_params;
 } __packed;
 
+enum EVENT_2_CPU_OPCODE {
+	BW_NOTIFY = 0x0,
+	QUOTA_NOTIFY = 0x1,
+};
+
+struct EventStructureBwMonitoring_t {
+	uint32_t ThresholdIndex;
+	uint64_t throughput;
+} __packed;
+
+struct EventStructureQuotaMonitoring_t {
+	/* indicates the threshold has been reached */
+	uint32_t ThreasholdReached;
+	uint64_t usage;
+} __packed;
+
+union EventParamFormat_t {
+	struct EventStructureBwMonitoring_t bw_param;
+	struct EventStructureQuotaMonitoring_t quota_param;
+} __packed;
+
+/* EVENT RING ELEMENT STRUCTURE
+ * | Word | Bits  | Field     |
+ * |------|-------|-----------|
+ * |  0   | 0-7   | Protocol  |
+ * |  0   | 8-15  | Reserved0 |
+ * |  0   | 16-23 | Opcode    |
+ * |  0   | 24-31 | Reserved1 |
+ * |  1   | 0-31  | Word1     |
+ * |  2   | 0-31  | Word2     |
+ * |  3   | 0-31  | Word3     |
+ */
+struct eventElement_t {
+	uint8_t Protocol;
+	uint8_t Reserved0;
+	uint8_t Opcode;
+	uint8_t Reserved1;
+	union EventParamFormat_t Value;
+} __packed;
+
 struct IpaHwOffloadCommonChCmdData_t_v4_0 {
 	u32 protocol;
 	union IpaHwCommonChCmd CommonCh_params;
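
The word/bit table above can be pinned down at build time. These BUILD_BUG_ON() checks are an illustrative addition, not part of the patch, assuming the packed definitions above (4 header bytes plus a 12-byte packed union, 16 bytes per element):

	static inline void event_element_layout_check(void)
	{
		BUILD_BUG_ON(offsetof(struct eventElement_t, Opcode) != 2);
		BUILD_BUG_ON(offsetof(struct eventElement_t, Value) != 4);
		BUILD_BUG_ON(sizeof(struct eventElement_t) != 16);
	}
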
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index d6a057b..c0f66ec 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -419,7 +419,7 @@ static void ipa3_uc_wdi_event_handler(struct IpaHwSharedMemCommonMapping_t
  * @note Cannot be called from atomic context
  *
  */
-int ipa3_get_wdi_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats)
+int ipa3_get_wdi_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
 {
 	int i;
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index a800150..c9f84cf 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -65,6 +65,7 @@
 
 #define IPA_FILT_ROUT_HASH_REG_VAL_v4_2 (0x00000000)
 #define IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC (15)
+#define IPA_COAL_CLOSE_FRAME_CMD_TIMEOUT_MSEC (500)
 
 #define IPA_AGGR_BYTE_LIMIT (\
 		IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \
@@ -2423,7 +2424,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 16, 10, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
-	[IPA_4_5][IPA_CLIENT_USB_DPL_CONS]        = {
+	[IPA_4_5_MHI][IPA_CLIENT_USB_DPL_CONS]        = {
 			true, IPA_v4_5_MHI_GROUP_DDR,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
@@ -3898,8 +3899,8 @@ static void ipa_cfg_qtime(void)
 
 	/* Configure timestamp resolution */
 	memset(&ts_cfg, 0, sizeof(ts_cfg));
-	ts_cfg.dpl_timestamp_lsb = 0;
-	ts_cfg.dpl_timestamp_sel = false; /* DPL: use legacy 1ms resolution */
+	ts_cfg.dpl_timestamp_lsb = IPA_TAG_TIMER_TIMESTAMP_SHFT;
+	ts_cfg.dpl_timestamp_sel = true;
 	ts_cfg.tag_timestamp_lsb = IPA_TAG_TIMER_TIMESTAMP_SHFT;
 	ts_cfg.nat_timestamp_lsb = IPA_NAT_TIMER_TIMESTAMP_SHFT;
 	val = ipahal_read_reg(IPA_QTIME_TIMESTAMP_CFG);
@@ -4115,7 +4116,8 @@ u8 ipa3_get_qmb_master_sel(enum ipa_client_type client)
 		[client].qmb_master_sel;
 }
 
-/* ipa3_set_client() - provide client mapping
+/**
+ * ipa3_set_client() - provide client mapping
  * @client: client type
  *
  * Return value: none
@@ -4132,8 +4134,8 @@ void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink)
 		ipa3_ctx->ipacm_client[index].uplink = uplink;
 	}
 }
-
-/* ipa3_get_wlan_stats() - get ipa wifi stats
+/**
+ * ipa3_get_wlan_stats() - get ipa wifi stats
  *
  * Return value: success or failure
  */
@@ -4149,6 +4151,12 @@ int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats)
 	return 0;
 }
 
+/**
+ * ipa3_set_wlan_quota() - set ipa wifi quota
+ * @wdi_quota: quota requirement
+ *
+ * Return value: success or failure
+ */
 int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota)
 {
 	if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
@@ -4162,6 +4170,23 @@ int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota)
 }
 
 /**
+ * ipa3_inform_wlan_bw() - inform wlan bw-index
+ *
+ * Return value: success or failure
+ */
+int ipa3_inform_wlan_bw(struct ipa_inform_wlan_bw *wdi_bw)
+{
+	if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
+		ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_INFORM_WLAN_BW,
+			wdi_bw);
+	} else {
+		IPAERR("uc_wdi_ctx.stats_notify NULL\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/**
  * ipa3_get_client() - provide client mapping
  * @client: client type
  *
@@ -4256,7 +4281,11 @@ void ipa_init_ep_flt_bitmap(void)
 	}
 
 	for (cl = 0; cl < IPA_CLIENT_MAX ; cl++) {
-		if (ipa3_ep_mapping[hw_idx][cl].support_flt) {
+		/* in normal mode, don't add filter support for test pipes */
+		if ((ipa3_ep_mapping[hw_idx][cl].support_flt &&
+				!IPA_CLIENT_IS_TEST(cl)) ||
+			ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL ||
+			ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
 			gsi_ep_ptr =
 				&ipa3_ep_mapping[hw_idx][cl].ipa_gsi_ep_info;
 			pipe_num =
@@ -6250,6 +6279,22 @@ void *ipa3_id_find(u32 id)
 	return ptr;
 }
 
+bool ipa3_check_idr_if_freed(void *ptr)
+{
+	int id;
+	void *iter_ptr;
+
+	spin_lock(&ipa3_ctx->idr_lock);
+	idr_for_each_entry(&ipa3_ctx->ipa_idr, iter_ptr, id) {
+		if ((uintptr_t)ptr == (uintptr_t)iter_ptr) {
+			spin_unlock(&ipa3_ctx->idr_lock);
+			return false;
+		}
+	}
+	spin_unlock(&ipa3_ctx->idr_lock);
+	return true;
+}
+
 void ipa3_id_remove(u32 id)
 {
 	spin_lock(&ipa3_ctx->idr_lock);
@@ -6295,10 +6340,12 @@ int ipa3_tag_process(struct ipa3_desc desc[],
 	struct ipahal_imm_cmd_ip_packet_tag_status status;
 	int i;
 	struct sk_buff *dummy_skb;
-	int res;
+	int res = 0;
 	struct ipa3_tag_completion *comp;
 	int ep_idx;
 	u32 retry_cnt = 0;
+	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
 
 	/* Not enough room for the required descriptors for the tag process */
 	if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) {
@@ -6327,6 +6374,34 @@ int ipa3_tag_process(struct ipa3_desc desc[],
 		memcpy(&(tag_desc[0]), desc, descs_num *
 			sizeof(tag_desc[0]));
 		desc_idx += descs_num;
+	} else {
+		res = -EFAULT;
+		IPAERR("desc is NULL\n");
+		goto fail_free_tag_desc;
+	}
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(ep_idx, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld) {
+			IPAERR("failed to construct coal close IC\n");
+			res = -ENOMEM;
+			goto fail_free_tag_desc;
+		}
+		ipa3_init_imm_cmd_desc(&tag_desc[desc_idx], cmd_pyld);
+		tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
+		tag_desc[desc_idx].user1 = cmd_pyld;
+		++desc_idx;
 	}
 
 	/* NO-OP IC for ensuring that IPA pipeline is empty */
@@ -6335,7 +6410,7 @@ int ipa3_tag_process(struct ipa3_desc desc[],
 	if (!cmd_pyld) {
 		IPAERR("failed to construct NOP imm cmd\n");
 		res = -ENOMEM;
-		goto fail_free_tag_desc;
+		goto fail_free_desc;
 	}
 	ipa3_init_imm_cmd_desc(&tag_desc[desc_idx], cmd_pyld);
 	tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
@@ -6878,6 +6953,8 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
 	api_ctrl->ipa_resume_wdi_pipe = ipa3_resume_wdi_pipe;
 	api_ctrl->ipa_suspend_wdi_pipe = ipa3_suspend_wdi_pipe;
 	api_ctrl->ipa_get_wdi_stats = ipa3_get_wdi_stats;
+	api_ctrl->ipa_uc_bw_monitor = ipa3_uc_bw_monitor;
+	api_ctrl->ipa_set_wlan_tx_info = ipa3_set_wlan_tx_info;
 	api_ctrl->ipa_get_smem_restr_bytes = ipa3_get_smem_restr_bytes;
 	api_ctrl->ipa_broadcast_wdi_quota_reach_ind =
 			ipa3_broadcast_wdi_quota_reach_ind;
@@ -6986,6 +7063,15 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
 		ipa3_register_client_callback;
 	api_ctrl->ipa_deregister_client_callback =
 		ipa3_deregister_client_callback;
+	api_ctrl->ipa_uc_debug_stats_alloc =
+		ipa3_uc_debug_stats_alloc;
+	api_ctrl->ipa_uc_debug_stats_dealloc =
+		ipa3_uc_debug_stats_dealloc;
+	api_ctrl->ipa_get_gsi_stats =
+		ipa3_get_gsi_stats;
+	api_ctrl->ipa_get_prot_id =
+		ipa3_get_prot_id;
+	api_ctrl->ipa_get_lan_rx_napi = ipa3_get_lan_rx_napi;
 	return 0;
 }
 
@@ -7565,15 +7651,11 @@ static int __ipa3_stop_gsi_channel(u32 clnt_hdl)
 				client_type);
 		}
 	}
-	if (IPA_CLIENT_IS_PROD(ep->client)) {
-		IPADBG("Calling gsi_stop_channel ch:%lu\n",
-			ep->gsi_chan_hdl);
-		res = gsi_stop_channel(ep->gsi_chan_hdl);
-		IPADBG("gsi_stop_channel ch: %lu returned %d\n",
-			ep->gsi_chan_hdl, res);
-		return res;
-	}
 
+	/*
+	 * Apply the GSI stop retry logic whenever GSI returns a retryable
+	 * error code, for producer as well as consumer clients.
+	 */
 	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
 		IPADBG("Calling gsi_stop_channel ch:%lu\n",
 			ep->gsi_chan_hdl);
@@ -7626,7 +7708,7 @@ int ipa3_stop_gsi_channel(u32 clnt_hdl)
 
 static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
 {
-	int ipa_ep_idx;
+	int ipa_ep_idx, coal_ep_idx;
 	struct ipa3_ep_context *ep;
 	int res;
 
@@ -7645,16 +7727,16 @@ static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
 	if (!ep->valid)
 		return 0;
 
+	coal_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+
 	IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend", ipa_ep_idx);
+
 	/*
-	 * move the channel to callback mode.
-	 * This needs to happen before starting the channel to make
-	 * sure we don't loose any interrupt
+	 * Configure callback mode only once, after the channel has been
+	 * started; otherwise an IEOB interrupt can arrive before callback
+	 * mode is configured a second time, racing the update of the
+	 * current polling state.
 	 */
-	if (!suspend && !atomic_read(&ep->sys->curr_polling_state) &&
-		!IPA_CLIENT_IS_APPS_PROD(client))
-		gsi_config_channel_mode(ep->gsi_chan_hdl,
-					GSI_CHAN_MODE_CALLBACK);
 
 	if (suspend) {
 		res = __ipa3_stop_gsi_channel(ipa_ep_idx);
@@ -7671,7 +7753,17 @@ static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
 	}
 
 	/* Apps prod pipes use common event ring so cannot configure mode*/
-	if (IPA_CLIENT_IS_APPS_PROD(client))
+
+	 * Also skip configuring the mode for the default WAN pipe: it
+	 * shares a common event ring with the coalescing pipe, and if both
+	 * pipes configure the same event ring a race arises in updating
+	 * the current polling state.
+	 * updating current polling state.
+	 */
+
+	if (IPA_CLIENT_IS_APPS_PROD(client) ||
+		(client == IPA_CLIENT_APPS_WAN_CONS &&
+			coal_ep_idx != IPA_EP_NOT_ALLOCATED))
 		return 0;
 
 	if (suspend) {
@@ -7717,7 +7809,7 @@ void ipa3_force_close_coal(void)
 
 	IPADBG("Sending 1 descriptor for coal force close\n");
 	if (ipa3_send_cmd_timeout(1, &desc,
-		IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC)) {
+		IPA_COAL_CLOSE_FRAME_CMD_TIMEOUT_MSEC)) {
 		IPAERR("ipa3_send_cmd failed\n");
 		ipa_assert();
 	}
@@ -7727,18 +7819,26 @@ void ipa3_force_close_coal(void)
 int ipa3_suspend_apps_pipes(bool suspend)
 {
 	int res;
-	enum ipa_client_type client;
 
-	if (suspend)
-		ipa3_force_close_coal();
+	/* As per the HPG, the coalescing channel must be started/stopped
+	 * before the default one. The coalescing client enum value is
+	 * greater than the default one's, so it is handled explicitly
+	 * first instead of by iteration order.
+	 */
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_COAL_CONS, suspend);
+	if (res == -EAGAIN)
+		goto undo_coal_cons;
 
-	for (client = 0; client < IPA_CLIENT_MAX; client++) {
-		if (IPA_CLIENT_IS_APPS_CONS(client)) {
-			res = _ipa_suspend_resume_pipe(client, suspend);
-			if (res)
-				goto undo_cons;
-		}
-	}
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_CONS, suspend);
+	if (res == -EAGAIN)
+		goto undo_wan_cons;
+
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_CONS, suspend);
+	if (res == -EAGAIN)
+		goto undo_lan_cons;
+
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_ODL_DPL_CONS, suspend);
+	if (res == -EAGAIN)
+		goto undo_odl_cons;
 
 	if (suspend) {
 		struct ipahal_reg_tx_wrapper tx;
@@ -7753,7 +7853,8 @@ int ipa3_suspend_apps_pipes(bool suspend)
 		if (tx.coal_slave_open_frame != 0) {
 			IPADBG("COAL frame is open 0x%x\n",
 				tx.coal_slave_open_frame);
-			goto undo_cons;
+			res = -EAGAIN;
+			goto undo_odl_cons;
 		}
 
 		usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);
@@ -7762,28 +7863,37 @@ int ipa3_suspend_apps_pipes(bool suspend)
 			ipa3_ctx->ee);
 		if (res) {
 			IPADBG("suspend irq is pending 0x%x\n", res);
-			goto undo_cons;
+			goto undo_odl_cons;
 		}
 	}
 do_prod:
-	for (client = 0; client < IPA_CLIENT_MAX; client++) {
-		if (IPA_CLIENT_IS_APPS_PROD(client)) {
-			res = _ipa_suspend_resume_pipe(client, suspend);
-			if (res)
-				goto undo_prod;
-		}
-	}
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_PROD, suspend);
+	if (res == -EAGAIN)
+		goto undo_lan_prod;
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_PROD, suspend);
+	if (res == -EAGAIN)
+		goto undo_wan_prod;
 
 	return 0;
-undo_prod:
-	for (client--; client < IPA_CLIENT_MAX && client >= 0; client--)
-		if (IPA_CLIENT_IS_APPS_PROD(client))
-			_ipa_suspend_resume_pipe(client, !suspend);
-	client = IPA_CLIENT_MAX;
-undo_cons:
-	for (client--; client < IPA_CLIENT_MAX && client >= 0; client--)
-		if (IPA_CLIENT_IS_APPS_CONS(client))
-			_ipa_suspend_resume_pipe(client, !suspend);
+
+undo_wan_prod:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_PROD, !suspend);
+
+undo_lan_prod:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_PROD, !suspend);
+
+undo_odl_cons:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_ODL_DPL_CONS, !suspend);
+undo_lan_cons:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_CONS, !suspend);
+undo_wan_cons:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_COAL_CONS, !suspend);
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_CONS, !suspend);
+	return res;
+
+undo_coal_cons:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_COAL_CONS, !suspend);
+
 	return res;
 }
 
@@ -8371,6 +8481,30 @@ bool ipa3_is_apq(void)
 }
 
 /**
+ * ipa_get_fnr_info() - get the cached FNR counter info
+ * @fnr_info: [out] filled with the FNR counter offsets when valid
+ *
+ * Return value: true if set, false if not set
+ */
+bool ipa_get_fnr_info(struct ipacm_fnr_info *fnr_info)
+{
+	bool res = false;
+
+	if (ipa3_ctx->fnr_info.valid) {
+		fnr_info->valid = ipa3_ctx->fnr_info.valid;
+		fnr_info->hw_counter_offset =
+			ipa3_ctx->fnr_info.hw_counter_offset;
+		fnr_info->sw_counter_offset =
+			ipa3_ctx->fnr_info.sw_counter_offset;
+		res = true;
+	} else {
+		IPAERR("fnr_info not valid!\n");
+		res = false;
+	}
+	return res;
+}
+
+/**
  * ipa3_disable_prefetch() - disable\enable tx prefetch
  *
  * @client: the client which is related to the TX where prefetch will be
@@ -8452,3 +8586,98 @@ u32 ipa3_get_r_rev_version(void)
 
 	return r_rev;
 }
+
+/**
+ * ipa3_get_gsi_stats() - Query gsi stats from uc
+ * @prot_id: IPA_HW_FEATURE_OFFLOAD protocol id
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+void ipa3_get_gsi_stats(int prot_id,
+	struct ipa_uc_dbg_ring_stats *stats)
+{
+	switch (prot_id) {
+	case IPA_HW_PROTOCOL_AQC:
+		stats->num_ch = MAX_AQC_CHANNELS;
+		ipa3_get_aqc_gsi_stats(stats);
+		break;
+	case IPA_HW_PROTOCOL_11ad:
+		break;
+	case IPA_HW_PROTOCOL_WDI:
+		stats->num_ch = MAX_WDI2_CHANNELS;
+		ipa3_get_wdi_gsi_stats(stats);
+		break;
+	case IPA_HW_PROTOCOL_WDI3:
+		stats->num_ch = MAX_WDI3_CHANNELS;
+		ipa3_get_wdi3_gsi_stats(stats);
+		break;
+	case IPA_HW_PROTOCOL_ETH:
+		break;
+	case IPA_HW_PROTOCOL_MHIP:
+		stats->num_ch = MAX_MHIP_CHANNELS;
+		ipa3_get_mhip_gsi_stats(stats);
+		break;
+	case IPA_HW_PROTOCOL_USB:
+		stats->num_ch = MAX_USB_CHANNELS;
+		ipa3_get_usb_gsi_stats(stats);
+		break;
+	default:
+		IPAERR("unsupported HW feature %d\n", prot_id);
+	}
+}
+
+/**
+ * ipa3_get_prot_id() - Query gsi protocol id
+ * @client: ipa_client_type
+ *
+ * return the prot_id based on the client type,
+ * return -EINVAL when no such mapping exists.
+ */
+int ipa3_get_prot_id(enum ipa_client_type client)
+{
+	int prot_id = -EINVAL;
+
+	switch (client) {
+	case IPA_CLIENT_AQC_ETHERNET_CONS:
+	case IPA_CLIENT_AQC_ETHERNET_PROD:
+		prot_id = IPA_HW_PROTOCOL_AQC;
+		break;
+	case IPA_CLIENT_MHI_PRIME_TETH_PROD:
+	case IPA_CLIENT_MHI_PRIME_TETH_CONS:
+	case IPA_CLIENT_MHI_PRIME_RMNET_PROD:
+	case IPA_CLIENT_MHI_PRIME_RMNET_CONS:
+		prot_id = IPA_HW_PROTOCOL_MHIP;
+		break;
+	case IPA_CLIENT_WLAN1_PROD:
+	case IPA_CLIENT_WLAN1_CONS:
+		prot_id = IPA_HW_PROTOCOL_WDI;
+		break;
+	case IPA_CLIENT_WLAN2_PROD:
+	case IPA_CLIENT_WLAN2_CONS:
+		prot_id = IPA_HW_PROTOCOL_WDI3;
+		break;
+	case IPA_CLIENT_USB_PROD:
+	case IPA_CLIENT_USB_CONS:
+		prot_id = IPA_HW_PROTOCOL_USB;
+		break;
+	case IPA_CLIENT_ETHERNET_PROD:
+	case IPA_CLIENT_ETHERNET_CONS:
+		prot_id = IPA_HW_PROTOCOL_ETH;
+		break;
+	case IPA_CLIENT_WIGIG_PROD:
+	case IPA_CLIENT_WIGIG1_CONS:
+	case IPA_CLIENT_WIGIG2_CONS:
+	case IPA_CLIENT_WIGIG3_CONS:
+	case IPA_CLIENT_WIGIG4_CONS:
+		prot_id = IPA_HW_PROTOCOL_11ad;
+		break;
+	default:
+		IPAERR("unknown prot_id for client %d\n",
+			client);
+	}
+
+	return prot_id;
+}
+
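
A short usage example for the new client-to-protocol mapping, for instance when tearing down uC debug stats for a WDI3 pipe; the call sequence is illustrative, the names come from the code above:

	int prot = ipa3_get_prot_id(IPA_CLIENT_WLAN2_CONS);

	if (prot >= 0)		/* IPA_HW_PROTOCOL_WDI3 for this client */
		ipa3_uc_debug_stats_dealloc(prot);
	else
		IPAERR("no protocol mapping for client\n");
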
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
index f213178..463a3d3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
@@ -725,6 +725,20 @@ int ipa3_enable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
 
 	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(ipa_ep_idx_tx));
 
+	/* start uC event ring */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		if (ipa3_ctx->uc_ctx.uc_loaded &&
+			!ipa3_ctx->uc_ctx.uc_event_ring_valid) {
+			if (ipa3_uc_setup_event_ring()) {
+				IPAERR("failed to set uc_event ring\n");
+				return -EFAULT;
+			}
+		} else
+			IPAERR("uc-loaded %d, ring-valid %d\n",
+				ipa3_ctx->uc_ctx.uc_loaded,
+				ipa3_ctx->uc_ctx.uc_event_ring_valid);
+	}
+
 	/* enable data path */
 	result = ipa3_enable_data_path(ipa_ep_idx_rx);
 	if (result) {
@@ -928,7 +942,7 @@ int ipa3_write_qmapid_wdi3_gsi_pipe(u32 clnt_hdl, u8 qmap_id)
  * @note Cannot be called from atomic context
  *
  */
-int ipa3_get_wdi3_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats)
+int ipa3_get_wdi3_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
 {
 	int i;
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index 0b0545e..985eae8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -863,11 +863,53 @@ struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
 	(status->status_mask |= \
 		((hw_status->status_mask & (__hw_bit_msk) ? 1 : 0) << (__shft)))
 
+static enum ipahal_pkt_status_exception pkt_status_parse_exception(
+	bool is_ipv6, u64 exception)
+{
+	enum ipahal_pkt_status_exception exception_type = 0;
+
+	switch (exception) {
+	case 0:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NONE;
+		break;
+	case 1:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR;
+		break;
+	case 4:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE;
+		break;
+	case 8:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH;
+		break;
+	case 16:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS;
+		break;
+	case 32:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT;
+		break;
+	case 64:
+		if (is_ipv6)
+			exception_type = IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT;
+		else
+			exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NAT;
+		break;
+	case 229:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_CSUM;
+		break;
+	default:
+		IPAHAL_ERR("unsupported Status Exception type 0x%x\n",
+			exception);
+		WARN_ON(1);
+	}
+
+	return exception_type;
+}
+
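The raw exception field decoded above is essentially one-hot (0, 1, 4, 8, 16, 32, 64) plus the special checksum code 229; the only family-dependent code point is 64, which maps to IPv6 connection tracking on the v6 path and NAT otherwise. The helper is file-local, so the calls below are purely illustrative of its mapping:

	enum ipahal_pkt_status_exception ex;

	ex = pkt_status_parse_exception(true, 64);   /* -> ..._IPV6CT */
	ex = pkt_status_parse_exception(false, 64);  /* -> ..._NAT */
	ex = pkt_status_parse_exception(false, 229); /* -> ..._CSUM */
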
 static void ipa_pkt_status_parse(
 	const void *unparsed_status, struct ipahal_pkt_status *status)
 {
 	enum ipahal_pkt_status_opcode opcode = 0;
-	enum ipahal_pkt_status_exception exception_type = 0;
 	bool is_ipv6;
 
 	struct ipa_pkt_status_hw *hw_status =
@@ -926,9 +968,8 @@ static void ipa_pkt_status_parse(
 		opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS;
 		break;
 	default:
-		IPAHAL_ERR("unsupported Status Opcode 0x%x\n",
+		IPAHAL_ERR_RL("unsupported Status Opcode 0x%x\n",
 			hw_status->status_opcode);
-		WARN_ON(1);
 	}
 	status->status_opcode = opcode;
 
@@ -943,45 +984,11 @@ static void ipa_pkt_status_parse(
 		status->nat_type = IPAHAL_PKT_STATUS_NAT_DST;
 		break;
 	default:
-		IPAHAL_ERR("unsupported Status NAT type 0x%x\n",
+		IPAHAL_ERR_RL("unsupported Status NAT type 0x%x\n",
 			hw_status->nat_type);
-		WARN_ON(1);
 	}
-
-	switch (hw_status->exception) {
-	case 0:
-		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NONE;
-		break;
-	case 1:
-		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR;
-		break;
-	case 4:
-		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE;
-		break;
-	case 8:
-		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH;
-		break;
-	case 16:
-		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS;
-		break;
-	case 32:
-		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT;
-		break;
-	case 64:
-		if (is_ipv6)
-			exception_type = IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT;
-		else
-			exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NAT;
-		break;
-	case 229:
-		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_CSUM;
-		break;
-	default:
-		IPAHAL_ERR("unsupported Status Exception type 0x%x\n",
-			hw_status->exception);
-		WARN_ON(1);
-	}
-	status->exception = exception_type;
+	status->exception = pkt_status_parse_exception(is_ipv6,
+						hw_status->exception);
 
 	IPA_PKT_STATUS_SET_MSK(0x1, IPAHAL_PKT_STATUS_MASK_FRAG_PROCESS_SHFT);
 	IPA_PKT_STATUS_SET_MSK(0x2, IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT);
@@ -1006,15 +1013,49 @@ static void ipa_pkt_status_parse(
 }
 
 /*
+ * ipa_pkt_status_parse_thin() - Parse only the subset of packet status
+ * fields needed on the LAN rx data path, avoiding the cost of a full
+ * status parse.
+ * @unparsed_status: Pointer to H/W format of the packet status as read from HW
+ * @status: Pointer to pre-allocated buffer where the parsed info will be
+ * stored
+ */
+static void ipa_pkt_status_parse_thin(const void *unparsed_status,
+	struct ipahal_pkt_status_thin *status)
+{
+	struct ipa_pkt_status_hw *hw_status =
+		(struct ipa_pkt_status_hw *)unparsed_status;
+	bool is_ipv6;
+
+	if (!unparsed_status || !status) {
+		IPAHAL_ERR("Input Error: unparsed_status=%pK status=%pK\n",
+			unparsed_status, status);
+		return;
+	}
+
+	/* status_mask bit 0x80 set means IPv4, clear means IPv6 */
+	is_ipv6 = !(hw_status->status_mask & 0x80);
+
+	IPAHAL_DBG_LOW("Parse Thin Status Packet\n");
+	status->metadata = hw_status->metadata;
+	status->endp_src_idx = hw_status->endp_src_idx;
+	status->ucp = hw_status->ucp;
+	status->exception = pkt_status_parse_exception(is_ipv6,
+						hw_status->exception);
+}
+
+/*
  * struct ipahal_pkt_status_obj - Packet Status H/W information for
  *  specific IPA version
  * @size: H/W size of the status packet
  * @parse: CB that parses the H/W packet status into the abstracted structure
+ * @parse_thin: lightweight CB that parses only a subset of the fields,
+ * for data-path optimization
  */
 struct ipahal_pkt_status_obj {
 	u32 size;
 	void (*parse)(const void *unparsed_status,
 		struct ipahal_pkt_status *status);
+	void (*parse_thin)(const void *unparsed_status,
+			struct ipahal_pkt_status_thin *status);
 };
 
 /*
@@ -1030,6 +1071,7 @@ static struct ipahal_pkt_status_obj ipahal_pkt_status_objs[IPA_HW_MAX] = {
 	[IPA_HW_v3_0] = {
 		IPA3_0_PKT_STATUS_SIZE,
 		ipa_pkt_status_parse,
+		ipa_pkt_status_parse_thin,
 		},
 };
 
@@ -1053,6 +1095,8 @@ static int ipahal_pkt_status_init(enum ipa_hw_type ipa_hw_type)
 	/*
 	 * Since structure alignment is implementation dependent,
 	 * add test to avoid different and incompatible data layouts.
+	 * If the test fails, it also means that ipahal_pkt_status_parse_thin
+	 * needs to be checked.
 	 *
 	 * In case new H/W has different size or structure of status packet,
 	 * add a compile time validity check for it like below (as well as
@@ -1085,6 +1129,12 @@ static int ipahal_pkt_status_init(enum ipa_hw_type ipa_hw_type)
 				  i+1);
 				WARN_ON(1);
 			}
+			if (!ipahal_pkt_status_objs[i+1].parse_thin) {
+				IPAHAL_ERR(
+				  "Packet Status without Parse_thin func ipa_ver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
 		}
 	}
 
@@ -1120,6 +1170,26 @@ void ipahal_pkt_status_parse(const void *unparsed_status,
 }
 
 /*
+ * ipahal_pkt_status_parse_thin() - Similar to ipahal_pkt_status_parse(),
+ * except that it parses only the subset of status packet fields used
+ * for throughput (TP) optimization.
+ * @unparsed_status: Pointer to H/W format of the packet status as read from H/W
+ * @status: Pointer to pre-allocated buffer where the parsed info will be stored
+ */
+void ipahal_pkt_status_parse_thin(const void *unparsed_status,
+	struct ipahal_pkt_status_thin *status)
+{
+	if (!unparsed_status || !status) {
+		IPAHAL_ERR("Input Error: unparsed_status=%pK status=%pK\n",
+			unparsed_status, status);
+		return;
+	}
+	IPAHAL_DBG_LOW("Parse_thin Status Packet\n");
+	ipahal_pkt_status_objs[ipahal_ctx->hw_type].parse_thin(unparsed_status,
+				status);
+}
+
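Fast-path callers are expected to go through the public wrapper defined above rather than a per-version callback directly; the init-time checks earlier in this file guarantee that parse_thin is populated for every supported HW version. A hedged usage sketch (handle_lan_rx_status and route_lan_pkt are illustrative names, not from this patch):

	/* Hypothetical LAN-rx consumer of the thin status (sketch) */
	static void handle_lan_rx_status(const void *rx_status)
	{
		struct ipahal_pkt_status_thin thin;

		ipahal_pkt_status_parse_thin(rx_status, &thin);
		if (thin.exception == IPAHAL_PKT_STATUS_EXCEPTION_NONE)
			route_lan_pkt(thin.metadata, thin.endp_src_idx,
				      thin.ucp);
	}
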
+/*
  * ipahal_pkt_status_exception_str() - returns string represents exception type
  * @exception: [in] The exception type
  */
@@ -1202,6 +1272,7 @@ static void ipahal_cp_hdr_to_hw_buff_v3(void *const base, u32 offset,
  * @hdr_base_addr: base address in table
  * @offset_entry: offset from hdr_base_addr in table
  * @l2tp_params: l2tp parameters
+ * @generic_params: generic proc_ctx params
  * @is_64: Indicates whether header base address/dma base address is 64 bit.
  */
 static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
@@ -1209,7 +1280,9 @@ static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
 		u32 hdr_len, bool is_hdr_proc_ctx,
 		dma_addr_t phys_base, u64 hdr_base_addr,
 		struct ipa_hdr_offset_entry *offset_entry,
-		struct ipa_l2tp_hdr_proc_ctx_params l2tp_params, bool is_64)
+		struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params,
+		struct ipa_eth_II_to_eth_II_ex_procparams *generic_params,
+		bool is_64)
 {
 	u64 hdr_addr;
 
@@ -1253,11 +1326,11 @@ static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
 		ctx->l2tp_params.tlv.value =
 				IPA_HDR_UCP_L2TP_HEADER_ADD;
 		ctx->l2tp_params.l2tp_params.eth_hdr_retained =
-			l2tp_params.hdr_add_param.eth_hdr_retained;
+			l2tp_params->hdr_add_param.eth_hdr_retained;
 		ctx->l2tp_params.l2tp_params.input_ip_version =
-			l2tp_params.hdr_add_param.input_ip_version;
+			l2tp_params->hdr_add_param.input_ip_version;
 		ctx->l2tp_params.l2tp_params.output_ip_version =
-			l2tp_params.hdr_add_param.output_ip_version;
+			l2tp_params->hdr_add_param.output_ip_version;
 
 		IPAHAL_DBG("command id %d\n", ctx->l2tp_params.tlv.value);
 		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
@@ -1284,15 +1357,15 @@ static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
 		ctx->l2tp_params.tlv.value =
 				IPA_HDR_UCP_L2TP_HEADER_REMOVE;
 		ctx->l2tp_params.l2tp_params.hdr_len_remove =
-			l2tp_params.hdr_remove_param.hdr_len_remove;
+			l2tp_params->hdr_remove_param.hdr_len_remove;
 		ctx->l2tp_params.l2tp_params.eth_hdr_retained =
-			l2tp_params.hdr_remove_param.eth_hdr_retained;
+			l2tp_params->hdr_remove_param.eth_hdr_retained;
 		ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size_valid =
-			l2tp_params.hdr_remove_param.hdr_ofst_pkt_size_valid;
+			l2tp_params->hdr_remove_param.hdr_ofst_pkt_size_valid;
 		ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size =
-			l2tp_params.hdr_remove_param.hdr_ofst_pkt_size;
+			l2tp_params->hdr_remove_param.hdr_ofst_pkt_size;
 		ctx->l2tp_params.l2tp_params.hdr_endianness =
-			l2tp_params.hdr_remove_param.hdr_endianness;
+			l2tp_params->hdr_remove_param.hdr_endianness;
 		IPAHAL_DBG("hdr ofst valid: %d, hdr ofst pkt size: %d\n",
 			ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size_valid,
 			ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size);
@@ -1303,6 +1376,33 @@ static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
 		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
 		ctx->end.length = 0;
 		ctx->end.value = 0;
+	} else if (type == IPA_HDR_PROC_ETHII_TO_ETHII_EX) {
+		struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq_ex *ctx;
+
+		ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq_ex *)
+			(base + offset);
+
+		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+		ctx->hdr_add.tlv.length = 1;
+		ctx->hdr_add.tlv.value = hdr_len;
+		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+			hdr_base_addr + offset_entry->offset;
+		IPAHAL_DBG("header address 0x%x\n",
+			ctx->hdr_add.hdr_addr);
+
+		ctx->hdr_add_ex.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
+		ctx->hdr_add_ex.tlv.length = 1;
+		ctx->hdr_add_ex.tlv.value = IPA_HDR_UCP_ETHII_TO_ETHII_EX;
+
+		ctx->hdr_add_ex.params.input_ethhdr_negative_offset =
+			generic_params->input_ethhdr_negative_offset;
+		ctx->hdr_add_ex.params.output_ethhdr_negative_offset =
+			generic_params->output_ethhdr_negative_offset;
+		ctx->hdr_add_ex.params.reserved = 0;
+
+		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+		ctx->end.length = 0;
+		ctx->end.value = 0;
 	} else {
 		struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *ctx;
 
@@ -1357,9 +1457,35 @@ static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
  */
 static int ipahal_get_proc_ctx_needed_len_v3(enum ipa_hdr_proc_type type)
 {
-	return (type == IPA_HDR_PROC_NONE) ?
-			sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_seq) :
-			sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq);
+	int ret;
+
+	switch (type) {
+	case IPA_HDR_PROC_NONE:
+		ret = sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_seq);
+		break;
+	case IPA_HDR_PROC_ETHII_TO_ETHII:
+	case IPA_HDR_PROC_ETHII_TO_802_3:
+	case IPA_HDR_PROC_802_3_TO_ETHII:
+	case IPA_HDR_PROC_802_3_TO_802_3:
+		ret = sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq);
+		break;
+	case IPA_HDR_PROC_L2TP_HEADER_ADD:
+		ret = sizeof(struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq);
+		break;
+	case IPA_HDR_PROC_L2TP_HEADER_REMOVE:
+		ret =
+		sizeof(struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq);
+		break;
+	case IPA_HDR_PROC_ETHII_TO_ETHII_EX:
+		ret = sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq_ex);
+		break;
+	default:
+		/* invalid value to make sure failure */
+		IPAHAL_ERR_RL("invalid ipa_hdr_proc_type %d\n", type);
+		ret = -1;
+	}
+
+	return ret;
 }
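
With the switch above able to return -1 for an unknown processing type, callers must now treat a negative result as an error before using it as an allocation size. A sketch of the expected caller pattern (alloc_proc_ctx is hypothetical; the public entry point is ipahal_get_proc_ctx_needed_len()):

	/* Hypothetical caller honoring the new error contract (sketch) */
	static int alloc_proc_ctx(enum ipa_hdr_proc_type type)
	{
		int needed = ipahal_get_proc_ctx_needed_len(type);

		if (needed < 0)
			return -EINVAL;	/* invalid ipa_hdr_proc_type */

		/* ... allocate 'needed' bytes for the proc ctx ... */
		return 0;
	}
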
 
 /*
@@ -1376,7 +1502,9 @@ struct ipahal_hdr_funcs {
 			bool is_hdr_proc_ctx, dma_addr_t phys_base,
 			u64 hdr_base_addr,
 			struct ipa_hdr_offset_entry *offset_entry,
-			struct ipa_l2tp_hdr_proc_ctx_params l2tp_params,
+			struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params,
+			struct ipa_eth_II_to_eth_II_ex_procparams
+			*generic_params,
 			bool is_64);
 
 	int (*ipahal_get_proc_ctx_needed_len)(enum ipa_hdr_proc_type type);
@@ -1443,13 +1571,16 @@ void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *const hdr,
  * @hdr_base_addr: base address in table
  * @offset_entry: offset from hdr_base_addr in table
  * @l2tp_params: l2tp parameters
+ * @generic_params: generic proc_ctx params
  * @is_64: Indicates whether header base address/dma base address is 64 bit.
  */
 int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
 		void *const base, u32 offset, u32 hdr_len,
 		bool is_hdr_proc_ctx, dma_addr_t phys_base,
 		u64 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry,
-		struct ipa_l2tp_hdr_proc_ctx_params l2tp_params, bool is_64)
+		struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params,
+		struct ipa_eth_II_to_eth_II_ex_procparams *generic_params,
+		bool is_64)
 {
 	IPAHAL_DBG(
 		"type %d, base %pK, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %llu, offset_entry %pK, bool %d\n"
@@ -1470,7 +1601,8 @@ int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
 
 	return hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff(type, base, offset,
 			hdr_len, is_hdr_proc_ctx, phys_base,
-			hdr_base_addr, offset_entry, l2tp_params, is_64);
+			hdr_base_addr, offset_entry, l2tp_params,
+			generic_params, is_64);
 }
 
 /*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
index 25c9416..d43af78 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -589,6 +589,21 @@ struct ipahal_pkt_status {
 };
 
 /*
+ * struct ipahal_pkt_status_thin - holds the few status packet fields
+ *  needed for the LAN rx data path optimization.
+ * @exception: The first exception that took place.
+ * @metadata: meta data value used by packet
+ * @endp_src_idx: Source end point index.
+ * @ucp: UC Processing flag
+ */
+struct ipahal_pkt_status_thin {
+	enum ipahal_pkt_status_exception exception;
+	u32 metadata;
+	u8 endp_src_idx;
+	bool ucp;
+};
+
+/*
  * ipahal_pkt_status_get_size() - Get H/W size of packet status
  */
 u32 ipahal_pkt_status_get_size(void);
@@ -602,6 +617,17 @@ void ipahal_pkt_status_parse(const void *unparsed_status,
 	struct ipahal_pkt_status *status);
 
 /*
+ * ipahal_pkt_status_parse_thin() - Parse only the subset of packet status
+ * fields needed on the LAN rx data path, avoiding the cost of a full
+ * status parse.
+ * @unparsed_status: Pointer to H/W format of the packet status as read from HW
+ * @status: Pointer to pre-allocated buffer where the parsed info will be
+ * stored
+ */
+void ipahal_pkt_status_parse_thin(const void *unparsed_status,
+	struct ipahal_pkt_status_thin *status);
+
+/*
  * ipahal_pkt_status_exception_str() - returns string represents exception type
  * @exception: [in] The exception type
  */
@@ -630,6 +656,7 @@ void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *hdr, u32 hdr_len);
  * @hdr_base_addr: base address in table
  * @offset_entry: offset from hdr_base_addr in table
  * @l2tp_params: l2tp parameters
+ * @generic_params: generic proc_ctx params
  * @is_64: Indicates whether header base address/dma base address is 64 bit.
  */
 int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
@@ -637,7 +664,9 @@ int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
 		bool is_hdr_proc_ctx, dma_addr_t phys_base,
 		u64 hdr_base_addr,
 		struct ipa_hdr_offset_entry *offset_entry,
-		struct ipa_l2tp_hdr_proc_ctx_params l2tp_params, bool is_64);
+		struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params,
+		struct ipa_eth_II_to_eth_II_ex_procparams *generic_params,
+		bool is_64);
 
 /*
  * ipahal_get_proc_ctx_needed_len() - calculates the needed length for addition
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index d84646f..e3c984d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -847,6 +847,31 @@ static void ipa_fltrt_generate_mac_addr_hw_rule(u8 **extra, u8 **rest,
 		*rest = ipa_write_8(mac_addr[i], *rest);
 }
 
+static inline int ipa_fltrt_generate_vlan_hw_rule_bdy(u16 *en_rule,
+	const struct ipa_rule_attrib *attrib,
+	u8 *ofst_meq32, u8 **extra, u8 **rest)
+{
+	if (attrib->attrib_mask & IPA_FLT_VLAN_ID) {
+		uint32_t vlan_tag;
+
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, *ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[*ofst_meq32]);
+		/* -6 => offset of 802_1Q tag in L2 hdr */
+		*extra = ipa_write_8((u8)-6, *extra);
+		/* filter vlan packets: 0x8100 TPID + required VLAN ID */
+		vlan_tag = (0x8100 << 16) | (attrib->vlan_id & 0xFFF);
+		*rest = ipa_write_32(0xFFFF0FFF, *rest);
+		*rest = ipa_write_32(vlan_tag, *rest);
+		(*ofst_meq32)++;
+	}
+
+	return 0;
+}
+
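The 32-bit meq32 word built above packs the 802.1Q TPID (0x8100) into the upper half and the 12-bit VLAN ID into the low bits; the 0xFFFF0FFF mask leaves the PCP/DEI bits (bits 12-15) as don't-care. A worked example for VLAN ID 100:

	/* value = (0x8100 << 16) | 100 = 0x81000064
	 * a tag matches iff (tag & 0xFFFF0FFF) == 0x81000064,
	 * i.e. TPID and VID must match, priority bits are ignored
	 */
	uint32_t vlan_tag = (0x8100 << 16) | (100 & 0xFFF);
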
 static int ipa_fltrt_generate_hw_rule_bdy_ip4(u16 *en_rule,
 	const struct ipa_rule_attrib *attrib,
 	u8 **extra_wrds, u8 **rest_wrds)
@@ -1036,6 +1061,10 @@ static int ipa_fltrt_generate_hw_rule_bdy_ip4(u16 *en_rule,
 		}
 	}
 
+	if (ipa_fltrt_generate_vlan_hw_rule_bdy(en_rule, attrib, &ofst_meq32,
+		&extra, &rest))
+		goto err;
+
 	if (attrib->attrib_mask & IPA_FLT_TYPE) {
 		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
 			ihl_ofst_meq32)) {
@@ -1422,6 +1451,10 @@ static int ipa_fltrt_generate_hw_rule_bdy_ip6(u16 *en_rule,
 		ofst_meq32++;
 	}
 
+	if (ipa_fltrt_generate_vlan_hw_rule_bdy(en_rule, attrib, &ofst_meq32,
+		&extra, &rest))
+		goto err;
+
 	if (attrib->attrib_mask & IPA_FLT_TYPE) {
 		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
 			ihl_ofst_meq32)) {
@@ -2046,6 +2079,31 @@ static void ipa_flt_generate_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb,
 			mac_addr[i];
 }
 
+static inline int ipa_flt_generate_vlan_eq(
+	const struct ipa_rule_attrib *attrib, u16 *en_rule, u8 *ofst_meq32,
+	struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	if (attrib->attrib_mask & IPA_FLT_VLAN_ID) {
+		uint32_t vlan_tag;
+
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, *ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[*ofst_meq32]);
+		/* -6 => offset of 802_1Q tag in L2 hdr */
+		eq_atrb->offset_meq_32[*ofst_meq32].offset = -6;
+		/* filter vlan packets: 0x8100 TPID + required VLAN ID */
+		vlan_tag = (0x8100 << 16) | (attrib->vlan_id & 0xFFF);
+		eq_atrb->offset_meq_32[*ofst_meq32].mask = 0xFFFF0FFF;
+		eq_atrb->offset_meq_32[*ofst_meq32].value = vlan_tag;
+		(*ofst_meq32)++;
+	}
+
+	return 0;
+}
+
 static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
 		const struct ipa_rule_attrib *attrib,
 		struct ipa_ipfltri_rule_eq *eq_atrb)
@@ -2282,6 +2340,9 @@ static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
 		}
 	}
 
+	if (ipa_flt_generate_vlan_eq(attrib, en_rule, &ofst_meq32, eq_atrb))
+		return -EPERM;
+
 	if (attrib->attrib_mask & IPA_FLT_TYPE) {
 		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
 			ihl_ofst_meq32)) {
@@ -2770,6 +2831,9 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
 		ofst_meq32++;
 	}
 
+	if (ipa_flt_generate_vlan_eq(attrib, en_rule, &ofst_meq32, eq_atrb))
+		return -EPERM;
+
 	if (attrib->attrib_mask & IPA_FLT_TYPE) {
 		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
 			ihl_ofst_meq32)) {
@@ -3945,6 +4009,7 @@ static int ipa_fltrt_alloc_lcl_bdy(
 	struct ipahal_fltrt_alloc_imgs_params *params)
 {
 	struct ipahal_fltrt_obj *obj;
+	gfp_t flag = GFP_KERNEL;
 
 	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
 
@@ -3977,10 +4042,15 @@ static int ipa_fltrt_alloc_lcl_bdy(
 		IPAHAL_DBG_LOW("nhash lcl tbl bdy total h/w size = %u\n",
 			params->nhash_bdy.size);
 
+alloc1:
 		params->nhash_bdy.base = dma_zalloc_coherent(
 			ipahal_ctx->ipa_pdev, params->nhash_bdy.size,
-			&params->nhash_bdy.phys_base, GFP_KERNEL);
+			&params->nhash_bdy.phys_base, flag);
 		if (!params->nhash_bdy.base) {
+			if (flag == GFP_KERNEL) {
+				flag = GFP_ATOMIC;
+				goto alloc1;
+			}
 			IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
 				params->nhash_bdy.size);
 			return -ENOMEM;
@@ -4007,10 +4077,15 @@ static int ipa_fltrt_alloc_lcl_bdy(
 		IPAHAL_DBG_LOW("hash lcl tbl bdy total h/w size = %u\n",
 			params->hash_bdy.size);
 
+alloc2:
 		params->hash_bdy.base = dma_zalloc_coherent(
 			ipahal_ctx->ipa_pdev, params->hash_bdy.size,
-			&params->hash_bdy.phys_base, GFP_KERNEL);
+			&params->hash_bdy.phys_base, flag);
 		if (!params->hash_bdy.base) {
+			if (flag == GFP_KERNEL) {
+				flag = GFP_ATOMIC;
+				goto alloc2;
+			}
 			IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
 				params->hash_bdy.size);
 			goto hash_bdy_fail;
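Both DMA allocations above now share one retry policy: a first attempt with GFP_KERNEL and, on failure, a single fallback to GFP_ATOMIC. Because 'flag' is function-scoped, a fallback taken for the non-hash body carries over to the hash body allocation as well. The pattern in isolation, as a sketch (the wrapper name and the dev/size/phys parameters are placeholders):

	static int alloc_coherent_with_fallback(struct device *dev, size_t size,
						dma_addr_t *phys, void **out)
	{
		gfp_t flag = GFP_KERNEL;

	retry:
		*out = dma_zalloc_coherent(dev, size, phys, flag);
		if (!*out) {
			if (flag == GFP_KERNEL) {
				flag = GFP_ATOMIC;
				goto retry;
			}
			return -ENOMEM;	/* both pools exhausted */
		}
		return 0;
	}
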
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
index 6811244..3625e7e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -605,12 +605,15 @@ struct ipa_pkt_status_hw {
 /* Headers and processing context H/W structures and definitions */
 
 /* uCP command numbers */
-#define IPA_HDR_UCP_802_3_TO_802_3 6
-#define IPA_HDR_UCP_802_3_TO_ETHII 7
-#define IPA_HDR_UCP_ETHII_TO_802_3 8
-#define IPA_HDR_UCP_ETHII_TO_ETHII 9
-#define IPA_HDR_UCP_L2TP_HEADER_ADD 10
-#define IPA_HDR_UCP_L2TP_HEADER_REMOVE 11
+#define IPA_HDR_UCP_802_3_TO_802_3		6
+#define IPA_HDR_UCP_802_3_TO_ETHII		7
+#define IPA_HDR_UCP_ETHII_TO_802_3		8
+#define IPA_HDR_UCP_ETHII_TO_ETHII		9
+#define IPA_HDR_UCP_L2TP_HEADER_ADD		10
+#define IPA_HDR_UCP_L2TP_HEADER_REMOVE		11
+#define IPA_HDR_UCP_L2TP_UDP_HEADER_ADD	12
+#define IPA_HDR_UCP_L2TP_UDP_HEADER_REMOVE	13
+#define IPA_HDR_UCP_ETHII_TO_ETHII_EX		14
 
 /* Processing context TLV type */
 #define IPA_PROC_CTX_TLV_TYPE_END 0
@@ -724,4 +727,28 @@ struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq {
 	struct ipa_hw_hdr_proc_ctx_tlv end;
 };
 
+/**
+ * struct ipa_hw_hdr_proc_ctx_add_hdr_ex -
+ * HW structure of IPA processing context - add generic header
+ * @tlv: IPA processing context TLV
+ * @params: generic eth2 to eth2 parameters
+ */
+struct ipa_hw_hdr_proc_ctx_add_hdr_ex {
+	struct ipa_hw_hdr_proc_ctx_tlv tlv;
+	struct ipa_eth_II_to_eth_II_ex_procparams params;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq_ex -
+ * IPA processing context header - process command sequence
+ * @hdr_add: add header command
+ * @hdr_add_ex: parameters for the generic header add command
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq_ex {
+	struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
+	struct ipa_hw_hdr_proc_ctx_add_hdr_ex hdr_add_ex;
+	struct ipa_hw_hdr_proc_ctx_tlv end;
+};
+
 #endif /* _IPAHAL_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 440788f..2c0cc1f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -3659,6 +3659,11 @@ void ipahal_get_aggr_force_close_valmask(int ep_idx,
 		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_5;
 		bmsk =
 		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_5;
+	} else if (ipahal_ctx->hw_type <= IPA_HW_v4_7) {
+		shft =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_7;
+		bmsk =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_7;
 	}
 
 	if (ep_idx > (sizeof(valmask->val) * 8 - 1)) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
index d934b91..44ecc90 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
@@ -139,6 +139,8 @@ int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
 #define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_2 0
 #define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_5 0x7fffffff
 #define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_5 0
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_7 0x7fffff
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_7 0
 
 /* IPA_ENDP_INIT_ROUTE_n register */
 #define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK 0x1f
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 8c38ac6..6c58fb2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1618,6 +1618,8 @@ static int handle3_egress_format(struct net_device *dev,
 			rc = ipa3_qmi_set_aggr_info(DATA_AGGR_TYPE_QMAP_V01);
 		}
 		rmnet_ipa3_ctx->ipa_mhi_aggr_formet_set = true;
+		/* register Q6 indication */
+		rc = ipa3_qmi_req_ind();
 		return rc;
 	}
 
@@ -2147,35 +2149,16 @@ static int rmnet_ipa_send_coalesce_notification(uint8_t qmap_id,
 
 int ipa3_wwan_set_modem_state(struct wan_ioctl_notify_wan_state *state)
 {
-	uint32_t bw_mbps = 0;
 	int ret = 0;
 
 	if (!state)
 		return -EINVAL;
 
-	if (state->up) {
-		if (rmnet_ipa3_ctx->ipa_config_is_apq) {
-			bw_mbps = 5200;
-			ret = ipa3_vote_for_bus_bw(&bw_mbps);
-			if (ret) {
-				IPAERR("Failed to vote for bus BW (%u)\n",
-							bw_mbps);
-				return ret;
-			}
-		}
+	if (state->up)
 		ret = ipa_pm_activate_sync(rmnet_ipa3_ctx->q6_teth_pm_hdl);
-	} else {
-		if (rmnet_ipa3_ctx->ipa_config_is_apq) {
-			bw_mbps = 0;
-			ret = ipa3_vote_for_bus_bw(&bw_mbps);
-			if (ret) {
-				IPAERR("Failed to vote for bus BW (%u)\n",
-							bw_mbps);
-				return ret;
-			}
-		}
+	else
 		ret = ipa_pm_deactivate_sync(rmnet_ipa3_ctx->q6_teth_pm_hdl);
-	}
+
 	return ret;
 }
 
@@ -3104,10 +3087,16 @@ static int rmnet_ipa3_set_data_quota_wifi(struct wan_ioctl_set_data_quota *data)
 	IPAWANERR("iface name %s, quota %lu\n",
 		  data->interface_name, (unsigned long) data->quota_mbytes);
 
-	rc = ipa3_set_wlan_quota(&wifi_quota);
-	/* check if wlan-fw takes this quota-set */
-	if (!wifi_quota.set_valid)
-		rc = -EFAULT;
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+		ipa3_ctx->ipa_hw_type != IPA_HW_v4_7) {
+		IPADBG("use ipa-uc for quota\n");
+		rc = ipa3_uc_quota_monitor(data->set_quota);
+	} else {
+		rc = ipa3_set_wlan_quota(&wifi_quota);
+		/* check if wlan-fw takes this quota-set */
+		if (!wifi_quota.set_valid)
+			rc = -EFAULT;
+	}
 	return rc;
 }
 
@@ -3452,7 +3441,7 @@ static int rmnet_ipa3_query_tethering_stats_hw(
 		}
 	}
 
-	if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
+	if (ipa3_ctx->ipa_wdi3_over_gsi)
 		wlan_client = IPA_CLIENT_WLAN2_CONS;
 	else
 		wlan_client = IPA_CLIENT_WLAN1_CONS;
@@ -3522,6 +3511,90 @@ static int rmnet_ipa3_query_tethering_stats_hw(
 		(unsigned long) data->ipv4_rx_bytes,
 		(unsigned long) data->ipv6_rx_bytes);
 
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5 ||
+		ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ)
+		goto skip_nlo_stats;
+
+	memset(con_stats, 0, sizeof(struct ipa_quota_stats_all));
+	rc = ipa_query_teth_stats(IPA_CLIENT_Q6_DL_NLO_DATA_PROD,
+				con_stats, reset);
+	if (rc) {
+		IPAERR("IPA_CLIENT_Q6_DL_NLO_DATA_PROD query failed %d,\n", rc);
+		kfree(con_stats);
+		return rc;
+	}
+
+	if (ipa3_ctx->ipa_wdi3_over_gsi)
+		wlan_client = IPA_CLIENT_WLAN2_CONS;
+	else
+		wlan_client = IPA_CLIENT_WLAN1_CONS;
+
+	IPAWANDBG("wlan: v4_rx_p-b(%d,%lld) v6_rx_p-b(%d,%lld),client(%d)\n",
+		con_stats->client[wlan_client].num_ipv4_pkts,
+		con_stats->client[wlan_client].num_ipv4_bytes,
+		con_stats->client[wlan_client].num_ipv6_pkts,
+		con_stats->client[wlan_client].num_ipv6_bytes,
+		wlan_client);
+
+	IPAWANDBG("usb: v4_rx_p(%d) b(%lld) v6_rx_p(%d) b(%lld)\n",
+		con_stats->client[IPA_CLIENT_USB_CONS].num_ipv4_pkts,
+		con_stats->client[IPA_CLIENT_USB_CONS].num_ipv4_bytes,
+		con_stats->client[IPA_CLIENT_USB_CONS].num_ipv6_pkts,
+		con_stats->client[IPA_CLIENT_USB_CONS].num_ipv6_bytes);
+
+	for (i = 0; i < MAX_WIGIG_CLIENTS; i++) {
+		enum ipa_client_type wigig_client =
+			rmnet_ipa3_get_wigig_cons(i);
+
+		if (wigig_client > IPA_CLIENT_WIGIG4_CONS)
+			break;
+
+		IPAWANDBG("wigig%d: v4_rx_p(%d) b(%lld) v6_rx_p(%d) b(%lld)\n",
+			i + 1,
+			con_stats->client[wigig_client].num_ipv4_pkts,
+			con_stats->client[wigig_client].num_ipv4_bytes,
+			con_stats->client[wigig_client].num_ipv6_pkts,
+			con_stats->client[wigig_client].num_ipv6_bytes);
+	}
+
+	/* update the DL stats */
+	data->ipv4_rx_packets +=
+		con_stats->client[wlan_client].num_ipv4_pkts +
+			con_stats->client[IPA_CLIENT_USB_CONS].num_ipv4_pkts;
+	data->ipv6_rx_packets +=
+		con_stats->client[wlan_client].num_ipv6_pkts +
+			con_stats->client[IPA_CLIENT_USB_CONS].num_ipv6_pkts;
+	data->ipv4_rx_bytes +=
+		con_stats->client[wlan_client].num_ipv4_bytes +
+			con_stats->client[IPA_CLIENT_USB_CONS].num_ipv4_bytes;
+	data->ipv6_rx_bytes +=
+		con_stats->client[wlan_client].num_ipv6_bytes +
+		con_stats->client[IPA_CLIENT_USB_CONS].num_ipv6_bytes;
+
+	for (i = 0; i < MAX_WIGIG_CLIENTS; i++) {
+		enum ipa_client_type wigig_client =
+			rmnet_ipa3_get_wigig_cons(i);
+
+		if (wigig_client > IPA_CLIENT_WIGIG4_CONS)
+			break;
+
+		data->ipv4_rx_packets +=
+			con_stats->client[wigig_client].num_ipv4_pkts;
+		data->ipv6_rx_packets +=
+			con_stats->client[wigig_client].num_ipv6_pkts;
+		data->ipv4_rx_bytes +=
+			con_stats->client[wigig_client].num_ipv4_bytes;
+		data->ipv6_rx_bytes +=
+			con_stats->client[wigig_client].num_ipv6_bytes;
+	}
+
+	IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
+		(unsigned long) data->ipv4_rx_packets,
+		(unsigned long) data->ipv6_rx_packets,
+		(unsigned long) data->ipv4_rx_bytes,
+		(unsigned long) data->ipv6_rx_bytes);
+
+skip_nlo_stats:
 	/* query USB UL stats */
 	memset(con_stats, 0, sizeof(struct ipa_quota_stats_all));
 	rc = ipa_query_teth_stats(IPA_CLIENT_USB_PROD, con_stats, reset);
@@ -3552,10 +3625,31 @@ static int rmnet_ipa3_query_tethering_stats_hw(
 	data->ipv6_tx_bytes =
 		con_stats->client[index].num_ipv6_bytes;
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+		ipa3_ctx->platform_type == IPA_PLAT_TYPE_MSM) {
+
+		index = IPA_CLIENT_Q6_UL_NLO_DATA_CONS;
+
+		IPAWANDBG("usb: v4_tx_p(%d) b(%lld) v6_tx_p(%d) b(%lld)\n",
+				con_stats->client[index].num_ipv4_pkts,
+				con_stats->client[index].num_ipv4_bytes,
+				con_stats->client[index].num_ipv6_pkts,
+				con_stats->client[index].num_ipv6_bytes);
+
+		/* update the USB UL stats */
+		data->ipv4_tx_packets +=
+			con_stats->client[index].num_ipv4_pkts;
+		data->ipv6_tx_packets +=
+			con_stats->client[index].num_ipv6_pkts;
+		data->ipv4_tx_bytes +=
+			con_stats->client[index].num_ipv4_bytes;
+		data->ipv6_tx_bytes +=
+			con_stats->client[index].num_ipv6_bytes;
+	}
 	/* query WLAN UL stats */
 	memset(con_stats, 0, sizeof(struct ipa_quota_stats_all));
 
-	if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
+	if (ipa3_ctx->ipa_wdi3_over_gsi)
 		rc = ipa_query_teth_stats(IPA_CLIENT_WLAN2_PROD,
 			con_stats, reset);
 	else
@@ -3589,6 +3683,27 @@ static int rmnet_ipa3_query_tethering_stats_hw(
 	data->ipv6_tx_bytes +=
 		con_stats->client[index].num_ipv6_bytes;
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+		ipa3_ctx->platform_type == IPA_PLAT_TYPE_MSM) {
+		index = IPA_CLIENT_Q6_UL_NLO_DATA_CONS;
+
+		IPAWANDBG("wlan1: v4_tx_p(%d) b(%lld) v6_tx_p(%d) b(%lld)\n",
+				con_stats->client[index].num_ipv4_pkts,
+				con_stats->client[index].num_ipv4_bytes,
+				con_stats->client[index].num_ipv6_pkts,
+				con_stats->client[index].num_ipv6_bytes);
+
+		/* update the USB UL stats */
+		data->ipv4_tx_packets +=
+			con_stats->client[index].num_ipv4_pkts;
+		data->ipv6_tx_packets +=
+			con_stats->client[index].num_ipv6_pkts;
+		data->ipv4_tx_bytes +=
+			con_stats->client[index].num_ipv4_bytes;
+		data->ipv6_tx_bytes +=
+			con_stats->client[index].num_ipv6_bytes;
+	}
+
 	if (ipa3_get_ep_mapping(IPA_CLIENT_WIGIG_PROD) !=
 			IPA_EP_NOT_ALLOCATED) {
 		/* query WIGIG UL stats */
@@ -3621,6 +3736,22 @@ static int rmnet_ipa3_query_tethering_stats_hw(
 			con_stats->client[index].num_ipv4_bytes;
 		data->ipv6_tx_bytes +=
 			con_stats->client[index].num_ipv6_bytes;
+
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+				ipa3_ctx->platform_type == IPA_PLAT_TYPE_MSM) {
+			index = IPA_CLIENT_Q6_UL_NLO_DATA_CONS;
+
+			/* update the WIGIG UL stats */
+			data->ipv4_tx_packets +=
+				con_stats->client[index].num_ipv4_pkts;
+			data->ipv6_tx_packets +=
+				con_stats->client[index].num_ipv6_pkts;
+			data->ipv4_tx_bytes +=
+				con_stats->client[index].num_ipv4_bytes;
+			data->ipv6_tx_bytes +=
+				con_stats->client[index].num_ipv6_bytes;
+		}
+
 	} else {
 		IPAWANDBG("IPA_CLIENT_WIGIG_PROD client not supported\n");
 	}
@@ -3634,6 +3765,144 @@ static int rmnet_ipa3_query_tethering_stats_hw(
 	return rc;
 }
 
+static int rmnet_ipa3_query_tethering_stats_fnr(
+	struct wan_ioctl_query_tether_stats_all *data)
+{
+	int rc = 0;
+	int num_counters;
+	struct ipa_ioc_flt_rt_query fnr_stats, fnr_stats_sw;
+	struct ipacm_fnr_info fnr_info;
+
+	memset(&fnr_stats, 0, sizeof(struct ipa_ioc_flt_rt_query));
+	memset(&fnr_stats_sw, 0, sizeof(struct ipa_ioc_flt_rt_query));
+	memset(&fnr_info, 0, sizeof(struct ipacm_fnr_info));
+
+	if (!ipa_get_fnr_info(&fnr_info)) {
+		IPAERR("FNR counter haven't configured\n");
+		return -EINVAL;
+	}
+
+	fnr_stats.start_id = fnr_info.hw_counter_offset + UL_HW;
+	fnr_stats.end_id = fnr_info.hw_counter_offset + DL_HW;
+	fnr_stats.reset = data->reset_stats;
+	num_counters = fnr_stats.end_id - fnr_stats.start_id + 1;
+
+	if (num_counters != 2) {
+		IPAWANERR("Only query 2 counters\n");
+		return -EINVAL;
+	}
+
+	fnr_stats.stats = (uint64_t)kcalloc(
+		num_counters,
+		sizeof(struct ipa_flt_rt_stats),
+		GFP_KERNEL);
+	if (!fnr_stats.stats) {
+		IPAERR("Failed to allocate memory for query hw-stats\n");
+		return -ENOMEM;
+	}
+
+	if (ipa_get_flt_rt_stats(&fnr_stats)) {
+		IPAERR("Failed to request stats from h/w\n");
+		rc = -EINVAL;
+		goto free_stats;
+	}
+
+	IPAWANDBG("ul: bytes = %llu, pkts = %u, pkts_hash = %u\n",
+	  ((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_bytes,
+	  ((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_pkts,
+	  ((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_pkts_hash);
+	IPAWANDBG("dl: bytes = %llu, pkts = %u, pkts_hash = %u\n",
+	  ((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_bytes,
+	  ((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_pkts,
+	  ((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_pkts_hash);
+
+	data->tx_bytes =
+		((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_bytes;
+	data->rx_bytes =
+		((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_bytes;
+
+	/* get the sw stats */
+	fnr_stats_sw.start_id = fnr_info.sw_counter_offset + UL_HW_CACHE;
+	fnr_stats_sw.end_id = fnr_info.sw_counter_offset + DL_HW_CACHE;
+	fnr_stats_sw.reset = data->reset_stats;
+	num_counters = fnr_stats_sw.end_id - fnr_stats_sw.start_id + 1;
+
+	if (num_counters != 2) {
+		IPAWANERR("Only query 2 counters\n");
+		return -EINVAL;
+	}
+
+	fnr_stats_sw.stats = (uint64_t)kcalloc(
+		num_counters,
+		sizeof(struct ipa_flt_rt_stats),
+		GFP_KERNEL);
+	if (!fnr_stats_sw.stats) {
+		IPAERR("Failed to allocate memory for query sw-stats\n");
+		rc = -ENOMEM;
+		goto free_stats;
+	}
+
+	if (ipa_get_flt_rt_stats(&fnr_stats_sw)) {
+		IPAERR("Failed to request stats from h/w\n");
+		rc = -EINVAL;
+		goto free_stats2;
+	}
+
+	IPAWANDBG("ul sw: bytes = %llu, pkts = %u, pkts_hash = %u\n",
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_bytes,
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_pkts,
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_pkts_hash);
+	IPAWANDBG("dl sw: bytes = %llu, pkts = %u, pkts_hash = %u\n",
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_bytes,
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_pkts,
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_pkts_hash);
+
+	/* update the sw-cache */
+	((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_bytes +=
+	((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_bytes;
+	((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_pkts +=
+	((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_pkts;
+	((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_pkts_hash +=
+	((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_pkts_hash;
+	((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_bytes +=
+	((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_bytes;
+	((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_pkts +=
+	((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_pkts;
+	((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_pkts_hash +=
+	((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_pkts_hash;
+
+	IPAWANDBG("ul sw: bytes = %llu, pkts = %u, pkts_hash = %u\n",
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_bytes,
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_pkts,
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_pkts_hash);
+	IPAWANDBG("dl sw: bytes = %llu, pkts = %u, pkts_hash = %u\n",
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_bytes,
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_pkts,
+	  ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_pkts_hash);
+	/* write to the sw cache */
+	if (ipa_set_flt_rt_stats(fnr_info.sw_counter_offset +
+		UL_HW_CACHE,
+		((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0])) {
+		IPAERR("Failed to set stats to sw-cache %d\n",
+			fnr_info.sw_counter_offset + UL_HW_CACHE);
+		rc = -EINVAL;
+		goto free_stats2;
+	}
+
+	if (ipa_set_flt_rt_stats(fnr_info.sw_counter_offset +
+		DL_HW_CACHE,
+		((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1])) {
+		IPAERR("Failed to set stats to sw-cache %d\n",
+			fnr_info.sw_counter_offset + DL_HW_CACHE);
+		rc = -EINVAL;
+		goto free_stats2;
+	}
+
+free_stats2:
+	kfree((void *)fnr_stats_sw.stats);
+free_stats:
+	kfree((void *)fnr_stats.stats);
+	return rc;
+}
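
The FNR query above reads a UL/DL pair of hardware counters, folds them into the corresponding software-cache pair, and writes the accumulated totals back via ipa_set_flt_rt_stats(), so the cache survives hardware counter resets. The per-direction accumulation, reduced to a sketch:

	/* hw[] holds freshly read HW counters, sw[] the cached totals */
	struct ipa_flt_rt_stats *hw =
		(struct ipa_flt_rt_stats *)fnr_stats.stats;
	struct ipa_flt_rt_stats *sw =
		(struct ipa_flt_rt_stats *)fnr_stats_sw.stats;

	sw[0].num_bytes     += hw[0].num_bytes;		/* UL */
	sw[0].num_pkts      += hw[0].num_pkts;
	sw[0].num_pkts_hash += hw[0].num_pkts_hash;
	/* likewise sw[1] += hw[1] (DL), then both entries are written
	 * back to the UL_HW_CACHE/DL_HW_CACHE slots
	 */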
 
 int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
 	bool reset)
@@ -3691,17 +3960,31 @@ int rmnet_ipa3_query_tethering_stats_all(
 			data->upstreamIface);
 	} else if (upstream_type == IPA_UPSTEAM_WLAN) {
 		IPAWANDBG_LOW(" query wifi-backhaul stats\n");
-		rc = rmnet_ipa3_query_tethering_stats_wifi(
-			&tether_stats, data->reset_stats);
-		if (rc) {
-			IPAWANERR_RL(
-				"wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
-			return rc;
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5 ||
+			ipa3_ctx->ipa_hw_type == IPA_HW_v4_7 ||
+			!ipa3_ctx->hw_stats.enabled) {
+			IPAWANDBG("hw version %d,hw_stats.enabled %d\n",
+				ipa3_ctx->ipa_hw_type,
+				ipa3_ctx->hw_stats.enabled);
+			rc = rmnet_ipa3_query_tethering_stats_wifi(
+				&tether_stats, data->reset_stats);
+			if (rc) {
+				IPAWANERR_RL(
+					"wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
+				return rc;
+			}
+			data->tx_bytes = tether_stats.ipv4_tx_bytes
+				+ tether_stats.ipv6_tx_bytes;
+			data->rx_bytes = tether_stats.ipv4_rx_bytes
+				+ tether_stats.ipv6_rx_bytes;
+		} else {
+			rc = rmnet_ipa3_query_tethering_stats_fnr(data);
+			if (rc) {
+				IPAWANERR_RL(
+					"wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
+				return rc;
+			}
 		}
-		data->tx_bytes = tether_stats.ipv4_tx_bytes
-			+ tether_stats.ipv6_tx_bytes;
-		data->rx_bytes = tether_stats.ipv4_rx_bytes
-			+ tether_stats.ipv6_rx_bytes;
 	} else {
 		IPAWANDBG_LOW(" query modem-backhaul stats\n");
 		tether_stats.ipa_client = data->ipa_client;
diff --git a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
index 6bb8f96..c3d8c97 100644
--- a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
@@ -178,10 +178,6 @@ int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params)
 		TETH_ERR("fail to register with PM %d\n", res);
 		return res;
 	}
-	/* vote for turbo in case of MHIP channels*/
-	if (ipa3_is_apq())
-		res = ipa_pm_set_throughput(ipa3_teth_ctx->modem_pm_hdl,
-			5200);
 	res = ipa_pm_activate_sync(ipa3_teth_ctx->modem_pm_hdl);
 
 	TETH_DBG_FUNC_EXIT();
diff --git a/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c b/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
index c1e450d..c4d53df 100644
--- a/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
+++ b/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
@@ -728,6 +728,192 @@ static int ipa_test_hw_stats_set_sw_stats(void *priv)
 	return ret;
 }
 
+static int ipa_test_hw_stats_set_uc_event_ring(void *priv)
+{
+	struct ipa_ioc_flt_rt_counter_alloc *counter = NULL;
+	int ret = 0;
+
+	/* set uc event ring */
+	IPA_UT_INFO("========set uc event ring ========\n");
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+		ipa3_ctx->ipa_hw_type != IPA_HW_v4_7) {
+		if (ipa3_ctx->uc_ctx.uc_loaded &&
+			!ipa3_ctx->uc_ctx.uc_event_ring_valid) {
+			if (ipa3_uc_setup_event_ring()) {
+				IPA_UT_ERR("failed to set uc_event ring\n");
+				ret = -EFAULT;
+			}
+		} else {
+			IPA_UT_ERR("uc-loaded %d, ring-valid %d\n",
+				ipa3_ctx->uc_ctx.uc_loaded,
+				ipa3_ctx->uc_ctx.uc_event_ring_valid);
+		}
+	}
+	IPA_UT_INFO("================ done ============\n");
+
+	/* allocate counters */
+	IPA_UT_INFO("========set hw counter ========\n");
+
+	counter = kzalloc(sizeof(struct ipa_ioc_flt_rt_counter_alloc),
+					  GFP_KERNEL);
+	if (!counter)
+		return -ENOMEM;
+
+	counter->hw_counter.num_counters = 4;
+	counter->sw_counter.num_counters = 4;
+
+	/* allocate counters */
+	ret = ipa3_alloc_counter_id(counter);
+	if (ret < 0) {
+		IPA_UT_ERR("ipa3_alloc_counter_id fails\n");
+		kfree(counter);
+		return -ENOMEM;
+	}
+	ipa3_ctx->fnr_info.hw_counter_offset = counter->hw_counter.start_id;
+	ipa3_ctx->fnr_info.sw_counter_offset = counter->sw_counter.start_id;
+	IPA_UT_INFO("hw-offset %d, sw-offset %d\n",
+		ipa3_ctx->fnr_info.hw_counter_offset,
+			ipa3_ctx->fnr_info.sw_counter_offset);
+
+	kfree(counter);
+	return ret;
+}
+
+static int ipa_test_hw_stats_set_quota(void *priv)
+{
+	int ret;
+	uint64_t quota = 500;
+
+	IPA_UT_INFO("========set quota ========\n");
+	ret = ipa3_uc_quota_monitor(quota);
+	if (ret < 0) {
+		IPA_UT_ERR("ipa3_uc_quota_monitor fails\n");
+		ret = -ENOMEM;
+	}
+	IPA_UT_INFO("================ done ============\n");
+	return ret;
+}
+
+static int ipa_test_hw_stats_set_bw(void *priv)
+{
+	int ret;
+	struct ipa_wdi_bw_info *info = NULL;
+
+	IPA_UT_INFO("========set BW voting ========\n");
+	info = kzalloc(sizeof(struct ipa_wdi_bw_info),
+					  GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->num = 3;
+	info->threshold[0] = 200;
+	info->threshold[1] = 400;
+	info->threshold[2] = 600;
+
+	ret = ipa3_uc_bw_monitor(info);
+	if (ret < 0) {
+		IPA_UT_ERR("ipa3_uc_bw_monitor fails\n");
+		ret = -ENOMEM;
+	}
+
+	IPA_UT_INFO("================ done ============\n");
+
+	kfree(info);
+	return ret;
+}
+
+static int ipa_test_hw_stats_hit_quota(void *priv)
+{
+	static const uint64_t byte_cnts[] = {100, 200, 300, 500, 600, 1000};
+	int ret = 0, counter_index, i, j;
+	struct ipa_flt_rt_stats stats;
+
+	/* set sw counters */
+	IPA_UT_INFO("========set quota hit========\n");
+	counter_index = ipa3_ctx->fnr_info.sw_counter_offset + UL_WLAN_TX;
+
+	for (i = 0; i < 5; i++) {
+		for (j = 0; j < ARRAY_SIZE(byte_cnts); j++) {
+			IPA_UT_INFO("========set %llu ========\n",
+				byte_cnts[j]);
+			memset(&stats, 0, sizeof(struct ipa_flt_rt_stats));
+			stats.num_bytes = byte_cnts[j];
+			stats.num_pkts = 69;
+			IPA_UT_INFO(
+				"set counter %u pkt_cnt %u bytes cnt %llu\n",
+				counter_index, stats.num_pkts,
+				stats.num_bytes);
+			ret = ipa_set_flt_rt_stats(counter_index, stats);
+			if (ret < 0) {
+				IPA_UT_ERR("ipa_set_flt_rt_stats fails\n");
+				ret = -ENOMEM;
+			}
+			/* pace updates; no delay needed after the last value */
+			if (j != ARRAY_SIZE(byte_cnts) - 1)
+				msleep(1000);
+		}
+	}
+	IPA_UT_INFO("================ done ============\n");
+	return ret;
+}
+
 /* Suite definition block */
 IPA_UT_DEFINE_SUITE_START(hw_stats, "HW stats test",
 	ipa_test_hw_stats_suite_setup, ipa_test_hw_stats_suite_teardown)
@@ -758,4 +944,20 @@ IPA_UT_DEFINE_SUITE_START(hw_stats, "HW stats test",
 		ipa_test_hw_stats_set_sw_stats, false,
 		IPA_HW_v4_5, IPA_HW_MAX),
 
+	IPA_UT_ADD_TEST(set_uc_evtring, "Set uc event ring",
+		ipa_test_hw_stats_set_uc_event_ring, false,
+		IPA_HW_v4_5, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(set_quota, "Set quota",
+		ipa_test_hw_stats_set_quota, false,
+		IPA_HW_v4_5, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(set_bw_voting, "Set bw_voting",
+		ipa_test_hw_stats_set_bw, false,
+		IPA_HW_v4_5, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(hit_quota, "quota hits",
+		ipa_test_hw_stats_hit_quota, false,
+		IPA_HW_v4_5, IPA_HW_MAX),
+
 } IPA_UT_DEFINE_SUITE_END(hw_stats);
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_framework.c b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
index 4bd4434..0dfb8184 100644
--- a/drivers/platform/msm/ipa/test/ipa_ut_framework.c
+++ b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
@@ -403,10 +403,10 @@ static ssize_t ipa_ut_dbgfs_meta_test_read(struct file *file,
 	meta_type = (long)(file->private_data);
 	IPA_UT_DBG("Meta test type %ld\n", meta_type);
 
-	buf = kmalloc(IPA_UT_DEBUG_READ_BUF_SIZE, GFP_KERNEL);
+	buf = kmalloc(IPA_UT_DEBUG_READ_BUF_SIZE + 1, GFP_KERNEL);
 	if (!buf) {
 		IPA_UT_ERR("failed to allocate %d bytes\n",
-			IPA_UT_DEBUG_READ_BUF_SIZE);
+			IPA_UT_DEBUG_READ_BUF_SIZE + 1);
 		cnt = 0;
 		goto unlock_mutex;
 	}
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 528b461..b485eb9 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -126,6 +126,7 @@ struct msm11ad_ctx {
 	struct cpumask boost_cpu_1;
 
 	bool keep_radio_on_during_sleep;
+	bool use_ap_ps;
 	int features;
 };
 
@@ -1054,6 +1055,7 @@ static int msm_11ad_probe(struct platform_device *pdev)
 	 *	qcom,msm-bus,vectors-KBps =
 	 *		<100 512 0 0>,
 	 *		<100 512 600000 800000>;
+	 *	qcom,use-ap-power-save; (ctx->use_ap_ps)
 	 *};
 	 * rc_node stands for "qcom,pcie", selected entries:
 	 * cell-index = <1>; (ctx->rc_index)
@@ -1094,6 +1096,8 @@ static int msm_11ad_probe(struct platform_device *pdev)
 		rc = -EINVAL;
 		goto out_module;
 	}
+	ctx->use_ap_ps = of_property_read_bool(of_node,
+					       "qcom,use-ap-power-save");
 
 	/*== execute ==*/
 	/* turn device on */
@@ -1558,6 +1562,12 @@ static int ops_get_capa(void *handle)
 	if (!ctx->smmu_s1_bypass)
 		capa |= BIT(WIL_PLATFORM_CAPA_SMMU);
 
+	pr_debug("%s: use AP power save is %s\n", __func__, ctx->use_ap_ps ?
+		 "allowed" : "not allowed");
+
+	if (ctx->use_ap_ps)
+		capa |= BIT(WIL_PLATFORM_CAPA_AP_PS);
+
 	return capa;
 }
 
diff --git a/drivers/platform/msm/msm_ext_display.c b/drivers/platform/msm/msm_ext_display.c
index 1fbd8fc..1a31ef1 100644
--- a/drivers/platform/msm/msm_ext_display.c
+++ b/drivers/platform/msm/msm_ext_display.c
@@ -90,9 +90,7 @@ static const char *msm_ext_disp_name(enum msm_ext_disp_type type)
 static int msm_ext_disp_add_intf_data(struct msm_ext_disp *ext_disp,
 		struct msm_ext_disp_init_data *data)
 {
-	int count = 0;
 	struct msm_ext_disp_list *node;
-	struct list_head *pos = NULL;
 
 	if (!ext_disp || !data) {
 		pr_err("Invalid params\n");
@@ -105,14 +103,8 @@ static int msm_ext_disp_add_intf_data(struct msm_ext_disp *ext_disp,
 
 	node->data = data;
 
-	list_for_each(pos, &ext_disp->display_list)
-		count++;
-
-	data->codec.stream_id = count;
-
 	list_add(&node->list, &ext_disp->display_list);
 
-
 	pr_debug("Added new display (%s) ctld (%d) stream (%d)\n",
 		msm_ext_disp_name(data->codec.type),
 		data->codec.ctrl_id, data->codec.stream_id);
diff --git a/drivers/platform/msm/qpnp-revid.c b/drivers/platform/msm/qpnp-revid.c
index 9a4ee33..b253405 100644
--- a/drivers/platform/msm/qpnp-revid.c
+++ b/drivers/platform/msm/qpnp-revid.c
@@ -59,6 +59,10 @@ static const char *const pmic_names[] = {
 	[PM8150L_SUBTYPE] = "PM8150L",
 	[PM6150_SUBTYPE] = "PM6150",
 	[PM7250B_SUBTYPE] = "PM7250B",
+	[PM6125_SUBTYPE] = "PM6125",
+	[PM8008_SUBTYPE] = "PM8008",
+	[SMB1355_SUBTYPE] = "SMB1355",
+	[SMB1390_SUBTYPE] = "SMB1390",
 };
 
 struct revid_chip {
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 088d1c2..36bd254 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -685,10 +685,14 @@ static int __init pmc_core_probe(void)
 	if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
 		pmcdev->map = &cnp_reg_map;
 
-	if (lpit_read_residency_count_address(&slp_s0_addr))
+	if (lpit_read_residency_count_address(&slp_s0_addr)) {
 		pmcdev->base_addr = PMC_BASE_ADDR_DEFAULT;
-	else
+
+		if (page_is_ram(PHYS_PFN(pmcdev->base_addr)))
+			return -ENODEV;
+	} else {
 		pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset;
+	}
 
 	pmcdev->regbase = ioremap(pmcdev->base_addr,
 				  pmcdev->map->regmap_length);
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index b1d8043..6a61028 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -423,6 +423,14 @@ static const struct dmi_system_id critclk_systems[] = {
 	},
 	{
 		/* pmc_plt_clk* - are used for ethernet controllers */
+		.ident = "Beckhoff CB4063",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
+			DMI_MATCH(DMI_BOARD_NAME, "CB4063"),
+		},
+	},
+	{
+		/* pmc_plt_clk* - are used for ethernet controllers */
 		.ident = "Beckhoff CB6263",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 0b46a81..fe0d9a7 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -45,7 +45,7 @@ static const char * const power_supply_type_text[] = {
 	"USB_DCP", "USB_CDP", "USB_ACA", "USB_C",
 	"USB_PD", "USB_PD_DRP", "BrickID",
 	"USB_HVDCP", "USB_HVDCP_3", "Wireless", "USB_FLOAT",
-	"BMS", "Parallel", "Main", "Wipower", "USB_C_UFP", "USB_C_DFP",
+	"BMS", "Parallel", "Main", "USB_C_UFP", "USB_C_DFP",
 	"Charge_Pump",
 };
 
@@ -152,7 +152,8 @@ static ssize_t power_supply_show_property(struct device *dev,
 				dev_dbg(dev, "driver has no data for `%s' property\n",
 					attr->attr.name);
 			else if (ret != -ENODEV && ret != -EAGAIN)
-				dev_err(dev, "driver failed to report `%s' property: %zd\n",
+				dev_err_ratelimited(dev,
+					"driver failed to report `%s' property: %zd\n",
 					attr->attr.name, ret);
 			return ret;
 		}
@@ -463,6 +464,8 @@ static struct device_attribute power_supply_attrs[] = {
 	POWER_SUPPLY_ATTR(comp_clamp_level),
 	POWER_SUPPLY_ATTR(adapter_cc_mode),
 	POWER_SUPPLY_ATTR(skin_health),
+	POWER_SUPPLY_ATTR(aicl_done),
+	POWER_SUPPLY_ATTR(voltage_step),
 	/* Charge pump properties */
 	POWER_SUPPLY_ATTR(cp_status1),
 	POWER_SUPPLY_ATTR(cp_status2),
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 47f60f4..6f58167 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -63,10 +63,10 @@ struct pl_data {
 	struct votable		*cp_ilim_votable;
 	struct votable		*cp_disable_votable;
 	struct votable		*fcc_main_votable;
+	struct votable		*cp_slave_disable_votable;
 	struct delayed_work	status_change_work;
 	struct work_struct	pl_disable_forever_work;
 	struct work_struct	pl_taper_work;
-	struct delayed_work	pl_awake_work;
 	struct delayed_work	fcc_stepper_work;
 	bool			taper_work_running;
 	struct power_supply	*main_psy;
@@ -75,6 +75,7 @@ struct pl_data {
 	struct power_supply	*usb_psy;
 	struct power_supply	*dc_psy;
 	struct power_supply	*cp_master_psy;
+	struct power_supply	*cp_slave_psy;
 	int			charge_type;
 	int			total_settled_ua;
 	int			pl_settled_ua;
@@ -87,18 +88,20 @@ struct pl_data {
 	int			parallel_step_fcc_count;
 	int			parallel_step_fcc_residual;
 	int			step_fcc;
+	int			override_main_fcc_ua;
+	int			total_fcc_ua;
 	u32			wa_flags;
 	struct class		qcom_batt_class;
 	struct wakeup_source	*pl_ws;
 	struct notifier_block	nb;
+	struct charger_param	*chg_param;
 	bool			pl_disable;
 	bool			cp_disabled;
 	int			taper_entry_fv;
 	int			main_fcc_max;
-	int			fcc_step_size_ua;
-	int			fcc_step_delay_ms;
 	/* debugfs directory */
 	struct dentry		*dfs_root;
+	u32			float_voltage_uv;
 };
 
 struct pl_data *the_chip;
@@ -131,9 +134,23 @@ enum {
 	RESTRICT_CHG_CURRENT,
 	FCC_STEPPING_IN_PROGRESS,
 };
+
+enum {
+	PARALLEL_INPUT_MODE,
+	PARALLEL_OUTPUT_MODE,
+};
 /*********
  * HELPER*
  *********/
+static bool is_usb_available(struct pl_data *chip)
+{
+	if (!chip->usb_psy)
+		chip->usb_psy =
+			power_supply_get_by_name("usb");
+
+	return !!chip->usb_psy;
+}
+
 static bool is_cp_available(struct pl_data *chip)
 {
 	if (!chip->cp_master_psy)
@@ -143,24 +160,114 @@ static bool is_cp_available(struct pl_data *chip)
 	return !!chip->cp_master_psy;
 }
 
-static bool cp_ilim_boost_enabled(struct pl_data *chip)
+static int cp_get_parallel_mode(struct pl_data *chip, int mode)
 {
-	union power_supply_propval pval = {-1, };
+	union power_supply_propval pval = {-EINVAL, };
+	int rc = -EINVAL;
 
-	if (is_cp_available(chip))
-		power_supply_get_property(chip->cp_master_psy,
+	if (!is_cp_available(chip))
+		return -EINVAL;
+
+	switch (mode) {
+	case PARALLEL_INPUT_MODE:
+		rc = power_supply_get_property(chip->cp_master_psy,
+				POWER_SUPPLY_PROP_PARALLEL_MODE, &pval);
+		break;
+	case PARALLEL_OUTPUT_MODE:
+		rc = power_supply_get_property(chip->cp_master_psy,
 				POWER_SUPPLY_PROP_PARALLEL_OUTPUT_MODE, &pval);
+		break;
+	default:
+		pr_err("Invalid mode request %d\n", mode);
+		break;
+	}
 
-	return pval.intval == POWER_SUPPLY_PL_OUTPUT_VPH;
+	if (rc < 0)
+		pr_err("Failed to read CP topology for mode=%d rc=%d\n",
+				mode, rc);
+
+	return pval.intval;
 }
 
+/*
+ * Adapter CC mode: ILIM is overridden explicitly, so the logic below
+ * takes no effect.
+ *
+ * Adapter CV mode: ILIM is configured per topology as follows:
+ * MID-VPH:
+ *	SMB1390 ILIM: independent of FCC, based on the AICL result or the
+ *			PD advertised current; handled directly in the
+ *			SMB1390 driver.
+ * MID-VBAT:
+ *	SMB1390 ILIM: based on the minimum of the SMB1390 FCC portion
+ *			and the ICL.
+ * USBIN-VBAT:
+ *	SMB1390 ILIM: based on the SMB1390 FCC portion, independent of ICL.
+ */
 static void cp_configure_ilim(struct pl_data *chip, const char *voter, int ilim)
 {
+	int rc, fcc, main_icl, target_icl = chip->chg_param->hvdcp3_max_icl_ua;
+	union power_supply_propval pval = {0, };
+
+	if (!is_usb_available(chip))
+		return;
+
+	if (!is_cp_available(chip))
+		return;
+
+	if (cp_get_parallel_mode(chip, PARALLEL_OUTPUT_MODE)
+					== POWER_SUPPLY_PL_OUTPUT_VPH)
+		return;
+
+	rc = power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_REAL_TYPE, &pval);
+	if (rc < 0)
+		return;
+
+	/*
+	 * For HVDCP3 adapters, limit the maximum ILIM based on the
+	 * DT-configured HVDCP3 ICL value.
+	 * Input VBUS:
+	 *	target_icl = HVDCP3_ICL - main_ICL
+	 * Input VMID:
+	 *	target_icl = HVDCP3_ICL
+	 */
+	if (pval.intval == POWER_SUPPLY_TYPE_USB_HVDCP_3) {
+		if (cp_get_parallel_mode(chip, PARALLEL_INPUT_MODE)
+					== POWER_SUPPLY_PL_USBIN_USBIN) {
+			main_icl = get_effective_result_locked(
+							chip->usb_icl_votable);
+			if ((main_icl >= 0) && (main_icl < target_icl))
+				target_icl -= main_icl;
+		}
+
+		ilim = min(target_icl, ilim);
+	}
+
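+	/* MIN_ICL is the lowest input current at which the CP can operate */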
+	rc = power_supply_get_property(chip->cp_master_psy,
+				POWER_SUPPLY_PROP_MIN_ICL, &pval);
+	if (rc < 0)
+		return;
+
 	if (!chip->cp_ilim_votable)
 		chip->cp_ilim_votable = find_votable("CP_ILIM");
 
-	if (!cp_ilim_boost_enabled(chip) && chip->cp_ilim_votable)
-		vote(chip->cp_ilim_votable, voter, true, ilim);
+	if (chip->cp_ilim_votable) {
+		fcc = get_effective_result_locked(chip->fcc_votable);
+		/*
+		 * If FCC >= (2 * MIN_ICL) then it is safe to enable the CP
+		 * with MIN_ICL.
+		 * Configure ILIM as follows:
+		 * if the requested ilim < MIN_ICL, configure ILIM to MIN_ICL;
+		 * otherwise configure ILIM to the requested ilim.
+		 */
+		if ((fcc >= (pval.intval * 2)) && (ilim < pval.intval))
+			vote(chip->cp_ilim_votable, voter, true, pval.intval);
+		else
+			vote(chip->cp_ilim_votable, voter, true, ilim);
+
+		pl_dbg(chip, PR_PARALLEL,
+			"ILIM: vote: %d voter:%s min_ilim=%d fcc = %d\n",
+			ilim, voter, pval.intval, fcc);
+	}
 }
 
 /*******
@@ -456,8 +563,6 @@ ATTRIBUTE_GROUPS(batt_class);
  *  FCC  *
  **********/
 #define EFFICIENCY_PCT	80
-#define DEFAULT_FCC_STEP_SIZE_UA 100000
-#define DEFAULT_FCC_STEP_UPDATE_DELAY_MS 1000
 #define STEP_UP 1
 #define STEP_DOWN -1
 static void get_fcc_split(struct pl_data *chip, int total_ua,
@@ -558,27 +663,53 @@ static void get_main_fcc_config(struct pl_data *chip, int *total_fcc)
 static void get_fcc_stepper_params(struct pl_data *chip, int main_fcc_ua,
 			int parallel_fcc_ua)
 {
-	if (!chip->fcc_step_size_ua) {
+	int main_set_fcc_ua, total_fcc_ua;
+
+	if (!chip->chg_param->fcc_step_size_ua) {
 		pr_err("Invalid fcc stepper step size, value 0\n");
 		return;
 	}
 
+	if (is_override_vote_enabled_locked(chip->fcc_main_votable)) {
+		/*
+		 * FCC stepper params need re-calculation in override mode
+		 * only if there is a change in the main or total FCC.
+		 */
+
+		main_set_fcc_ua = get_effective_result_locked(
+							chip->fcc_main_votable);
+		total_fcc_ua = main_fcc_ua + parallel_fcc_ua;
+
+		if ((main_set_fcc_ua != chip->override_main_fcc_ua)
+				|| (total_fcc_ua != chip->total_fcc_ua)) {
+			chip->override_main_fcc_ua = main_set_fcc_ua;
+			chip->total_fcc_ua = total_fcc_ua;
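+			/*
+			 * Main FCC is pinned by the override; the parallel
+			 * charger takes whatever is left of the total.
+			 */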
+			parallel_fcc_ua = (total_fcc_ua
+						- chip->override_main_fcc_ua);
+		} else {
+			goto skip_fcc_step_update;
+		}
+	}
+
 	/* Read current FCC of main charger */
 	chip->main_fcc_ua = get_effective_result(chip->fcc_main_votable);
 	chip->main_step_fcc_dir = (main_fcc_ua > chip->main_fcc_ua) ?
 				STEP_UP : STEP_DOWN;
 	chip->main_step_fcc_count = abs((main_fcc_ua - chip->main_fcc_ua) /
-				chip->fcc_step_size_ua);
+				chip->chg_param->fcc_step_size_ua);
 	chip->main_step_fcc_residual = abs((main_fcc_ua - chip->main_fcc_ua) %
-				chip->fcc_step_size_ua);
+				chip->chg_param->fcc_step_size_ua);
 
 	chip->parallel_step_fcc_dir = (parallel_fcc_ua > chip->slave_fcc_ua) ?
 				STEP_UP : STEP_DOWN;
-	chip->parallel_step_fcc_count = abs((parallel_fcc_ua -
-				chip->slave_fcc_ua) / chip->fcc_step_size_ua);
-	chip->parallel_step_fcc_residual = abs((parallel_fcc_ua -
-				chip->slave_fcc_ua) % chip->fcc_step_size_ua);
+	chip->parallel_step_fcc_count
+				= abs((parallel_fcc_ua - chip->slave_fcc_ua) /
+					chip->chg_param->fcc_step_size_ua);
+	chip->parallel_step_fcc_residual
+				= abs((parallel_fcc_ua - chip->slave_fcc_ua) %
+					chip->chg_param->fcc_step_size_ua);
 
+skip_fcc_step_update:
 	if (chip->parallel_step_fcc_count || chip->parallel_step_fcc_residual
 		|| chip->main_step_fcc_count || chip->main_step_fcc_residual)
 		chip->step_fcc = 1;
@@ -714,7 +845,9 @@ static int pl_fcc_vote_callback(struct votable *votable, void *data,
 {
 	struct pl_data *chip = data;
 	int master_fcc_ua = total_fcc_ua, slave_fcc_ua = 0;
+	int main_fcc_ua = 0, cp_fcc_ua = 0, fcc_thr_ua = 0, rc;
 	union power_supply_propval pval = {0, };
+	bool is_cc_mode = false;
 
 	if (total_fcc_ua < 0)
 		return 0;
@@ -725,21 +858,65 @@ static int pl_fcc_vote_callback(struct votable *votable, void *data,
 	if (!chip->cp_disable_votable)
 		chip->cp_disable_votable = find_votable("CP_DISABLE");
 
-	if (chip->cp_disable_votable) {
-		if (cp_ilim_boost_enabled(chip)) {
-			power_supply_get_property(chip->cp_master_psy,
+	if (!chip->cp_master_psy)
+		chip->cp_master_psy =
+			power_supply_get_by_name("charge_pump_master");
+
+	if (!chip->cp_slave_psy)
+		chip->cp_slave_psy = power_supply_get_by_name("cp_slave");
+
+	if (!chip->cp_slave_disable_votable)
+		chip->cp_slave_disable_votable =
+			find_votable("CP_SLAVE_DISABLE");
+
+	if (!chip->usb_psy)
+		chip->usb_psy = power_supply_get_by_name("usb");
+
+	if (chip->usb_psy) {
+		rc = power_supply_get_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_ADAPTER_CC_MODE,
+					&pval);
+		if (rc < 0)
+			pr_err("Couldn't get PPS CC mode status rc=%d\n", rc);
+		else
+			is_cc_mode = pval.intval;
+	}
+
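+	/*
+	 * CP disable threshold for the slave: 3 * MIN_ICL in adapter CC
+	 * mode, 4 * MIN_ICL otherwise.
+	 */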
+	if (chip->cp_master_psy) {
+		rc = power_supply_get_property(chip->cp_master_psy,
 					POWER_SUPPLY_PROP_MIN_ICL, &pval);
+		if (rc < 0)
+			pr_err("Couldn't get MIN ICL threshold rc=%d\n", rc);
+		else
+			fcc_thr_ua = is_cc_mode ? (3 * pval.intval) :
+							(4 * pval.intval);
+	}
+
+	if (chip->fcc_main_votable)
+		main_fcc_ua =
+			get_effective_result_locked(chip->fcc_main_votable);
+
+	if (main_fcc_ua < 0)
+		main_fcc_ua = 0;
+
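+	/* FCC headroom left for the charge pumps after main's share */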
+	cp_fcc_ua = total_fcc_ua - main_fcc_ua;
+	if (cp_fcc_ua > 0) {
+		if (chip->cp_slave_psy && chip->cp_slave_disable_votable) {
 			/*
-			 * With ILIM boost feature ILIM configuration is
-			 * independent of battery FCC, disable CP if FCC/2
-			 * falls below MIN_ICL supported by CP.
+			 * Disable the slave CP if the CP's FCC share
+			 * falls below the threshold.
 			 */
-			if ((total_fcc_ua / 2) < pval.intval)
-				vote(chip->cp_disable_votable, FCC_VOTER,
-						true, 0);
-			else
-				vote(chip->cp_disable_votable, FCC_VOTER,
-						false, 0);
+			vote(chip->cp_slave_disable_votable, FCC_VOTER,
+				(cp_fcc_ua < fcc_thr_ua), 0);
+		}
+
+		if (chip->cp_disable_votable) {
+			/*
+			 * Disable the master CP if the CP's FCC share
+			 * falls below the 2 * MIN_ICL threshold.
+			 */
+			vote(chip->cp_disable_votable, FCC_VOTER,
+			     (cp_fcc_ua < (2 * pval.intval)), 0);
 		}
 	}
 
@@ -758,10 +935,6 @@ static int pl_fcc_vote_callback(struct votable *votable, void *data,
 
 	rerun_election(chip->pl_disable_votable);
 	/* When FCC changes, trigger psy changed event for CC mode */
-	if (!chip->cp_master_psy)
-		chip->cp_master_psy =
-			power_supply_get_by_name("charge_pump_master");
-
 	if (chip->cp_master_psy)
 		power_supply_changed(chip->cp_master_psy);
 
@@ -825,19 +998,20 @@ static void fcc_stepper_work(struct work_struct *work)
 	}
 
 	if (chip->main_step_fcc_count) {
-		main_fcc += (chip->fcc_step_size_ua * chip->main_step_fcc_dir);
+		main_fcc += (chip->chg_param->fcc_step_size_ua
+					* chip->main_step_fcc_dir);
 		chip->main_step_fcc_count--;
-		reschedule_ms = chip->fcc_step_delay_ms;
+		reschedule_ms = chip->chg_param->fcc_step_delay_ms;
 	} else if (chip->main_step_fcc_residual) {
 		main_fcc += chip->main_step_fcc_residual;
 		chip->main_step_fcc_residual = 0;
 	}
 
 	if (chip->parallel_step_fcc_count) {
-		parallel_fcc += (chip->fcc_step_size_ua *
-			chip->parallel_step_fcc_dir);
+		parallel_fcc += (chip->chg_param->fcc_step_size_ua
+					* chip->parallel_step_fcc_dir);
 		chip->parallel_step_fcc_count--;
-		reschedule_ms = chip->fcc_step_delay_ms;
+		reschedule_ms = chip->chg_param->fcc_step_delay_ms;
 	} else if (chip->parallel_step_fcc_residual) {
 		parallel_fcc += chip->parallel_step_fcc_residual;
 		chip->parallel_step_fcc_residual = 0;
@@ -926,8 +1100,7 @@ static void fcc_stepper_work(struct work_struct *work)
 stepper_exit:
 	chip->main_fcc_ua = main_fcc;
 	chip->slave_fcc_ua = parallel_fcc;
-
-	cp_configure_ilim(chip, FCC_VOTER, chip->main_fcc_ua / 2);
+	cp_configure_ilim(chip, FCC_VOTER, chip->slave_fcc_ua / 2);
 
 	if (reschedule_ms) {
 		schedule_delayed_work(&chip->fcc_stepper_work,
@@ -940,6 +1113,17 @@ static void fcc_stepper_work(struct work_struct *work)
 	vote(chip->pl_awake_votable, FCC_STEPPER_VOTER, false, 0);
 }
 
+static bool is_batt_available(struct pl_data *chip)
+{
+	if (!chip->batt_psy)
+		chip->batt_psy = power_supply_get_by_name("battery");
+
+	if (!chip->batt_psy)
+		return false;
+
+	return true;
+}
+
 #define PARALLEL_FLOAT_VOLTAGE_DELTA_UV 50000
 static int pl_fv_vote_callback(struct votable *votable, void *data,
 			int fv_uv, const char *client)
@@ -973,6 +1157,31 @@ static int pl_fv_vote_callback(struct votable *votable, void *data,
 		}
 	}
 
+	/*
+	 * Check for termination at a reduced float voltage and re-trigger
+	 * charging if the new float voltage is above the last FV.
+	 */
+	if ((chip->float_voltage_uv < fv_uv) && is_batt_available(chip)) {
+		rc = power_supply_get_property(chip->batt_psy,
+				POWER_SUPPLY_PROP_STATUS, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get battery status rc=%d\n", rc);
+		} else {
+			if (pval.intval == POWER_SUPPLY_STATUS_FULL) {
+				pr_debug("re-triggering charging\n");
+				pval.intval = 1;
+				rc = power_supply_set_property(chip->batt_psy,
+					POWER_SUPPLY_PROP_FORCE_RECHARGE,
+					&pval);
+				if (rc < 0)
+					pr_err("Couldn't set force recharge rc=%d\n",
+							rc);
+			}
+		}
+	}
+
+	chip->float_voltage_uv = fv_uv;
+
 	return 0;
 }
 
@@ -984,7 +1193,7 @@ static int usb_icl_vote_callback(struct votable *votable, void *data,
 	int rc;
 	struct pl_data *chip = data;
 	union power_supply_propval pval = {0, };
-	bool rerun_aicl = false;
+	bool rerun_aicl = false, dc_present = false;
 
 	if (!chip->main_psy)
 		return 0;
@@ -1044,7 +1253,23 @@ static int usb_icl_vote_callback(struct votable *votable, void *data,
 
 	vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, false, 0);
 
-	cp_configure_ilim(chip, ICL_CHANGE_VOTER, icl_ua);
+	/* Configure ILIM based on AICL result only if input mode is USBMID */
+	if (cp_get_parallel_mode(chip, PARALLEL_INPUT_MODE)
+					== POWER_SUPPLY_PL_USBMID_USBMID) {
+		if (chip->dc_psy) {
+			rc = power_supply_get_property(chip->dc_psy,
+					POWER_SUPPLY_PROP_PRESENT, &pval);
+			if (rc < 0) {
+				pr_err("Couldn't get DC PRESENT rc=%d\n", rc);
+				return rc;
+			}
+			dc_present = pval.intval;
+		}
+
+		/* Don't configure ILIM if DC is present */
+		if (!dc_present)
+			cp_configure_ilim(chip, ICL_CHANGE_VOTER, icl_ua);
+	}
 
 	return 0;
 }
@@ -1062,32 +1287,13 @@ static void pl_disable_forever_work(struct work_struct *work)
 		vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER, false, 0);
 }
 
-static void pl_awake_work(struct work_struct *work)
-{
-	struct pl_data *chip = container_of(work,
-			struct pl_data, pl_awake_work.work);
-
-	vote(chip->pl_awake_votable, PL_VOTER, false, 0);
-}
-
-static bool is_batt_available(struct pl_data *chip)
-{
-	if (!chip->batt_psy)
-		chip->batt_psy = power_supply_get_by_name("battery");
-
-	if (!chip->batt_psy)
-		return false;
-
-	return true;
-}
-
 static int pl_disable_vote_callback(struct votable *votable,
 		void *data, int pl_disable, const char *client)
 {
 	struct pl_data *chip = data;
 	union power_supply_propval pval = {0, };
 	int master_fcc_ua = 0, total_fcc_ua = 0, slave_fcc_ua = 0;
-	int rc = 0;
+	int rc = 0, cp_ilim;
 	bool disable = false;
 
 	if (!is_main_available(chip))
@@ -1130,10 +1336,6 @@ static int pl_disable_vote_callback(struct votable *votable,
 	total_fcc_ua = get_effective_result_locked(chip->fcc_votable);
 
 	if (chip->pl_mode != POWER_SUPPLY_PL_NONE && !pl_disable) {
-		/* keep system awake to talk to slave charger through i2c */
-		cancel_delayed_work_sync(&chip->pl_awake_work);
-		vote(chip->pl_awake_votable, PL_VOTER, true, 0);
-
 		rc = validate_parallel_icl(chip, &disable);
 		if (rc < 0)
 			return rc;
@@ -1271,7 +1473,10 @@ static int pl_disable_vote_callback(struct votable *votable,
 			/* main psy gets all share */
 			vote(chip->fcc_main_votable, MAIN_FCC_VOTER, true,
 								total_fcc_ua);
-			cp_configure_ilim(chip, FCC_VOTER, total_fcc_ua / 2);
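+			/*
+			 * The CP sources roughly twice its input current, so
+			 * its ILIM is half of the FCC left after main's share.
+			 */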
+			cp_ilim = total_fcc_ua - get_effective_result_locked(
+							chip->fcc_main_votable);
+			if (cp_ilim > 0)
+				cp_configure_ilim(chip, FCC_VOTER, cp_ilim / 2);
 
 			/* reset parallel FCC */
 			chip->slave_fcc_ua = 0;
@@ -1282,16 +1487,19 @@ static int pl_disable_vote_callback(struct votable *votable,
 			if (chip->step_fcc) {
 				vote(chip->pl_awake_votable, FCC_STEPPER_VOTER,
 					true, 0);
+				/*
+				 * Configure ILIM above the min ILIM of the CP
+				 * to ensure the CP is not disabled due to the
+				 * ILIM vote. The FCC stepper will later ramp
+				 * the ILIM to the target value.
+				 */
+				cp_configure_ilim(chip, FCC_VOTER, 0);
 				schedule_delayed_work(&chip->fcc_stepper_work,
 					0);
 			}
 		}
 
 		rerun_election(chip->fv_votable);
-
-		cancel_delayed_work_sync(&chip->pl_awake_work);
-		schedule_delayed_work(&chip->pl_awake_work,
-						msecs_to_jiffies(5000));
 	}
 
 	/* notify parallel state change */
@@ -1650,22 +1858,11 @@ static int pl_determine_initial_status(struct pl_data *chip)
 
 static void pl_config_init(struct pl_data *chip, int smb_version)
 {
-	chip->fcc_step_size_ua = DEFAULT_FCC_STEP_SIZE_UA;
-	chip->fcc_step_delay_ms = DEFAULT_FCC_STEP_UPDATE_DELAY_MS;
-
 	switch (smb_version) {
-	case PM8150B_SUBTYPE:
-		chip->fcc_step_delay_ms = 100;
-		break;
 	case PMI8998_SUBTYPE:
 	case PM660_SUBTYPE:
 		chip->wa_flags = AICL_RERUN_WA_BIT | FORCE_INOV_DISABLE_BIT;
 		break;
-	case PMI632_SUBTYPE:
-		break;
-	case PM7250B_SUBTYPE:
-		chip->fcc_step_delay_ms = 100;
-		break;
 	default:
 		break;
 	}
@@ -1690,11 +1887,16 @@ static void qcom_batt_create_debugfs(struct pl_data *chip)
 }
 
 #define DEFAULT_RESTRICTED_CURRENT_UA	1000000
-int qcom_batt_init(int smb_version)
+int qcom_batt_init(struct charger_param *chg_param)
 {
 	struct pl_data *chip;
 	int rc = 0;
 
+	if (!chg_param) {
+		pr_err("invalid charger parameter\n");
+		return -EINVAL;
+	}
+
 	/* initialize just once */
 	if (the_chip) {
 		pr_err("was initialized earlier. Failing now\n");
@@ -1708,7 +1910,8 @@ int qcom_batt_init(int smb_version)
 	qcom_batt_create_debugfs(chip);
 
 	chip->slave_pct = 50;
-	pl_config_init(chip, smb_version);
+	chip->chg_param = chg_param;
+	pl_config_init(chip, chg_param->smb_version);
 	chip->restricted_current = DEFAULT_RESTRICTED_CURRENT_UA;
 
 	chip->pl_ws = wakeup_source_register("qcom-battery");
@@ -1787,7 +1990,6 @@ int qcom_batt_init(int smb_version)
 	INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
 	INIT_WORK(&chip->pl_taper_work, pl_taper_work);
 	INIT_WORK(&chip->pl_disable_forever_work, pl_disable_forever_work);
-	INIT_DELAYED_WORK(&chip->pl_awake_work, pl_awake_work);
 	INIT_DELAYED_WORK(&chip->fcc_stepper_work, fcc_stepper_work);
 
 	rc = pl_register_notifier(chip);
@@ -1845,7 +2047,6 @@ void qcom_batt_deinit(void)
 	cancel_delayed_work_sync(&chip->status_change_work);
 	cancel_work_sync(&chip->pl_taper_work);
 	cancel_work_sync(&chip->pl_disable_forever_work);
-	cancel_delayed_work_sync(&chip->pl_awake_work);
 	cancel_delayed_work_sync(&chip->fcc_stepper_work);
 
 	power_supply_unreg_notifier(&chip->nb);
diff --git a/drivers/power/supply/qcom/battery.h b/drivers/power/supply/qcom/battery.h
index 53a54e86..fbc6b25 100644
--- a/drivers/power/supply/qcom/battery.h
+++ b/drivers/power/supply/qcom/battery.h
@@ -1,10 +1,18 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved.
  */
 
 #ifndef __BATTERY_H
 #define __BATTERY_H
-int qcom_batt_init(int smb_version);
+
+struct charger_param {
+	u32 fcc_step_delay_ms;
+	u32 fcc_step_size_ua;
+	u32 smb_version;
+	u32 hvdcp3_max_icl_ua;
+};
+
+int qcom_batt_init(struct charger_param *param);
 void qcom_batt_deinit(void);
 #endif /* __BATTERY_H */
diff --git a/drivers/power/supply/qcom/fg-alg.c b/drivers/power/supply/qcom/fg-alg.c
index 26b9b50..58a4eb9 100644
--- a/drivers/power/supply/qcom/fg-alg.c
+++ b/drivers/power/supply/qcom/fg-alg.c
@@ -15,6 +15,7 @@
 
 #define FULL_SOC_RAW		255
 #define CAPACITY_DELTA_DECIPCT	500
+#define CENTI_FULL_SOC		10000
 
 #define CENTI_ICORRECT_C0	105
 #define CENTI_ICORRECT_C1	20
@@ -366,7 +367,7 @@ static void cap_learning_post_process(struct cap_learning *cl)
  * cap_wt_learning_process_full_data -
  * @cl: Capacity learning object
  * @delta_batt_soc_pct: percentage change in battery State of Charge
- * @batt_soc_msb: MSB of battery State of Charge
+ * @batt_soc_cp: Battery State of Charge in centi-percentage
  *
  * Calculates the final learnt capacity when
  * weighted capacity learning is enabled.
@@ -374,11 +375,11 @@ static void cap_learning_post_process(struct cap_learning *cl)
  */
 static int cap_wt_learning_process_full_data(struct cap_learning *cl,
 					int delta_batt_soc_pct,
-					int batt_soc_msb)
+					int batt_soc_cp)
 {
 	int64_t del_cap_uah, total_cap_uah,
 		res_cap_uah, wt_learnt_cap_uah;
-	int delta_batt_soc_msb, res_batt_soc_msb;
+	int delta_batt_soc_cp, res_batt_soc_cp;
 
 	/* If the delta is < 10%, then skip processing full data */
 	if (delta_batt_soc_pct < cl->dt.min_delta_batt_soc) {
@@ -386,11 +387,11 @@ static int cap_wt_learning_process_full_data(struct cap_learning *cl,
 		return -ERANGE;
 	}
 
-	delta_batt_soc_msb = batt_soc_msb - cl->init_batt_soc_msb;
-	res_batt_soc_msb = FULL_SOC_RAW - batt_soc_msb;
-	/* Learnt Capacity from end Battery SOC MSB to FULL_SOC_RAW */
+	delta_batt_soc_cp = batt_soc_cp - cl->init_batt_soc_cp;
+	res_batt_soc_cp = CENTI_FULL_SOC - batt_soc_cp;
+	/* Learnt Capacity from end Battery SOC to CENTI_FULL_SOC */
 	res_cap_uah = div64_s64(cl->learned_cap_uah *
-				res_batt_soc_msb, FULL_SOC_RAW);
+				res_batt_soc_cp, CENTI_FULL_SOC);
 	total_cap_uah = cl->init_cap_uah + cl->delta_cap_uah + res_cap_uah;
 	/*
 	 * difference in capacity learnt in this
@@ -398,8 +399,8 @@ static int cap_wt_learning_process_full_data(struct cap_learning *cl,
 	 */
 	del_cap_uah = total_cap_uah - cl->learned_cap_uah;
-	/* Applying weight based on change in battery SOC MSB */
+	/* Applying weight based on change in battery SOC (centi-percent) */
-	wt_learnt_cap_uah = div64_s64(del_cap_uah * delta_batt_soc_msb,
-					FULL_SOC_RAW);
+	wt_learnt_cap_uah = div64_s64(del_cap_uah * delta_batt_soc_cp,
+					CENTI_FULL_SOC);
 	cl->final_cap_uah = cl->learned_cap_uah + wt_learnt_cap_uah;
 
 	pr_debug("wt_learnt_cap_uah=%lld, del_cap_uah=%lld\n",
@@ -413,14 +414,14 @@ static int cap_wt_learning_process_full_data(struct cap_learning *cl,
 /**
  * cap_learning_process_full_data -
  * @cl: Capacity learning object
- * @batt_soc_msb: Most significant byte of Battery State of Charge
+ * @batt_soc_cp: Battery State of Charge in centi-percentage
  *
  * Processes the coulomb counter during charge termination and calculates the
  * delta w.r.to the coulomb counter obtained earlier when the learning begun.
  *
  */
 static int cap_learning_process_full_data(struct cap_learning *cl,
-					int batt_soc_msb)
+					int batt_soc_cp)
 {
 	int rc, cc_soc_sw, cc_soc_delta_pct, delta_batt_soc_pct, batt_soc_pct,
 		cc_soc_fraction;
@@ -432,7 +433,7 @@ static int cap_learning_process_full_data(struct cap_learning *cl,
 		return rc;
 	}
 
-	batt_soc_pct = DIV_ROUND_CLOSEST(batt_soc_msb * 100, FULL_SOC_RAW);
+	batt_soc_pct = DIV_ROUND_CLOSEST(batt_soc_cp, 100);
 	delta_batt_soc_pct = batt_soc_pct - cl->init_batt_soc;
 	cc_soc_delta_pct =
 		div_s64_rem((int64_t)(cc_soc_sw - cl->init_cc_soc_sw) * 100,
@@ -446,7 +447,7 @@ static int cap_learning_process_full_data(struct cap_learning *cl,
 
 	if (cl->dt.cl_wt_enable) {
 		rc = cap_wt_learning_process_full_data(cl, delta_batt_soc_pct,
-							batt_soc_msb);
+							batt_soc_cp);
 		return rc;
 	}
 
@@ -465,38 +466,47 @@ static int cap_learning_process_full_data(struct cap_learning *cl,
 /**
  * cap_learning_begin -
  * @cl: Capacity learning object
- * @batt_soc: Battery State of Charge (SOC)
+ * @batt_soc_cp: Battery State of Charge in centi-percentage
  *
  * Gets the coulomb counter from FG/QG when the conditions are suitable for
  * beginning capacity learning. Also, primes the coulomb counter based on
  * battery SOC if required.
  *
  */
-static int cap_learning_begin(struct cap_learning *cl, u32 batt_soc)
+#define BATT_SOC_32BIT	GENMASK(31, 0)
+static int cap_learning_begin(struct cap_learning *cl, u32 batt_soc_cp)
 {
-	int rc, cc_soc_sw, batt_soc_msb, batt_soc_pct;
+	int rc, cc_soc_sw, batt_soc_pct;
+	u32 batt_soc_prime;
 
-	batt_soc_msb = batt_soc >> 24;
-	batt_soc_pct = DIV_ROUND_CLOSEST(batt_soc_msb * 100, FULL_SOC_RAW);
-
-	if (!cl->dt.cl_wt_enable) {
-		if (batt_soc_pct > cl->dt.max_start_soc ||
-				batt_soc_pct < cl->dt.min_start_soc) {
-			pr_debug("Battery SOC %d is high/low, not starting\n",
-					batt_soc_pct);
-			return -EINVAL;
-		}
+	if (cl->ok_to_begin && !cl->ok_to_begin(cl->data)) {
+		pr_debug("Not OK to begin\n");
+		return -EINVAL;
 	}
 
-	cl->init_cap_uah = div64_s64(cl->learned_cap_uah * batt_soc_msb,
-					FULL_SOC_RAW);
+	batt_soc_pct = DIV_ROUND_CLOSEST(batt_soc_cp, 100);
+
+	if ((cl->dt.max_start_soc != -EINVAL &&
+			batt_soc_pct > cl->dt.max_start_soc) ||
+			(cl->dt.min_start_soc != -EINVAL &&
+			batt_soc_pct < cl->dt.min_start_soc)) {
+		pr_debug("Battery SOC %d is high/low, not starting\n",
+					batt_soc_pct);
+		return -EINVAL;
+	}
+
+	cl->init_cap_uah = div64_s64(cl->learned_cap_uah * batt_soc_cp,
+					CENTI_FULL_SOC);
 
 	if (cl->prime_cc_soc) {
 		/*
 		 * Prime cc_soc_sw with battery SOC when capacity learning
 		 * begins.
 		 */
-		rc = cl->prime_cc_soc(cl->data, batt_soc);
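+		/* Scale the centi-percent SOC to the 32-bit fixed-point format */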
+		batt_soc_prime = div64_u64(
+				(uint64_t)batt_soc_cp * BATT_SOC_32BIT,
+							CENTI_FULL_SOC);
+		rc = cl->prime_cc_soc(cl->data, batt_soc_prime);
 		if (rc < 0) {
 			pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
 			goto out;
@@ -511,9 +521,9 @@ static int cap_learning_begin(struct cap_learning *cl, u32 batt_soc)
 
 	cl->init_cc_soc_sw = cc_soc_sw;
 	cl->init_batt_soc = batt_soc_pct;
-	cl->init_batt_soc_msb = batt_soc_msb;
+	cl->init_batt_soc_cp = batt_soc_cp;
 	pr_debug("Capacity learning started @ battery SOC %d init_cc_soc_sw:%d\n",
-		batt_soc_msb, cl->init_cc_soc_sw);
+		batt_soc_cp, cl->init_cc_soc_sw);
 out:
 	return rc;
 }
@@ -521,17 +531,17 @@ static int cap_learning_begin(struct cap_learning *cl, u32 batt_soc)
 /**
  * cap_learning_done -
  * @cl: Capacity learning object
- * @batt_soc_msb: Most significant byte of battery State of Charge
+ * @batt_soc_cp: Battery State of Charge in centi-percentage
  *
  * Top level function for getting coulomb counter and post processing the
  * data once the capacity learning is complete after charge termination.
  *
  */
-static int cap_learning_done(struct cap_learning *cl, int batt_soc_msb)
+static int cap_learning_done(struct cap_learning *cl, int batt_soc_cp)
 {
 	int rc;
 
-	rc = cap_learning_process_full_data(cl, batt_soc_msb);
+	rc = cap_learning_process_full_data(cl, batt_soc_cp);
 	if (rc < 0) {
 		pr_debug("Error in processing cap learning full data, rc=%d\n",
 			rc);
@@ -555,19 +565,19 @@ static int cap_learning_done(struct cap_learning *cl, int batt_soc_msb)
 /**
  * cap_wt_learning_update -
  * @cl: Capacity learning object
- * @batt_soc_msb: Most significant byte of battery State of Charge
+ * @batt_soc_cp: Battery State of Charge in centi-percentage
  * @input_present: Indicator for input presence
  *
  * Called by cap_learning_update when weighted learning is enabled
  *
  */
-static void cap_wt_learning_update(struct cap_learning *cl, int batt_soc_msb,
+static void cap_wt_learning_update(struct cap_learning *cl, int batt_soc_cp,
 					bool input_present)
 {
 	int rc;
 
 	if (!input_present) {
-		rc = cap_learning_done(cl, batt_soc_msb);
+		rc = cap_learning_done(cl, batt_soc_cp);
 		if (rc < 0)
 			pr_debug("Error in completing capacity learning, rc=%d\n",
 				rc);
@@ -590,10 +600,11 @@ static void cap_wt_learning_update(struct cap_learning *cl, int batt_soc_msb,
  *
  */
 void cap_learning_update(struct cap_learning *cl, int batt_temp,
-			int batt_soc, int charge_status, bool charge_done,
+			int batt_soc_cp, int charge_status, bool charge_done,
 			bool input_present, bool qnovo_en)
 {
-	int rc, batt_soc_msb, batt_soc_prime;
+	int rc;
+	u32 batt_soc_prime;
 	bool prime_cc = false;
 
 	if (!cl)
@@ -608,18 +619,16 @@ void cap_learning_update(struct cap_learning *cl, int batt_temp,
 		goto out;
 	}
 
-	batt_soc_msb = (u32)batt_soc >> 24;
 	pr_debug("Charge_status: %d active: %d batt_soc: %d\n",
-		charge_status, cl->active, batt_soc_msb);
+		charge_status, cl->active, batt_soc_cp);
 
 	if (cl->active && cl->dt.cl_wt_enable)
-		cap_wt_learning_update(cl, batt_soc_msb,
-					input_present);
+		cap_wt_learning_update(cl, batt_soc_cp, input_present);
 
 	/* Initialize the starting point of learning capacity */
 	if (!cl->active) {
 		if (charge_status == POWER_SUPPLY_STATUS_CHARGING) {
-			rc = cap_learning_begin(cl, batt_soc);
+			rc = cap_learning_begin(cl, batt_soc_cp);
 			cl->active = (rc == 0);
 		} else {
 			if (charge_status == POWER_SUPPLY_STATUS_DISCHARGING ||
@@ -628,7 +637,7 @@ void cap_learning_update(struct cap_learning *cl, int batt_temp,
 		}
 	} else {
 		if (charge_done) {
-			rc = cap_learning_done(cl, batt_soc_msb);
+			rc = cap_learning_done(cl, batt_soc_cp);
 			if (rc < 0)
 				pr_err("Error in completing capacity learning, rc=%d\n",
 					rc);
@@ -640,7 +649,7 @@ void cap_learning_update(struct cap_learning *cl, int batt_temp,
 		if (charge_status == POWER_SUPPLY_STATUS_DISCHARGING &&
 				!input_present) {
 			pr_debug("Capacity learning aborted @ battery SOC %d\n",
-				 batt_soc_msb);
+				 batt_soc_cp);
 			cl->active = false;
 			cl->init_cap_uah = 0;
 			prime_cc = true;
@@ -657,7 +666,7 @@ void cap_learning_update(struct cap_learning *cl, int batt_temp,
 				 */
 			} else {
 				pr_debug("Capacity learning aborted @ battery SOC %d\n",
-					batt_soc_msb);
+					batt_soc_cp);
 				cl->active = false;
 				cl->init_cap_uah = 0;
 				prime_cc = true;
@@ -671,10 +680,13 @@ void cap_learning_update(struct cap_learning *cl, int batt_temp,
 	 */
 
 	if (prime_cc && cl->prime_cc_soc) {
+		/* pass 32-bit batt_soc to the priming logic */
 		if (charge_done)
 			batt_soc_prime = cl->cc_soc_max;
 		else
-			batt_soc_prime = batt_soc;
+			batt_soc_prime = div64_u64(
+				(uint64_t)batt_soc_cp * BATT_SOC_32BIT,
+							CENTI_FULL_SOC);
 
 		rc = cl->prime_cc_soc(cl->data, batt_soc_prime);
 		if (rc < 0)
@@ -1068,11 +1080,33 @@ static int get_time_to_full_locked(struct ttf *ttf, int *val)
 		i, soc_per_step, msoc_this_step, msoc_next_step,
 		ibatt_this_step, t_predicted_this_step, ttf_slope,
 		t_predicted_cv, t_predicted = 0, charge_type = 0, i_step,
-		float_volt_uv = 0;
+		float_volt_uv = 0, valid = 0, charge_status = 0;
 	int multiplier, curr_window = 0, pbatt_avg;
 	bool power_approx = false;
 	s64 delta_ms;
 
+	rc = ttf->get_ttf_param(ttf->data, TTF_TTE_VALID, &valid);
+	if (rc < 0) {
+		pr_err("failed to get ttf_tte_valid rc=%d\n", rc);
+		return rc;
+	}
+
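+	/* Report -1 (no estimate) when the TTF result cannot be trusted */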
+	if (!valid) {
+		*val = -1;
+		return 0;
+	}
+
+	rc = ttf->get_ttf_param(ttf->data, TTF_CHG_STATUS, &charge_status);
+	if (rc < 0) {
+		pr_err("failed to get charge-status rc=%d\n", rc);
+		return rc;
+	}
+
+	if (charge_status != POWER_SUPPLY_STATUS_CHARGING) {
+		*val = -1;
+		return 0;
+	}
+
 	rc = ttf->get_ttf_param(ttf->data, TTF_MSOC, &msoc);
 	if (rc < 0) {
 		pr_err("failed to get msoc rc=%d\n", rc);
@@ -1379,7 +1413,8 @@ static void ttf_work(struct work_struct *work)
 {
 	struct ttf *ttf = container_of(work,
 				struct ttf, ttf_work.work);
-	int rc, ibatt_now, vbatt_now, ttf_now, charge_status, ibatt_avg;
+	int rc, ibatt_now, vbatt_now, ttf_now, charge_status, ibatt_avg,
+		msoc = 0, charge_done;
 	ktime_t ktime_now;
 
 	mutex_lock(&ttf->lock);
@@ -1388,8 +1423,25 @@ static void ttf_work(struct work_struct *work)
 		pr_err("failed to get charge_status rc=%d\n", rc);
 		goto end_work;
 	}
-	if (charge_status != POWER_SUPPLY_STATUS_CHARGING &&
-			charge_status != POWER_SUPPLY_STATUS_DISCHARGING)
+
+	rc = ttf->get_ttf_param(ttf->data, TTF_CHG_DONE, &charge_done);
+	if (rc < 0) {
+		pr_err("failed to get charge_done rc=%d\n", rc);
+		goto end_work;
+	}
+
+	rc = ttf->get_ttf_param(ttf->data, TTF_MSOC, &msoc);
+	if (rc < 0) {
+		pr_err("failed to get msoc, rc=%d\n", rc);
+		goto end_work;
+	}
+	pr_debug("TTF: charge_status:%d charge_done:%d msoc:%d\n",
+			charge_status, charge_done, msoc);
+	/*
+	 * Do not schedule the TTF work when SOC is 100% or
+	 * charging has terminated.
+	 */
+	if ((msoc == 100) || charge_done)
 		goto end_work;
 
 	rc =  ttf->get_ttf_param(ttf->data, TTF_IBAT, &ibatt_now);
@@ -1469,7 +1521,30 @@ static void ttf_work(struct work_struct *work)
  */
 int ttf_get_time_to_empty(struct ttf *ttf, int *val)
 {
-	int rc, ibatt_avg, msoc, act_cap_mah, divisor;
+	int rc, ibatt_avg, msoc, act_cap_mah, divisor, valid = 0,
+		charge_status = 0;
+
+	rc = ttf->get_ttf_param(ttf->data, TTF_TTE_VALID, &valid);
+	if (rc < 0) {
+		pr_err("failed to get ttf_tte_valid rc=%d\n", rc);
+		return rc;
+	}
+
+	if (!valid) {
+		*val = -1;
+		return 0;
+	}
+
+	rc = ttf->get_ttf_param(ttf->data, TTF_CHG_STATUS, &charge_status);
+	if (rc < 0) {
+		pr_err("failed to get charge-status rc=%d\n", rc);
+		return rc;
+	}
+
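+	/* Time-to-empty is not meaningful while the battery is charging */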
+	if (charge_status == POWER_SUPPLY_STATUS_CHARGING) {
+		*val = -1;
+		return 0;
+	}
 
 	rc = ttf_circ_buf_median(&ttf->ibatt, &ibatt_avg);
 	if (rc < 0) {
@@ -1502,6 +1577,10 @@ int ttf_get_time_to_empty(struct ttf *ttf, int *val)
 	divisor = ibatt_avg * divisor / 100;
 	divisor = max(100, divisor);
 	*val = act_cap_mah * msoc * HOURS_TO_SECONDS / divisor;
+
+	pr_debug("TTF: ibatt_avg=%d msoc=%d act_cap_mah=%d TTE=%d\n",
+			ibatt_avg, msoc, act_cap_mah, *val);
+
 	return 0;
 }
 
diff --git a/drivers/power/supply/qcom/fg-alg.h b/drivers/power/supply/qcom/fg-alg.h
index 18ca69c..34442c7 100644
--- a/drivers/power/supply/qcom/fg-alg.h
+++ b/drivers/power/supply/qcom/fg-alg.h
@@ -43,6 +43,7 @@ struct cl_params {
 	int	min_cap_limit;
 	int	skew_decipct;
 	int	min_delta_batt_soc;
+	int	ibat_flt_thr_ma;
 	bool	cl_wt_enable;
 };
 
@@ -51,7 +52,7 @@ struct cap_learning {
 	int			init_cc_soc_sw;
 	int			cc_soc_max;
 	int			init_batt_soc;
-	int			init_batt_soc_msb;
+	int			init_batt_soc_cp;
 	int64_t			nom_cap_uah;
 	int64_t			init_cap_uah;
 	int64_t			final_cap_uah;
@@ -60,6 +61,7 @@ struct cap_learning {
 	bool			active;
 	struct mutex		lock;
 	struct cl_params	dt;
+	bool (*ok_to_begin)(void *data);
 	int (*get_learned_capacity)(void *data, int64_t *learned_cap_uah);
 	int (*store_learned_capacity)(void *data, int64_t learned_cap_uah);
 	int (*get_cc_soc)(void *data, int *cc_soc_sw);
@@ -85,6 +87,8 @@ enum ttf_param {
 	TTF_VFLOAT,
 	TTF_CHG_TYPE,
 	TTF_CHG_STATUS,
+	TTF_TTE_VALID,
+	TTF_CHG_DONE,
 };
 
 struct ttf_circ_buf {
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 12cc4f6..6bd4ae8 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -72,6 +72,8 @@
 #define FG_PARALLEL_EN_VOTER	"fg_parallel_en"
 #define MEM_ATTN_IRQ_VOTER	"fg_mem_attn_irq"
 
+#define DEBUG_BOARD_VOTER	"fg_debug_board"
+
 #define BUCKET_COUNT			8
 #define BUCKET_SOC_PCT			(256 / BUCKET_COUNT)
 
@@ -103,6 +105,7 @@ enum fg_debug_flag {
 	FG_BUS_READ		= BIT(6), /* Show REGMAP reads */
 	FG_CAP_LEARN		= BIT(7), /* Show capacity learning */
 	FG_TTF			= BIT(8), /* Show time to full */
+	FG_FVSS			= BIT(9), /* Show FVSS */
 };
 
 /* SRAM access */
@@ -171,6 +174,7 @@ enum fg_sram_param_id {
 	FG_SRAM_VBAT_TAU,
 	FG_SRAM_VBAT_FINAL,
 	FG_SRAM_IBAT_FINAL,
+	FG_SRAM_IBAT_FLT,
 	FG_SRAM_ESR,
 	FG_SRAM_ESR_MDL,
 	FG_SRAM_ESR_ACT,
@@ -498,6 +502,8 @@ extern int fg_decode_voltage_15b(struct fg_sram_param *sp,
 	enum fg_sram_param_id id, int val);
 extern int fg_decode_current_16b(struct fg_sram_param *sp,
 	enum fg_sram_param_id id, int val);
+extern int fg_decode_current_24b(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int val);
 extern int fg_decode_cc_soc(struct fg_sram_param *sp,
 	enum fg_sram_param_id id, int value);
 extern int fg_decode_value_16b(struct fg_sram_param *sp,
diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c
index 343e996..4afc575 100644
--- a/drivers/power/supply/qcom/fg-util.c
+++ b/drivers/power/supply/qcom/fg-util.c
@@ -47,6 +47,23 @@ int fg_decode_voltage_15b(struct fg_sram_param *sp,
 	return sp[id].value;
 }
 
+#define CURRENT_24BIT_MSB_MASK	GENMASK(27, 16)
+#define CURRENT_24BIT_LSB_MASK	GENMASK(11, 0)
+int fg_decode_current_24b(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int value)
+{
+	int msb, lsb, val;
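+	/*
+	 * The 24-bit current is stored as two 12-bit halves, each in the
+	 * lower 12 bits of a 16-bit word; stitch them back together before
+	 * sign-extending and scaling.
+	 */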
+
+	msb = value & CURRENT_24BIT_MSB_MASK;
+	lsb = value & CURRENT_24BIT_LSB_MASK;
+	val = (msb >> 4) | lsb;
+	val = sign_extend32(val, 23);
+	sp[id].value = div_s64((s64)val * sp[id].denmtr, sp[id].numrtr);
+	pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
+			sp[id].value);
+	return sp[id].value;
+}
+
 int fg_decode_current_16b(struct fg_sram_param *sp,
 				enum fg_sram_param_id id, int value)
 {
diff --git a/drivers/power/supply/qcom/hl6111r.c b/drivers/power/supply/qcom/hl6111r.c
index 6f0d297..88436d6 100644
--- a/drivers/power/supply/qcom/hl6111r.c
+++ b/drivers/power/supply/qcom/hl6111r.c
@@ -31,6 +31,7 @@ struct hl6111r {
 
 struct vout_range {
 	int min_mv;
+	int max_mv;
 	int step_mv;
 };
 
@@ -255,11 +256,11 @@ static int hl6111r_get_temp(struct hl6111r *chip, int *val)
 }
 
 static const struct vout_range hl6111r_vout_range[] = {
-	/* {Range's min value (mV), Range's step size (mV) */
-	{4940, 20},
-	{7410, 30},
-	{9880, 40},
-	{3952, 16}
+	/* {Range's min value (mV), max value (mV), step size (mV)} */
+	{4940, 10040, 20},
+	{7410, 15060, 30},
+	{9880, 20080, 40},
+	{3952, 8032, 16}
 };
 
 static int hl6111r_get_vout_target(struct hl6111r *chip, int *val)
@@ -297,6 +298,20 @@ static int hl6111r_get_vout_target(struct hl6111r *chip, int *val)
 	return rc;
 }
 
+static int hl6111r_get_chip_version(struct hl6111r *chip, int *val)
+{
+	int rc;
+	u8 id;
+
+	rc = hl6111r_read(chip, ID_REG, &id);
+	if (rc < 0)
+		return rc;
+
+	*val = id;
+
+	return 0;
+}
+
 /* Callbacks for settable properties */
 
 #define HL6111R_MIN_VOLTAGE_UV	4940000
@@ -379,9 +394,12 @@ static enum power_supply_property hl6111r_psy_props[] = {
 	POWER_SUPPLY_PROP_TEMP,
 	POWER_SUPPLY_PROP_VOLTAGE_AVG,
 	POWER_SUPPLY_PROP_CURRENT_AVG,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_STEP,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
 	POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
+	POWER_SUPPLY_PROP_CHIP_VERSION,
 };
 
 static int hl6111r_get_prop(struct power_supply *psy,
@@ -418,6 +436,17 @@ static int hl6111r_get_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_CURRENT_AVG:
 		rc = hl6111r_get_current_avg(chip, val);
 		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		/* Return only range 0's max value for now */
+		*val = (hl6111r_vout_range[0].max_mv * 1000);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_STEP:
+		/*
+		 * Using only 20 mV for now, to correspond to range 0.
+		 * Return value in uV.
+		 */
+		*val = (hl6111r_vout_range[0].step_mv * 1000);
+		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
 		*val = HL6111R_MAX_VOLTAGE_UV;
 		break;
@@ -427,6 +456,9 @@ static int hl6111r_get_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION:
 		rc = hl6111r_get_vout_target(chip, val);
 		break;
+	case POWER_SUPPLY_PROP_CHIP_VERSION:
+		rc = hl6111r_get_chip_version(chip, val);
+		break;
 	default:
 		rc = -EINVAL;
 		break;
diff --git a/drivers/power/supply/qcom/hl6111r.h b/drivers/power/supply/qcom/hl6111r.h
index cd4043e..c88cfedbb 100644
--- a/drivers/power/supply/qcom/hl6111r.h
+++ b/drivers/power/supply/qcom/hl6111r.h
@@ -19,6 +19,8 @@
 
 #define VOUT_TARGET_REG		0x0E
 
+#define ID_REG			0xA
+
 #define IOUT_LIM_SEL_REG	0x28
 #define IOUT_LIM_SEL_MASK	GENMASK(7, 3)
 #define IOUT_LIM_SHIFT		3
diff --git a/drivers/power/supply/qcom/pmic-voter.c b/drivers/power/supply/qcom/pmic-voter.c
index abcc484..838e3f7 100644
--- a/drivers/power/supply/qcom/pmic-voter.c
+++ b/drivers/power/supply/qcom/pmic-voter.c
@@ -166,7 +166,7 @@ static int get_client_id(struct votable *votable, const char *client_str)
 
 static char *get_client_str(struct votable *votable, int client_id)
 {
-	if (client_id == -EINVAL)
+	if (!votable || (client_id == -EINVAL))
 		return NULL;
 
 	return votable->client_strs[client_id];
@@ -183,6 +183,38 @@ void unlock_votable(struct votable *votable)
 }
 
 /**
+ * is_override_vote_enabled() -
+ * is_override_vote_enabled_locked() -
+ *		The unlocked and locked variants of getting whether an
+ *		override vote is enabled.
+ * @votable:	the votable object
+ *
+ * Returns:
+ *	True if an override vote is in effect; false otherwise.
+ */
+bool is_override_vote_enabled_locked(struct votable *votable)
+{
+	if (!votable)
+		return false;
+
+	return votable->override_result != -EINVAL;
+}
+
+bool is_override_vote_enabled(struct votable *votable)
+{
+	bool enable;
+
+	if (!votable)
+		return false;
+
+	lock_votable(votable);
+	enable = is_override_vote_enabled_locked(votable);
+	unlock_votable(votable);
+
+	return enable;
+}
+
+/**
  * is_client_vote_enabled() -
  * is_client_vote_enabled_locked() -
  *		The unlocked and locked variants of getting whether a client's
@@ -196,8 +228,13 @@ void unlock_votable(struct votable *votable)
 bool is_client_vote_enabled_locked(struct votable *votable,
 							const char *client_str)
 {
-	int client_id = get_client_id(votable, client_str);
 
+	int client_id;
+
+	if (!votable || !client_str)
+		return false;
+
+	client_id = get_client_id(votable, client_str);
 	if (client_id < 0)
 		return false;
 
@@ -208,6 +245,9 @@ bool is_client_vote_enabled(struct votable *votable, const char *client_str)
 {
 	bool enabled;
 
+	if (!votable || !client_str)
+		return false;
+
 	lock_votable(votable);
 	enabled = is_client_vote_enabled_locked(votable, client_str);
 	unlock_votable(votable);
@@ -228,8 +268,12 @@ bool is_client_vote_enabled(struct votable *votable, const char *client_str)
  */
 int get_client_vote_locked(struct votable *votable, const char *client_str)
 {
-	int client_id = get_client_id(votable, client_str);
+	int client_id;
 
+	if (!votable || !client_str)
+		return -EINVAL;
+
+	client_id = get_client_id(votable, client_str);
 	if (client_id < 0)
 		return -EINVAL;
 
@@ -244,6 +288,9 @@ int get_client_vote(struct votable *votable, const char *client_str)
 {
 	int value;
 
+	if (!votable || !client_str)
+		return -EINVAL;
+
 	lock_votable(votable);
 	value = get_client_vote_locked(votable, client_str);
 	unlock_votable(votable);
@@ -269,6 +316,9 @@ int get_client_vote(struct votable *votable, const char *client_str)
  */
 int get_effective_result_locked(struct votable *votable)
 {
+	if (!votable)
+		return -EINVAL;
+
 	if (votable->force_active)
 		return votable->force_val;
 
@@ -282,6 +332,9 @@ int get_effective_result(struct votable *votable)
 {
 	int value;
 
+	if (!votable)
+		return -EINVAL;
+
 	lock_votable(votable);
 	value = get_effective_result_locked(votable);
 	unlock_votable(votable);
@@ -308,6 +361,9 @@ int get_effective_result(struct votable *votable)
  */
 const char *get_effective_client_locked(struct votable *votable)
 {
+	if (!votable)
+		return NULL;
+
 	if (votable->force_active)
 		return DEBUG_FORCE_CLIENT;
 
@@ -321,6 +377,9 @@ const char *get_effective_client(struct votable *votable)
 {
 	const char *client_str;
 
+	if (!votable)
+		return NULL;
+
 	lock_votable(votable);
 	client_str = get_effective_client_locked(votable);
 	unlock_votable(votable);
@@ -358,6 +417,9 @@ int vote(struct votable *votable, const char *client_str, bool enabled, int val)
 	int rc = 0;
 	bool similar_vote = false;
 
+	if (!votable || !client_str)
+		return -EINVAL;
+
 	lock_votable(votable);
 
 	client_id = get_client_id(votable, client_str);
@@ -463,6 +525,9 @@ int vote_override(struct votable *votable, const char *override_client,
 {
 	int rc = 0;
 
+	if (!votable || !override_client)
+		return -EINVAL;
+
 	lock_votable(votable);
 	if (votable->force_active) {
 		votable->override_result = enabled ? val : -EINVAL;
@@ -493,6 +558,9 @@ int rerun_election(struct votable *votable)
 	int rc = 0;
 	int effective_result;
 
+	if (!votable)
+		return -EINVAL;
+
 	lock_votable(votable);
 	effective_result = get_effective_result_locked(votable);
 	if (votable->callback)
@@ -510,6 +578,9 @@ struct votable *find_votable(const char *name)
 	struct votable *v;
 	bool found = false;
 
+	if (!name)
+		return NULL;
+
 	spin_lock_irqsave(&votable_list_slock, flags);
 	if (list_empty(&votable_list))
 		goto out;
@@ -642,6 +713,9 @@ struct votable *create_votable(const char *name,
 	struct votable *votable;
 	unsigned long flags;
 
+	if (!name)
+		return ERR_PTR(-EINVAL);
+
 	votable = find_votable(name);
 	if (votable)
 		return ERR_PTR(-EEXIST);
diff --git a/drivers/power/supply/qcom/qg-battery-profile.c b/drivers/power/supply/qcom/qg-battery-profile.c
index 928fb6e..b670537 100644
--- a/drivers/power/supply/qcom/qg-battery-profile.c
+++ b/drivers/power/supply/qcom/qg-battery-profile.c
@@ -57,6 +57,8 @@ static struct tables table[] = {
 
 static struct qg_battery_data *the_battery;
 
+static void qg_battery_profile_free(void);
+
 static int qg_battery_data_open(struct inode *inode, struct file *file)
 {
 	struct qg_battery_data *battery = container_of(inode->i_cdev,
@@ -427,42 +429,56 @@ int qg_batterydata_init(struct device_node *profile_node)
 	int rc = 0;
 	struct qg_battery_data *battery;
 
-	battery = kzalloc(sizeof(*battery), GFP_KERNEL);
-	if (!battery)
-		return -ENOMEM;
+	/*
+	 * If a battery profile is already initialized, free the existing
+	 * profile data, then allocate and load the new profile. This is
+	 * required for multi-profile load support.
+	 */
+	if (the_battery) {
+		battery = the_battery;
+		battery->profile_node = NULL;
+		qg_battery_profile_free();
+	} else {
+		battery = kzalloc(sizeof(*battery), GFP_KERNEL);
+		if (!battery)
+			return -ENOMEM;
+		/* char device to access battery-profile data */
+		rc = alloc_chrdev_region(&battery->dev_no, 0, 1,
+							"qg_battery");
+		if (rc < 0) {
+			pr_err("Failed to allocate chrdev rc=%d\n", rc);
+			goto free_battery;
+		}
+
+		cdev_init(&battery->battery_cdev, &qg_battery_data_fops);
+		rc = cdev_add(&battery->battery_cdev,
+						battery->dev_no, 1);
+		if (rc) {
+			pr_err("Failed to add battery_cdev rc=%d\n", rc);
+			goto unregister_chrdev;
+		}
+
+		battery->battery_class = class_create(THIS_MODULE,
+							"qg_battery");
+		if (IS_ERR_OR_NULL(battery->battery_class)) {
+			pr_err("Failed to create qg-battery class\n");
+			rc = -ENODEV;
+			goto delete_cdev;
+		}
+
+		battery->battery_device = device_create(
+						battery->battery_class,
+						NULL, battery->dev_no,
+						NULL, "qg_battery");
+		if (IS_ERR_OR_NULL(battery->battery_device)) {
+			pr_err("Failed to create battery_device device\n");
+			rc = -ENODEV;
+			goto destroy_class;
+		}
+		the_battery = battery;
+	}
 
 	battery->profile_node = profile_node;
-
-	/* char device to access battery-profile data */
-	rc = alloc_chrdev_region(&battery->dev_no, 0, 1, "qg_battery");
-	if (rc < 0) {
-		pr_err("Failed to allocate chrdev rc=%d\n", rc);
-		goto free_battery;
-	}
-
-	cdev_init(&battery->battery_cdev, &qg_battery_data_fops);
-	rc = cdev_add(&battery->battery_cdev, battery->dev_no, 1);
-	if (rc) {
-		pr_err("Failed to add battery_cdev rc=%d\n", rc);
-		goto unregister_chrdev;
-	}
-
-	battery->battery_class = class_create(THIS_MODULE, "qg_battery");
-	if (IS_ERR_OR_NULL(battery->battery_class)) {
-		pr_err("Failed to create qg-battery class\n");
-		rc = -ENODEV;
-		goto delete_cdev;
-	}
-
-	battery->battery_device = device_create(battery->battery_class,
-					NULL, battery->dev_no,
-					NULL, "qg_battery");
-	if (IS_ERR_OR_NULL(battery->battery_device)) {
-		pr_err("Failed to create battery_device device\n");
-		rc = -ENODEV;
-		goto delete_cdev;
-	}
-
 	/* parse the battery profile */
 	rc = qg_parse_battery_profile(battery);
 	if (rc < 0) {
@@ -470,14 +486,14 @@ int qg_batterydata_init(struct device_node *profile_node)
 		goto destroy_device;
 	}
 
-	the_battery = battery;
-
-	pr_info("QG Battery-profile loaded, '/dev/qg_battery' created!\n");
+	pr_info("QG Battery-profile loaded\n");
 
 	return 0;
 
 destroy_device:
 	device_destroy(battery->battery_class, battery->dev_no);
+destroy_class:
+	class_destroy(battery->battery_class);
 delete_cdev:
 	cdev_del(&battery->battery_cdev);
 unregister_chrdev:
@@ -487,27 +503,32 @@ int qg_batterydata_init(struct device_node *profile_node)
 	return rc;
 }
 
-void qg_batterydata_exit(void)
+static void qg_battery_profile_free(void)
 {
 	int i, j;
 
+	/* delete all the battery profile memory */
+	for (i = 0; i < TABLE_MAX; i++) {
+		kfree(the_battery->profile[i].name);
+		kfree(the_battery->profile[i].row_entries);
+		kfree(the_battery->profile[i].col_entries);
+		for (j = 0; j < the_battery->profile[i].rows; j++) {
+			if (the_battery->profile[i].data)
+				kfree(the_battery->profile[i].data[j]);
+		}
+		kfree(the_battery->profile[i].data);
+	}
+}
+
+void qg_batterydata_exit(void)
+{
 	if (the_battery) {
 		/* unregister the device node */
 		device_destroy(the_battery->battery_class, the_battery->dev_no);
+		class_destroy(the_battery->battery_class);
 		cdev_del(&the_battery->battery_cdev);
 		unregister_chrdev_region(the_battery->dev_no, 1);
-
-		/* delete all the battery profile memory */
-		for (i = 0; i < TABLE_MAX; i++) {
-			kfree(the_battery->profile[i].name);
-			kfree(the_battery->profile[i].row_entries);
-			kfree(the_battery->profile[i].col_entries);
-			for (j = 0; j < the_battery->profile[i].rows; j++) {
-				if (the_battery->profile[i].data)
-					kfree(the_battery->profile[i].data[j]);
-			}
-			kfree(the_battery->profile[i].data);
-		}
+		qg_battery_profile_free();
 	}
 
 	kfree(the_battery);
diff --git a/drivers/power/supply/qcom/qg-core.h b/drivers/power/supply/qcom/qg-core.h
index a0c07de..f8e4881 100644
--- a/drivers/power/supply/qcom/qg-core.h
+++ b/drivers/power/supply/qcom/qg-core.h
@@ -58,6 +58,7 @@ struct qg_dt {
 	int			min_sleep_time_secs;
 	int			sys_min_volt_mv;
 	int			fvss_vbat_mv;
+	int			tcss_entry_soc;
 	bool			hold_soc_while_full;
 	bool			linearize_soc;
 	bool			cl_disable;
@@ -69,6 +70,8 @@ struct qg_dt {
 	bool			qg_sleep_config;
 	bool			qg_fast_chg_cfg;
 	bool			fvss_enable;
+	bool			multi_profile_load;
+	bool			tcss_enable;
 };
 
 struct qg_esr_data {
@@ -85,10 +88,12 @@ struct qpnp_qg {
 	struct pmic_revid_data	*pmic_rev_id;
 	struct regmap		*regmap;
 	struct qpnp_vadc_chip	*vadc_dev;
+	struct soh_profile      *sp;
 	struct power_supply	*qg_psy;
 	struct class		*qg_class;
 	struct device		*qg_device;
 	struct cdev		qg_cdev;
+	struct device_node      *batt_node;
 	struct dentry		*dfs_root;
 	dev_t			dev_no;
 	struct work_struct	udata_work;
@@ -132,6 +137,7 @@ struct qpnp_qg {
 	bool			charge_full;
 	bool			force_soc;
 	bool			fvss_active;
+	bool			tcss_active;
 	int			charge_status;
 	int			charge_type;
 	int			chg_iterm_ma;
@@ -142,6 +148,11 @@ struct qpnp_qg {
 	int			soc_reporting_ready;
 	int			last_fifo_v_uv;
 	int			last_fifo_i_ua;
+	int			prev_fifo_i_ua;
+	int			soc_tcss_entry;
+	int			ibat_tcss_entry;
+	int			soc_tcss;
+	int			tcss_entry_count;
 	u32			fifo_done_count;
 	u32			wa_flags;
 	u32			seq_no;
@@ -170,6 +181,7 @@ struct qpnp_qg {
 	int			sys_soc;
 	int			last_adj_ssoc;
 	int			recharge_soc;
+	int			batt_age_level;
 	struct alarm		alarm_timer;
 	u32			sdam_data[SDAM_MAX];
 
diff --git a/drivers/power/supply/qcom/qg-reg.h b/drivers/power/supply/qcom/qg-reg.h
index 6e001de..0af5993 100644
--- a/drivers/power/supply/qcom/qg-reg.h
+++ b/drivers/power/supply/qcom/qg-reg.h
@@ -121,6 +121,7 @@
 #define QG_SDAM_ESR_DISCHARGE_DELTA_OFFSET	0x6E /* 4-byte 0x6E-0x71 */
 #define QG_SDAM_ESR_CHARGE_SF_OFFSET		0x72 /* 2-byte 0x72-0x73 */
 #define QG_SDAM_ESR_DISCHARGE_SF_OFFSET		0x74 /* 2-byte 0x74-0x75 */
+#define QG_SDAM_BATT_AGE_LEVEL_OFFSET		0x76 /* 1-byte 0x76 */
 #define QG_SDAM_MAGIC_OFFSET			0x80 /* 4-byte 0x80-0x83 */
 #define QG_SDAM_MAX_OFFSET			0xA4
 
diff --git a/drivers/power/supply/qcom/qg-sdam.c b/drivers/power/supply/qcom/qg-sdam.c
index e641d11..aa357e5 100644
--- a/drivers/power/supply/qcom/qg-sdam.c
+++ b/drivers/power/supply/qcom/qg-sdam.c
@@ -81,6 +81,11 @@ static struct qg_sdam_info sdam_info[] = {
 		.offset = QG_SDAM_ESR_DISCHARGE_SF_OFFSET,
 		.length = 2,
 	},
+	[SDAM_BATT_AGE_LEVEL] = {
+		.name   = "SDAM_BATT_AGE_LEVEL_OFFSET",
+		.offset = QG_SDAM_BATT_AGE_LEVEL_OFFSET,
+		.length = 1,
+	},
 	[SDAM_MAGIC] = {
 		.name	= "SDAM_MAGIC_OFFSET",
 		.offset = QG_SDAM_MAGIC_OFFSET,
diff --git a/drivers/power/supply/qcom/qg-sdam.h b/drivers/power/supply/qcom/qg-sdam.h
index b0e48ed..32ce6f8 100644
--- a/drivers/power/supply/qcom/qg-sdam.h
+++ b/drivers/power/supply/qcom/qg-sdam.h
@@ -24,6 +24,7 @@ enum qg_sdam_param {
 	SDAM_ESR_CHARGE_SF,
 	SDAM_ESR_DISCHARGE_SF,
 	SDAM_MAGIC,
+	SDAM_BATT_AGE_LEVEL,
 	SDAM_MAX,
 };
 
diff --git a/drivers/power/supply/qcom/qg-soc.c b/drivers/power/supply/qcom/qg-soc.c
index 3ecd32b..8454ff6 100644
--- a/drivers/power/supply/qcom/qg-soc.c
+++ b/drivers/power/supply/qcom/qg-soc.c
@@ -171,6 +171,103 @@ static int qg_process_fvss_soc(struct qpnp_qg *chip, int sys_soc)
 	return sys_soc;
 }
 
+#define IBAT_HYST_PC			10
+#define TCSS_ENTRY_COUNT		2
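+/*
+ * TCSS: near full charge, scale SOC by interpolating between the entry
+ * SOC (at the entry current) and 100% (at the termination current),
+ * then blend that current-based SOC with sys_soc using a SOC-dependent
+ * weight.
+ */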
+static int qg_process_tcss_soc(struct qpnp_qg *chip, int sys_soc)
+{
+	int rc, ibatt_diff = 0, ibat_inc_hyst = 0;
+	int qg_iterm_ua = (-1 * chip->dt.iterm_ma * 1000);
+	int soc_ibat, wt_ibat, wt_sys;
+	union power_supply_propval prop = {0, };
+
+	if (!chip->dt.tcss_enable)
+		goto exit_soc_scale;
+
+	if (chip->sys_soc < (chip->dt.tcss_entry_soc * 100))
+		goto exit_soc_scale;
+
+	if (chip->sys_soc >= QG_MAX_SOC && chip->soc_tcss >= QG_MAX_SOC)
+		goto exit_soc_scale;
+
+	rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_HEALTH, &prop);
+	if (!rc && (prop.intval == POWER_SUPPLY_HEALTH_COOL ||
+			prop.intval == POWER_SUPPLY_HEALTH_WARM))
+		goto exit_soc_scale;
+
+	if (chip->last_fifo_i_ua >= 0)
+		goto exit_soc_scale;
+	else if (++chip->tcss_entry_count < TCSS_ENTRY_COUNT)
+		goto skip_entry_count;
+
+	if (!chip->tcss_active) {
+		chip->soc_tcss = sys_soc;
+		chip->soc_tcss_entry = sys_soc;
+		chip->ibat_tcss_entry = min(chip->last_fifo_i_ua, qg_iterm_ua);
+		chip->prev_fifo_i_ua = chip->last_fifo_i_ua;
+		chip->tcss_active = true;
+	}
+
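+	/*
+	 * If the input collapses (current limited), quit TCSS but never
+	 * report a SOC lower than the already-scaled value.
+	 */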
+	rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED, &prop);
+	if (!rc && prop.intval) {
+		qg_dbg(chip, QG_DEBUG_SOC,
+			"Input limited sys_soc=%d soc_tcss=%d\n",
+					sys_soc, chip->soc_tcss);
+		if (chip->soc_tcss > sys_soc)
+			sys_soc = chip->soc_tcss;
+		goto exit_soc_scale;
+	}
+
+	ibatt_diff = chip->last_fifo_i_ua - chip->prev_fifo_i_ua;
+	if (ibatt_diff > 0) {
+		/*
+		 * if the battery charge current has suddendly dropped, allow it
+		 * to decrease only by a small fraction to avoid a SOC jump.
+		 */
+		ibat_inc_hyst = (chip->prev_fifo_i_ua * IBAT_HYST_PC) / 100;
+		if (ibatt_diff > abs(ibat_inc_hyst))
+			chip->prev_fifo_i_ua -= ibat_inc_hyst;
+		else
+			chip->prev_fifo_i_ua = chip->last_fifo_i_ua;
+	}
+
+	chip->prev_fifo_i_ua = min(chip->prev_fifo_i_ua, qg_iterm_ua);
+	soc_ibat = qg_linear_interpolate(chip->soc_tcss_entry,
+					chip->ibat_tcss_entry,
+					QG_MAX_SOC,
+					qg_iterm_ua,
+					chip->prev_fifo_i_ua);
+	soc_ibat = CAP(QG_MIN_SOC, QG_MAX_SOC, soc_ibat);
+
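+	/*
+	 * Weight the current-based SOC more heavily as charging nears
+	 * full: wt_ibat ramps from ~0 at the entry SOC to 10000 at 100%.
+	 */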
+	wt_ibat = qg_linear_interpolate(1, chip->soc_tcss_entry,
+					10000, 10000, soc_ibat);
+	wt_ibat = CAP(QG_MIN_SOC, QG_MAX_SOC, wt_ibat);
+	wt_sys = 10000 - wt_ibat;
+
+	chip->soc_tcss = DIV_ROUND_CLOSEST((soc_ibat * wt_ibat) +
+					(wt_sys * sys_soc), 10000);
+	chip->soc_tcss = CAP(QG_MIN_SOC, QG_MAX_SOC, chip->soc_tcss);
+
+	qg_dbg(chip, QG_DEBUG_SOC,
+		"TCSS: fifo_i=%d prev_fifo_i=%d ibatt_tcss_entry=%d qg_term=%d soc_tcss_entry=%d sys_soc=%d soc_ibat=%d wt_ibat=%d wt_sys=%d soc_tcss=%d\n",
+			chip->last_fifo_i_ua, chip->prev_fifo_i_ua,
+			chip->ibat_tcss_entry, qg_iterm_ua,
+			chip->soc_tcss_entry, sys_soc, soc_ibat,
+			wt_ibat, wt_sys, chip->soc_tcss);
+
+	return chip->soc_tcss;
+
+exit_soc_scale:
+	chip->tcss_entry_count = 0;
+skip_entry_count:
+	chip->tcss_active = false;
+	qg_dbg(chip, QG_DEBUG_SOC, "TCSS: Quit - enabled=%d sys_soc=%d tcss_entry_count=%d fifo_i_ua=%d\n",
+			chip->dt.tcss_enable, sys_soc, chip->tcss_entry_count,
+			chip->last_fifo_i_ua);
+	return sys_soc;
+}
+
 int qg_adjust_sys_soc(struct qpnp_qg *chip)
 {
 	int soc, vbat_uv, rc;
@@ -178,6 +275,8 @@ int qg_adjust_sys_soc(struct qpnp_qg *chip)
 
 	chip->sys_soc = CAP(QG_MIN_SOC, QG_MAX_SOC, chip->sys_soc);
 
+	chip->sys_soc = qg_process_tcss_soc(chip, chip->sys_soc);
+
 	if (chip->sys_soc <= 50) { /* 0.5% */
 		/* Hold SOC at 1% if VBAT has not dropped below cutoff */
 		rc = qg_get_battery_voltage(chip, &vbat_uv);
@@ -306,7 +405,7 @@ static bool maint_soc_timeout(struct qpnp_qg *chip)
 
 static void update_msoc(struct qpnp_qg *chip)
 {
-	int rc = 0, sdam_soc, batt_temp = 0,  batt_soc_32bit = 0;
+	int rc = 0, sdam_soc, batt_temp = 0;
 	bool input_present = is_input_present(chip);
 
 	if (chip->catch_up_soc > chip->msoc) {
@@ -345,11 +444,8 @@ static void update_msoc(struct qpnp_qg *chip)
 		rc = qg_get_battery_temp(chip, &batt_temp);
 		if (rc < 0) {
 			pr_err("Failed to read BATT_TEMP rc=%d\n", rc);
-		} else {
-			batt_soc_32bit = div64_u64(
-						chip->batt_soc * BATT_SOC_32BIT,
-						QG_SOC_FULL);
-			cap_learning_update(chip->cl, batt_temp, batt_soc_32bit,
+		} else if (chip->batt_soc >= 0) {
+			cap_learning_update(chip->cl, batt_temp, chip->batt_soc,
 					chip->charge_status, chip->charge_done,
 					input_present, false);
 		}
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c
index a868f1d..c619b9a 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen4.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c
@@ -180,6 +180,8 @@
 #define RSLOW_SCALE_FN_CHG_V2_OFFSET	0
 #define ACT_BATT_CAP_v2_WORD		287
 #define ACT_BATT_CAP_v2_OFFSET		0
+#define IBAT_FLT_WORD			322
+#define IBAT_FLT_OFFSET			0
 #define VBAT_FLT_WORD			326
 #define VBAT_FLT_OFFSET			0
 #define RSLOW_v2_WORD			371
@@ -281,6 +283,7 @@ struct fg_gen4_chip {
 	struct work_struct	pl_current_en_work;
 	struct completion	mem_attn;
 	struct mutex		soc_scale_lock;
+	struct mutex		esr_calib_lock;
 	ktime_t			last_restart_time;
 	char			batt_profile[PROFILE_LEN];
 	enum slope_limit_status	slope_limit_sts;
@@ -296,6 +299,7 @@ struct fg_gen4_chip {
 	int			soc_scale_msoc;
 	int			prev_soc_scale_msoc;
 	int			soc_scale_slope;
+	int			msoc_actual;
 	int			vbatt_avg;
 	int			vbatt_now;
 	int			vbatt_res;
@@ -488,6 +492,8 @@ static struct fg_sram_param pm8150b_v2_sram_params[] = {
 		0, NULL, fg_decode_voltage_15b),
 	PARAM(IBAT_FINAL, IBAT_FINAL_WORD, IBAT_FINAL_OFFSET, 2, 1000, 488282,
 		0, NULL, fg_decode_current_16b),
+	PARAM(IBAT_FLT, IBAT_FLT_WORD, IBAT_FLT_OFFSET, 4, 10000, 19073, 0,
+		NULL, fg_decode_current_24b),
 	PARAM(ESR, ESR_WORD, ESR_OFFSET, 2, 1000, 244141, 0, fg_encode_default,
 		fg_decode_value_16b),
 	PARAM(ESR_MDL, ESR_MDL_WORD, ESR_MDL_OFFSET, 2, 1000, 244141, 0,
@@ -1122,7 +1128,7 @@ static int fg_gen4_get_prop_soc_scale(struct fg_gen4_chip *chip)
 	chip->vbatt_now = DIV_ROUND_CLOSEST(chip->vbatt_now, 1000);
 	chip->vbatt_avg = DIV_ROUND_CLOSEST(chip->vbatt_avg, 1000);
 	chip->vbatt_res = chip->vbatt_avg - chip->dt.cutoff_volt_mv;
-	pr_debug("FVSS: Vbatt now=%d Vbatt avg=%d Vbatt res=%d\n",
+	fg_dbg(fg, FG_FVSS, "Vbatt now=%d Vbatt avg=%d Vbatt res=%d\n",
 		chip->vbatt_now, chip->vbatt_avg, chip->vbatt_res);
 
 	return rc;
@@ -1196,10 +1202,13 @@ static int fg_gen4_get_ttf_param(void *data, enum ttf_param param, int *val)
 		return -ENODEV;
 
 	fg = &chip->fg;
-	if (fg->battery_missing)
-		return -EPERM;
 
 	switch (param) {
+	case TTF_TTE_VALID:
+		*val = 1;
+		if (fg->battery_missing || is_debug_batt_id(fg))
+			*val = 0;
+		break;
 	case TTF_MSOC:
 		rc = fg_gen4_get_prop_capacity(fg, val);
 		break;
@@ -1264,6 +1273,9 @@ static int fg_gen4_get_ttf_param(void *data, enum ttf_param param, int *val)
 	case TTF_CHG_STATUS:
 		*val = fg->charge_status;
 		break;
+	case TTF_CHG_DONE:
+		*val = fg->charge_done;
+		break;
 	default:
 		pr_err_ratelimited("Unsupported parameter %d\n", param);
 		rc = -EINVAL;
@@ -1346,6 +1358,39 @@ static int fg_gen4_prime_cc_soc_sw(void *data, u32 batt_soc)
 	return rc;
 }
 
+static bool fg_gen4_cl_ok_to_begin(void *data)
+{
+	struct fg_gen4_chip *chip = data;
+	struct fg_dev *fg;
+	int rc, val = 0;
+
+	if (!chip)
+		return false;
+
+	fg = &chip->fg;
+
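+	/* A non-positive threshold disables the filtered-current check */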
+	if (chip->cl->dt.ibat_flt_thr_ma <= 0)
+		return true;
+
+	rc = fg_get_sram_prop(fg, FG_SRAM_IBAT_FLT, &val);
+	if (rc < 0) {
+		pr_err("Failed to get filtered battery current, rc = %d\n",
+			rc);
+		return true;
+	}
+
+	/* convert to milli-units */
+	val = DIV_ROUND_CLOSEST(val, 1000);
+
+	pr_debug("IBAT_FLT thr: %d val: %d\n", chip->cl->dt.ibat_flt_thr_ma,
+		val);
+
+	if (abs(val) > chip->cl->dt.ibat_flt_thr_ma)
+		return false;
+
+	return true;
+}
+
 static int fg_gen4_get_cc_soc_sw(void *data, int *cc_soc_sw)
 {
 	struct fg_gen4_chip *chip = data;
@@ -1584,7 +1629,7 @@ static int fg_gen4_set_ki_coeff_dischg(struct fg_dev *fg, int ki_coeff_low,
 	return 0;
 }
 
-#define KI_COEFF_LOW_DISCHG_DEFAULT	122
+#define KI_COEFF_LOW_DISCHG_DEFAULT	367
 #define KI_COEFF_MED_DISCHG_DEFAULT	62
 #define KI_COEFF_HI_DISCHG_DEFAULT	0
 static int fg_gen4_adjust_ki_coeff_dischg(struct fg_dev *fg)
@@ -2356,7 +2401,31 @@ static void fg_gen4_post_profile_load(struct fg_gen4_chip *chip)
 {
 	struct fg_dev *fg = &chip->fg;
 	int rc, act_cap_mah;
-	u8 buf[16];
+	u8 buf[16] = {0};
+
+	if (chip->dt.multi_profile_load &&
+		chip->batt_age_level != chip->last_batt_age_level) {
+
+		/* Keep ESR fast calib config disabled */
+		fg_gen4_esr_fast_calib_config(chip, false);
+		chip->esr_fast_calib = false;
+
+		mutex_lock(&chip->esr_calib_lock);
+
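+		/* Clear ESR deltas learned for the previous age level */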
+		rc = fg_sram_write(fg, ESR_DELTA_DISCHG_WORD,
+				ESR_DELTA_DISCHG_OFFSET, buf, 2,
+				FG_IMA_DEFAULT);
+		if (rc < 0)
+			pr_err("Error in writing ESR_DELTA_DISCHG, rc=%d\n",
+				rc);
+
+		rc = fg_sram_write(fg, ESR_DELTA_CHG_WORD, ESR_DELTA_CHG_OFFSET,
+				buf, 2, FG_IMA_DEFAULT);
+		if (rc < 0)
+			pr_err("Error in writing ESR_DELTA_CHG, rc=%d\n", rc);
+
+		mutex_unlock(&chip->esr_calib_lock);
+	}
 
 	/* If SDAM cookie is not set, read back from SRAM and load it in SDAM */
 	if (chip->fg_nvmem && !is_sdam_cookie_set(chip)) {
@@ -2402,7 +2471,7 @@ static void profile_load_work(struct work_struct *work)
 				profile_load_work.work);
 	struct fg_gen4_chip *chip = container_of(fg,
 				struct fg_gen4_chip, fg);
-	int64_t nom_cap_uah;
+	int64_t nom_cap_uah, learned_cap_uah = 0;
 	u8 val, buf[2];
 	int rc;
 
@@ -2436,6 +2505,16 @@ static void profile_load_work(struct work_struct *work)
 
 	fg_dbg(fg, FG_STATUS, "profile loading started\n");
 
+	if (chip->dt.multi_profile_load &&
+		chip->batt_age_level != chip->last_batt_age_level) {
+		rc = fg_gen4_get_learned_capacity(chip, &learned_cap_uah);
+		if (rc < 0)
+			pr_err("Error in getting learned capacity rc=%d\n", rc);
+		else
+			fg_dbg(fg, FG_STATUS, "learned capacity: %lld uAh\n",
+				learned_cap_uah);
+	}
+
 	rc = qpnp_fg_gen4_load_profile(chip);
 	if (rc < 0)
 		goto out;
@@ -2446,21 +2525,26 @@ static void profile_load_work(struct work_struct *work)
 	if (fg->wa_flags & PM8150B_V1_DMA_WA)
 		msleep(1000);
 
-	/*
-	 * Whenever battery profile is loaded, read nominal capacity and write
-	 * it to actual (or aged) capacity as it is outside the profile region
-	 * and might contain OTP values.
-	 */
-	rc = fg_sram_read(fg, NOM_CAP_WORD, NOM_CAP_OFFSET, buf, 2,
-			FG_IMA_DEFAULT);
-	if (rc < 0) {
-		pr_err("Error in reading %04x[%d] rc=%d\n", NOM_CAP_WORD,
-			NOM_CAP_OFFSET, rc);
-	} else {
-		nom_cap_uah = (buf[0] | buf[1] << 8) * 1000;
-		rc = fg_gen4_store_learned_capacity(chip, nom_cap_uah);
-		if (rc < 0)
-			pr_err("Error in writing to ACT_BATT_CAP rc=%d\n", rc);
+	if (learned_cap_uah == 0) {
+		/*
+		 * Whenever battery profile is loaded, read nominal capacity and
+		 * write it to actual (or aged) capacity as it is outside the
+		 * profile region and might contain OTP values. learned_cap_uah
+		 * would have non-zero value if multiple profile loading is
+		 * enabled and a profile got loaded already.
+		 */
+		rc = fg_sram_read(fg, NOM_CAP_WORD, NOM_CAP_OFFSET, buf, 2,
+				FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in reading %04x[%d] rc=%d\n",
+				NOM_CAP_WORD, NOM_CAP_OFFSET, rc);
+		} else {
+			nom_cap_uah = (buf[0] | buf[1] << 8) * 1000;
+			rc = fg_gen4_store_learned_capacity(chip, nom_cap_uah);
+			if (rc < 0)
+				pr_err("Error in writing to ACT_BATT_CAP rc=%d\n",
+					rc);
+		}
 	}
 done:
 	rc = fg_sram_read(fg, PROFILE_INTEGRITY_WORD,
@@ -3215,7 +3299,7 @@ static int fg_gen4_enter_soc_scale(struct fg_gen4_chip *chip)
 	}
 
 	chip->soc_scale_mode = true;
-	pr_debug("FVSS: Enter FVSS mode, SOC=%d slope=%d timer=%d\n", soc,
+	fg_dbg(fg, FG_FVSS, "Enter FVSS mode, SOC=%d slope=%d timer=%d\n", soc,
 		chip->soc_scale_slope, chip->scale_timer);
 	alarm_start_relative(&chip->soc_scale_alarm_timer,
 				ms_to_ktime(chip->scale_timer));
@@ -3245,21 +3329,26 @@ static void fg_gen4_write_scale_msoc(struct fg_gen4_chip *chip)
 
 static void fg_gen4_exit_soc_scale(struct fg_gen4_chip *chip)
 {
+	struct fg_dev *fg = &chip->fg;
+
 	if (chip->soc_scale_mode) {
 		alarm_cancel(&chip->soc_scale_alarm_timer);
-		cancel_work_sync(&chip->soc_scale_work);
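+		/* Avoid deadlock when exit is invoked from soc_scale_work */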
+		if (work_busy(&chip->soc_scale_work) != WORK_BUSY_RUNNING)
+			cancel_work_sync(&chip->soc_scale_work);
+
 		/* While exiting soc_scale_mode, Update MSOC register */
 		fg_gen4_write_scale_msoc(chip);
 	}
 
 	chip->soc_scale_mode = false;
-	pr_debug("FVSS: Exit FVSS mode\n");
+	fg_dbg(fg, FG_FVSS, "Exit FVSS mode, work_status=%d\n",
+				work_busy(&chip->soc_scale_work));
 }
 
 static int fg_gen4_validate_soc_scale_mode(struct fg_gen4_chip *chip)
 {
 	struct fg_dev *fg = &chip->fg;
-	int rc, msoc_actual;
+	int rc;
 
 	if (!chip->dt.soc_scale_mode)
 		return 0;
@@ -3270,7 +3359,7 @@ static int fg_gen4_validate_soc_scale_mode(struct fg_gen4_chip *chip)
 		goto fail_soc_scale;
 	}
 
-	rc = fg_get_msoc(fg, &msoc_actual);
+	rc = fg_get_msoc(fg, &chip->msoc_actual);
 	if (rc < 0) {
 		pr_err("Failed to get msoc rc=%d\n", rc);
 		goto fail_soc_scale;
@@ -3289,7 +3378,7 @@ static int fg_gen4_validate_soc_scale_mode(struct fg_gen4_chip *chip)
 		 * Stay in SOC scale mode till H/W SOC catch scaled SOC
 		 * while charging.
 		 */
-		if (msoc_actual >= chip->soc_scale_msoc)
+		if (chip->msoc_actual >= chip->soc_scale_msoc)
 			fg_gen4_exit_soc_scale(chip);
 	}
 
@@ -3589,12 +3678,14 @@ static irqreturn_t fg_delta_bsoc_irq_handler(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+#define CENTI_FULL_SOC		10000
 static irqreturn_t fg_delta_msoc_irq_handler(int irq, void *data)
 {
 	struct fg_dev *fg = data;
 	struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg);
 	int rc, batt_soc, batt_temp, msoc_raw;
 	bool input_present = is_input_present(fg);
+	u32 batt_soc_cp;
 
 	rc = fg_get_msoc_raw(fg, &msoc_raw);
 	if (!rc)
@@ -3614,10 +3705,14 @@ static irqreturn_t fg_delta_msoc_irq_handler(int irq, void *data)
 	if (rc < 0) {
 		pr_err("Failed to read battery temp rc: %d\n", rc);
 	} else {
-		if (chip->cl->active)
-			cap_learning_update(chip->cl, batt_temp, batt_soc,
+		if (chip->cl->active) {
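+			/* Scale the 32-bit HW batt_soc to centi-percent (0-10000) */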
+			batt_soc_cp = div64_u64(
+					(u64)(u32)batt_soc * CENTI_FULL_SOC,
+					BATT_SOC_32BIT);
+			cap_learning_update(chip->cl, batt_temp, batt_soc_cp,
 				fg->charge_status, fg->charge_done,
 				input_present, is_qnovo_en(fg));
+		}
 
 		rc = fg_gen4_slope_limit_config(chip, batt_temp);
 		if (rc < 0)
@@ -3831,6 +3926,8 @@ static void esr_calib_work(struct work_struct *work)
 	s16 esr_raw, esr_char_raw, esr_delta, esr_meas_diff, esr_filtered;
 	u8 buf[2];
 
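+	/* Serialize with the ESR delta writes done during profile reload */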
+	mutex_lock(&chip->esr_calib_lock);
+
 	if (chip->delta_esr_count > chip->dt.delta_esr_disable_count ||
 		chip->esr_fast_calib_done) {
 		fg_dbg(fg, FG_STATUS, "delta_esr_count: %d esr_fast_calib_done:%d\n",
@@ -3927,6 +4024,7 @@ static void esr_calib_work(struct work_struct *work)
 	chip->delta_esr_count++;
 	fg_dbg(fg, FG_STATUS, "Wrote ESR delta [0x%x 0x%x]\n", buf[0], buf[1]);
 out:
+	mutex_unlock(&chip->esr_calib_lock);
 	vote(fg->awake_votable, ESR_CALIB, false, 0);
 }
 
@@ -3960,14 +4058,36 @@ static void soc_scale_work(struct work_struct *work)
 	if (rc < 0)
 		pr_err("Failed to validate SOC scale mode, rc=%d\n", rc);
 
+	/* Validation above may have exited FVSS mode; bail out if so */
+	if (!chip->soc_scale_mode) {
+		fg_dbg(fg, FG_FVSS, "exit soc scale mode\n");
+		return;
+	}
+
 	if (chip->vbatt_res <= 0)
 		chip->vbatt_res = 0;
 
 	mutex_lock(&chip->soc_scale_lock);
 	soc = DIV_ROUND_CLOSEST(chip->vbatt_res,
 				chip->soc_scale_slope);
-	/* If calculated SOC is higher than current SOC, report current SOC */
-	if (soc > chip->prev_soc_scale_msoc) {
+	chip->soc_scale_msoc = soc;
+	chip->scale_timer = chip->dt.scale_timer_ms;
+
+	fg_dbg(fg, FG_FVSS, "soc: %d last soc: %d msoc_actual: %d\n", soc,
+			chip->prev_soc_scale_msoc, chip->msoc_actual);
+	if ((chip->prev_soc_scale_msoc - chip->msoc_actual) > soc_thr_percent) {
+		/*
+		 * If the previous SW calculated SOC leads the HW SOC by more
+		 * than the SOC threshold, limit the drop by reporting
+		 * (previous SW SOC - SOC threshold).
+		 */
+		chip->soc_scale_msoc = chip->prev_soc_scale_msoc -
+					soc_thr_percent;
+	} else if (soc > chip->prev_soc_scale_msoc) {
+		/*
+		 * If the calculated SOC is higher than the current SOC,
+		 * report the current SOC.
+		 */
 		chip->soc_scale_msoc = chip->prev_soc_scale_msoc;
 		chip->scale_timer = chip->dt.scale_timer_ms;
 	} else if ((chip->prev_soc_scale_msoc - soc) > soc_thr_percent) {
@@ -3982,9 +4102,6 @@ static void soc_scale_work(struct work_struct *work)
 					soc_thr_percent;
 		chip->scale_timer = chip->dt.scale_timer_ms /
 				(chip->prev_soc_scale_msoc - soc);
-	} else {
-		chip->soc_scale_msoc = soc;
-		chip->scale_timer = chip->dt.scale_timer_ms;
 	}
 
 	if (chip->soc_scale_msoc < 0)
@@ -3997,7 +4114,7 @@ static void soc_scale_work(struct work_struct *work)
 	}
 
 	chip->prev_soc_scale_msoc = chip->soc_scale_msoc;
-	pr_debug("FVSS: Calculated SOC=%d SOC reported=%d timer resolution=%d\n",
+	fg_dbg(fg, FG_FVSS, "Calculated SOC=%d SOC reported=%d timer resolution=%d\n",
 		soc, chip->soc_scale_msoc, chip->scale_timer);
 	alarm_start_relative(&chip->soc_scale_alarm_timer,
 				ms_to_ktime(chip->scale_timer));
@@ -4040,6 +4157,7 @@ static void status_change_work(struct work_struct *work)
 	struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg);
 	int rc, batt_soc, batt_temp;
 	bool input_present, qnovo_en;
+	u32 batt_soc_cp;
 
 	if (fg->battery_missing) {
 		pm_relax(fg->dev);
@@ -4084,10 +4202,13 @@ static void status_change_work(struct work_struct *work)
 	cycle_count_update(chip->counter, (u32)batt_soc >> 24,
 		fg->charge_status, fg->charge_done, input_present);
 
-	if (fg->charge_status != fg->prev_charge_status)
-		cap_learning_update(chip->cl, batt_temp, batt_soc,
+	if (fg->charge_status != fg->prev_charge_status) {
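+		/* Scale the 32-bit HW batt_soc to centi-percent (0-10000) */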
+		batt_soc_cp = div64_u64((u64)(u32)batt_soc * CENTI_FULL_SOC,
+					BATT_SOC_32BIT);
+		cap_learning_update(chip->cl, batt_temp, batt_soc_cp,
 			fg->charge_status, fg->charge_done, input_present,
 			qnovo_en);
+	}
 
 	rc = fg_gen4_charge_full_update(fg);
 	if (rc < 0)
@@ -4364,6 +4485,9 @@ static int fg_psy_get_property(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_CURRENT_NOW:
 		rc = fg_get_battery_current(fg, &pval->intval);
 		break;
+	case POWER_SUPPLY_PROP_CURRENT_AVG:
+		rc = fg_get_sram_prop(fg, FG_SRAM_IBAT_FLT, &pval->intval);
+		break;
 	case POWER_SUPPLY_PROP_TEMP:
 		rc = fg_gen4_get_battery_temp(fg, &pval->intval);
 		break;
@@ -4603,6 +4727,7 @@ static enum power_supply_property fg_psy_props[] = {
 	POWER_SUPPLY_PROP_VOLTAGE_OCV,
 	POWER_SUPPLY_PROP_VOLTAGE_AVG,
 	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CURRENT_AVG,
 	POWER_SUPPLY_PROP_RESISTANCE_ID,
 	POWER_SUPPLY_PROP_RESISTANCE,
 	POWER_SUPPLY_PROP_ESR_ACTUAL,
@@ -4854,6 +4979,7 @@ static int fg_alg_init(struct fg_gen4_chip *chip)
 	cl->prime_cc_soc = fg_gen4_prime_cc_soc_sw;
 	cl->get_learned_capacity = fg_gen4_get_learned_capacity;
 	cl->store_learned_capacity = fg_gen4_store_learned_capacity;
+	cl->ok_to_begin = fg_gen4_cl_ok_to_begin;
 	cl->data = chip;
 
 	rc = cap_learning_init(cl);
@@ -5422,7 +5548,7 @@ static int fg_parse_ki_coefficients(struct fg_dev *fg)
 		}
 	}
 
-	chip->dt.ki_coeff_low_chg = 183;
+	chip->dt.ki_coeff_low_chg = 184;
 	of_property_read_u32(node, "qcom,ki-coeff-low-chg",
 		&chip->dt.ki_coeff_low_chg);
 
@@ -5434,11 +5560,11 @@ static int fg_parse_ki_coefficients(struct fg_dev *fg)
 	of_property_read_u32(node, "qcom,ki-coeff-hi-chg",
 		&chip->dt.ki_coeff_hi_chg);
 
-	chip->dt.ki_coeff_lo_med_chg_thr_ma = 1000;
+	chip->dt.ki_coeff_lo_med_chg_thr_ma = 500;
 	of_property_read_u32(node, "qcom,ki-coeff-chg-low-med-thresh-ma",
 		&chip->dt.ki_coeff_lo_med_chg_thr_ma);
 
-	chip->dt.ki_coeff_med_hi_chg_thr_ma = 1500;
+	chip->dt.ki_coeff_med_hi_chg_thr_ma = 1000;
 	of_property_read_u32(node, "qcom,ki-coeff-chg-med-hi-thresh-ma",
 		&chip->dt.ki_coeff_med_hi_chg_thr_ma);
 
@@ -5672,9 +5798,6 @@ static void fg_gen4_parse_cl_params_dt(struct fg_gen4_chip *chip)
 	of_property_read_u32(node, "qcom,cl-min-delta-batt-soc",
 					&chip->cl->dt.min_delta_batt_soc);
 
-	chip->cl->dt.cl_wt_enable = of_property_read_bool(node,
-						"qcom,cl-wt-enable");
-
 	chip->cl->dt.min_temp = DEFAULT_CL_MIN_TEMP_DECIDEGC;
 	of_property_read_u32(node, "qcom,cl-min-temp", &chip->cl->dt.min_temp);
 
@@ -5698,6 +5821,16 @@ static void fg_gen4_parse_cl_params_dt(struct fg_gen4_chip *chip)
 				&chip->cl->dt.max_cap_limit);
 
 	of_property_read_u32(node, "qcom,cl-skew", &chip->cl->dt.skew_decipct);
+
+	if (of_property_read_bool(node, "qcom,cl-wt-enable")) {
+		chip->cl->dt.cl_wt_enable = true;
+		chip->cl->dt.max_start_soc = -EINVAL;
+		chip->cl->dt.min_start_soc = -EINVAL;
+	}
+
+	chip->cl->dt.ibat_flt_thr_ma = 100;
+	of_property_read_u32(node, "qcom,cl-ibat-flt-thresh-ma",
+		&chip->cl->dt.ibat_flt_thr_ma);
 }
 
 static int fg_gen4_parse_revid_dt(struct fg_gen4_chip *chip)
@@ -6021,6 +6154,34 @@ static void fg_gen4_cleanup(struct fg_gen4_chip *chip)
 	dev_set_drvdata(fg->dev, NULL);
 }
 
+static void fg_gen4_post_init(struct fg_gen4_chip *chip)
+{
+	int i;
+	struct fg_dev *fg = &chip->fg;
+
+	if (!is_debug_batt_id(fg))
+		return;
+
+	/* Disable all wakeable IRQs for a debug battery */
+	vote(fg->delta_bsoc_irq_en_votable, DEBUG_BOARD_VOTER, false, 0);
+	vote(chip->delta_esr_irq_en_votable, DEBUG_BOARD_VOTER, false, 0);
+	vote(chip->mem_attn_irq_en_votable, DEBUG_BOARD_VOTER, false, 0);
+
+	for (i = 0; i < FG_GEN4_IRQ_MAX; i++) {
+		if (!fg->irqs[i].irq || !fg->irqs[i].wakeable)
+			continue;
+
+		if (i == BSOC_DELTA_IRQ || i == ESR_DELTA_IRQ ||
+				i == MEM_ATTN_IRQ)
+			continue;
+
+		disable_irq_wake(fg->irqs[i].irq);
+		disable_irq_nosync(fg->irqs[i].irq);
+	}
+
+	fg_dbg(fg, FG_STATUS, "Disabled wakeable irqs for debug board\n");
+}
+
 static int fg_gen4_probe(struct platform_device *pdev)
 {
 	struct fg_gen4_chip *chip;
@@ -6044,6 +6205,7 @@ static int fg_gen4_probe(struct platform_device *pdev)
 	chip->ki_coeff_full_soc[1] = -EINVAL;
 	chip->esr_soh_cycle_count = -EINVAL;
 	chip->calib_level = -EINVAL;
+	chip->soh = -EINVAL;
 	fg->regmap = dev_get_regmap(fg->dev->parent, NULL);
 	if (!fg->regmap) {
 		dev_err(fg->dev, "Parent regmap is unavailable\n");
@@ -6054,6 +6216,7 @@ static int fg_gen4_probe(struct platform_device *pdev)
 	mutex_init(&fg->sram_rw_lock);
 	mutex_init(&fg->charge_full_lock);
 	mutex_init(&chip->soc_scale_lock);
+	mutex_init(&chip->esr_calib_lock);
 	init_completion(&fg->soc_update);
 	init_completion(&fg->soc_ready);
 	init_completion(&chip->mem_attn);
@@ -6243,6 +6406,8 @@ static int fg_gen4_probe(struct platform_device *pdev)
 	if (!fg->battery_missing)
 		schedule_delayed_work(&fg->profile_load_work, 0);
 
+	fg_gen4_post_init(chip);
+
 	pr_debug("FG GEN4 driver probed successfully\n");
 	return 0;
 exit:
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index 3c8ec13..eea69fe 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -91,6 +91,7 @@ static struct attribute *qg_attrs[] = {
 ATTRIBUTE_GROUPS(qg);
 
 static int qg_process_rt_fifo(struct qpnp_qg *chip);
+static int qg_load_battery_profile(struct qpnp_qg *chip);
 
 static bool is_battery_present(struct qpnp_qg *chip)
 {
@@ -1540,8 +1541,6 @@ static int qg_get_learned_capacity(void *data, int64_t *learned_cap_uah)
 	}
 	*learned_cap_uah = cc_mah * 1000;
 
-	qg_dbg(chip, QG_DEBUG_ALG_CL, "Retrieved learned capacity %llduah\n",
-					*learned_cap_uah);
 	return 0;
 }
 
@@ -1570,6 +1569,47 @@ static int qg_store_learned_capacity(void *data, int64_t learned_cap_uah)
 	return 0;
 }
 
+static int qg_get_batt_age_level(void *data, u32 *batt_age_level)
+{
+	struct qpnp_qg *chip = data;
+	int rc;
+
+	if (!chip)
+		return -ENODEV;
+
+	if (chip->battery_missing || is_debug_batt_id(chip))
+		return -ENODEV;
+
+	*batt_age_level = 0;
+	rc = qg_sdam_read(SDAM_BATT_AGE_LEVEL, batt_age_level);
+	if (rc < 0) {
+		pr_err("Error in reading batt_age_level, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qg_store_batt_age_level(void *data, u32 batt_age_level)
+{
+	struct qpnp_qg *chip = data;
+	int rc;
+
+	if (!chip)
+		return -ENODEV;
+
+	if (chip->battery_missing)
+		return -ENODEV;
+
+	rc = qg_sdam_write(SDAM_BATT_AGE_LEVEL, batt_age_level);
+	if (rc < 0) {
+		pr_err("Error in writing batt_age_level, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
 static int qg_get_cc_soc(void *data, int *cc_soc)
 {
 	struct qpnp_qg *chip = data;
@@ -1577,6 +1617,11 @@ static int qg_get_cc_soc(void *data, int *cc_soc)
 	if (!chip)
 		return -ENODEV;
 
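+	/* cc_soc is meaningless on a debug board or without a battery */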
+	if (is_debug_batt_id(chip) || chip->battery_missing) {
+		*cc_soc = -EINVAL;
+		return 0;
+	}
+
 	if (chip->cc_soc == INT_MIN)
 		return -EINVAL;
 
@@ -1705,6 +1750,11 @@ static int qg_get_charge_counter(struct qpnp_qg *chip, int *charge_counter)
 	int rc, cc_soc = 0;
 	int64_t temp = 0;
 
+	if (is_debug_batt_id(chip) || chip->battery_missing) {
+		*charge_counter = -EINVAL;
+		return 0;
+	}
+
 	rc = qg_get_learned_capacity(chip, &temp);
 	if (rc < 0 || !temp)
 		rc = qg_get_nominal_capacity((int *)&temp, 250, true);
@@ -1778,10 +1828,12 @@ static int qg_get_ttf_param(void *data, enum ttf_param param, int *val)
 	if (!chip)
 		return -ENODEV;
 
-	if (chip->battery_missing || !chip->profile_loaded)
-		return -ENODEV;
-
 	switch (param) {
+	case TTF_TTE_VALID:
+		*val = 1;
+		if (chip->battery_missing || is_debug_batt_id(chip))
+			*val = 0;
+		break;
 	case TTF_MSOC:
 		rc = qg_get_battery_capacity(chip, val);
 		break;
@@ -1828,6 +1880,9 @@ static int qg_get_ttf_param(void *data, enum ttf_param param, int *val)
 	case TTF_CHG_STATUS:
 		*val = chip->charge_status;
 		break;
+	case TTF_CHG_DONE:
+		*val = chip->charge_done;
+		break;
 	default:
 		pr_err("Unsupported property %d\n", param);
 		rc = -EINVAL;
@@ -1940,6 +1995,40 @@ static int qg_reset(struct qpnp_qg *chip)
 	return rc;
 }
 
+static int qg_setprop_batt_age_level(struct qpnp_qg *chip, int batt_age_level)
+{
+	int rc = 0;
+
+	if (!chip->dt.multi_profile_load)
+		return 0;
+
+	if (batt_age_level < 0) {
+		pr_err("Invalid age-level %d\n", batt_age_level);
+		return -EINVAL;
+	}
+
+	if (chip->batt_age_level == batt_age_level) {
+		qg_dbg(chip, QG_DEBUG_PROFILE, "Same age-level %d\n",
+						chip->batt_age_level);
+		return 0;
+	}
+
+	chip->batt_age_level = batt_age_level;
+	rc = qg_load_battery_profile(chip);
+	if (rc < 0) {
+		pr_err("Failed to load profile rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = qg_store_batt_age_level(chip, batt_age_level);
+	if (rc < 0)
+		pr_err("Error in storing batt_age_level rc=%d\n", rc);
+
+	qg_dbg(chip, QG_DEBUG_PROFILE, "Profile with batt_age_level = %d loaded\n",
+						chip->batt_age_level);
+	return rc;
+}
+
 static int qg_psy_set_property(struct power_supply *psy,
 			       enum power_supply_property psp,
 			       const union power_supply_propval *pval)
@@ -1971,6 +2060,8 @@ static int qg_psy_set_property(struct power_supply *psy,
 		chip->soh = pval->intval;
 		qg_dbg(chip, QG_DEBUG_STATUS, "SOH update: SOH=%d esr_actual=%d esr_nominal=%d\n",
 				chip->soh, chip->esr_actual, chip->esr_nominal);
+		if (chip->sp)
+			soh_profile_update(chip->sp, chip->soh);
 		break;
 	case POWER_SUPPLY_PROP_ESR_ACTUAL:
 		chip->esr_actual = pval->intval;
@@ -1981,6 +2072,9 @@ static int qg_psy_set_property(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_FG_RESET:
 		qg_reset(chip);
 		break;
+	case POWER_SUPPLY_PROP_BATT_AGE_LEVEL:
+		rc = qg_setprop_batt_age_level(chip, pval->intval);
+		break;
 	default:
 		break;
 	}
@@ -2110,6 +2204,9 @@ static int qg_psy_get_property(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_SCALE_MODE_EN:
 		pval->intval = chip->fvss_active;
 		break;
+	case POWER_SUPPLY_PROP_BATT_AGE_LEVEL:
+		pval->intval = chip->batt_age_level;
+		break;
 	default:
 		pr_debug("Unsupported property %d\n", psp);
 		break;
@@ -2127,6 +2224,7 @@ static int qg_property_is_writeable(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_ESR_NOMINAL:
 	case POWER_SUPPLY_PROP_SOH:
 	case POWER_SUPPLY_PROP_FG_RESET:
+	case POWER_SUPPLY_PROP_BATT_AGE_LEVEL:
 		return 1;
 	default:
 		break;
@@ -2169,6 +2267,7 @@ static enum power_supply_property qg_psy_props[] = {
 	POWER_SUPPLY_PROP_POWER_AVG,
 	POWER_SUPPLY_PROP_POWER_NOW,
 	POWER_SUPPLY_PROP_SCALE_MODE_EN,
+	POWER_SUPPLY_PROP_BATT_AGE_LEVEL,
 };
 
 static const struct power_supply_desc qg_psy_desc = {
@@ -2181,6 +2280,17 @@ static const struct power_supply_desc qg_psy_desc = {
 	.property_is_writeable = qg_property_is_writeable,
 };
 
+#define DEFAULT_CL_BEGIN_IBAT_UA	(-100000)
+static bool qg_cl_ok_to_begin(void *data)
+{
+	struct qpnp_qg *chip = data;
+
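+	/* Charging current is negative; require at least 100mA of it */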
+	if (chip->last_fifo_i_ua < DEFAULT_CL_BEGIN_IBAT_UA)
+		return true;
+
+	return false;
+}
+
 #define DEFAULT_RECHARGE_SOC 95
 static int qg_charge_full_update(struct qpnp_qg *chip)
 {
@@ -2457,7 +2567,7 @@ static void qg_status_change_work(struct work_struct *work)
 	struct qpnp_qg *chip = container_of(work,
 			struct qpnp_qg, qg_status_change_work);
 	union power_supply_propval prop = {0, };
-	int rc = 0, batt_temp = 0, batt_soc_32b = 0;
+	int rc = 0, batt_temp = 0;
 	bool input_present = false;
 
 	if (!is_batt_available(chip)) {
@@ -2513,11 +2623,8 @@ static void qg_status_change_work(struct work_struct *work)
 		rc = qg_get_battery_temp(chip, &batt_temp);
 		if (rc < 0) {
 			pr_err("Failed to read BATT_TEMP at PON rc=%d\n", rc);
-		} else {
-			batt_soc_32b = div64_u64(
-					chip->batt_soc * BATT_SOC_32BIT,
-					QG_SOC_FULL);
-			cap_learning_update(chip->cl, batt_temp, batt_soc_32b,
+		} else if (chip->batt_soc >= 0) {
+			cap_learning_update(chip->cl, batt_temp, chip->batt_soc,
 				chip->charge_status, chip->charge_done,
 				input_present, false);
 		}
@@ -2807,17 +2914,39 @@ static int get_batt_id_ohm(struct qpnp_qg *chip, u32 *batt_id_ohm)
 static int qg_load_battery_profile(struct qpnp_qg *chip)
 {
 	struct device_node *node = chip->dev->of_node;
-	struct device_node *batt_node, *profile_node;
-	int rc, tuple_len, len, i;
+	struct device_node *profile_node;
+	int rc, tuple_len, len, i, avail_age_level = 0;
 
-	batt_node = of_find_node_by_name(node, "qcom,battery-data");
-	if (!batt_node) {
+	chip->batt_node = of_find_node_by_name(node, "qcom,battery-data");
+	if (!chip->batt_node) {
 		pr_err("Batterydata not available\n");
 		return -ENXIO;
 	}
 
-	profile_node = of_batterydata_get_best_profile(batt_node,
+	if (chip->dt.multi_profile_load) {
+		if (chip->batt_age_level == -EINVAL) {
+			rc = qg_get_batt_age_level(chip, &chip->batt_age_level);
+			if (rc < 0) {
+				pr_err("error in retrieving batt age level rc=%d\n",
+									rc);
+				return rc;
+			}
+		}
+		profile_node = of_batterydata_get_best_aged_profile(
+					chip->batt_node,
+					chip->batt_id_ohm / 1000,
+					chip->batt_age_level,
+					&avail_age_level);
+		if (chip->batt_age_level != avail_age_level) {
+			qg_dbg(chip, QG_DEBUG_PROFILE, "Batt_age_level %d doesn't exist, using %d\n",
+					chip->batt_age_level, avail_age_level);
+			chip->batt_age_level = avail_age_level;
+		}
+	} else {
+		profile_node = of_batterydata_get_best_profile(chip->batt_node,
 				chip->batt_id_ohm / 1000, NULL);
+	}
+
 	if (IS_ERR(profile_node)) {
 		rc = PTR_ERR(profile_node);
 		pr_err("Failed to detect valid QG battery profile %d\n", rc);
@@ -3434,9 +3563,44 @@ static int qg_hw_init(struct qpnp_qg *chip)
 	return 0;
 }
 
+static int qg_soh_batt_profile_init(struct qpnp_qg *chip)
+{
+	int rc = 0;
+
+	if (!chip->dt.multi_profile_load)
+		return 0;
+
+	if (is_debug_batt_id(chip) || chip->battery_missing)
+		return 0;
+
+	if (!chip->sp)
+		chip->sp = devm_kzalloc(chip->dev, sizeof(*chip->sp),
+					GFP_KERNEL);
+	if (!chip->sp)
+		return -ENOMEM;
+
+	if (!chip->sp->initialized) {
+		chip->sp->batt_id_kohms = chip->batt_id_ohm / 1000;
+		chip->sp->bp_node = chip->batt_node;
+		chip->sp->last_batt_age_level = chip->batt_age_level;
+		chip->sp->bms_psy = chip->qg_psy;
+		rc = soh_profile_init(chip->dev, chip->sp);
+		if (rc < 0) {
+			devm_kfree(chip->dev, chip->sp);
+			chip->sp = NULL;
+		} else {
+			qg_dbg(chip, QG_DEBUG_PROFILE, "SOH profile count: %d\n",
+				chip->sp->profile_count);
+		}
+	}
+
+	return rc;
+}
+
 static int qg_post_init(struct qpnp_qg *chip)
 {
 	u8 status = 0;
+	int rc = 0;
 
 	/* disable all IRQs if profile is not loaded */
 	if (!chip->profile_loaded) {
@@ -3455,6 +3619,14 @@ static int qg_post_init(struct qpnp_qg *chip)
 	/* read STATUS2 register to clear its last state */
 	qg_read(chip, chip->qg_base + QG_STATUS2_REG, &status, 1);
 
+	/* SOH based multi-profile init */
+	rc = qg_soh_batt_profile_init(chip);
+	if (rc < 0) {
+		pr_err("Failed to initialize SOH based battery profile rc=%d\n",
+								rc);
+		return rc;
+	}
+
 	return 0;
 }
 
@@ -3594,6 +3766,7 @@ static int qg_alg_init(struct qpnp_qg *chip)
 	cl->get_cc_soc = qg_get_cc_soc;
 	cl->get_learned_capacity = qg_get_learned_capacity;
 	cl->store_learned_capacity = qg_store_learned_capacity;
+	cl->ok_to_begin = qg_cl_ok_to_begin;
 	cl->data = chip;
 
 	rc = cap_learning_init(cl);
@@ -3734,6 +3907,7 @@ static int qg_parse_s2_dt(struct qpnp_qg *chip)
 #define DEFAULT_CL_MIN_LIM_DECIPERC	500
 #define DEFAULT_CL_MAX_LIM_DECIPERC	100
 #define DEFAULT_CL_DELTA_BATT_SOC	10
+#define DEFAULT_CL_WT_START_SOC		15
 static int qg_parse_cl_dt(struct qpnp_qg *chip)
 {
 	int rc;
@@ -3801,8 +3975,11 @@ static int qg_parse_cl_dt(struct qpnp_qg *chip)
 	of_property_read_u32(node, "qcom,cl-min-delta-batt-soc",
 				&chip->cl->dt.min_delta_batt_soc);
 
-	chip->cl->dt.cl_wt_enable = of_property_read_bool(node,
-						"qcom,cl-wt-enable");
+	if (of_property_read_bool(node, "qcom,cl-wt-enable")) {
+		chip->cl->dt.cl_wt_enable = true;
+		chip->cl->dt.min_start_soc = DEFAULT_CL_WT_START_SOC;
+		chip->cl->dt.max_start_soc = -EINVAL;
+	}
 
 	qg_dbg(chip, QG_DEBUG_PON, "DT: cl_min_start_soc=%d cl_max_start_soc=%d cl_min_temp=%d cl_max_temp=%d chip->cl->dt.cl_wt_enable=%d\n",
 		chip->cl->dt.min_start_soc, chip->cl->dt.max_start_soc,
@@ -3829,6 +4006,7 @@ static int qg_parse_cl_dt(struct qpnp_qg *chip)
 #define DEFAULT_SLEEP_TIME_SECS		1800 /* 30 mins */
 #define DEFAULT_SYS_MIN_VOLT_MV		2800
 #define DEFAULT_FVSS_VBAT_MV		3500
+#define DEFAULT_TCSS_ENTRY_SOC		90
 static int qg_parse_dt(struct qpnp_qg *chip)
 {
 	int rc = 0;
@@ -4072,6 +4250,21 @@ static int qg_parse_dt(struct qpnp_qg *chip)
 			chip->dt.fvss_vbat_mv = temp;
 	}
 
+	if (of_property_read_bool(node, "qcom,tcss-enable")) {
+
+		chip->dt.tcss_enable = true;
+
+		rc = of_property_read_u32(node,
+				"qcom,tcss-entry-soc", &temp);
+		if (rc < 0)
+			chip->dt.tcss_entry_soc = DEFAULT_TCSS_ENTRY_SOC;
+		else
+			chip->dt.tcss_entry_soc = temp;
+	}
+
+	chip->dt.multi_profile_load = of_property_read_bool(node,
+					"qcom,multi-profile-load");
+
 	qg_dbg(chip, QG_DEBUG_PON, "DT: vbatt_empty_mv=%dmV vbatt_low_mv=%dmV delta_soc=%d ext-sns=%d\n",
 			chip->dt.vbatt_empty_mv, chip->dt.vbatt_low_mv,
 			chip->dt.delta_soc, chip->dt.qg_ext_sense);
@@ -4374,6 +4567,7 @@ static int qpnp_qg_probe(struct platform_device *pdev)
 	chip->soh = -EINVAL;
 	chip->esr_actual = -EINVAL;
 	chip->esr_nominal = -EINVAL;
+	chip->batt_age_level = -EINVAL;
 
 	qg_create_debugfs(chip);
 
@@ -4395,6 +4589,12 @@ static int qpnp_qg_probe(struct platform_device *pdev)
 		return rc;
 	}
 
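+	/* Init SDAM first: profile load reads the stored batt age level */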
+	rc = qg_sdam_init(chip->dev);
+	if (rc < 0) {
+		pr_err("Failed to initialize QG SDAM, rc=%d\n", rc);
+		return rc;
+	}
+
 	rc = qg_setup_battery(chip);
 	if (rc < 0) {
 		pr_err("Failed to setup battery, rc=%d\n", rc);
@@ -4407,12 +4607,6 @@ static int qpnp_qg_probe(struct platform_device *pdev)
 		return rc;
 	}
 
-	rc = qg_sdam_init(chip->dev);
-	if (rc < 0) {
-		pr_err("Failed to initialize QG SDAM, rc=%d\n", rc);
-		return rc;
-	}
-
 	rc = qg_sanitize_sdam(chip);
 	if (rc < 0) {
 		pr_err("Failed to sanitize SDAM, rc=%d\n", rc);
diff --git a/drivers/power/supply/qcom/qpnp-qnovo5.c b/drivers/power/supply/qcom/qpnp-qnovo5.c
index 0723340..e2a5a38 100644
--- a/drivers/power/supply/qcom/qpnp-qnovo5.c
+++ b/drivers/power/supply/qcom/qpnp-qnovo5.c
@@ -121,6 +121,7 @@ struct qnovo {
 	struct votable		*not_ok_to_qnovo_votable;
 	struct votable		*chg_ready_votable;
 	struct votable		*awake_votable;
+	struct votable		*cp_disable_votable;
 	struct work_struct	status_change_work;
 	struct delayed_work	usb_debounce_work;
 	int			base;
@@ -336,6 +337,7 @@ enum {
 	OK_TO_QNOVO,
 	QNOVO_ENABLE,
 	PT_ENABLE,
+	STANDALONE,
 	FV_REQUEST,
 	FCC_REQUEST,
 	PE_CTRL_REG,
@@ -666,9 +668,17 @@ static ssize_t ok_to_qnovo_show(struct class *c, struct class_attribute *attr,
 			char *buf)
 {
 	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
-	int val = get_effective_result(chip->not_ok_to_qnovo_votable);
+	int val, cp_dis;
+	int not_ok = get_effective_result(chip->not_ok_to_qnovo_votable);
+	struct votable *cp_disable_votable = find_votable("CP_DISABLE");
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", !val);
+	val = !not_ok;
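+	/* Qnovo is usable only while the charge pump stays disabled */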
+	if (cp_disable_votable) {
+		cp_dis = get_effective_result(cp_disable_votable);
+		val = val && cp_dis;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
 }
 static CLASS_ATTR_RO(ok_to_qnovo);
 
@@ -721,6 +731,38 @@ static ssize_t pt_enable_store(struct class *c, struct class_attribute *attr,
 }
 static CLASS_ATTR_RW(pt_enable);
 
+static ssize_t standalone_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	int val;
+	struct votable *cp_disable_votable = find_votable("CP_DISABLE");
+
+	if (!cp_disable_votable)
+		return -ENODEV;
+
+	val = get_client_vote(cp_disable_votable, QNOVO_VOTER);
+
+	return scnprintf(ubuf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t standalone_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	unsigned long val;
+	struct votable *cp_disable_votable;
+
+	if (kstrtoul(ubuf, 0, &val))
+		return -EINVAL;
+
+	cp_disable_votable = find_votable("CP_DISABLE");
+	if (!cp_disable_votable)
+		return -ENODEV;
+
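+	/* Standalone mode keeps the charge pump disabled during Qnovo */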
+	vote(cp_disable_votable, QNOVO_VOTER, !!val, 0);
+
+	return count;
+}
+static CLASS_ATTR_RW(standalone);
 
 static ssize_t val_show(struct class *c, struct class_attribute *attr,
 			char *ubuf)
@@ -1078,6 +1120,7 @@ static struct attribute *qnovo_class_attrs[] = {
 	[OK_TO_QNOVO]		= &class_attr_ok_to_qnovo.attr,
 	[QNOVO_ENABLE]		= &class_attr_qnovo_enable.attr,
 	[PT_ENABLE]		= &class_attr_pt_enable.attr,
+	[STANDALONE]		= &class_attr_standalone.attr,
 	[FV_REQUEST]		= &class_attr_fv_uV_request.attr,
 	[FCC_REQUEST]		= &class_attr_fcc_uA_request.attr,
 	[PE_CTRL_REG]		= &class_attr_PE_CTRL_REG.attr,
@@ -1131,6 +1174,7 @@ static void status_change_work(struct work_struct *work)
 	union power_supply_propval pval;
 	bool usb_present = false, hw_ok_to_qnovo = false;
 	int rc, battery_health, charge_status;
+	struct votable *cp_disable_votable = find_votable("CP_DISABLE");
 
 	if (is_usb_available(chip)) {
 		rc = power_supply_get_property(chip->usb_psy,
@@ -1144,6 +1188,9 @@ static void status_change_work(struct work_struct *work)
 		cancel_delayed_work_sync(&chip->usb_debounce_work);
 		vote(chip->awake_votable, USB_READY_VOTER, false, 0);
 		vote(chip->chg_ready_votable, USB_READY_VOTER, false, 0);
+		if (cp_disable_votable)
+			vote(cp_disable_votable, QNOVO_VOTER, false, 0);
+
 		if (chip->pinctrl) {
 			rc = pinctrl_select_state(chip->pinctrl,
 					chip->pinctrl_state1);
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index 9a00920..684248a 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -303,11 +303,10 @@ static int smb5_chg_config_init(struct smb5 *chip)
 {
 	struct smb_charger *chg = &chip->chg;
 	struct pmic_revid_data *pmic_rev_id;
-	struct device_node *revid_dev_node;
+	struct device_node *revid_dev_node, *node = chg->dev->of_node;
 	int rc = 0;
 
-	revid_dev_node = of_parse_phandle(chip->chg.dev->of_node,
-					  "qcom,pmic-revid", 0);
+	revid_dev_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
 	if (!revid_dev_node) {
 		pr_err("Missing qcom,pmic-revid property\n");
 		return -EINVAL;
@@ -326,20 +325,20 @@ static int smb5_chg_config_init(struct smb5 *chip)
 
 	switch (pmic_rev_id->pmic_subtype) {
 	case PM8150B_SUBTYPE:
-		chip->chg.smb_version = PM8150B_SUBTYPE;
+		chip->chg.chg_param.smb_version = PM8150B_SUBTYPE;
 		chg->param = smb5_pm8150b_params;
 		chg->name = "pm8150b_charger";
 		chg->wa_flags |= CHG_TERMINATION_WA;
 		break;
 	case PM7250B_SUBTYPE:
-		chip->chg.smb_version = PM7250B_SUBTYPE;
+		chip->chg.chg_param.smb_version = PM7250B_SUBTYPE;
 		chg->param = smb5_pm8150b_params;
 		chg->name = "pm7250b_charger";
 		chg->wa_flags |= CHG_TERMINATION_WA;
 		chg->uusb_moisture_protection_capable = true;
 		break;
 	case PM6150_SUBTYPE:
-		chip->chg.smb_version = PM6150_SUBTYPE;
+		chip->chg.chg_param.smb_version = PM6150_SUBTYPE;
 		chg->param = smb5_pm8150b_params;
 		chg->name = "pm6150_charger";
 		chg->wa_flags |= SW_THERM_REGULATION_WA | CHG_TERMINATION_WA;
@@ -348,9 +347,10 @@ static int smb5_chg_config_init(struct smb5 *chip)
 		chg->main_fcc_max = PM6150_MAX_FCC_UA;
 		break;
 	case PMI632_SUBTYPE:
-		chip->chg.smb_version = PMI632_SUBTYPE;
+		chip->chg.chg_param.smb_version = PMI632_SUBTYPE;
 		chg->wa_flags |= WEAK_ADAPTER_WA | USBIN_OV_WA
-				| CHG_TERMINATION_WA;
+				| CHG_TERMINATION_WA | USBIN_ADC_WA
+				| SKIP_MISC_PBS_IRQ_WA;
 		chg->param = smb5_pmi632_params;
 		chg->use_extcon = true;
 		chg->name = "pmi632_charger";
@@ -378,6 +378,12 @@ static int smb5_chg_config_init(struct smb5 *chip)
 	chg->chg_freq.freq_below_otg_threshold	= 800;
 	chg->chg_freq.freq_above_otg_threshold	= 800;
 
+	if (of_property_read_bool(node, "qcom,disable-sw-thermal-regulation"))
+		chg->wa_flags &= ~SW_THERM_REGULATION_WA;
+
+	if (of_property_read_bool(node, "qcom,disable-fcc-restriction"))
+		chg->main_fcc_max = -EINVAL;
+
 out:
 	of_node_put(revid_dev_node);
 	return rc;
@@ -429,6 +435,8 @@ static int smb5_configure_internal_pull(struct smb_charger *chg, int type,
 #define OTG_DEFAULT_DEGLITCH_TIME_MS	50
 #define DEFAULT_WD_BARK_TIME		64
 #define DEFAULT_WD_SNARL_TIME_8S	0x07
+#define DEFAULT_FCC_STEP_SIZE_UA	100000
+#define DEFAULT_FCC_STEP_UPDATE_DELAY_MS	1000
 static int smb5_parse_dt_misc(struct smb5 *chip, struct device_node *node)
 {
 	int rc = 0, byte_len;
@@ -565,6 +573,29 @@ static int smb5_parse_dt_misc(struct smb5 *chip, struct device_node *node)
 	chip->dt.adc_based_aicl = of_property_read_bool(node,
 					"qcom,adc-based-aicl");
 
+	of_property_read_u32(node, "qcom,fcc-step-delay-ms",
+					&chg->chg_param.fcc_step_delay_ms);
+	if (chg->chg_param.fcc_step_delay_ms <= 0)
+		chg->chg_param.fcc_step_delay_ms =
+					DEFAULT_FCC_STEP_UPDATE_DELAY_MS;
+
+	of_property_read_u32(node, "qcom,fcc-step-size-ua",
+					&chg->chg_param.fcc_step_size_ua);
+	if (chg->chg_param.fcc_step_size_ua <= 0)
+		chg->chg_param.fcc_step_size_ua = DEFAULT_FCC_STEP_SIZE_UA;
+
+	/*
+	 * If this property is present, parallel charging with CP is
+	 * disabled for HVDCP3 adapters.
+	 */
+	chg->hvdcp3_standalone_config = of_property_read_bool(node,
+					"qcom,hvdcp3-standalone-config");
+
+	of_property_read_u32(node, "qcom,hvdcp3-max-icl-ua",
+					&chg->chg_param.hvdcp3_max_icl_ua);
+	if (chg->chg_param.hvdcp3_max_icl_ua <= 0)
+		chg->chg_param.hvdcp3_max_icl_ua = MICRO_3PA;
+
 	return 0;
 }
 
@@ -637,7 +668,8 @@ static int smb5_parse_dt_currents(struct smb5 *chip, struct device_node *node)
 	rc = of_property_read_u32(node,
 				"qcom,otg-cl-ua", &chg->otg_cl_ua);
 	if (rc < 0)
-		chg->otg_cl_ua = (chip->chg.smb_version == PMI632_SUBTYPE) ?
+		chg->otg_cl_ua =
+			(chip->chg.chg_param.smb_version == PMI632_SUBTYPE) ?
 							MICRO_1PA : MICRO_3PA;
 
 	rc = of_property_read_u32(node, "qcom,chg-term-src",
@@ -1273,20 +1305,26 @@ static int smb5_usb_main_set_prop(struct power_supply *psy,
 	struct smb5 *chip = power_supply_get_drvdata(psy);
 	struct smb_charger *chg = &chip->chg;
 	union power_supply_propval pval = {0, };
-	int rc = 0;
+	int rc = 0, offset_ua = 0;
 
 	switch (psp) {
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
 		break;
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
-		rc = smblib_set_charge_param(chg, &chg->param.fcc, val->intval);
+		/* Adjust Main FCC for QC3.0 + SMB1390 */
+		rc = smblib_get_qc3_main_icl_offset(chg, &offset_ua);
+		if (rc < 0)
+			offset_ua = 0;
+
+		rc = smblib_set_charge_param(chg, &chg->param.fcc,
+						val->intval + offset_ua);
 		break;
 	case POWER_SUPPLY_PROP_CURRENT_MAX:
 		rc = smblib_set_icl_current(chg, val->intval);
 		break;
 	case POWER_SUPPLY_PROP_FLASH_ACTIVE:
-		if ((chg->smb_version == PMI632_SUBTYPE)
+		if ((chg->chg_param.smb_version == PMI632_SUBTYPE)
 				&& (chg->flash_active != val->intval)) {
 			chg->flash_active = val->intval;
 
@@ -1326,10 +1364,15 @@ static int smb5_usb_main_set_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_FORCE_MAIN_FCC:
 		vote_override(chg->fcc_main_votable, CC_MODE_VOTER,
 				(val->intval < 0) ? false : true, val->intval);
+		/* Main FCC updated, re-calculate effective FCC */
+		rerun_election(chg->fcc_votable);
 		break;
 	case POWER_SUPPLY_PROP_FORCE_MAIN_ICL:
 		vote_override(chg->usb_icl_votable, CC_MODE_VOTER,
 				(val->intval < 0) ? false : true, val->intval);
+		/* Main ICL updated, re-calculate ILIM */
+		if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP_3)
+			rerun_election(chg->fcc_votable);
 		break;
 	case POWER_SUPPLY_PROP_COMP_CLAMP_LEVEL:
 		rc = smb5_set_prop_comp_clamp_level(chg, val);
@@ -1406,6 +1449,7 @@ static enum power_supply_property smb5_dc_props[] = {
 	POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
 	POWER_SUPPLY_PROP_REAL_TYPE,
 	POWER_SUPPLY_PROP_DC_RESET,
+	POWER_SUPPLY_PROP_AICL_DONE,
 };
 
 static int smb5_dc_get_prop(struct power_supply *psy,
@@ -1436,7 +1480,7 @@ static int smb5_dc_get_prop(struct power_supply *psy,
 		rc = smblib_get_prop_dc_voltage_max(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_REAL_TYPE:
-		val->intval = POWER_SUPPLY_TYPE_WIPOWER;
+		val->intval = POWER_SUPPLY_TYPE_WIRELESS;
 		break;
 	case POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION:
 		rc = smblib_get_prop_voltage_wls_output(chg, val);
@@ -1444,6 +1488,9 @@ static int smb5_dc_get_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_DC_RESET:
 		val->intval = 0;
 		break;
+	case POWER_SUPPLY_PROP_AICL_DONE:
+		val->intval = chg->dcin_aicl_done;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -1488,6 +1535,7 @@ static int smb5_dc_prop_is_writeable(struct power_supply *psy,
 {
 	switch (psp) {
 	case POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION:
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
 		return 1;
 	default:
 		break;
@@ -1732,8 +1780,14 @@ static int smb5_batt_set_prop(struct power_supply *psy,
 		vote(chg->fv_votable, BATT_PROFILE_VOTER, true, val->intval);
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
-		vote(chg->fv_votable, QNOVO_VOTER, (val->intval >= 0),
-			val->intval);
+		if (val->intval == -EINVAL) {
+			vote(chg->fv_votable, BATT_PROFILE_VOTER, true,
+					chg->batt_profile_fv_uv);
+			vote(chg->fv_votable, QNOVO_VOTER, false, 0);
+		} else {
+			vote(chg->fv_votable, QNOVO_VOTER, true, val->intval);
+			vote(chg->fv_votable, BATT_PROFILE_VOTER, false, 0);
+		}
 		break;
 	case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
 		chg->step_chg_enabled = !!val->intval;
@@ -2044,7 +2098,7 @@ static int smb5_configure_typec(struct smb_charger *chg)
 		return rc;
 	}
 
-	if (chg->smb_version != PMI632_SUBTYPE) {
+	if (chg->chg_param.smb_version != PMI632_SUBTYPE) {
 		rc = smblib_masked_write(chg, USBIN_LOAD_CFG_REG,
 				USBIN_IN_COLLAPSE_GF_SEL_MASK |
 				USBIN_AICL_STEP_TIMING_SEL_MASK,
@@ -2105,9 +2159,10 @@ static int smb5_configure_micro_usb(struct smb_charger *chg)
 		}
 
 		/* Disable periodic monitoring of CC_ID pin */
-		rc = smblib_write(chg, ((chg->smb_version == PMI632_SUBTYPE) ?
-			PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
-			TYPEC_U_USB_WATER_PROTECTION_CFG_REG), 0);
+		rc = smblib_write(chg,
+			((chg->chg_param.smb_version == PMI632_SUBTYPE) ?
+				PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
+				TYPEC_U_USB_WATER_PROTECTION_CFG_REG), 0);
 		if (rc < 0) {
 			dev_err(chg->dev, "Couldn't disable periodic monitoring of CC_ID rc=%d\n",
 				rc);
@@ -2131,7 +2186,7 @@ static int smb5_configure_iterm_thresholds_adc(struct smb5 *chip)
 	s16 raw_hi_thresh, raw_lo_thresh, max_limit_ma;
 	struct smb_charger *chg = &chip->chg;
 
-	if (chip->chg.smb_version == PMI632_SUBTYPE)
+	if (chip->chg.chg_param.smb_version == PMI632_SUBTYPE)
 		max_limit_ma = ITERM_LIMITS_PMI632_MA;
 	else
 		max_limit_ma = ITERM_LIMITS_PM8150B_MA;
@@ -2192,7 +2247,7 @@ static int smb5_configure_iterm_thresholds(struct smb5 *chip)
 
 	switch (chip->dt.term_current_src) {
 	case ITERM_SRC_ADC:
-		if (chip->chg.smb_version == PM8150B_SUBTYPE) {
+		if (chip->chg.chg_param.smb_version == PM8150B_SUBTYPE) {
 			rc = smblib_masked_write(chg, CHGR_ADC_TERM_CFG_REG,
 					TERM_BASED_ON_SYNC_CONV_OR_SAMPLE_CNT,
 					TERM_BASED_ON_SAMPLE_CNT);
@@ -2264,7 +2319,7 @@ static int smb5_init_dc_peripheral(struct smb_charger *chg)
 	int rc = 0;
 
 	/* PMI632 does not have DC peripheral */
-	if (chg->smb_version == PMI632_SUBTYPE)
+	if (chg->chg_param.smb_version == PMI632_SUBTYPE)
 		return 0;
 
 	/* Set DCIN ICL to 100 mA */
@@ -2402,7 +2457,7 @@ static int smb5_init_connector_type(struct smb_charger *chg)
 	 * PMI632 can have the connector type defined by a dedicated register
 	 * PMI632_TYPEC_MICRO_USB_MODE_REG or by a common TYPEC_U_USB_CFG_REG.
 	 */
-	if (chg->smb_version == PMI632_SUBTYPE) {
+	if (chg->chg_param.smb_version == PMI632_SUBTYPE) {
 		rc = smblib_read(chg, PMI632_TYPEC_MICRO_USB_MODE_REG, &val);
 		if (rc < 0) {
 			dev_err(chg->dev, "Couldn't read USB mode rc=%d\n", rc);
@@ -2447,7 +2502,7 @@ static int smb5_init_connector_type(struct smb_charger *chg)
 	 *   boots with charger connected.
 	 * - Initialize flash module for PMI632
 	 */
-	if (chg->smb_version == PMI632_SUBTYPE) {
+	if (chg->chg_param.smb_version == PMI632_SUBTYPE) {
 		schgm_flash_init(chg);
 		smblib_rerun_apsd_if_required(chg);
 	}
@@ -2785,6 +2840,7 @@ static int smb5_determine_initial_status(struct smb5 *chip)
 		smblib_suspend_on_debug_battery(chg);
 
 	usb_plugin_irq_handler(0, &irq_data);
+	dc_plugin_irq_handler(0, &irq_data);
 	typec_attach_detach_irq_handler(0, &irq_data);
 	typec_state_change_irq_handler(0, &irq_data);
 	usb_source_change_irq_handler(0, &irq_data);
@@ -3307,6 +3363,7 @@ static int smb5_probe(struct platform_device *pdev)
 	chg->connector_health = -EINVAL;
 	chg->otg_present = false;
 	chg->main_fcc_max = -EINVAL;
+	mutex_init(&chg->adc_lock);
 
 	chg->regmap = dev_get_regmap(chg->dev->parent, NULL);
 	if (!chg->regmap) {
@@ -3403,9 +3460,10 @@ static int smb5_probe(struct platform_device *pdev)
 		}
 	}
 
-	switch (chg->smb_version) {
+	switch (chg->chg_param.smb_version) {
 	case PM8150B_SUBTYPE:
 	case PM6150_SUBTYPE:
+	case PM7250B_SUBTYPE:
 		rc = smb5_init_dc_psy(chip);
 		if (rc < 0) {
 			pr_err("Couldn't initialize dc psy rc=%d\n", rc);
diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c
index f363977..5bbaca7 100644
--- a/drivers/power/supply/qcom/smb1355-charger.c
+++ b/drivers/power/supply/qcom/smb1355-charger.c
@@ -236,19 +236,28 @@ struct smb1355 {
 	struct smb_params	param;
 
 	struct mutex		write_lock;
+	struct mutex		suspend_lock;
 
 	struct power_supply	*parallel_psy;
 	struct pmic_revid_data	*pmic_rev_id;
-
+	int			d_health;
 	int			c_health;
 	int			c_charger_temp_max;
 	int			die_temp_deciDegC;
 	int			suspended_usb_icl;
+	int			charge_type;
+	int			vbatt_uv;
+	int			fcc_ua;
 	bool			exit_die_temp;
 	struct delayed_work	die_temp_work;
 	bool			disabled;
+	bool			suspended;
+	bool			charging_enabled;
+	bool			pin_status;
 
 	struct votable		*irq_disable_votable;
+	struct votable		*fcc_votable;
+	struct votable		*fv_votable;
 };
 
 enum {
@@ -265,6 +274,27 @@ static bool is_secure(struct smb1355 *chip, int addr)
 	return (addr & 0xFF) >= 0xA0;
 }
 
+static bool is_voter_available(struct smb1355 *chip)
+{
+	if (!chip->fcc_votable) {
+		chip->fcc_votable = find_votable("FCC");
+		if (!chip->fcc_votable) {
+			pr_debug("Couldn't find FCC votable\n");
+			return false;
+		}
+	}
+
+	if (!chip->fv_votable) {
+		chip->fv_votable = find_votable("FV");
+		if (!chip->fv_votable) {
+			pr_debug("Couldn't find FV votable\n");
+			return false;
+		}
+	}
+
+	return true;
+}
+
 static int smb1355_read(struct smb1355 *chip, u16 addr, u8 *val)
 {
 	unsigned int temp;
@@ -589,6 +619,138 @@ static int smb1355_get_prop_health(struct smb1355 *chip, int type)
 	return POWER_SUPPLY_HEALTH_COOL;
 }
 
+static int smb1355_get_prop_voltage_max(struct smb1355 *chip,
+					union power_supply_propval *val)
+{
+	int rc = 0;
+
+	mutex_lock(&chip->suspend_lock);
+	if (chip->suspended) {
+		val->intval = chip->vbatt_uv;
+		goto done;
+	}
+	rc = smb1355_get_charge_param(chip, &chip->param.ov, &val->intval);
+	if (rc < 0)
+		pr_err("failed to read vbatt rc=%d\n", rc);
+	else
+		chip->vbatt_uv = val->intval;
+done:
+	mutex_unlock(&chip->suspend_lock);
+	return rc;
+}
+
+static int smb1355_get_prop_constant_charge_current_max(struct smb1355 *chip,
+					union power_supply_propval *val)
+{
+	int rc = 0;
+
+	mutex_lock(&chip->suspend_lock);
+	if (chip->suspended) {
+		val->intval = chip->fcc_ua;
+		goto done;
+	}
+	rc = smb1355_get_charge_param(chip, &chip->param.fcc, &val->intval);
+	if (rc < 0)
+		pr_err("failed to read fcc rc=%d\n", rc);
+	else
+		chip->fcc_ua = val->intval;
+done:
+	mutex_unlock(&chip->suspend_lock);
+	return rc;
+}
+
+static int smb1355_get_prop_health_value(struct smb1355 *chip,
+				union power_supply_propval *val, int type)
+{
+	mutex_lock(&chip->suspend_lock);
+	if (chip->suspended) {
+		val->intval = (type == DIE_TEMP) ? chip->d_health :
+						chip->c_health;
+	} else {
+		val->intval = smb1355_get_prop_health(chip, (type == DIE_TEMP) ?
+						DIE_TEMP : CONNECTOR_TEMP);
+		if (type == DIE_TEMP)
+			chip->d_health = val->intval;
+		else
+			chip->c_health = val->intval;
+	}
+
+	mutex_unlock(&chip->suspend_lock);
+
+	return 0;
+}
+
+static int smb1355_get_prop_online(struct smb1355 *chip,
+					union power_supply_propval *val)
+{
+	int rc = 0;
+	u8 stat;
+
+	mutex_lock(&chip->suspend_lock);
+	if (chip->suspended) {
+		val->intval = chip->charging_enabled;
+		goto done;
+	}
+	rc = smb1355_read(chip, BATTERY_STATUS_3_REG, &stat);
+	if (rc < 0) {
+		pr_err("failed to read BATTERY_STATUS_3_REG %d\n", rc);
+	} else {
+		val->intval = (bool)(stat & ENABLE_CHARGING_BIT);
+		chip->charging_enabled = val->intval;
+	}
+done:
+	mutex_unlock(&chip->suspend_lock);
+	return rc;
+}
+
+static int smb1355_get_prop_pin_enabled(struct smb1355 *chip,
+					union power_supply_propval *val)
+{
+	int rc = 0;
+	u8 stat;
+
+	mutex_lock(&chip->suspend_lock);
+	if (chip->suspended) {
+		val->intval = chip->pin_status;
+		goto done;
+	}
+	rc = smb1355_read(chip, BATTERY_STATUS_2_REG, &stat);
+	if (rc < 0) {
+		pr_err("failed to read BATTERY_STATUS_2_REG %d\n", rc);
+	} else {
+		val->intval = !(stat & DISABLE_CHARGING_BIT);
+		chip->pin_status = val->intval;
+	}
+done:
+	mutex_unlock(&chip->suspend_lock);
+	return rc;
+}
+
+static int smb1355_get_prop_charge_type(struct smb1355 *chip,
+					union power_supply_propval *val)
+{
+	int rc = 0;
+
+	/*
+	 * During system suspend, register reads and writes to the
+	 * device must be avoided as they lead to i2c transaction
+	 * failures.
+	 */
+	mutex_lock(&chip->suspend_lock);
+	if (chip->suspended) {
+		val->intval = chip->charge_type;
+		goto done;
+	}
+	rc = smb1355_get_prop_batt_charge_type(chip, val);
+	if (rc < 0)
+		pr_err("failed to read batt_charge_type %d\n", rc);
+	else
+		chip->charge_type = val->intval;
+done:
+	mutex_unlock(&chip->suspend_lock);
+	return rc;
+}
+
 #define MIN_PARALLEL_ICL_UA		250000
 #define SUSPEND_CURRENT_UA		2000
 static int smb1355_parallel_get_prop(struct power_supply *psy,
@@ -596,23 +758,18 @@ static int smb1355_parallel_get_prop(struct power_supply *psy,
 				     union power_supply_propval *val)
 {
 	struct smb1355 *chip = power_supply_get_drvdata(psy);
-	u8 stat;
 	int rc = 0;
 
 	switch (prop) {
 	case POWER_SUPPLY_PROP_CHARGE_TYPE:
-		rc = smb1355_get_prop_batt_charge_type(chip, val);
+		rc = smb1355_get_prop_charge_type(chip, val);
 		break;
 	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
 	case POWER_SUPPLY_PROP_ONLINE:
-		rc = smb1355_read(chip, BATTERY_STATUS_3_REG, &stat);
-		if (rc >= 0)
-			val->intval = (bool)(stat & ENABLE_CHARGING_BIT);
+		rc = smb1355_get_prop_online(chip, val);
 		break;
 	case POWER_SUPPLY_PROP_PIN_ENABLED:
-		rc = smb1355_read(chip, BATTERY_STATUS_2_REG, &stat);
-		if (rc >= 0)
-			val->intval = !(stat & DISABLE_CHARGING_BIT);
+		rc = smb1355_get_prop_pin_enabled(chip, val);
 		break;
 	case POWER_SUPPLY_PROP_CHARGER_TEMP:
 		val->intval = chip->die_temp_deciDegC;
@@ -634,12 +791,10 @@ static int smb1355_parallel_get_prop(struct power_supply *psy,
 		val->intval = chip->disabled;
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
-		rc = smb1355_get_charge_param(chip, &chip->param.ov,
-						&val->intval);
+		rc = smb1355_get_prop_voltage_max(chip, val);
 		break;
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
-		rc = smb1355_get_charge_param(chip, &chip->param.fcc,
-						&val->intval);
+		rc = smb1355_get_prop_constant_charge_current_max(chip, val);
 		break;
 	case POWER_SUPPLY_PROP_MODEL_NAME:
 		val->strval = chip->name;
@@ -649,13 +804,13 @@ static int smb1355_parallel_get_prop(struct power_supply *psy,
 		break;
 	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
 		if (chip->c_health == -EINVAL)
-			val->intval = smb1355_get_prop_health(chip,
-						CONNECTOR_TEMP);
+			rc = smb1355_get_prop_health_value(chip, val,
+							CONNECTOR_TEMP);
 		else
 			val->intval = chip->c_health;
 		break;
 	case POWER_SUPPLY_PROP_DIE_HEALTH:
-		val->intval = smb1355_get_prop_health(chip, DIE_TEMP);
+		rc = smb1355_get_prop_health_value(chip, val, DIE_TEMP);
 		break;
 	case POWER_SUPPLY_PROP_PARALLEL_BATFET_MODE:
 		val->intval = chip->dt.pl_batfet_mode;
@@ -816,6 +971,11 @@ static int smb1355_parallel_set_prop(struct power_supply *psy,
 	struct smb1355 *chip = power_supply_get_drvdata(psy);
 	int rc = 0;
 
+	mutex_lock(&chip->suspend_lock);
+	if (chip->suspended) {
+		pr_debug("skipping set prop %d while suspended\n", prop);
+		goto done;
+	}
 	switch (prop) {
 	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
 		rc = smb1355_set_parallel_charging(chip, (bool)val->intval);
@@ -845,9 +1005,10 @@ static int smb1355_parallel_set_prop(struct power_supply *psy,
 	default:
 		pr_debug("parallel power supply set prop %d not supported\n",
 			prop);
-		return -EINVAL;
+		rc = -EINVAL;
 	}
-
+done:
+	mutex_unlock(&chip->suspend_lock);
 	return rc;
 }
 
@@ -1387,8 +1548,10 @@ static int smb1355_probe(struct platform_device *pdev)
 	chip->dev = &pdev->dev;
 	chip->param = v1_params;
 	chip->c_health = -EINVAL;
+	chip->d_health = -EINVAL;
 	chip->c_charger_temp_max = -EINVAL;
 	mutex_init(&chip->write_lock);
+	mutex_init(&chip->suspend_lock);
 	INIT_DELAYED_WORK(&chip->die_temp_work, die_temp_work);
 	chip->disabled = false;
 	chip->die_temp_deciDegC = -EINVAL;
@@ -1484,9 +1647,49 @@ static void smb1355_shutdown(struct platform_device *pdev)
 	smb1355_clk_request(chip, false);
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int smb1355_suspend(struct device *dev)
+{
+	struct smb1355 *chip = dev_get_drvdata(dev);
+
+	cancel_delayed_work_sync(&chip->die_temp_work);
+
+	mutex_lock(&chip->suspend_lock);
+	chip->suspended = true;
+	mutex_unlock(&chip->suspend_lock);
+
+	return 0;
+}
+
+static int smb1355_resume(struct device *dev)
+{
+	struct smb1355 *chip = dev_get_drvdata(dev);
+
+	mutex_lock(&chip->suspend_lock);
+	chip->suspended = false;
+	mutex_unlock(&chip->suspend_lock);
+
+	/*
+	 * During suspend, i2c failures are avoided by reporting the
+	 * cached chip state. To report fresh values again, invoke the
+	 * callbacks for the fcc and fv votables; to avoid excessive
+	 * invocations, do this only while charging is enabled.
+	 */
+	if (is_voter_available(chip) && chip->charging_enabled) {
+		rerun_election(chip->fcc_votable);
+		rerun_election(chip->fv_votable);
+	}
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(smb1355_pm_ops, smb1355_suspend, smb1355_resume);
+
 static struct platform_driver smb1355_driver = {
 	.driver	= {
 		.name		= "qcom,smb1355-charger",
+		.pm		= &smb1355_pm_ops,
 		.of_match_table	= match_table,
 	},
 	.probe		= smb1355_probe,
diff --git a/drivers/power/supply/qcom/smb1390-charger-psy.c b/drivers/power/supply/qcom/smb1390-charger-psy.c
index baa51f0..7e0a399 100644
--- a/drivers/power/supply/qcom/smb1390-charger-psy.c
+++ b/drivers/power/supply/qcom/smb1390-charger-psy.c
@@ -180,6 +180,7 @@ struct smb1390 {
 	struct votable		*cp_awake_votable;
 	struct votable		*slave_disable_votable;
 	struct votable		*usb_icl_votable;
+	struct votable		*fcc_main_votable;
 
 	/* power supplies */
 	struct power_supply	*cps_psy;
@@ -204,6 +205,7 @@ struct smb1390 {
 	u32			max_temp_alarm_degc;
 	u32			max_cutoff_soc;
 	u32			pl_output_mode;
+	u32			pl_input_mode;
 	u32			cp_role;
 	enum isns_mode		current_capability;
 	bool			batt_soc_validated;
@@ -875,13 +877,38 @@ static int smb1390_slave_disable_vote_cb(struct votable *votable, void *data,
 			      int disable, const char *client)
 {
 	struct smb1390 *chip = data;
-	int rc;
+	int rc = 0, ilim_ua = 0;
 
 	rc = smb1390_masked_write(chip, CORE_CONTROL1_REG, CMD_EN_SL_BIT,
 					disable ? 0 : CMD_EN_SL_BIT);
-	if (rc < 0)
+	if (rc < 0) {
 		pr_err("Couldn't %s slave rc=%d\n",
 				disable ? "disable" : "enable", rc);
+		return rc;
+	}
+
+	/* Re-distribute ILIM to Master CP when Slave is disabled */
+	if (disable && (chip->ilim_votable)) {
+		ilim_ua = get_effective_result_locked(chip->ilim_votable);
+		if (ilim_ua > MAX_ILIM_UA)
+			ilim_ua = MAX_ILIM_UA;
+
+		if (ilim_ua < 500000) {
+			smb1390_dbg(chip, PR_INFO, "ILIM too low, not re-distributing, ilim=%duA\n",
+								ilim_ua);
+			return 0;
+		}
+
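+		/* ILIM register encodes (ilim - 500mA) in 100mA steps */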
+		rc = smb1390_set_ilim(chip,
+		      DIV_ROUND_CLOSEST(ilim_ua - 500000, 100000));
+		if (rc < 0) {
+			pr_err("Failed to set ILIM, rc=%d\n", rc);
+			return rc;
+		}
+
+		smb1390_dbg(chip, PR_INFO, "Master ILIM set to %duA\n",
+								ilim_ua);
+	}
 
 	return rc;
 }
@@ -892,6 +919,7 @@ static int smb1390_ilim_vote_cb(struct votable *votable, void *data,
 	struct smb1390 *chip = data;
 	union power_supply_propval pval = {0, };
 	int rc = 0;
+	bool slave_enabled = false;
 
 	if (!is_psy_voter_available(chip) || chip->suspended)
 		return -EAGAIN;
@@ -910,7 +938,17 @@ static int smb1390_ilim_vote_cb(struct votable *votable, void *data,
 			ilim_uA);
 		vote(chip->disable_votable, ILIM_VOTER, true, 0);
 	} else {
+		/* Disable Slave CP if ILIM is < 2 * min ILIM */
 		if (is_cps_available(chip)) {
+			vote(chip->slave_disable_votable, ILIM_VOTER,
+				(ilim_uA < (2 * chip->min_ilim_ua)), 0);
+
+			if (get_effective_result(chip->slave_disable_votable)
+									== 0)
+				slave_enabled = true;
+		}
+
+		if (slave_enabled) {
 			ilim_uA /= 2;
 			pval.intval = DIV_ROUND_CLOSEST(ilim_uA - 500000,
 					100000);
@@ -929,7 +967,8 @@ static int smb1390_ilim_vote_cb(struct votable *votable, void *data,
 			return rc;
 		}
 
-		smb1390_dbg(chip, PR_INFO, "ILIM set to %duA\n", ilim_uA);
+		smb1390_dbg(chip, PR_INFO, "ILIM set to %duA slave_enabled=%d\n",
+						ilim_uA, slave_enabled);
 		vote(chip->disable_votable, ILIM_VOTER, false, 0);
 	}
 
@@ -994,19 +1033,53 @@ static int smb1390_notifier_cb(struct notifier_block *nb,
 #define ILIM_NR			10
 #define ILIM_DR			8
 #define ILIM_FACTOR(ilim)	((ilim * ILIM_NR) / ILIM_DR)
+
+static void smb1390_configure_ilim(struct smb1390 *chip, int mode)
+{
+	int rc;
+	union power_supply_propval pval = {0, };
+
+	/* PPS adapters rely on the current advertised by the adapter */
+	if ((chip->pl_output_mode == POWER_SUPPLY_PL_OUTPUT_VPH)
+			&& (mode == POWER_SUPPLY_CP_PPS)) {
+		rc = power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_PD_CURRENT_MAX, &pval);
+		if (rc < 0)
+			pr_err("Couldn't get PD CURRENT MAX rc=%d\n", rc);
+		else
+			vote(chip->ilim_votable, ICL_VOTER,
+					true, ILIM_FACTOR(pval.intval));
+	}
+
+	/* QC3.0/wireless adapters rely on the settled AICL for USBMID_USBMID */
+	if ((chip->pl_input_mode == POWER_SUPPLY_PL_USBMID_USBMID)
+			&& (mode == POWER_SUPPLY_CP_HVDCP3)) {
+		rc = power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, &pval);
+		if (rc < 0)
+			pr_err("Couldn't get usb aicl rc=%d\n", rc);
+		else
+			vote(chip->ilim_votable, ICL_VOTER, true, pval.intval);
+	}
+}
+
 static void smb1390_status_change_work(struct work_struct *work)
 {
 	struct smb1390 *chip = container_of(work, struct smb1390,
 					    status_change_work);
 	union power_supply_propval pval = {0, };
-	int rc;
+	int rc, dc_current_max = 0;
 
 	if (!is_psy_voter_available(chip))
 		goto out;
 
-	if (!smb1390_is_adapter_cc_mode(chip))
-		vote(chip->disable_votable, SOC_LEVEL_VOTER,
-		     smb1390_is_batt_soc_valid(chip) ? false : true, 0);
+	/*
+	 * If batt soc is not valid upon bootup, but becomes
+	 * valid due to the battery discharging later, remove
+	 * vote from SOC_LEVEL_VOTER.
+	 */
+	if (smb1390_is_batt_soc_valid(chip))
+		vote(chip->disable_votable, SOC_LEVEL_VOTER, false, 0);
 
 	rc = power_supply_get_property(chip->usb_psy,
 			POWER_SUPPLY_PROP_SMB_EN_MODE, &pval);
@@ -1023,6 +1096,12 @@ static void smb1390_status_change_work(struct work_struct *work)
 			goto out;
 		}
 
+		/*
+		 * Slave SMB1390 is not required for the power rating of
+		 * QC3; re-enable it for all other adapter types.
+		 */
+		if (pval.intval != POWER_SUPPLY_CP_HVDCP3)
+			vote(chip->slave_disable_votable, SRC_VOTER, false, 0);
+
 		/* Check for SOC threshold only once before enabling CP */
 		vote(chip->disable_votable, SRC_VOTER, false, 0);
 		if (!chip->batt_soc_validated) {
@@ -1036,35 +1115,24 @@ static void smb1390_status_change_work(struct work_struct *work)
 			vote(chip->ilim_votable, ICL_VOTER, false, 0);
 			rc = power_supply_get_property(chip->dc_psy,
 					POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
-			if (rc < 0)
+			if (rc < 0) {
 				pr_err("Couldn't get dc icl rc=%d\n", rc);
-			else
-				vote(chip->ilim_votable, WIRELESS_VOTER, true,
-						pval.intval);
+			} else {
+				dc_current_max = pval.intval;
+
+				rc = power_supply_get_property(chip->dc_psy,
+						POWER_SUPPLY_PROP_AICL_DONE,
+						&pval);
+				if (rc < 0)
+					pr_err("Couldn't get aicl done rc=%d\n",
+							rc);
+				else if (pval.intval)
+					vote(chip->ilim_votable, WIRELESS_VOTER,
+							true, dc_current_max);
+			}
 		} else {
 			vote(chip->ilim_votable, WIRELESS_VOTER, false, 0);
-			if ((chip->pl_output_mode == POWER_SUPPLY_PL_OUTPUT_VPH)
-				&& (pval.intval == POWER_SUPPLY_CP_PPS)) {
-				rc = power_supply_get_property(chip->usb_psy,
-					POWER_SUPPLY_PROP_PD_CURRENT_MAX,
-					&pval);
-				if (rc < 0)
-					pr_err("Couldn't get PD CURRENT MAX rc=%d\n",
-							rc);
-				else
-					vote(chip->ilim_votable, ICL_VOTER,
-						true, ILIM_FACTOR(pval.intval));
-			} else {
-				rc = power_supply_get_property(chip->usb_psy,
-					POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
-					&pval);
-				if (rc < 0)
-					pr_err("Couldn't get usb aicl rc=%d\n",
-							rc);
-				else
-					vote(chip->ilim_votable, ICL_VOTER,
-							true, pval.intval);
-			}
+			smb1390_configure_ilim(chip, pval.intval);
 		}
 
 		/*
@@ -1101,6 +1169,7 @@ static void smb1390_status_change_work(struct work_struct *work)
 		}
 	} else {
 		chip->batt_soc_validated = false;
+		vote(chip->slave_disable_votable, SRC_VOTER, true, 0);
 		vote(chip->disable_votable, SRC_VOTER, true, 0);
 		vote(chip->disable_votable, TAPER_END_VOTER, false, 0);
 		vote(chip->fcc_votable, CP_VOTER, false, 0);
@@ -1136,11 +1205,14 @@ static int smb1390_validate_slave_chg_taper(struct smb1390 *chip, int fcc_uA)
 		smb1390_dbg(chip, PR_INFO, "Set Master ILIM to MAX, post Slave disable in taper, fcc=%d\n",
 									fcc_uA);
 		vote_override(chip->ilim_votable, CC_MODE_VOTER,
-						true, MAX_ILIM_DUAL_CP_UA);
+				smb1390_is_adapter_cc_mode(chip),
+				MAX_ILIM_DUAL_CP_UA);
+
 		if (chip->usb_icl_votable)
 			vote_override(chip->usb_icl_votable,
 				      TAPER_MAIN_ICL_LIMIT_VOTER,
-				      true, chip->cc_mode_taper_main_icl_ua);
+				      smb1390_is_adapter_cc_mode(chip),
+				      chip->cc_mode_taper_main_icl_ua);
 	}
 
 	return rc;
@@ -1150,11 +1222,20 @@ static void smb1390_taper_work(struct work_struct *work)
 {
 	struct smb1390 *chip = container_of(work, struct smb1390, taper_work);
 	union power_supply_propval pval = {0, };
-	int rc, fcc_uA, delta_fcc_uA;
+	int rc, fcc_uA, delta_fcc_uA, main_fcc_ua = 0;
 
 	if (!is_psy_voter_available(chip))
 		goto out;
 
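+	/*
+	 * Fetch the main charger's FCC share so the taper thresholds
+	 * below apply to the charge pumps' share only.
+	 */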
+	if (!chip->fcc_main_votable)
+		chip->fcc_main_votable = find_votable("FCC_MAIN");
+
+	if (chip->fcc_main_votable)
+		main_fcc_ua = get_effective_result(chip->fcc_main_votable);
+
+	if (main_fcc_ua < 0)
+		main_fcc_ua = 0;
+
 	chip->taper_entry_fv = get_effective_result(chip->fv_votable);
 	while (true) {
 		rc = power_supply_get_property(chip->batt_psy,
@@ -1183,14 +1264,15 @@ static void smb1390_taper_work(struct work_struct *work)
 			smb1390_dbg(chip, PR_INFO, "taper work reducing FCC to %duA\n",
 				fcc_uA);
 			vote(chip->fcc_votable, CP_VOTER, true, fcc_uA);
-			rc = smb1390_validate_slave_chg_taper(chip, fcc_uA);
+			rc = smb1390_validate_slave_chg_taper(chip, (fcc_uA -
+							      main_fcc_ua));
 			if (rc < 0) {
 				pr_err("Couldn't Disable slave in Taper, rc=%d\n",
 				       rc);
 				goto out;
 			}
 
-			if (fcc_uA < (chip->min_ilim_ua * 2)) {
+			if ((fcc_uA - main_fcc_ua) < (chip->min_ilim_ua * 2)) {
 				vote(chip->disable_votable, TAPER_END_VOTER,
 								true, 0);
 				/*
@@ -1231,6 +1313,7 @@ static enum power_supply_property smb1390_charge_pump_props[] = {
 	POWER_SUPPLY_PROP_PARALLEL_OUTPUT_MODE,
 	POWER_SUPPLY_PROP_MIN_ICL,
 	POWER_SUPPLY_PROP_MODEL_NAME,
+	POWER_SUPPLY_PROP_PARALLEL_MODE,
 };
 
 static int smb1390_get_prop(struct power_supply *psy,
@@ -1330,6 +1413,9 @@ static int smb1390_get_prop(struct power_supply *psy,
 		val->strval = (chip->pmic_rev_id->rev4 > 2) ? "SMB1390_V3" :
 								"SMB1390_V2";
 		break;
+	case POWER_SUPPLY_PROP_PARALLEL_MODE:
+		val->intval = chip->pl_input_mode;
+		break;
 	default:
 		smb1390_dbg(chip, PR_MISC, "charge pump power supply get prop %d not supported\n",
 			prop);
@@ -1360,7 +1446,7 @@ static int smb1390_set_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_CP_ILIM:
 		if (chip->ilim_votable)
 			vote_override(chip->ilim_votable, CC_MODE_VOTER,
-							true, val->intval);
+					(val->intval > 0), val->intval);
 		break;
 	default:
 		smb1390_dbg(chip, PR_MISC, "charge pump power supply set prop %d not supported\n",
@@ -1456,7 +1542,13 @@ static int smb1390_parse_dt(struct smb1390 *chip)
 	of_property_read_u32(chip->dev->of_node, "qcom,parallel-output-mode",
 			&chip->pl_output_mode);
 
-	chip->cp_slave_thr_taper_ua = chip->min_ilim_ua * 3;
+	/* Default parallel input configuration is USBMID connection */
+	chip->pl_input_mode = POWER_SUPPLY_PL_USBMID_USBMID;
+	of_property_read_u32(chip->dev->of_node, "qcom,parallel-input-mode",
+			&chip->pl_input_mode);
+
+	chip->cp_slave_thr_taper_ua = smb1390_is_adapter_cc_mode(chip) ?
+			(3 * chip->min_ilim_ua) : (4 * chip->min_ilim_ua);
 	of_property_read_u32(chip->dev->of_node, "qcom,cp-slave-thr-taper-ua",
 			      &chip->cp_slave_thr_taper_ua);
 
@@ -1493,6 +1585,8 @@ static int smb1390_create_votables(struct smb1390 *chip)
 	if (IS_ERR(chip->slave_disable_votable))
 		return PTR_ERR(chip->slave_disable_votable);
 
+	/* Keep slave SMB disabled */
+	vote(chip->slave_disable_votable, SRC_VOTER, true, 0);
 	/*
 	 * charge pump is initially disabled; this indirectly votes to allow
 	 * traditional parallel charging if present
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index 2cf5cdb..db4f93b 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -16,7 +16,6 @@
 #include <linux/ktime.h>
 #include "smb5-lib.h"
 #include "smb5-reg.h"
-#include "battery.h"
 #include "schgm-flash.h"
 #include "step-chg-jeita.h"
 #include "storm-watch.h"
@@ -756,7 +755,7 @@ static int smblib_usb_pd_adapter_allowance_override(struct smb_charger *chg,
 {
 	int rc = 0;
 
-	if (chg->smb_version == PMI632_SUBTYPE)
+	if (chg->chg_param.smb_version == PMI632_SUBTYPE)
 		return 0;
 
 	rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_OVERRIDE_REG,
@@ -803,7 +802,7 @@ static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg,
 	int rc, aicl_threshold;
 	u8 vbus_allowance;
 
-	if (chg->smb_version == PMI632_SUBTYPE)
+	if (chg->chg_param.smb_version == PMI632_SUBTYPE)
 		return 0;
 
 	if (chg->pd_active == POWER_SUPPLY_PD_PPS_ACTIVE) {
@@ -832,10 +831,10 @@ static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg,
 	if (vbus_allowance != CONTINUOUS)
 		return 0;
 
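+	/* In CC mode, clamp the threshold to AICL_THRESHOLD_MV_IN_CC at most */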
+	aicl_threshold = min_allowed_uv / 1000 - CONT_AICL_HEADROOM_MV;
 	if (chg->adapter_cc_mode)
-		aicl_threshold = AICL_THRESHOLD_MV_IN_CC;
-	else
-		aicl_threshold = min_allowed_uv / 1000 - CONT_AICL_HEADROOM_MV;
+		aicl_threshold = min(aicl_threshold, AICL_THRESHOLD_MV_IN_CC);
+
 	rc = smblib_set_charge_param(chg, &chg->param.aicl_cont_threshold,
 							aicl_threshold);
 	if (rc < 0) {
@@ -873,6 +872,37 @@ int smblib_set_aicl_cont_threshold(struct smb_chg_param *param,
 /********************
  * HELPER FUNCTIONS *
  ********************/
+static bool is_cp_available(struct smb_charger *chg)
+{
+	if (!chg->cp_psy)
+		chg->cp_psy = power_supply_get_by_name("charge_pump_master");
+
+	return !!chg->cp_psy;
+}
+
+#define CP_TO_MAIN_ICL_OFFSET_PC		10
+int smblib_get_qc3_main_icl_offset(struct smb_charger *chg, int *offset_ua)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if ((chg->real_charger_type != POWER_SUPPLY_TYPE_USB_HVDCP_3)
+		|| chg->hvdcp3_standalone_config || !is_cp_available(chg)) {
+		*offset_ua = 0;
+		return 0;
+	}
+
+	rc = power_supply_get_property(chg->cp_psy, POWER_SUPPLY_PROP_CP_ILIM,
+					&pval);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get CP ILIM rc=%d\n", rc);
+		return rc;
+	}
+
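+	/* Offset = CP_TO_MAIN_ICL_OFFSET_PC percent of twice the CP ILIM */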
+	*offset_ua = (pval.intval * CP_TO_MAIN_ICL_OFFSET_PC * 2) / 100;
+
+	return 0;
+}
 
 int smblib_get_prop_from_bms(struct smb_charger *chg,
 				enum power_supply_property psp,
@@ -1052,6 +1082,11 @@ static int smblib_notifier_call(struct notifier_block *nb,
 		schedule_work(&chg->pl_update_work);
 	}
 
+	if (!strcmp(psy->desc->name, "charge_pump_master")) {
+		pm_stay_awake(chg->dev);
+		schedule_work(&chg->cp_status_change_work);
+	}
+
 	return NOTIFY_OK;
 }
 
@@ -1329,17 +1364,6 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
 	/* suspend if 25mA or less is requested */
 	bool suspend = (icl_ua <= USBIN_25MA);
 
-	if (chg->connector_type == POWER_SUPPLY_CONNECTOR_TYPEC) {
-		rc = smblib_masked_write(chg, USB_CMD_PULLDOWN_REG,
-				EN_PULLDOWN_USB_IN_BIT,
-				suspend ? 0 : EN_PULLDOWN_USB_IN_BIT);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't write %s to EN_PULLDOWN_USB_IN_BIT rc=%d\n",
-				suspend ? "disable" : "enable", rc);
-			goto out;
-		}
-	}
-
 	if (suspend)
 		return smblib_set_usb_suspend(chg, true);
 
@@ -1428,6 +1452,11 @@ int smblib_get_irq_status(struct smb_charger *chg,
 	int rc;
 	u8 reg;
 
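+	/* With the SKIP_MISC_PBS_IRQ workaround set, report no IRQ status */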
+	if (chg->wa_flags & SKIP_MISC_PBS_IRQ_WA) {
+		val->intval = 0;
+		return 0;
+	}
+
 	mutex_lock(&chg->irq_status_lock);
 	/* Report and clear cached status */
 	val->intval = chg->irq_status;
@@ -1485,8 +1514,8 @@ static int smblib_set_moisture_protection(struct smb_charger *chg,
 
 		/* Set 1% duty cycle on ID detection */
 		rc = smblib_masked_write(chg,
-				((chg->smb_version == PMI632_SUBTYPE) ?
-				PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
+				((chg->chg_param.smb_version == PMI632_SUBTYPE)
+				? PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
 				TYPEC_U_USB_WATER_PROTECTION_CFG_REG),
 				EN_MICRO_USB_WATER_PROTECTION_BIT |
 				MICRO_USB_DETECTION_ON_TIME_CFG_MASK |
@@ -1518,8 +1547,9 @@ static int smblib_set_moisture_protection(struct smb_charger *chg,
 		}
 
 		/* Disable periodic monitoring of CC_ID pin */
-		rc = smblib_write(chg, ((chg->smb_version == PMI632_SUBTYPE) ?
-				PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
+		rc = smblib_write(chg,
+				((chg->chg_param.smb_version == PMI632_SUBTYPE)
+				? PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
 				TYPEC_U_USB_WATER_PROTECTION_CFG_REG), 0);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't disable 1 percent CC_ID duty cycle rc=%d\n",
@@ -1567,7 +1597,7 @@ static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
 {
 	struct smb_charger *chg = data;
 
-	if (chg->smb_version == PMI632_SUBTYPE)
+	if (chg->chg_param.smb_version == PMI632_SUBTYPE)
 		return 0;
 
 	/* resume input if suspend is invalid */
@@ -1880,6 +1910,16 @@ int smblib_get_prop_batt_status(struct smb_charger *chg,
 	u8 stat;
 	int rc, suspend = 0;
 
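+	/* Report UNKNOWN status when a debug battery is in use */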
+	rc = smblib_get_prop_from_bms(chg,
+			POWER_SUPPLY_PROP_DEBUG_BATTERY, &pval);
+	if (rc < 0) {
+		pr_err_ratelimited("Couldn't get debug battery prop rc=%d\n",
+				rc);
+	} else if (pval.intval == 1) {
+		val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+		return 0;
+	}
+
 	if (chg->dbc_usbov) {
 		rc = smblib_get_prop_usb_present(chg, &pval);
 		if (rc < 0) {
@@ -1971,7 +2011,7 @@ int smblib_get_prop_batt_status(struct smb_charger *chg,
 	 * If charge termination WA is active and has suspended charging, then
 	 * continue reporting charging status as FULL.
 	 */
-	if (is_client_vote_enabled(chg->usb_icl_votable,
+	if (is_client_vote_enabled_locked(chg->usb_icl_votable,
 						CHG_TERMINATION_VOTER)) {
 		val->intval = POWER_SUPPLY_STATUS_FULL;
 		return 0;
@@ -2157,7 +2197,7 @@ int smblib_get_prop_batt_iterm(struct smb_charger *chg,
 	temp = buf[1] | (buf[0] << 8);
 	temp = sign_extend32(temp, 15);
 
-	if (chg->smb_version == PMI632_SUBTYPE)
+	if (chg->chg_param.smb_version == PMI632_SUBTYPE)
 		temp = DIV_ROUND_CLOSEST(temp * ITERM_LIMITS_PMI632_MA,
 					ADC_CHG_ITERM_MASK);
 	else
@@ -2676,10 +2716,7 @@ static int smblib_update_thermal_readings(struct smb_charger *chg)
 	}
 
 	if (chg->sec_chg_selected == POWER_SUPPLY_CHARGER_SEC_CP) {
-		if (!chg->cp_psy)
-			chg->cp_psy =
-				power_supply_get_by_name("charge_pump_master");
-		if (chg->cp_psy) {
+		if (is_cp_available(chg)) {
 			rc = power_supply_get_property(chg->cp_psy,
 				POWER_SUPPLY_PROP_CP_DIE_TEMP, &pval);
 			if (rc < 0) {
@@ -2896,7 +2933,7 @@ int smblib_get_prop_dc_present(struct smb_charger *chg,
 	int rc;
 	u8 stat;
 
-	if (chg->smb_version == PMI632_SUBTYPE) {
+	if (chg->chg_param.smb_version == PMI632_SUBTYPE) {
 		val->intval = 0;
 		return 0;
 	}
@@ -2917,7 +2954,7 @@ int smblib_get_prop_dc_online(struct smb_charger *chg,
 	int rc = 0;
 	u8 stat;
 
-	if (chg->smb_version == PMI632_SUBTYPE) {
+	if (chg->chg_param.smb_version == PMI632_SUBTYPE) {
 		val->intval = 0;
 		return 0;
 	}
@@ -2957,7 +2994,23 @@ int smblib_get_prop_dc_current_max(struct smb_charger *chg,
 int smblib_get_prop_dc_voltage_max(struct smb_charger *chg,
 				    union power_supply_propval *val)
 {
+	int rc;
 	val->intval = MICRO_12V;
+
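+	/* Prefer the wireless PSY's reported VOLTAGE_MAX when available */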
+	if (!chg->wls_psy)
+		chg->wls_psy = power_supply_get_by_name("wireless");
+
+	if (chg->wls_psy) {
+		rc = power_supply_get_property(chg->wls_psy,
+				POWER_SUPPLY_PROP_VOLTAGE_MAX,
+				val);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't get VOLTAGE_MAX, rc=%d\n",
+					rc);
+			return rc;
+		}
+	}
+
 	return 0;
 }
 
@@ -2991,6 +3044,7 @@ int smblib_get_prop_dc_voltage_now(struct smb_charger *chg,
 int smblib_set_prop_dc_current_max(struct smb_charger *chg,
 				    const union power_supply_propval *val)
 {
+	chg->dcin_icl_user_set = true;
 	return smblib_set_charge_param(chg, &chg->param.dc_icl, val->intval);
 }
 
@@ -3017,10 +3071,11 @@ int smblib_set_prop_voltage_wls_output(struct smb_charger *chg,
 
 	/*
 	 * When WLS VOUT goes down, the power-constrained adaptor may be able
-	 * to supply more current, so allow it to do so.
+	 * to supply more current, so allow it to do so - unless userspace has
+	 * already changed the DCIN ICL value due to thermal considerations.
 	 */
-	if ((val->intval > 0) && (val->intval < chg->last_wls_vout)) {
-		/* Rerun AICL once after 10 s */
+	if (!chg->dcin_icl_user_set && (val->intval > 0) &&
+			(val->intval < chg->last_wls_vout)) {
 		alarm_start_relative(&chg->dcin_aicl_alarm,
 				ms_to_ktime(DCIN_AICL_RERUN_DELAY_MS));
 	}
@@ -3112,7 +3167,7 @@ int smblib_get_prop_usb_online(struct smb_charger *chg,
 		return rc;
 	}
 
-	if (is_client_vote_enabled(chg->usb_icl_votable,
+	if (is_client_vote_enabled_locked(chg->usb_icl_votable,
 					CHG_TERMINATION_VOTER)) {
 		rc = smblib_get_prop_usb_present(chg, val);
 		return rc;
@@ -3171,7 +3226,7 @@ int smblib_get_prop_usb_voltage_max_design(struct smb_charger *chg,
 		/* else, fallthrough */
 	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
 	case POWER_SUPPLY_TYPE_USB_PD:
-		if (chg->smb_version == PMI632_SUBTYPE)
+		if (chg->chg_param.smb_version == PMI632_SUBTYPE)
 			val->intval = MICRO_9V;
 		else
 			val->intval = MICRO_12V;
@@ -3199,7 +3254,7 @@ int smblib_get_prop_usb_voltage_max(struct smb_charger *chg,
 		}
 		/* else, fallthrough */
 	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
-		if (chg->smb_version == PMI632_SUBTYPE)
+		if (chg->chg_param.smb_version == PMI632_SUBTYPE)
 			val->intval = MICRO_9V;
 		else
 			val->intval = MICRO_12V;
@@ -3284,12 +3339,51 @@ int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
 				    union power_supply_propval *val)
 {
 	union power_supply_propval pval = {0, };
-	int rc;
+	int rc, ret = 0;
+	u8 reg;
+
+	mutex_lock(&chg->adc_lock);
+
+	if (chg->wa_flags & USBIN_ADC_WA) {
+		/* Store ADC channel config in order to restore later */
+		rc = smblib_read(chg, BATIF_ADC_CHANNEL_EN_REG, &reg);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't read ADC config rc=%d\n", rc);
+			ret = rc;
+			goto unlock;
+		}
+
+		/* Disable all ADC channels except IBAT channel */
+		rc = smblib_write(chg, BATIF_ADC_CHANNEL_EN_REG,
+						IBATT_CHANNEL_EN_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't disable ADC channels rc=%d\n",
+						rc);
+			ret = rc;
+			goto unlock;
+		}
+	}
 
 	rc = smblib_get_prop_usb_present(chg, &pval);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't get usb presence status rc=%d\n", rc);
-		return -ENODATA;
+		ret = -ENODATA;
+		goto restore_adc_config;
+	}
+
+	/*
+	 * Skip reading voltage only if USB is not present and we are not in
+	 * OTG mode.
+	 */
+	if (!pval.intval) {
+		rc = smblib_read(chg, DCDC_CMD_OTG_REG, &reg);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't read CMD_OTG rc=%d", rc);
+			goto restore_adc_config;
+		}
+
+		if (!(reg & OTG_EN_BIT))
+			goto restore_adc_config;
 	}
 
 	/*
@@ -3297,10 +3391,28 @@ int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
 	 * to occur randomly in the USBIN channel, particularly at high
 	 * voltages.
 	 */
-	if (chg->smb_version == PM8150B_SUBTYPE && pval.intval)
-		return smblib_read_mid_voltage_chan(chg, val);
+	if (chg->chg_param.smb_version == PM8150B_SUBTYPE)
+		rc = smblib_read_mid_voltage_chan(chg, val);
 	else
-		return smblib_read_usbin_voltage_chan(chg, val);
+		rc = smblib_read_usbin_voltage_chan(chg, val);
+	if (rc < 0) {
+		smblib_err(chg, "Failed to read USBIN over vadc, rc=%d\n", rc);
+		ret = rc;
+	}
+
+restore_adc_config:
+	 /* Restore ADC channel config */
+	if (chg->wa_flags & USBIN_ADC_WA) {
+		rc = smblib_write(chg, BATIF_ADC_CHANNEL_EN_REG, reg);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't write ADC config rc=%d\n",
+						rc);
+	}
+
+unlock:
+	mutex_unlock(&chg->adc_lock);
+
+	return ret;
 }
 
 int smblib_get_prop_vph_voltage_now(struct smb_charger *chg,
@@ -3584,6 +3696,7 @@ int smblib_get_prop_typec_power_role(struct smb_charger *chg,
 		return -EINVAL;
 	}
 
+	chg->power_role = val->intval;
 	return rc;
 }
 
@@ -3649,7 +3762,7 @@ int smblib_get_prop_usb_current_now(struct smb_charger *chg,
 		 * For PMI632, scaling factor = reciprocal of
 		 * 0.4V/A in Buck mode, 0.8V/A in Boost mode.
 		 */
-		switch (chg->smb_version) {
+		switch (chg->chg_param.smb_version) {
 		case PMI632_SUBTYPE:
 			buck_scale = 40;
 			boost_scale = 80;
@@ -3970,7 +4083,7 @@ int smblib_get_prop_connector_health(struct smb_charger *chg)
 	 * In PM8150B, SKIN channel measures Wireless charger receiver
 	 * temp, used to regulate DC ICL.
 	 */
-	if (chg->smb_version == PM8150B_SUBTYPE && dc_present)
+	if (chg->chg_param.smb_version == PM8150B_SUBTYPE && dc_present)
 		return smblib_get_skin_temp_status(chg);
 
 	return POWER_SUPPLY_HEALTH_COOL;
@@ -4172,15 +4285,88 @@ int smblib_set_prop_usb_voltage_max_limit(struct smb_charger *chg,
 	return 0;
 }
 
+void smblib_typec_irq_config(struct smb_charger *chg, bool en)
+{
+	if (en == chg->typec_irq_en)
+		return;
+
+	if (en) {
+		enable_irq(
+			chg->irq_info[TYPEC_ATTACH_DETACH_IRQ].irq);
+		enable_irq(
+			chg->irq_info[TYPEC_CC_STATE_CHANGE_IRQ].irq);
+		enable_irq(
+			chg->irq_info[TYPEC_OR_RID_DETECTION_CHANGE_IRQ].irq);
+	} else {
+		disable_irq_nosync(
+			chg->irq_info[TYPEC_ATTACH_DETACH_IRQ].irq);
+		disable_irq_nosync(
+			chg->irq_info[TYPEC_CC_STATE_CHANGE_IRQ].irq);
+		disable_irq_nosync(
+			chg->irq_info[TYPEC_OR_RID_DETECTION_CHANGE_IRQ].irq);
+	}
+
+	chg->typec_irq_en = en;
+}
+
+#define PR_LOCK_TIMEOUT_MS	1000
 int smblib_set_prop_typec_power_role(struct smb_charger *chg,
 				     const union power_supply_propval *val)
 {
 	int rc = 0;
 	u8 power_role;
+	enum power_supply_typec_mode typec_mode;
+	bool snk_attached = false, src_attached = false, is_pr_lock = false;
 
 	if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
 		return 0;
 
+	smblib_dbg(chg, PR_MISC, "power role change: %d --> %d!",
+			chg->power_role, val->intval);
+
+	if (chg->power_role == val->intval) {
+		smblib_dbg(chg, PR_MISC, "power role already in %d, ignore!",
+				chg->power_role);
+		return 0;
+	}
+
+	typec_mode = smblib_get_prop_typec_mode(chg);
+	if (typec_mode >= POWER_SUPPLY_TYPEC_SINK &&
+			typec_mode <= POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER)
+		snk_attached = true;
+	else if (typec_mode >= POWER_SUPPLY_TYPEC_SOURCE_DEFAULT &&
+			typec_mode <= POWER_SUPPLY_TYPEC_SOURCE_HIGH)
+		src_attached = true;
+
+	/*
+	 * If the current power role is DRP and type-c is already in the
+	 * mode (source or sink) that's being requested, this is a power
+	 * role locking request from the USBPD driver. Disable type-c
+	 * related interrupts while the power role is locked to avoid
+	 * redundant notifications.
+	 */
+	if ((chg->power_role == POWER_SUPPLY_TYPEC_PR_DUAL) &&
+		((src_attached && val->intval == POWER_SUPPLY_TYPEC_PR_SINK) ||
+		(snk_attached && val->intval == POWER_SUPPLY_TYPEC_PR_SOURCE)))
+		is_pr_lock = true;
+
+	smblib_dbg(chg, PR_MISC, "snk_attached = %d, src_attached = %d, is_pr_lock = %d\n",
+			snk_attached, src_attached, is_pr_lock);
+	cancel_delayed_work(&chg->pr_lock_clear_work);
+	spin_lock(&chg->typec_pr_lock);
+	if (!chg->pr_lock_in_progress && is_pr_lock) {
+		smblib_dbg(chg, PR_MISC, "disable type-c interrupts for power role locking\n");
+		smblib_typec_irq_config(chg, false);
+		schedule_delayed_work(&chg->pr_lock_clear_work,
+					msecs_to_jiffies(PR_LOCK_TIMEOUT_MS));
+	} else if (chg->pr_lock_in_progress && !is_pr_lock) {
+		smblib_dbg(chg, PR_MISC, "restore type-c interrupts after exit power role locking\n");
+		smblib_typec_irq_config(chg, true);
+	}
+
+	chg->pr_lock_in_progress = is_pr_lock;
+	spin_unlock(&chg->typec_pr_lock);
+
 	switch (val->intval) {
 	case POWER_SUPPLY_TYPEC_PR_NONE:
 		power_role = TYPEC_DISABLE_CMD_BIT;
@@ -4208,6 +4394,7 @@ int smblib_set_prop_typec_power_role(struct smb_charger *chg,
 		return rc;
 	}
 
+	chg->power_role = val->intval;
 	return rc;
 }
 
@@ -4865,7 +5052,7 @@ irqreturn_t usbin_uv_irq_handler(int irq, void *data)
 
 unsuspend_input:
 		/* Force torch in boost mode to ensure it works with low ICL */
-		if (chg->smb_version == PMI632_SUBTYPE)
+		if (chg->chg_param.smb_version == PMI632_SUBTYPE)
 			schgm_flash_torch_priority(chg, TORCH_BOOST_MODE);
 
 		if (chg->aicl_max_reached) {
@@ -5129,7 +5316,7 @@ void smblib_usb_plugin_locked(struct smb_charger *chg)
 					chg->aicl_cont_threshold_mv);
 			chg->aicl_max_reached = false;
 
-			if (chg->smb_version == PMI632_SUBTYPE)
+			if (chg->chg_param.smb_version == PMI632_SUBTYPE)
 				schgm_flash_torch_priority(chg,
 						TORCH_BUCK_MODE);
 
@@ -5532,8 +5719,10 @@ static void typec_src_insertion(struct smb_charger *chg)
 	int rc = 0;
 	u8 stat;
 
-	if (chg->pr_swap_in_progress)
+	if (chg->pr_swap_in_progress) {
+		vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, false, 0);
 		return;
+	}
 
 	rc = smblib_read(chg, LEGACY_CABLE_STATUS_REG, &stat);
 	if (rc < 0) {
@@ -5848,6 +6037,7 @@ irqreturn_t typec_attach_detach_irq_handler(int irq, void *data)
 	struct smb_irq_data *irq_data = data;
 	struct smb_charger *chg = irq_data->parent_data;
 	u8 stat;
+	bool attached = false;
 	int rc;
 
 	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
@@ -5859,8 +6049,9 @@ irqreturn_t typec_attach_detach_irq_handler(int irq, void *data)
 		return IRQ_HANDLED;
 	}
 
-	if (stat & TYPEC_ATTACH_DETACH_STATE_BIT) {
+	attached = !!(stat & TYPEC_ATTACH_DETACH_STATE_BIT);
 
+	if (attached) {
 		smblib_lpd_clear_ra_open_work(chg);
 
 		rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat);
@@ -5911,6 +6102,13 @@ irqreturn_t typec_attach_detach_irq_handler(int irq, void *data)
 					msecs_to_jiffies(1000));
 	}
 
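+	/* Enable the USB_IN pulldown only while nothing is attached */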
+	rc = smblib_masked_write(chg, USB_CMD_PULLDOWN_REG,
+			EN_PULLDOWN_USB_IN_BIT,
+			attached ?  0 : EN_PULLDOWN_USB_IN_BIT);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't configure pulldown on USB_IN rc=%d\n",
+				rc);
+
 	power_supply_changed(chg->usb_psy);
 
 	return IRQ_HANDLED;
@@ -5920,6 +6118,7 @@ static void dcin_aicl(struct smb_charger *chg)
 {
 	int rc, icl, icl_save;
 	int input_present;
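+	/* Assume AICL will complete; cleared on any failure or DC removal */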
+	bool aicl_done = true;
 
 	/*
 	 * Hold awake votable to prevent pm_relax being called prior to
@@ -5932,11 +6131,16 @@ static void dcin_aicl(struct smb_charger *chg)
 
 	rc = smblib_get_charge_param(chg, &chg->param.dc_icl, &icl);
 	if (rc < 0)
-		goto unlock;
+		goto err;
 
 	if (icl == chg->wls_icl_ua) {
 		/* Upper limit reached; do nothing */
 		smblib_dbg(chg, PR_WLS, "hit max ICL: stop\n");
+
+		rc = smblib_is_input_present(chg, &input_present);
+		if (rc < 0 || !(input_present & INPUT_PRESENT_DC))
+			aicl_done = false;
+
 		goto unlock;
 	}
 
@@ -5945,7 +6149,7 @@ static void dcin_aicl(struct smb_charger *chg)
 
 	rc = smblib_set_charge_param(chg, &chg->param.dc_icl, icl);
 	if (rc < 0)
-		goto unlock;
+		goto err;
 
 	mutex_unlock(&chg->dcin_aicl_lock);
 
@@ -5953,8 +6157,10 @@ static void dcin_aicl(struct smb_charger *chg)
 
 	/* Check to see if DC is still present before and after sleep */
 	rc = smblib_is_input_present(chg, &input_present);
-	if (!(input_present & INPUT_PRESENT_DC) || rc < 0)
+	if (rc < 0 || !(input_present & INPUT_PRESENT_DC)) {
+		aicl_done = false;
 		goto unvote;
+	}
 
 	/*
 	 * Wait awhile to check for any DCIN_UVs (the UV handler reduces the
@@ -5964,14 +6170,16 @@ static void dcin_aicl(struct smb_charger *chg)
 	msleep(500);
 
 	rc = smblib_is_input_present(chg, &input_present);
-	if (!(input_present & INPUT_PRESENT_DC) || rc < 0)
+	if (rc < 0 || !(input_present & INPUT_PRESENT_DC)) {
+		aicl_done = false;
 		goto unvote;
+	}
 
 	mutex_lock(&chg->dcin_aicl_lock);
 
 	rc = smblib_get_charge_param(chg, &chg->param.dc_icl, &icl);
 	if (rc < 0)
-		goto unlock;
+		goto err;
 
 	if (icl < icl_save) {
 		smblib_dbg(chg, PR_WLS, "done: icl: %d mA\n", (icl / 1000));
@@ -5981,10 +6189,14 @@ static void dcin_aicl(struct smb_charger *chg)
 	mutex_unlock(&chg->dcin_aicl_lock);
 
 	goto increment;
+
+err:
+	aicl_done = false;
 unlock:
 	mutex_unlock(&chg->dcin_aicl_lock);
 unvote:
 	vote(chg->awake_votable, DCIN_AICL_VOTER, false, 0);
+	chg->dcin_aicl_done = aicl_done;
 }
 
 static void dcin_aicl_work(struct work_struct *work)
@@ -6028,7 +6240,7 @@ static void dcin_icl_decrement(struct smb_charger *chg)
 	/* Reduce ICL by 100 mA if 3 UVs happen in a row */
 	if (ktime_us_delta(now, chg->dcin_uv_last_time) > (200 * 1000)) {
 		chg->dcin_uv_count = 0;
-	} else if (chg->dcin_uv_count == 3) {
+	} else if (chg->dcin_uv_count >= 3) {
 		icl -= DCIN_ICL_STEP_UA;
 
 		smblib_dbg(chg, PR_WLS, "icl: %d mA\n", (icl / 1000));
@@ -6067,12 +6279,8 @@ static bool is_cp_topo_vbatt(struct smb_charger *chg)
 	bool is_vbatt;
 	union power_supply_propval pval;
 
-	if (!chg->cp_psy) {
-		chg->cp_psy = power_supply_get_by_name("charge_pump_master");
-
-		if (!chg->cp_psy)
-			return false;
-	}
+	if (!is_cp_available(chg))
+		return false;
 
 	rc = power_supply_get_property(chg->cp_psy,
 				POWER_SUPPLY_PROP_PARALLEL_OUTPUT_MODE, &pval);
@@ -6093,7 +6301,7 @@ irqreturn_t dc_plugin_irq_handler(int irq, void *data)
 	union power_supply_propval pval;
 	int input_present;
 	bool dcin_present, vbus_present;
-	int rc, wireless_vout = 0;
+	int rc, wireless_vout = 0, wls_set = 0;
 	int sec_charger;
 
 	rc = smblib_get_prop_vph_voltage_now(chg, &pval);
@@ -6128,21 +6336,30 @@ irqreturn_t dc_plugin_irq_handler(int irq, void *data)
 
 		/*
 		 * Remove USB's CP ILIM vote - inapplicable for wireless
-		 * parallel charging.
+		 * parallel charging. Also undo FCC STEPPER's 1.5 A vote.
 		 */
+		vote(chg->fcc_votable, FCC_STEPPER_VOTER, false, 0);
 		if (chg->cp_ilim_votable)
 			vote(chg->cp_ilim_votable, ICL_CHANGE_VOTER, false, 0);
 
 		if (chg->sec_cp_present) {
 			/*
 			 * If CP output topology is VBATT, limit main charger's
-			 * FCC share to 1 A and let the CPs handle the rest.
+			 * FCC share and let the CPs handle the rest.
 			 */
 			if (is_cp_topo_vbatt(chg))
 				vote(chg->fcc_main_votable,
-					WLS_PL_CHARGING_VOTER, true, 1000000);
+					WLS_PL_CHARGING_VOTER, true, 800000);
 
-			pval.intval = wireless_vout;
+			rc = smblib_get_prop_batt_status(chg, &pval);
+			if (rc < 0)
+				smblib_err(chg, "Couldn't read batt status rc=%d\n",
+						rc);
+
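+			/* Drop WLS VOUT to 5 V once the battery reports FULL */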
+			wls_set = (pval.intval == POWER_SUPPLY_STATUS_FULL) ?
+				MICRO_5V : wireless_vout;
+
+			pval.intval = wls_set;
 			rc = smblib_set_prop_voltage_wls_output(chg, &pval);
 			if (rc < 0)
 				dev_err(chg->dev, "Couldn't set dc voltage to 2*vph  rc=%d\n",
@@ -6182,10 +6399,17 @@ irqreturn_t dc_plugin_irq_handler(int irq, void *data)
 		vote(chg->dc_suspend_votable, CHG_TERMINATION_VOTER, false, 0);
 		vote(chg->fcc_main_votable, WLS_PL_CHARGING_VOTER, false, 0);
 
+		/* Force 1500mA FCC on WLS removal if fcc stepper is enabled */
+		if (chg->fcc_stepper_enable)
+			vote(chg->fcc_votable, FCC_STEPPER_VOTER,
+							true, 1500000);
 		chg->last_wls_vout = 0;
+		chg->dcin_aicl_done = false;
+		chg->dcin_icl_user_set = false;
 	}
 
-	power_supply_changed(chg->dc_psy);
+	if (chg->dc_psy)
+		power_supply_changed(chg->dc_psy);
 
 	smblib_dbg(chg, (PR_WLS | PR_INTERRUPT), "dcin_present= %d, usbin_present= %d, cp_reason = %d\n",
 			dcin_present, vbus_present, chg->cp_reason);
@@ -6462,13 +6686,19 @@ int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't read TYPE_C_CCOUT_CONTROL_REG rc=%d\n",
 				rc);
+			return rc;
 		}
 
 		/* enable DRP */
 		rc = smblib_masked_write(chg, TYPE_C_MODE_CFG_REG,
 				 TYPEC_POWER_ROLE_CMD_MASK, 0);
-		if (rc < 0)
+		if (rc < 0) {
 			smblib_err(chg, "Couldn't enable DRP rc=%d\n", rc);
+			return rc;
+		}
+		chg->power_role = POWER_SUPPLY_TYPEC_PR_DUAL;
+		smblib_dbg(chg, PR_MISC, "restore power role: %d\n",
+				chg->power_role);
 	}
 
 	return 0;
@@ -6477,6 +6707,20 @@ int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
 /***************
  * Work Queues *
  ***************/
+static void smblib_pr_lock_clear_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						pr_lock_clear_work.work);
+
+	spin_lock(&chg->typec_pr_lock);
+	if (chg->pr_lock_in_progress) {
+		smblib_dbg(chg, PR_MISC, "restore type-c interrupts\n");
+		smblib_typec_irq_config(chg, true);
+		chg->pr_lock_in_progress = false;
+	}
+	spin_unlock(&chg->typec_pr_lock);
+}
+
 static void smblib_pr_swap_detach_work(struct work_struct *work)
 {
 	struct smb_charger *chg = container_of(work, struct smb_charger,
@@ -6654,7 +6898,7 @@ static void smblib_moisture_protection_work(struct work_struct *work)
 	 * Disable 1% duty cycle on CC_ID pin and enable uUSB factory mode
 	 * detection to track any change on RID, as interrupts are disable.
 	 */
-	rc = smblib_write(chg, ((chg->smb_version == PMI632_SUBTYPE) ?
+	rc = smblib_write(chg, ((chg->chg_param.smb_version == PMI632_SUBTYPE) ?
 			PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
 			TYPEC_U_USB_WATER_PROTECTION_CFG_REG), 0);
 	if (rc < 0) {
@@ -7066,6 +7310,38 @@ static void smblib_lpd_detach_work(struct work_struct *work)
 		chg->lpd_stage = LPD_STAGE_NONE;
 }
 
+static void smblib_cp_status_change_work(struct work_struct *work)
+{
+	int rc;
+	union power_supply_propval pval;
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+			cp_status_change_work);
+
+	if (!chg->cp_psy)
+		chg->cp_psy = power_supply_get_by_name("charge_pump_master");
+
+	if (!chg->cp_psy)
+		goto relax;
+
+	if (chg->cp_topo == -EINVAL) {
+		rc = power_supply_get_property(chg->cp_psy,
+				POWER_SUPPLY_PROP_PARALLEL_OUTPUT_MODE, &pval);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't read cp topo rc=%d\n", rc);
+			goto relax;
+		}
+
+		chg->cp_topo = pval.intval;
+
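+		/* VBATT-topology CP charging over wireless: cap main FCC at 800 mA */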
+		if (chg->cp_topo == POWER_SUPPLY_PL_OUTPUT_VBAT &&
+				chg->cp_reason == POWER_SUPPLY_CP_WIRELESS)
+			vote(chg->fcc_main_votable, WLS_PL_CHARGING_VOTER, true,
+					800000);
+	}
+relax:
+	pm_relax(chg->dev);
+}
+
 static int smblib_create_votables(struct smb_charger *chg)
 {
 	int rc = 0;
@@ -7237,10 +7513,12 @@ int smblib_init(struct smb_charger *chg)
 	mutex_init(&chg->smb_lock);
 	mutex_init(&chg->irq_status_lock);
 	mutex_init(&chg->dcin_aicl_lock);
+	spin_lock_init(&chg->typec_pr_lock);
 	INIT_WORK(&chg->bms_update_work, bms_update_work);
 	INIT_WORK(&chg->pl_update_work, pl_update_work);
 	INIT_WORK(&chg->jeita_update_work, jeita_update_work);
 	INIT_WORK(&chg->dcin_aicl_work, dcin_aicl_work);
+	INIT_WORK(&chg->cp_status_change_work, smblib_cp_status_change_work);
 	INIT_DELAYED_WORK(&chg->clear_hdc_work, clear_hdc_work);
 	INIT_DELAYED_WORK(&chg->icl_change_work, smblib_icl_change_work);
 	INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work);
@@ -7253,6 +7531,8 @@ int smblib_init(struct smb_charger *chg)
 	INIT_DELAYED_WORK(&chg->usbov_dbc_work, smblib_usbov_dbc_work);
 	INIT_DELAYED_WORK(&chg->pr_swap_detach_work,
 					smblib_pr_swap_detach_work);
+	INIT_DELAYED_WORK(&chg->pr_lock_clear_work,
+					smblib_pr_lock_clear_work);
 
 	if (chg->wa_flags & CHG_TERMINATION_WA) {
 		INIT_WORK(&chg->chg_termination_work,
@@ -7296,10 +7576,12 @@ int smblib_init(struct smb_charger *chg)
 	chg->sec_chg_selected = POWER_SUPPLY_CHARGER_SEC_NONE;
 	chg->cp_reason = POWER_SUPPLY_CP_NONE;
 	chg->thermal_status = TEMP_BELOW_RANGE;
+	chg->typec_irq_en = true;
+	chg->cp_topo = -EINVAL;
 
 	switch (chg->mode) {
 	case PARALLEL_MASTER:
-		rc = qcom_batt_init(chg->smb_version);
+		rc = qcom_batt_init(&chg->chg_param);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't init qcom_batt_init rc=%d\n",
 				rc);
@@ -7392,6 +7674,7 @@ int smblib_deinit(struct smb_charger *chg)
 		cancel_work_sync(&chg->jeita_update_work);
 		cancel_work_sync(&chg->pl_update_work);
 		cancel_work_sync(&chg->dcin_aicl_work);
+		cancel_work_sync(&chg->cp_status_change_work);
 		cancel_delayed_work_sync(&chg->clear_hdc_work);
 		cancel_delayed_work_sync(&chg->icl_change_work);
 		cancel_delayed_work_sync(&chg->pl_enable_work);
diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h
index 32cd1a6..1473dac 100644
--- a/drivers/power/supply/qcom/smb5-lib.h
+++ b/drivers/power/supply/qcom/smb5-lib.h
@@ -14,6 +14,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/extcon-provider.h>
 #include "storm-watch.h"
+#include "battery.h"
 
 enum print_reason {
 	PR_INTERRUPT	= BIT(0),
@@ -123,6 +124,8 @@ enum {
 	WEAK_ADAPTER_WA			= BIT(2),
 	USBIN_OV_WA			= BIT(3),
 	CHG_TERMINATION_WA		= BIT(4),
+	USBIN_ADC_WA			= BIT(5),
+	SKIP_MISC_PBS_IRQ_WA		= BIT(6),
 };
 
 enum jeita_cfg_stat {
@@ -375,7 +378,6 @@ struct smb_charger {
 	int			pd_disabled;
 	enum smb_mode		mode;
 	struct smb_chg_freq	chg_freq;
-	int			smb_version;
 	int			otg_delay_ms;
 	int			weak_chg_icl_ua;
 	bool			pd_not_supported;
@@ -385,6 +387,8 @@ struct smb_charger {
 	struct mutex		ps_change_lock;
 	struct mutex		irq_status_lock;
 	struct mutex		dcin_aicl_lock;
+	spinlock_t		typec_pr_lock;
+	struct mutex		adc_lock;
 
 	/* power supplies */
 	struct power_supply		*batt_psy;
@@ -436,6 +440,7 @@ struct smb_charger {
 	struct work_struct	moisture_protection_work;
 	struct work_struct	chg_termination_work;
 	struct work_struct	dcin_aicl_work;
+	struct work_struct	cp_status_change_work;
 	struct delayed_work	ps_change_timeout_work;
 	struct delayed_work	clear_hdc_work;
 	struct delayed_work	icl_change_work;
@@ -447,27 +452,32 @@ struct smb_charger {
 	struct delayed_work	thermal_regulation_work;
 	struct delayed_work	usbov_dbc_work;
 	struct delayed_work	pr_swap_detach_work;
+	struct delayed_work	pr_lock_clear_work;
 
 	struct alarm		lpd_recheck_timer;
 	struct alarm		moisture_protection_alarm;
 	struct alarm		chg_termination_alarm;
 	struct alarm		dcin_aicl_alarm;
 
+	struct charger_param	chg_param;
 	/* secondary charger config */
 	bool			sec_pl_present;
 	bool			sec_cp_present;
 	int			sec_chg_selected;
 	int			cp_reason;
+	int			cp_topo;
 
 	/* pd */
 	int			voltage_min_uv;
 	int			voltage_max_uv;
 	int			pd_active;
 	bool			pd_hard_reset;
+	bool			pr_lock_in_progress;
 	bool			pr_swap_in_progress;
 	bool			early_usb_attach;
 	bool			ok_to_pd;
 	bool			typec_legacy;
+	bool			typec_irq_en;
 
 	/* cached status */
 	bool			system_suspend_supported;
@@ -502,6 +512,7 @@ struct smb_charger {
 	int			hw_max_icl_ua;
 	int			auto_recharge_soc;
 	enum sink_src_mode	sink_src_mode;
+	enum power_supply_typec_power_role power_role;
 	enum jeita_cfg_stat	jeita_configured;
 	int			charger_temp_max;
 	int			smb_temp_max;
@@ -541,6 +552,9 @@ struct smb_charger {
 	int			init_thermal_ua;
 	u32			comp_clamp_level;
 	int			wls_icl_ua;
+	bool			dcin_aicl_done;
+	bool			hvdcp3_standalone_config;
+	bool			dcin_icl_user_set;
 
 	/* workaround flag */
 	u32			wa_flags;
@@ -787,6 +801,7 @@ void smblib_apsd_enable(struct smb_charger *chg, bool enable);
 int smblib_force_vbus_voltage(struct smb_charger *chg, u8 val);
 int smblib_get_irq_status(struct smb_charger *chg,
 				union power_supply_propval *val);
+int smblib_get_qc3_main_icl_offset(struct smb_charger *chg, int *offset_ua);
 
 int smblib_init(struct smb_charger *chg);
 int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb5-reg.h b/drivers/power/supply/qcom/smb5-reg.h
index 356cadd..2363f24 100644
--- a/drivers/power/supply/qcom/smb5-reg.h
+++ b/drivers/power/supply/qcom/smb5-reg.h
@@ -182,6 +182,7 @@ enum {
 #define SHIP_MODE_EN_BIT			BIT(0)
 
 #define BATIF_ADC_CHANNEL_EN_REG		(BATIF_BASE + 0x82)
+#define IBATT_CHANNEL_EN_BIT			BIT(6)
 #define CONN_THM_CHANNEL_EN_BIT			BIT(4)
 #define DIE_TEMP_CHANNEL_EN_BIT			BIT(2)
 #define MISC_THM_CHANNEL_EN_BIT			BIT(1)
diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c
index 1b0906f..5a81e53 100644
--- a/drivers/power/supply/qcom/step-chg-jeita.c
+++ b/drivers/power/supply/qcom/step-chg-jeita.c
@@ -306,7 +306,7 @@ static int get_step_chg_jeita_setting_from_profile(struct step_chg_info *chip)
 		chip->step_chg_config->param.psy_prop =
 				POWER_SUPPLY_PROP_VOLTAGE_OCV;
 		chip->step_chg_config->param.prop_name = "OCV";
-		chip->step_chg_config->param.hysteresis = 10000;
+		chip->step_chg_config->param.hysteresis = 0;
 		chip->step_chg_config->param.use_bms = true;
 	}
 
diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
index 8ba6abf..3958ee0 100644
--- a/drivers/power/supply/sbs-battery.c
+++ b/drivers/power/supply/sbs-battery.c
@@ -323,17 +323,22 @@ static int sbs_get_battery_presence_and_health(
 {
 	int ret;
 
-	if (psp == POWER_SUPPLY_PROP_PRESENT) {
-		/* Dummy command; if it succeeds, battery is present. */
-		ret = sbs_read_word_data(client, sbs_data[REG_STATUS].addr);
-		if (ret < 0)
-			val->intval = 0; /* battery disconnected */
-		else
-			val->intval = 1; /* battery present */
-	} else { /* POWER_SUPPLY_PROP_HEALTH */
+	/* Dummy command; if it succeeds, battery is present. */
+	ret = sbs_read_word_data(client, sbs_data[REG_STATUS].addr);
+
+	if (ret < 0) { /* battery not present */
+		if (psp == POWER_SUPPLY_PROP_PRESENT) {
+			val->intval = 0;
+			return 0;
+		}
+		return ret;
+	}
+
+	if (psp == POWER_SUPPLY_PROP_PRESENT)
+		val->intval = 1; /* battery present */
+	else /* POWER_SUPPLY_PROP_HEALTH */
 		/* SBS spec doesn't have a general health command. */
 		val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
-	}
 
 	return 0;
 }
@@ -629,12 +634,14 @@ static int sbs_get_property(struct power_supply *psy,
 	switch (psp) {
 	case POWER_SUPPLY_PROP_PRESENT:
 	case POWER_SUPPLY_PROP_HEALTH:
-		if (client->flags & SBS_FLAGS_TI_BQ20Z75)
+		if (chip->flags & SBS_FLAGS_TI_BQ20Z75)
 			ret = sbs_get_ti_battery_presence_and_health(client,
 								     psp, val);
 		else
 			ret = sbs_get_battery_presence_and_health(client, psp,
 								  val);
+
+		/* This can only be true if no GPIO is used */
 		if (psp == POWER_SUPPLY_PROP_PRESENT)
 			return 0;
 		break;
diff --git a/drivers/pwm/pwm-qti-lpg.c b/drivers/pwm/pwm-qti-lpg.c
index c9675c3..2a15e48 100644
--- a/drivers/pwm/pwm-qti-lpg.c
+++ b/drivers/pwm/pwm-qti-lpg.c
@@ -12,10 +12,12 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/nvmem-consumer.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
 #include <linux/pwm.h>
+#include <linux/qpnp/qpnp-pbs.h>
 #include <linux/regmap.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
@@ -99,6 +101,34 @@
 #define LPG_LUT_VALUE_MSB_MASK		BIT(0)
 #define LPG_LUT_COUNT_MAX		47
 
+/* LPG config settings in SDAM */
+#define SDAM_REG_PBS_SEQ_EN			0x42
+#define PBS_SW_TRG_BIT				BIT(0)
+
+#define SDAM_REG_RAMP_STEP_DURATION		0x47
+
+#define SDAM_LUT_EN_OFFSET			0x0
+#define SDAM_PATTERN_CONFIG_OFFSET		0x1
+#define SDAM_END_INDEX_OFFSET			0x3
+#define SDAM_START_INDEX_OFFSET			0x4
+#define SDAM_PBS_SCRATCH_LUT_COUNTER_OFFSET	0x6
+
+/* SDAM_REG_LUT_EN */
+#define SDAM_LUT_EN_BIT				BIT(0)
+
+/* SDAM_REG_PATTERN_CONFIG */
+#define SDAM_PATTERN_LOOP_ENABLE		BIT(3)
+#define SDAM_PATTERN_RAMP_TOGGLE		BIT(2)
+#define SDAM_PATTERN_EN_PAUSE_END		BIT(1)
+#define SDAM_PATTERN_EN_PAUSE_START		BIT(0)
+
+/* SDAM_REG_PAUSE_MULTIPLIER */
+#define SDAM_PAUSE_START_SHIFT			4
+#define SDAM_PAUSE_START_MASK			GENMASK(7, 4)
+#define SDAM_PAUSE_END_MASK			GENMASK(3, 0)
+
+#define SDAM_LUT_COUNT_MAX			64
+
 enum lpg_src {
 	LUT_PATTERN = 0,
 	PWM_VALUE,
@@ -145,6 +175,7 @@ struct qpnp_lpg_channel {
 	u32				lpg_idx;
 	u32				reg_base;
 	u32				max_pattern_length;
+	u32				lpg_sdam_base;
 	u8				src_sel;
 	u8				subtype;
 	bool				lut_written;
@@ -160,7 +191,11 @@ struct qpnp_lpg_chip {
 	struct qpnp_lpg_lut	*lut;
 	struct mutex		bus_lock;
 	u32			*lpg_group;
+	struct nvmem_device	*sdam_nvmem;
+	struct device_node	*pbs_dev_node;
 	u32			num_lpgs;
+	unsigned long		pbs_en_bitmap;
+	bool			use_sdam;
 };
 
 static int qpnp_lpg_read(struct qpnp_lpg_channel *lpg, u16 addr, u8 *val)
@@ -187,7 +222,7 @@ static int qpnp_lpg_write(struct qpnp_lpg_channel *lpg, u16 addr, u8 val)
 	mutex_lock(&lpg->chip->bus_lock);
 	rc = regmap_write(lpg->chip->regmap, lpg->reg_base + addr, val);
 	if (rc < 0)
-		dev_err(lpg->chip->dev, "Write addr 0x%x with value %d failed, rc=%d\n",
+		dev_err(lpg->chip->dev, "Write addr 0x%x with value 0x%x failed, rc=%d\n",
 				lpg->reg_base + addr, val, rc);
 	mutex_unlock(&lpg->chip->bus_lock);
 
@@ -240,6 +275,90 @@ static int qpnp_lut_masked_write(struct qpnp_lpg_lut *lut,
 	return rc;
 }
 
+static int qpnp_sdam_write(struct qpnp_lpg_chip *chip, u16 addr, u8 val)
+{
+	int rc;
+
+	mutex_lock(&chip->bus_lock);
+	rc = nvmem_device_write(chip->sdam_nvmem, addr, 1, &val);
+	if (rc < 0)
+		dev_err(chip->dev, "write SDAM add 0x%x failed, rc=%d\n",
+				addr, rc);
+
+	mutex_unlock(&chip->bus_lock);
+
+	return rc > 0 ? 0 : rc;
+}
+
+static int qpnp_lpg_sdam_write(struct qpnp_lpg_channel *lpg, u16 addr, u8 val)
+{
+	struct qpnp_lpg_chip *chip = lpg->chip;
+	int rc;
+
+	mutex_lock(&chip->bus_lock);
+	rc = nvmem_device_write(chip->sdam_nvmem,
+			lpg->lpg_sdam_base + addr, 1, &val);
+	if (rc < 0)
+		dev_err(chip->dev, "write SDAM add 0x%x failed, rc=%d\n",
+				lpg->lpg_sdam_base + addr, rc);
+
+	mutex_unlock(&chip->bus_lock);
+
+	return rc > 0 ? 0 : rc;
+}
+
+static int qpnp_lpg_sdam_masked_write(struct qpnp_lpg_channel *lpg,
+					u16 addr, u8 mask, u8 val)
+{
+	int rc;
+	u8 tmp;
+	struct qpnp_lpg_chip *chip = lpg->chip;
+
+	mutex_lock(&chip->bus_lock);
+
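+	/* Read-modify-write under bus_lock to keep the update atomic */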
+	rc = nvmem_device_read(chip->sdam_nvmem,
+			lpg->lpg_sdam_base + addr, 1, &tmp);
+	if (rc < 0) {
+		dev_err(chip->dev, "Read SDAM addr %d failed, rc=%d\n",
+				lpg->lpg_sdam_base + addr, rc);
+		goto unlock;
+	}
+
+	tmp = tmp & ~mask;
+	tmp |= val & mask;
+	rc = nvmem_device_write(chip->sdam_nvmem,
+			lpg->lpg_sdam_base + addr, 1, &tmp);
+	if (rc < 0)
+		dev_err(chip->dev, "write SDAM addr %d failed, rc=%d\n",
+				lpg->lpg_sdam_base + addr, rc);
+
+unlock:
+	mutex_unlock(&chip->bus_lock);
+
+	return rc > 0 ? 0 : rc;
+}
+
+static int qpnp_lut_sdam_write(struct qpnp_lpg_lut *lut,
+		u16 addr, u8 *val, size_t length)
+{
+	struct qpnp_lpg_chip *chip = lut->chip;
+	int rc;
+
+	if (addr >= SDAM_LUT_COUNT_MAX)
+		return -EINVAL;
+
+	mutex_lock(&chip->bus_lock);
+	rc = nvmem_device_write(chip->sdam_nvmem,
+			lut->reg_base + addr, length, val);
+	if (rc < 0)
+		dev_err(chip->dev, "write SDAM addr %d failed, rc=%d\n",
+				lut->reg_base + addr, rc);
+
+	mutex_unlock(&chip->bus_lock);
+
+	return rc > 0 ? 0 : rc;
+}
+
 static struct qpnp_lpg_channel *pwm_dev_to_qpnp_lpg(struct pwm_chip *pwm_chip,
 				struct pwm_device *pwm)
 {
@@ -361,14 +480,111 @@ static int qpnp_lpg_set_pwm_config(struct qpnp_lpg_channel *lpg)
 	return rc;
 }
 
-static int qpnp_lpg_set_lut_pattern(struct qpnp_lpg_channel *lpg,
+static int qpnp_lpg_set_sdam_lut_pattern(struct qpnp_lpg_channel *lpg,
 		unsigned int *pattern, unsigned int length)
 {
 	struct qpnp_lpg_lut *lut = lpg->chip->lut;
 	int i, rc = 0;
-	u16 full_duty_value, pwm_values[LPG_LUT_COUNT_MAX + 1] = {0};
+	u8 val[SDAM_LUT_COUNT_MAX + 1], addr;
+
+	if (length > lpg->max_pattern_length) {
+		dev_err(lpg->chip->dev, "new pattern length (%d) larger than predefined (%d)\n",
+				length, lpg->max_pattern_length);
+		return -EINVAL;
+	}
+
+	/* Program LUT pattern */
+	mutex_lock(&lut->lock);
+	addr = lpg->ramp_config.lo_idx;
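+	/* Scale duty-cycle percentages to 8-bit SDAM values (0-255) */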
+	for (i = 0; i < length; i++)
+		val[i] = pattern[i] * 255 / 100;
+
+	rc = qpnp_lut_sdam_write(lut, addr, val, length);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write pattern in SDAM failed, rc=%d\n",
+				rc);
+		goto unlock;
+	}
+
+	lpg->ramp_config.pattern_length = length;
+unlock:
+	mutex_unlock(&lut->lock);
+
+	return rc;
+}
+
+static int qpnp_lpg_set_sdam_ramp_config(struct qpnp_lpg_channel *lpg)
+{
+	struct lpg_ramp_config *ramp = &lpg->ramp_config;
+	u8 addr, mask, val;
+	int rc = 0;
+
+	/* Clear the PBS scratchpad register */
+	val = 0;
+	rc = qpnp_lpg_sdam_write(lpg,
+			SDAM_PBS_SCRATCH_LUT_COUNTER_OFFSET, val);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write SDAM_PBS_SCRATCH_LUT_COUNTER_OFFSET failed, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	/* Set ramp step duration, one WAIT_TICK is 7.8ms */
+	val = (ramp->step_ms * 1000 / 7800) & 0xff;
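+	/* The register field is zero-based, so store the tick count minus one */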
+	if (val > 0)
+		val--;
+	addr = SDAM_REG_RAMP_STEP_DURATION;
+	rc = qpnp_sdam_write(lpg->chip, addr, val);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write SDAM_REG_RAMP_STEP_DURATION failed, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	/* Set hi_idx and lo_idx */
+	rc = qpnp_lpg_sdam_write(lpg, SDAM_END_INDEX_OFFSET, ramp->hi_idx);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write SDAM_REG_END_INDEX failed, rc=%d\n",
+					rc);
+		return rc;
+	}
+
+	rc = qpnp_lpg_sdam_write(lpg, SDAM_START_INDEX_OFFSET,
+						ramp->lo_idx);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write SDAM_REG_START_INDEX failed, rc=%d\n",
+					rc);
+		return rc;
+	}
+
+	/* Set LPG_PATTERN_CONFIG */
+	addr = SDAM_PATTERN_CONFIG_OFFSET;
+	mask = SDAM_PATTERN_LOOP_ENABLE;
+	val = 0;
+	if (ramp->pattern_repeat)
+		val |= SDAM_PATTERN_LOOP_ENABLE;
+
+	rc = qpnp_lpg_sdam_masked_write(lpg, addr, mask, val);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write SDAM_REG_PATTERN_CONFIG failed, rc=%d\n",
+					rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int qpnp_lpg_set_lut_pattern(struct qpnp_lpg_channel *lpg,
+		unsigned int *pattern, unsigned int length)
+{
+	struct qpnp_lpg_lut *lut = lpg->chip->lut;
+	u16 full_duty_value, pwm_values[SDAM_LUT_COUNT_MAX + 1] = {0};
+	int i, rc = 0;
 	u8 lsb, msb, addr;
 
+	if (lpg->chip->use_sdam)
+		return qpnp_lpg_set_sdam_lut_pattern(lpg, pattern, length);
+
 	if (length > lpg->max_pattern_length) {
 		dev_err(lpg->chip->dev, "new pattern length (%d) larger than predefined (%d)\n",
 				length, lpg->max_pattern_length);
@@ -422,6 +638,9 @@ static int qpnp_lpg_set_ramp_config(struct qpnp_lpg_channel *lpg)
 	u8 lsb, msb, addr, mask, val;
 	int rc = 0;
 
+	if (lpg->chip->use_sdam)
+		return qpnp_lpg_set_sdam_ramp_config(lpg);
+
 	/* Set ramp step duration */
 	lsb = ramp->step_ms & 0xff;
 	msb = ramp->step_ms >> 8;
@@ -503,6 +722,8 @@ static int qpnp_lpg_set_ramp_config(struct qpnp_lpg_channel *lpg)
 static void __qpnp_lpg_calc_pwm_period(u64 period_ns,
 			struct lpg_pwm_config *pwm_config)
 {
+	struct qpnp_lpg_channel *lpg = container_of(pwm_config,
+			struct qpnp_lpg_channel, pwm_config);
 	struct lpg_pwm_config configs[NUM_PWM_SIZE];
 	int i, j, m, n;
 	u64 tmp1, tmp2;
@@ -518,7 +739,12 @@ static void __qpnp_lpg_calc_pwm_period(u64 period_ns,
 	 *
 	 * Searching the closest settings for the requested PWM period.
 	 */
-	for (n = 0; n < ARRAY_SIZE(pwm_size); n++) {
+	if (lpg->chip->use_sdam)
+		/* SDAM pattern control can only use 9 bit resolution */
+		n = 1;
+	else
+		n = 0;
+	for (; n < ARRAY_SIZE(pwm_size); n++) {
 		pwm_clk_period_ns = period_ns >> pwm_size[n];
 		for (i = ARRAY_SIZE(clk_freq_hz) - 1; i >= 0; i--) {
 			for (j = 0; j < ARRAY_SIZE(clk_prediv); j++) {
@@ -666,6 +892,45 @@ static int qpnp_lpg_pwm_config_extend(struct pwm_chip *pwm_chip,
 	}
 
 	return qpnp_lpg_config(lpg, duty_ns, period_ns);
+};
+
+static int qpnp_lpg_pbs_trigger_enable(struct qpnp_lpg_channel *lpg, bool en)
+{
+	struct qpnp_lpg_chip *chip = lpg->chip;
+	int rc = 0;
+
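+	/*
+	 * The PBS trigger is shared: fire it when the first channel
+	 * enables and clear it only after the last channel disables.
+	 */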
+	if (en) {
+		if (chip->pbs_en_bitmap == 0) {
+			rc = qpnp_sdam_write(chip, SDAM_REG_PBS_SEQ_EN,
+					PBS_SW_TRG_BIT);
+			if (rc < 0) {
+				dev_err(chip->dev, "Write SDAM_REG_PBS_SEQ_EN failed, rc=%d\n",
+						rc);
+				return rc;
+			}
+
+			rc = qpnp_pbs_trigger_event(chip->pbs_dev_node,
+					PBS_SW_TRG_BIT);
+			if (rc < 0) {
+				dev_err(chip->dev, "Failed to trigger PBS, rc=%d\n",
+						rc);
+				return rc;
+			}
+		}
+		set_bit(lpg->lpg_idx, &chip->pbs_en_bitmap);
+	} else {
+		clear_bit(lpg->lpg_idx, &chip->pbs_en_bitmap);
+		if (chip->pbs_en_bitmap == 0) {
+			rc = qpnp_sdam_write(chip, SDAM_REG_PBS_SEQ_EN, 0);
+			if (rc < 0) {
+				dev_err(chip->dev, "Write SDAM_REG_PBS_SEQ_EN failed, rc=%d\n",
+						rc);
+				return rc;
+			}
+		}
+	}
+
+	return rc;
 }
 
 static int qpnp_lpg_pwm_src_enable(struct qpnp_lpg_channel *lpg, bool en)
@@ -680,7 +945,7 @@ static int qpnp_lpg_pwm_src_enable(struct qpnp_lpg_channel *lpg, bool en)
 					LPG_EN_RAMP_GEN_MASK;
 	val = lpg->src_sel << LPG_PWM_SRC_SELECT_SHIFT;
 
-	if (lpg->src_sel == LUT_PATTERN)
+	if (lpg->src_sel == LUT_PATTERN && !chip->use_sdam)
 		val |= 1 << LPG_EN_RAMP_GEN_SHIFT;
 
 	if (en)
@@ -693,6 +958,27 @@ static int qpnp_lpg_pwm_src_enable(struct qpnp_lpg_channel *lpg, bool en)
 		return rc;
 	}
 
+	if (chip->use_sdam) {
+		if (lpg->src_sel == LUT_PATTERN && en) {
+			val = SDAM_LUT_EN_BIT;
+			en = true;
+		} else {
+			val = 0;
+			en = false;
+		}
+
+		rc = qpnp_lpg_sdam_write(lpg, SDAM_LUT_EN_OFFSET, val);
+		if (rc < 0) {
+			dev_err(chip->dev, "Write SDAM_REG_LUT_EN failed, rc=%d\n",
+					rc);
+			return rc;
+		}
+
+		qpnp_lpg_pbs_trigger_enable(lpg, en);
+
+		return rc;
+	}
+
 	if (lpg->src_sel == LUT_PATTERN && en) {
 		val = 1 << lpg->lpg_idx;
 		for (i = 0; i < chip->num_lpgs; i++) {
@@ -735,6 +1021,7 @@ static int qpnp_lpg_pwm_set_output_type(struct pwm_chip *pwm_chip,
 	struct qpnp_lpg_channel *lpg;
 	enum lpg_src src_sel;
 	int rc;
+	bool is_enabled;
 
 	lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm);
 	if (lpg == NULL) {
@@ -752,6 +1039,23 @@ static int qpnp_lpg_pwm_set_output_type(struct pwm_chip *pwm_chip,
 	if (src_sel == lpg->src_sel)
 		return 0;
 
+	is_enabled = pwm_is_enabled(pwm);
+	if (is_enabled) {
+		/*
+		 * Disable the channel first, then re-enable it later, to
+		 * make sure the output type is changed successfully. This
+		 * is especially useful in the SDAM case to stop the PBS
+		 * sequence when changing the PWM output type from
+		 * MODULATED to FIXED.
+		 */
+		rc = qpnp_lpg_pwm_src_enable(lpg, false);
+		if (rc < 0) {
+			dev_err(pwm_chip->dev, "Enable PWM output failed for channel %d, rc=%d\n",
+					lpg->lpg_idx, rc);
+			return rc;
+		}
+	}
+
 	if (src_sel == LUT_PATTERN) {
 		/* program LUT if it's never been programmed */
 		if (!lpg->lut_written) {
@@ -776,7 +1080,14 @@ static int qpnp_lpg_pwm_set_output_type(struct pwm_chip *pwm_chip,
 
 	lpg->src_sel = src_sel;
 
-	if (pwm_is_enabled(pwm)) {
+	if (is_enabled) {
+		rc = qpnp_lpg_set_pwm_config(lpg);
+		if (rc < 0) {
+			dev_err(pwm_chip->dev, "Config PWM failed for channel %d, rc=%d\n",
+							lpg->lpg_idx, rc);
+			return rc;
+		}
+
 		rc = qpnp_lpg_pwm_src_enable(lpg, true);
 		if (rc < 0) {
 			dev_err(pwm_chip->dev, "Enable PWM output failed for channel %d, rc=%d\n",
@@ -1056,7 +1367,7 @@ static int qpnp_lpg_parse_dt(struct qpnp_lpg_chip *chip)
 	struct qpnp_lpg_channel *lpg;
 	struct lpg_ramp_config *ramp;
 	int rc = 0, i;
-	u32 base, length, lpg_chan_id, tmp;
+	u32 base, length, lpg_chan_id, tmp, max_count;
 	const __be32 *addr;
 
 	rc = qpnp_get_lpg_channels(chip, &base);
@@ -1081,18 +1392,47 @@ static int qpnp_lpg_parse_dt(struct qpnp_lpg_chip *chip)
 		}
 	}
 
-	addr = of_get_address(chip->dev->of_node, 1, NULL, NULL);
-	if (!addr) {
-		pr_debug("NO LUT address assigned\n");
-		return 0;
-	}
-
 	chip->lut = devm_kmalloc(chip->dev, sizeof(*chip->lut), GFP_KERNEL);
 	if (!chip->lut)
 		return -ENOMEM;
 
+	chip->sdam_nvmem = devm_nvmem_device_get(chip->dev, "ppg_sdam");
+	if (IS_ERR_OR_NULL(chip->sdam_nvmem)) {
+		if (PTR_ERR(chip->sdam_nvmem) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+
+		addr = of_get_address(chip->dev->of_node, 1, NULL, NULL);
+		if (!addr) {
+			pr_debug("NO LUT address assigned\n");
+			devm_kfree(chip->dev, chip->lut);
+			chip->lut = NULL;
+			return 0;
+		}
+
+		chip->lut->reg_base = be32_to_cpu(*addr);
+		max_count = LPG_LUT_COUNT_MAX;
+	} else {
+		chip->use_sdam = true;
+		chip->pbs_dev_node = of_parse_phandle(chip->dev->of_node,
+				"qcom,pbs-client", 0);
+		if (!chip->pbs_dev_node) {
+			dev_err(chip->dev, "Missing qcom,pbs-client property\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(chip->dev->of_node,
+				"qcom,lut-sdam-base",
+				&chip->lut->reg_base);
+		if (rc < 0) {
+			dev_err(chip->dev, "Read qcom,lut-sdam-base failed, rc=%d\n",
+					rc);
+			return rc;
+		}
+
+		max_count = SDAM_LUT_COUNT_MAX;
+	}
+
 	chip->lut->chip = chip;
-	chip->lut->reg_base = be32_to_cpu(*addr);
 	mutex_init(&chip->lut->lock);
 
 	rc = of_property_count_elems_of_size(chip->dev->of_node,
@@ -1104,13 +1444,13 @@ static int qpnp_lpg_parse_dt(struct qpnp_lpg_chip *chip)
 	}
 
 	length = rc;
-	if (length > LPG_LUT_COUNT_MAX) {
+	if (length > max_count) {
 		dev_err(chip->dev, "qcom,lut-patterns length %d exceed max %d\n",
-				length, LPG_LUT_COUNT_MAX);
+				length, max_count);
 		return -EINVAL;
 	}
 
-	chip->lut->pattern = devm_kcalloc(chip->dev, LPG_LUT_COUNT_MAX,
+	chip->lut->pattern = devm_kcalloc(chip->dev, max_count,
 			sizeof(*chip->lut->pattern), GFP_KERNEL);
 	if (!chip->lut->pattern)
 		return -ENOMEM;
@@ -1137,12 +1477,24 @@ static int qpnp_lpg_parse_dt(struct qpnp_lpg_chip *chip)
 			return rc;
 		}
 
-		if (lpg_chan_id > chip->num_lpgs) {
+		if (lpg_chan_id < 1 || lpg_chan_id > chip->num_lpgs) {
 			dev_err(chip->dev, "lpg-chann-id %d is out of range 1~%d\n",
 					lpg_chan_id, chip->num_lpgs);
 			return -EINVAL;
 		}
 
+		if (chip->use_sdam) {
+			rc = of_property_read_u32(child,
+					"qcom,lpg-sdam-base",
+					&tmp);
+			if (rc < 0) {
+				dev_err(chip->dev, "get qcom,lpg-sdam-base failed for lpg%d, rc=%d\n",
+						lpg_chan_id, rc);
+				return rc;
+			}
+			chip->lpgs[lpg_chan_id - 1].lpg_sdam_base = tmp;
+		}
+
 		/* lpg channel id is indexed from 1 in hardware */
 		lpg = &chip->lpgs[lpg_chan_id - 1];
 		ramp = &lpg->ramp_config;
@@ -1162,9 +1514,9 @@ static int qpnp_lpg_parse_dt(struct qpnp_lpg_chip *chip)
 			return rc;
 		}
 		ramp->lo_idx = (u8)tmp;
-		if (ramp->lo_idx >= LPG_LUT_COUNT_MAX) {
+		if (ramp->lo_idx >= max_count) {
 			dev_err(chip->dev, "qcom,ramp-low-index should less than max %d\n",
-					LPG_LUT_COUNT_MAX);
+						max_count);
 			return -EINVAL;
 		}
 
@@ -1176,14 +1528,14 @@ static int qpnp_lpg_parse_dt(struct qpnp_lpg_chip *chip)
 		}
 		ramp->hi_idx = (u8)tmp;
 
-		if (ramp->hi_idx > LPG_LUT_COUNT_MAX) {
+		if (ramp->hi_idx > max_count) {
 			dev_err(chip->dev, "qcom,ramp-high-index shouldn't exceed max %d\n",
-						LPG_LUT_COUNT_MAX);
+						max_count);
 			return -EINVAL;
 		}
 
-		if (ramp->hi_idx <= ramp->lo_idx) {
-			dev_err(chip->dev, "high-index(%d) should be larger than low-index(%d)\n",
+		if (chip->use_sdam && ramp->hi_idx <= ramp->lo_idx) {
+			dev_err(chip->dev, "high-index(%d) should be larger than low-index(%d) when SDAM used\n",
 						ramp->hi_idx, ramp->lo_idx);
 			return -EINVAL;
 		}
@@ -1192,6 +1544,12 @@ static int qpnp_lpg_parse_dt(struct qpnp_lpg_chip *chip)
 		ramp->pattern = &chip->lut->pattern[ramp->lo_idx];
 		lpg->max_pattern_length = ramp->pattern_length;
 
+		ramp->pattern_repeat = of_property_read_bool(child,
+				"qcom,ramp-pattern-repeat");
+
+		if (chip->use_sdam)
+			continue;
+
 		rc = of_property_read_u32(child,
 				"qcom,ramp-pause-hi-count", &tmp);
 		if (rc < 0)
@@ -1209,9 +1567,6 @@ static int qpnp_lpg_parse_dt(struct qpnp_lpg_chip *chip)
 		ramp->ramp_dir_low_to_hi = of_property_read_bool(child,
 				"qcom,ramp-from-low-to-high");
 
-		ramp->pattern_repeat = of_property_read_bool(child,
-				"qcom,ramp-pattern-repeat");
-
 		ramp->toggle =  of_property_read_bool(child,
 				"qcom,ramp-toggle");
 	}
@@ -1266,6 +1621,36 @@ static int qpnp_lpg_parse_dt(struct qpnp_lpg_chip *chip)
 	return 0;
 }
 
+static int qpnp_lpg_sdam_hw_init(struct qpnp_lpg_chip *chip)
+{
+	struct qpnp_lpg_channel *lpg;
+	int i, rc = 0;
+
+	if (!chip->use_sdam)
+		return 0;
+
+	for (i = 0; i < chip->num_lpgs; i++) {
+		lpg = &chip->lpgs[i];
+		if (lpg->lpg_sdam_base != 0) {
+			rc = qpnp_lpg_sdam_write(lpg, SDAM_LUT_EN_OFFSET, 0);
+			if (rc < 0) {
+				dev_err(chip->dev, "Write SDAM_REG_LUT_EN failed, rc=%d\n",
+						rc);
+				return rc;
+			}
+			rc = qpnp_lpg_sdam_write(lpg,
+					SDAM_PBS_SCRATCH_LUT_COUNTER_OFFSET, 0);
+			if (rc < 0) {
+				dev_err(lpg->chip->dev, "Write SDAM_REG_PBS_SCRATCH_LUT_COUNTER failed, rc=%d\n",
+						rc);
+				return rc;
+			}
+		}
+	}
+
+	return rc;
+}
+
 static int qpnp_lpg_probe(struct platform_device *pdev)
 {
 	int rc;
@@ -1290,6 +1675,13 @@ static int qpnp_lpg_probe(struct platform_device *pdev)
 		goto err_out;
 	}
 
+	rc = qpnp_lpg_sdam_hw_init(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "SDAM HW init failed, rc=%d\n",
+				rc);
+		goto err_out;
+	}
+
 	dev_set_drvdata(chip->dev, chip);
 	chip->pwm_chip.dev = chip->dev;
 	chip->pwm_chip.base = -1;
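
For reference, the output-type switch above reduces to a disable, reconfigure,
re-enable sequence so that the PBS/LUT machinery is quiescent while the type
changes. A minimal standalone sketch of that ordering, where the struct and
helpers are illustrative stand-ins for the driver's qpnp_lpg_* functions, not
its real API:

    #include <stdbool.h>

    struct chan { bool enabled; int type; };

    /* Illustrative stand-ins for the driver's qpnp_lpg_* helpers. */
    static int chan_enable(struct chan *c, bool on) { c->enabled = on; return 0; }
    static int chan_set_type(struct chan *c, int type) { c->type = type; return 0; }
    static int chan_apply_config(struct chan *c) { (void)c; return 0; }

    /* Disable first so any PBS/LUT activity stops, then switch and re-enable. */
    static int switch_output_type(struct chan *c, int type)
    {
            bool was_enabled = c->enabled;
            int rc;

            if (was_enabled) {
                    rc = chan_enable(c, false);
                    if (rc < 0)
                            return rc;
            }

            rc = chan_set_type(c, type);
            if (rc < 0)
                    return rc;

            if (was_enabled) {
                    rc = chan_apply_config(c);      /* re-program period/duty */
                    if (rc < 0)
                            return rc;
                    rc = chan_enable(c, true);
            }
            return rc;
    }
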
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 0059b24c..28e1f64 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -58,6 +58,12 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 	/* Calculate the period and prescaler value */
 	div = (unsigned long long)clk_get_rate(priv->clk) * state->period;
 	do_div(div, NSEC_PER_SEC);
+	if (!div) {
+		/* Clock is too slow to achieve requested period. */
+		dev_dbg(priv->chip.dev, "Can't reach %u ns\n", state->period);
+		return -EINVAL;
+	}
+
 	prd = div;
 	while (div > STM32_LPTIM_MAX_ARR) {
 		presc++;
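
The guard added above rejects periods that the slow clock cannot represent
(the tick count rounds to zero). A standalone sketch of the same parameter
computation, assuming the LPTIM's 16-bit auto-reload register and power-of-two
prescaler (divide by 1 through 128); the function and macro names here are
illustrative:

    #include <stdint.h>

    #define NSEC_PER_SEC    1000000000ULL
    #define LPTIM_MAX_ARR   0xFFFF  /* 16-bit auto-reload, cf. STM32_LPTIM_MAX_ARR */

    /*
     * Returns the prescaler exponent (0..7, i.e. divide by 1..128) and fills
     * *reload, or returns -1 when the period is unreachable.
     */
    static int lptim_params(uint64_t clk_hz, uint64_t period_ns, uint64_t *reload)
    {
            uint64_t ticks = clk_hz * period_ns / NSEC_PER_SEC;
            int presc = 0;

            if (!ticks)
                    return -1;      /* clock too slow for the requested period */

            while (ticks > LPTIM_MAX_ARR && presc < 7) {
                    presc++;
                    ticks >>= 1;    /* prescaler steps are powers of two */
            }
            if (ticks > LPTIM_MAX_ARR)
                    return -1;      /* period too long even at divide-by-128 */

            *reload = ticks;
            return presc;
    }
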
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 212d99d..4d851df 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -313,7 +313,6 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
 	export->pwm = pwm;
 	mutex_init(&export->lock);
 
-	export->child.class = parent->class;
 	export->child.release = pwm_export_release;
 	export->child.parent = parent;
 	export->child.devt = MKDEV(0, 0);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index e03bbad..5e96575 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -1075,6 +1075,17 @@
 	  control allows for voting on regulator state between multiple
 	  processors within the SoC.
 
+config REGULATOR_RPM_SMD
+	bool "RPM SMD regulator driver"
+	depends on OF
+	depends on MSM_RPM_SMD
+	help
+	  Compile in support for the RPM SMD regulator driver which is used for
+	  setting voltages and other parameters of the various power rails
+	  supplied by some Qualcomm Technologies Inc. PMICs. The RPM SMD
+	  regulator driver should be used on systems which contain an RPM
+	  which communicates with the application processor over SMD.
+
 config REGULATOR_STUB
 	tristate "Stub Regulator"
 	help
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 97d1de2..62f3005 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -81,6 +81,7 @@
 obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
 obj-$(CONFIG_REGULATOR_QCOM_RPMH) += qcom-rpmh-regulator.o
 obj-$(CONFIG_REGULATOR_QCOM_SMD_RPM) += qcom_smd-regulator.o
+obj-$(CONFIG_REGULATOR_RPM_SMD) += rpm-smd-regulator.o
 obj-$(CONFIG_REGULATOR_PM8008) += qcom_pm8008-regulator.o
 obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o
 obj-$(CONFIG_REGULATOR_QPNP_LABIBB) += qpnp-labibb-regulator.o
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index bdaf0253..316861b 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -5213,7 +5213,7 @@ static int __init regulator_init(void)
 /* init early to allow our consumers to complete system booting */
 core_initcall(regulator_init);
 
-static int __init regulator_late_cleanup(struct device *dev, void *data)
+static int regulator_late_cleanup(struct device *dev, void *data)
 {
 	struct regulator_dev *rdev = dev_to_rdev(dev);
 	const struct regulator_ops *ops = rdev->desc->ops;
@@ -5262,18 +5262,9 @@ static int __init regulator_late_cleanup(struct device *dev, void *data)
 	return 0;
 }
 
-static int __init regulator_init_complete(void)
+static void regulator_init_complete_work_function(struct work_struct *work)
 {
 	/*
-	 * Since DT doesn't provide an idiomatic mechanism for
-	 * enabling full constraints and since it's much more natural
-	 * with DT to provide them just assume that a DT enabled
-	 * system has full constraints.
-	 */
-	if (of_have_populated_dt())
-		has_full_constraints = true;
-
-	/*
 	 * Regulators may have failed to resolve their input supplies
 	 * when they were registered, either because the input supply was
 	 * not registered yet or because its parent device was not
@@ -5290,6 +5281,35 @@ static int __init regulator_init_complete(void)
 	 */
 	class_for_each_device(&regulator_class, NULL, NULL,
 			      regulator_late_cleanup);
+}
+
+static DECLARE_DELAYED_WORK(regulator_init_complete_work,
+			    regulator_init_complete_work_function);
+
+static int __init regulator_init_complete(void)
+{
+	/*
+	 * Since DT doesn't provide an idiomatic mechanism for
+	 * enabling full constraints and since it's much more natural
+	 * with DT to provide them just assume that a DT enabled
+	 * system has full constraints.
+	 */
+	if (of_have_populated_dt())
+		has_full_constraints = true;
+
+	/*
+	 * We punt completion for an arbitrary amount of time since
+	 * systems like distros will load many drivers from userspace
+	 * so consumers might not always be ready yet, this is
+	 * particularly an issue with laptops where this might bounce
+	 * the display off then on.  Ideally we'd get a notification
+	 * from userspace when this happens but we don't so just wait
+	 * a bit and hope we waited long enough.  It'd be better if
+	 * we'd only do this on systems that need it, and a kernel
+	 * command line option might be useful.
+	 */
+	schedule_delayed_work(&regulator_init_complete_work,
+			      msecs_to_jiffies(30000));
 
 	class_for_each_device(&regulator_class, NULL, NULL,
 			      regulator_register_fill_coupling_array);
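
The core.c change above defers the late cleanup into a delayed work item so
that consumers loaded from userspace get a chance to claim their supplies
before unused regulators are disabled. Reduced to the underlying pattern (the
work function body is elided; names are illustrative):

    #include <linux/init.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static void late_cleanup_fn(struct work_struct *work)
    {
            /* Walk the class devices and disable unused regulators, as
             * regulator_late_cleanup() does above.
             */
    }

    static DECLARE_DELAYED_WORK(late_cleanup_work, late_cleanup_fn);

    static int __init example_init(void)
    {
            /* Defer ~30 s so userspace-loaded consumers can claim supplies. */
            schedule_delayed_work(&late_cleanup_work, msecs_to_jiffies(30000));
            return 0;
    }
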
diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c
index b615a41..27c0a67 100644
--- a/drivers/regulator/lm363x-regulator.c
+++ b/drivers/regulator/lm363x-regulator.c
@@ -33,7 +33,7 @@
 
 /* LM3632 */
 #define LM3632_BOOST_VSEL_MAX		0x26
-#define LM3632_LDO_VSEL_MAX		0x29
+#define LM3632_LDO_VSEL_MAX		0x28
 #define LM3632_VBOOST_MIN		4500000
 #define LM3632_VLDO_MIN			4000000
 
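
The VSEL change above is a selector bound: assuming this LDO's linear voltage
map from the upstream driver (4.0 V base, 50 mV per step, 6.0 V ceiling; the
step and ceiling are stated here as assumptions, only the 4.0 V minimum
appears in the hunk), the highest valid selector is 0x28, not 0x29. A small
check of the arithmetic:

    #include <assert.h>

    #define VLDO_MIN_UV     4000000 /* from LM3632_VLDO_MIN above */
    #define VLDO_STEP_UV      50000 /* assumed per-step size */
    #define VLDO_MAX_UV     6000000 /* assumed datasheet ceiling */

    int main(void)
    {
            int vsel_max = (VLDO_MAX_UV - VLDO_MIN_UV) / VLDO_STEP_UV;

            assert(vsel_max == 0x28);       /* 40 steps above 4.0 V is 6.0 V */
            return 0;
    }
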
diff --git a/drivers/regulator/qpnp-amoled-regulator.c b/drivers/regulator/qpnp-amoled-regulator.c
index f5f89ee..c6b3e4d 100644
--- a/drivers/regulator/qpnp-amoled-regulator.c
+++ b/drivers/regulator/qpnp-amoled-regulator.c
@@ -15,7 +15,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
@@ -74,7 +73,6 @@ struct ab_regulator {
 	/* DT params */
 	bool			swire_control;
 	bool			pd_control;
-	u32			aod_entry_poll_time_ms;
 };
 
 struct ibb_regulator {
@@ -91,9 +89,6 @@ struct qpnp_amoled {
 	struct oledb_regulator	oledb;
 	struct ab_regulator	ab;
 	struct ibb_regulator	ibb;
-	struct mutex		reg_lock;
-	struct work_struct	aod_work;
-	struct workqueue_struct *wq;
 
 	/* DT params */
 	u32			oledb_base;
@@ -226,102 +221,6 @@ static int qpnp_ab_pd_control(struct qpnp_amoled *chip, bool en)
 	return qpnp_amoled_write(chip, AB_LDO_PD_CTL(chip), &val, 1);
 }
 
-#define AB_VREG_OK_POLL_TIME_US		2000
-#define AB_VREG_OK_POLL_HIGH_TRIES	8
-#define AB_VREG_OK_POLL_HIGH_TIME_US	10000
-#define AB_VREG_OK_POLL_AGAIN_TRIES	10
-
-static int qpnp_ab_poll_vreg_ok(struct qpnp_amoled *chip, bool status,
-				u32 poll_time_us)
-{
-	u32 i, poll_us = AB_VREG_OK_POLL_TIME_US, wait_time_us = 0;
-	bool swire_high = false, poll_again = false, monitor = false;
-	int rc;
-	u8 val;
-
-	if (poll_time_us < AB_VREG_OK_POLL_TIME_US)
-		return -EINVAL;
-
-	i = poll_time_us / AB_VREG_OK_POLL_TIME_US;
-loop:
-	while (i--) {
-		/* Write a dummy value before reading AB_STATUS1 */
-		rc = qpnp_amoled_write(chip, AB_STATUS1(chip), &val, 1);
-		if (rc < 0)
-			return rc;
-
-		rc = qpnp_amoled_read(chip, AB_STATUS1(chip), &val, 1);
-		if (rc < 0)
-			return rc;
-
-		wait_time_us += poll_us;
-		if (((val & VREG_OK_BIT) >> VREG_OK_SHIFT) == status) {
-			pr_debug("Waited for %d us\n", wait_time_us);
-
-			/*
-			 * Return if we're polling for VREG_OK low. Else, poll
-			 * for VREG_OK high for at least 80 ms. IF VREG_OK stays
-			 * high, then consider it as a valid SWIRE pulse.
-			 */
-
-			if (status) {
-				swire_high = true;
-				if (!poll_again && !monitor) {
-					pr_debug("SWIRE is high, start monitoring\n");
-					i = AB_VREG_OK_POLL_HIGH_TRIES;
-					poll_us = AB_VREG_OK_POLL_HIGH_TIME_US;
-					wait_time_us = 0;
-					monitor = true;
-				}
-
-				if (poll_again)
-					poll_again = false;
-			} else {
-				return 0;
-			}
-		} else {
-			/*
-			 * If we're here when polling for VREG_OK high, then it
-			 * is possibly because of an intermittent SWIRE pulse.
-			 * Ignore it and poll for valid SWIRE pulse again.
-			 */
-			if (status && swire_high && monitor) {
-				pr_debug("SWIRE is low\n");
-				poll_again = true;
-				swire_high = false;
-				break;
-			}
-
-			if (poll_again)
-				poll_again = false;
-		}
-
-		usleep_range(poll_us, poll_us + 1);
-	}
-
-	/*
-	 * If poll_again is set, then VREG_OK should be polled for another
-	 * 100 ms for valid SWIRE signal.
-	 */
-
-	if (poll_again) {
-		pr_debug("polling again for SWIRE\n");
-		i = AB_VREG_OK_POLL_AGAIN_TRIES;
-		poll_us = AB_VREG_OK_POLL_HIGH_TIME_US;
-		wait_time_us = 0;
-		goto loop;
-	}
-
-	/* If swire_high is set, then it's a valid SWIRE signal, return 0. */
-	if (swire_high) {
-		pr_debug("SWIRE is high\n");
-		return 0;
-	}
-
-	pr_err("AB_STATUS1: %x poll for VREG_OK %d timed out\n", val, status);
-	return -ETIMEDOUT;
-}
-
 static int qpnp_ibb_pd_control(struct qpnp_amoled *chip, bool en)
 {
 	u8 val = en ? ENABLE_PD_BIT : 0;
@@ -330,70 +229,24 @@ static int qpnp_ibb_pd_control(struct qpnp_amoled *chip, bool en)
 					val);
 }
 
-static int qpnp_ibb_aod_config(struct qpnp_amoled *chip, bool aod)
+static int qpnp_ab_ibb_regulator_set_mode(struct regulator_dev *rdev,
+						unsigned int mode)
 {
-	int rc;
-	u8 ps_ctl, smart_ps_ctl, nlimit_dac;
+	struct qpnp_amoled *chip = rdev_get_drvdata(rdev);
+	int rc = 0;
 
-	pr_debug("aod: %d\n", aod);
-	if (aod) {
-		ps_ctl = 0x82;
-		smart_ps_ctl = 0;
-		nlimit_dac = 0;
-	} else {
-		ps_ctl = 0x02;
-		smart_ps_ctl = 0x80;
-		nlimit_dac = 0x3;
+	if (mode != REGULATOR_MODE_NORMAL && mode != REGULATOR_MODE_STANDBY &&
+		mode != REGULATOR_MODE_IDLE) {
+		pr_err("Unsupported mode %u\n", mode);
+		return -EINVAL;
 	}
 
-	rc = qpnp_amoled_write(chip, IBB_SMART_PS_CTL(chip), &smart_ps_ctl, 1);
-	if (rc < 0)
-		return rc;
-
-	rc = qpnp_amoled_write(chip, IBB_NLIMIT_DAC(chip), &nlimit_dac, 1);
-	if (rc < 0)
-		return rc;
-
-	rc = qpnp_amoled_write(chip, IBB_PS_CTL(chip), &ps_ctl, 1);
-	return rc;
-}
-
-static void qpnp_amoled_aod_work(struct work_struct *work)
-{
-	struct qpnp_amoled *chip = container_of(work, struct qpnp_amoled,
-					aod_work);
-	u8 val = 0;
-	unsigned int mode;
-	u32 poll_time_us = 100000;
-	int rc;
-
-	mutex_lock(&chip->reg_lock);
-	mode = chip->ab.vreg.mode;
-	mutex_unlock(&chip->reg_lock);
+	if (mode == chip->ab.vreg.mode || mode == chip->ibb.vreg.mode)
+		return 0;
 
 	pr_debug("mode: %d\n", mode);
-	if (mode == REGULATOR_MODE_NORMAL) {
-		rc = qpnp_ibb_aod_config(chip, true);
-		if (rc < 0)
-			goto error;
 
-		/* poll for VREG_OK high */
-		rc = qpnp_ab_poll_vreg_ok(chip, true, poll_time_us);
-		if (rc < 0)
-			goto error;
-
-		/*
-		 * As per the hardware recommendation, Wait for ~10 ms after
-		 * polling for VREG_OK before changing the configuration when
-		 * exiting from AOD mode.
-		 */
-
-		usleep_range(10000, 10001);
-
-		rc = qpnp_ibb_aod_config(chip, false);
-		if (rc < 0)
-			goto error;
-
+	if (mode == REGULATOR_MODE_NORMAL || mode == REGULATOR_MODE_STANDBY) {
 		if (chip->ibb.pd_control) {
 			rc = qpnp_ibb_pd_control(chip, true);
 			if (rc < 0)
@@ -406,14 +259,6 @@ static void qpnp_amoled_aod_work(struct work_struct *work)
 				goto error;
 		}
 	} else if (mode == REGULATOR_MODE_IDLE) {
-		if (chip->ab.aod_entry_poll_time_ms > 0)
-			poll_time_us = chip->ab.aod_entry_poll_time_ms * 1000;
-
-		/* poll for VREG_OK low */
-		rc = qpnp_ab_poll_vreg_ok(chip, false, poll_time_us);
-		if (rc < 0)
-			goto error;
-
 		if (chip->ibb.pd_control) {
 			rc = qpnp_ibb_pd_control(chip, false);
 			if (rc < 0)
@@ -425,54 +270,13 @@ static void qpnp_amoled_aod_work(struct work_struct *work)
 			if (rc < 0)
 				goto error;
 		}
-
-		val = 0xF1;
-	} else if (mode == REGULATOR_MODE_STANDBY) {
-		/* Restore the normal configuration without any delay */
-		rc = qpnp_ibb_aod_config(chip, false);
-		if (rc < 0)
-			goto error;
-
-		if (chip->ibb.pd_control) {
-			rc = qpnp_ibb_pd_control(chip, true);
-			if (rc < 0)
-				goto error;
-		}
-
-		if (chip->ab.pd_control) {
-			rc = qpnp_ab_pd_control(chip, true);
-			if (rc < 0)
-				goto error;
-		}
 	}
 
-	rc = qpnp_amoled_write(chip, AB_LDO_SW_DBG_CTL(chip), &val, 1);
+	chip->ab.vreg.mode = chip->ibb.vreg.mode = mode;
 error:
 	if (rc < 0)
 		pr_err("Failed to configure for mode %d\n", mode);
-}
-
-static int qpnp_ab_ibb_regulator_set_mode(struct regulator_dev *rdev,
-						unsigned int mode)
-{
-	struct qpnp_amoled *chip  = rdev_get_drvdata(rdev);
-
-	if (mode != REGULATOR_MODE_NORMAL && mode != REGULATOR_MODE_STANDBY &&
-		mode != REGULATOR_MODE_IDLE) {
-		pr_err("Unsupported mode %u\n", mode);
-		return -EINVAL;
-	}
-
-	pr_debug("mode: %d\n", mode);
-	if (mode == chip->ab.vreg.mode || mode == chip->ibb.vreg.mode)
-		return 0;
-
-	mutex_lock(&chip->reg_lock);
-	chip->ab.vreg.mode = chip->ibb.vreg.mode = mode;
-	mutex_unlock(&chip->reg_lock);
-
-	queue_work(chip->wq, &chip->aod_work);
-	return 0;
+	return rc;
 }
 
 static unsigned int qpnp_ab_ibb_regulator_get_mode(struct regulator_dev *rdev)
@@ -720,9 +524,6 @@ static int qpnp_amoled_parse_dt(struct qpnp_amoled *chip)
 							"qcom,swire-control");
 			chip->ab.pd_control = of_property_read_bool(temp,
 							"qcom,aod-pd-control");
-			of_property_read_u32(temp,
-				"qcom,aod-entry-poll-time-ms",
-				&chip->ab.aod_entry_poll_time_ms);
 			break;
 		case IBB_PERIPH_TYPE:
 			chip->ibb_base = base;
@@ -758,19 +559,6 @@ static int qpnp_amoled_regulator_probe(struct platform_device *pdev)
 	if (!chip)
 		return -ENOMEM;
 
-	/*
-	 * We need this workqueue to order the mode transitions that require
-	 * timing considerations. This way, we can ensure whenever the mode
-	 * transition is requested, it can be queued with high priority.
-	 */
-	chip->wq = alloc_ordered_workqueue("qpnp_amoled_wq", WQ_HIGHPRI);
-	if (!chip->wq) {
-		dev_err(chip->dev, "Unable to alloc workqueue\n");
-		return -ENOMEM;
-	}
-
-	mutex_init(&chip->reg_lock);
-	INIT_WORK(&chip->aod_work, qpnp_amoled_aod_work);
 	chip->dev = &pdev->dev;
 
 	chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
@@ -789,23 +577,15 @@ static int qpnp_amoled_regulator_probe(struct platform_device *pdev)
 	}
 
 	rc = qpnp_amoled_hw_init(chip);
-	if (rc < 0) {
+	if (rc < 0)
 		dev_err(chip->dev, "Failed to initialize HW rc=%d\n", rc);
-		goto error;
-	}
 
-	return 0;
 error:
-	destroy_workqueue(chip->wq);
 	return rc;
 }
 
 static int qpnp_amoled_regulator_remove(struct platform_device *pdev)
 {
-	struct qpnp_amoled *chip = dev_get_drvdata(&pdev->dev);
-
-	cancel_work_sync(&chip->aod_work);
-	destroy_workqueue(chip->wq);
 	return 0;
 }
 
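
With the AOD polling worker removed, a mode change now completes synchronously
inside qpnp_ab_ibb_regulator_set_mode(). From the consumer side the entry
point is the standard regulator API; a hypothetical caller (the "ab" supply
name is illustrative):

    #include <linux/regulator/consumer.h>

    /* reg was obtained earlier, e.g. via devm_regulator_get(dev, "ab"). */
    static int enter_aod(struct regulator *reg)
    {
            /* IDLE releases the pull-downs for AOD entry; NORMAL or STANDBY
             * restores them on exit.
             */
            return regulator_set_mode(reg, REGULATOR_MODE_IDLE);
    }
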
diff --git a/drivers/regulator/qpnp-lcdb-regulator.c b/drivers/regulator/qpnp-lcdb-regulator.c
index 509d562..4e5b3b6 100644
--- a/drivers/regulator/qpnp-lcdb-regulator.c
+++ b/drivers/regulator/qpnp-lcdb-regulator.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"LCDB: %s: " fmt, __func__
@@ -106,6 +106,10 @@
 #define PFM_CURRENT_SHIFT		2
 
 #define LCDB_PWRUP_PWRDN_CTL_REG	0x66
+#define PWRUP_DELAY_MASK		GENMASK(3, 2)
+#define PWRDN_DELAY_MASK		GENMASK(1, 0)
+#define PWRDN_DELAY_MIN_MS		0
+#define PWRDN_DELAY_MAX_MS		8
 
 /* LDO */
 #define LCDB_LDO_OUTPUT_VOLTAGE_REG	0x71
@@ -118,6 +122,10 @@
 #define LDO_DIS_PULLDOWN_BIT		BIT(1)
 #define LDO_PD_STRENGTH_BIT		BIT(0)
 
+#define LCDB_LDO_FORCE_PD_CTL_REG	0x79
+#define LDO_FORCE_PD_EN_BIT		BIT(0)
+#define LDO_FORCE_PD_MODE		BIT(7)
+
 #define LCDB_LDO_ILIM_CTL1_REG		0x7B
 #define EN_LDO_ILIM_BIT			BIT(7)
 #define SET_LDO_ILIM_MASK		GENMASK(2, 0)
@@ -212,7 +220,9 @@ struct qpnp_lcdb {
 	struct regmap			*regmap;
 	struct pmic_revid_data		*pmic_rev_id;
 	u32				base;
+	u32				wa_flags;
 	int				sc_irq;
+	int				pwrdn_delay_ms;
 
 	/* TTW params */
 	bool				ttw_enable;
@@ -284,6 +294,11 @@ enum lcdb_settings_index {
 	LCDB_SETTING_MAX,
 };
 
+enum lcdb_wa_flags {
+	NCP_SCP_DISABLE_WA = BIT(0),
+	FORCE_PD_ENABLE_WA = BIT(1),
+};
+
 static u32 soft_start_us[] = {
 	0,
 	500,
@@ -305,6 +320,13 @@ static u32 ncp_ilim_ma[] = {
 	810,
 };
 
+static const u32 pwrup_pwrdn_ms[] = {
+	0,
+	1,
+	4,
+	8,
+};
+
 #define SETTING(_id, _sec_access, _valid)	\
 	[_id] = {				\
 		.address = _id##_REG,		\
@@ -537,14 +559,6 @@ static int qpnp_lcdb_ttw_enter(struct qpnp_lcdb *lcdb)
 		lcdb->settings_saved = true;
 	}
 
-	val = HWEN_RDY_BIT;
-	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
-			     &val, 1);
-	if (rc < 0) {
-		pr_err("Failed to hw_enable lcdb rc= %d\n", rc);
-		return rc;
-	}
-
 	val = (BST_SS_TIME_OVERRIDE_1MS << BST_SS_TIME_OVERRIDE_SHIFT) |
 	      (DIS_BST_PRECHG_SHORT_ALARM << BST_PRECHG_SHORT_ALARM_SHIFT);
 	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_BST_SS_CTL_REG, &val, 1);
@@ -552,31 +566,13 @@ static int qpnp_lcdb_ttw_enter(struct qpnp_lcdb *lcdb)
 		return rc;
 
 	val = 0;
-	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_LDO_SOFT_START_CTL_REG,
-			     &val, 1);
-	if (rc < 0)
-		return rc;
-
-	val = 0;
 	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_NCP_SOFT_START_CTL_REG,
 			     &val, 1);
 	if (rc < 0)
 		return rc;
 
-	val = BOOST_DIS_PULLDOWN_BIT | BOOST_PD_STRENGTH_BIT;
-	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_BST_PD_CTL_REG,
-			     &val, 1);
-	if (rc < 0)
-		return rc;
-
-	val = LDO_DIS_PULLDOWN_BIT | LDO_PD_STRENGTH_BIT;
-	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_LDO_PD_CTL_REG,
-							&val, 1);
-	if (rc < 0)
-		return rc;
-
-	val = NCP_DIS_PULLDOWN_BIT | NCP_PD_STRENGTH_BIT;
-	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_NCP_PD_CTL_REG,
+	val = 0;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_LDO_SOFT_START_CTL_REG,
 			     &val, 1);
 	if (rc < 0)
 		return rc;
@@ -590,6 +586,30 @@ static int qpnp_lcdb_ttw_enter(struct qpnp_lcdb *lcdb)
 	val = 0;
 	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_BST_VREG_OK_CTL_REG,
 			     &val, 1);
+	if (rc < 0)
+		return rc;
+
+	val = BOOST_DIS_PULLDOWN_BIT;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_BST_PD_CTL_REG,
+			     &val, 1);
+	if (rc < 0)
+		return rc;
+
+	val = LDO_DIS_PULLDOWN_BIT;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_LDO_PD_CTL_REG,
+							&val, 1);
+	if (rc < 0)
+		return rc;
+
+	val = NCP_DIS_PULLDOWN_BIT;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_NCP_PD_CTL_REG,
+			     &val, 1);
+	if (rc < 0)
+		return rc;
+
+	val = HWEN_RDY_BIT;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
+			     &val, 1);
 
 	return rc;
 }
@@ -909,6 +929,18 @@ static int qpnp_lcdb_disable(struct qpnp_lcdb *lcdb)
 		return 0;
 	}
 
+	if (lcdb->wa_flags & FORCE_PD_ENABLE_WA) {
+		/*
+		 * force pull-down to enable quick discharge after
+		 * turning off
+		 */
+		val = LDO_FORCE_PD_EN_BIT | LDO_FORCE_PD_MODE;
+		rc = qpnp_lcdb_write(lcdb, lcdb->base +
+				     LCDB_LDO_FORCE_PD_CTL_REG, &val, 1);
+		if (rc < 0)
+			return rc;
+	}
+
 	val = 0;
 	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
 							&val, 1);
@@ -917,6 +949,17 @@ static int qpnp_lcdb_disable(struct qpnp_lcdb *lcdb)
 	else
 		lcdb->lcdb_enabled = false;
 
+	if (lcdb->wa_flags & FORCE_PD_ENABLE_WA) {
+		/* wait for 10 msec after module disable for LDO to discharge */
+		usleep_range(10000, 11000);
+
+		val = 0;
+		rc = qpnp_lcdb_write(lcdb, lcdb->base +
+				     LCDB_LDO_FORCE_PD_CTL_REG, &val, 1);
+		if (rc < 0)
+			return rc;
+	}
+
 	return rc;
 }
 
@@ -2014,11 +2057,40 @@ static int qpnp_lcdb_init_bst(struct qpnp_lcdb *lcdb)
 	return 0;
 }
 
+static void qpnp_lcdb_pmic_config(struct qpnp_lcdb *lcdb)
+{
+	switch (lcdb->pmic_rev_id->pmic_subtype) {
+	case PM660L_SUBTYPE:
+		if (lcdb->pmic_rev_id->rev4 < PM660L_V2P0_REV4)
+			lcdb->wa_flags |= NCP_SCP_DISABLE_WA;
+		break;
+	case PMI632_SUBTYPE:
+	case PM6150L_SUBTYPE:
+		lcdb->wa_flags |= FORCE_PD_ENABLE_WA;
+		break;
+	default:
+		break;
+	}
+
+	pr_debug("LCDB wa_flags = 0x%2x\n", lcdb->wa_flags);
+}
+
 static int qpnp_lcdb_hw_init(struct qpnp_lcdb *lcdb)
 {
 	int rc = 0;
 	u8 val = 0;
 
+	qpnp_lcdb_pmic_config(lcdb);
+
+	if (lcdb->pwrdn_delay_ms != -EINVAL) {
+		rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
+					    LCDB_PWRUP_PWRDN_CTL_REG,
+					    PWRDN_DELAY_MASK,
+					    lcdb->pwrdn_delay_ms);
+		if (rc < 0)
+			return rc;
+	}
+
 	rc = qpnp_lcdb_init_bst(lcdb);
 	if (rc < 0) {
 		pr_err("Failed to initialize BOOST rc=%d\n", rc);
@@ -2076,7 +2148,8 @@ static int qpnp_lcdb_hw_init(struct qpnp_lcdb *lcdb)
 
 static int qpnp_lcdb_parse_dt(struct qpnp_lcdb *lcdb)
 {
-	int rc = 0;
+	int rc = 0, i = 0;
+	u32 tmp;
 	const char *label;
 	struct device_node *revid_dev_node, *temp, *node = lcdb->dev->of_node;
 
@@ -2140,7 +2213,24 @@ static int qpnp_lcdb_parse_dt(struct qpnp_lcdb *lcdb)
 	lcdb->voltage_step_ramp =
 			of_property_read_bool(node, "qcom,voltage-step-ramp");
 
-	return rc;
+	lcdb->pwrdn_delay_ms = -EINVAL;
+	rc = of_property_read_u32(node, "qcom,pwrdn-delay-ms", &tmp);
+	if (!rc) {
+		if (!is_between(tmp, PWRDN_DELAY_MIN_MS, PWRDN_DELAY_MAX_MS)) {
+			pr_err("Invalid PWRDN_DLY val %d (min=%d max=%d)\n",
+				tmp, PWRDN_DELAY_MIN_MS, PWRDN_DELAY_MAX_MS);
+			return -EINVAL;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(pwrup_pwrdn_ms); i++) {
+			if (tmp == pwrup_pwrdn_ms[i]) {
+				lcdb->pwrdn_delay_ms = i;
+				break;
+			}
+		}
+	}
+
+	return 0;
 }
 
 static int qpnp_lcdb_regulator_probe(struct platform_device *pdev)
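
The qcom,pwrdn-delay-ms parsing above maps a millisecond value onto a 2-bit
register field through the pwrup_pwrdn_ms table; values inside the allowed
range but absent from the table (for example 2 ms) are silently skipped and
pwrdn_delay_ms stays -EINVAL. A standalone sketch of the lookup:

    #include <stdio.h>

    static const unsigned int pwrup_pwrdn_ms[] = { 0, 1, 4, 8 };

    /* Returns the 2-bit field value for a supported delay, or -1. */
    static int pwrdn_delay_index(unsigned int ms)
    {
            size_t i;

            for (i = 0; i < sizeof(pwrup_pwrdn_ms) / sizeof(pwrup_pwrdn_ms[0]); i++)
                    if (pwrup_pwrdn_ms[i] == ms)
                            return (int)i;
            return -1;      /* e.g. 2 ms: inside [0, 8] but not programmable */
    }

    int main(void)
    {
            printf("%d %d\n", pwrdn_delay_index(4), pwrdn_delay_index(2)); /* 2 -1 */
            return 0;
    }
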
diff --git a/drivers/regulator/rpm-smd-regulator.c b/drivers/regulator/rpm-smd-regulator.c
new file mode 100644
index 0000000..9bc11fe
--- /dev/null
+++ b/drivers/regulator/rpm-smd-regulator.c
@@ -0,0 +1,2079 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2012-2015, 2018-2019, The Linux Foundation. All rights reserved. */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <soc/qcom/rpm-smd.h>
+#include <linux/debugfs.h>
+/* Debug Definitions */
+
+enum {
+	RPM_VREG_DEBUG_REQUEST		= BIT(0),
+	RPM_VREG_DEBUG_FULL_REQUEST	= BIT(1),
+	RPM_VREG_DEBUG_DUPLICATE	= BIT(2),
+};
+
+static int rpm_vreg_debug_mask;
+static bool is_debugfs_created;
+
+#define vreg_err(req, fmt, ...) \
+	pr_err("%s: " fmt, req->rdesc.name, ##__VA_ARGS__)
+
+/* RPM regulator request types */
+enum rpm_regulator_type {
+	RPM_REGULATOR_TYPE_LDO,
+	RPM_REGULATOR_TYPE_SMPS,
+	RPM_REGULATOR_TYPE_VS,
+	RPM_REGULATOR_TYPE_NCP,
+	RPM_REGULATOR_TYPE_BOB,
+	RPM_REGULATOR_TYPE_MAX,
+};
+
+/* Supported PMIC regulator LDO types */
+enum rpm_regulator_hw_type {
+	RPM_REGULATOR_HW_TYPE_UNKNOWN,
+	RPM_REGULATOR_HW_TYPE_PMIC4_LDO,
+	RPM_REGULATOR_HW_TYPE_PMIC5_LDO,
+	RPM_REGULATOR_HW_TYPE_MAX,
+};
+
+/* RPM resource parameters */
+enum rpm_regulator_param_index {
+	RPM_REGULATOR_PARAM_ENABLE,
+	RPM_REGULATOR_PARAM_VOLTAGE,
+	RPM_REGULATOR_PARAM_CURRENT,
+	RPM_REGULATOR_PARAM_MODE_LDO,
+	RPM_REGULATOR_PARAM_MODE_SMPS,
+	RPM_REGULATOR_PARAM_PIN_CTRL_ENABLE,
+	RPM_REGULATOR_PARAM_PIN_CTRL_MODE,
+	RPM_REGULATOR_PARAM_FREQUENCY,
+	RPM_REGULATOR_PARAM_HEAD_ROOM,
+	RPM_REGULATOR_PARAM_QUIET_MODE,
+	RPM_REGULATOR_PARAM_FREQ_REASON,
+	RPM_REGULATOR_PARAM_CORNER,
+	RPM_REGULATOR_PARAM_BYPASS,
+	RPM_REGULATOR_PARAM_FLOOR_CORNER,
+	RPM_REGULATOR_PARAM_LEVEL,
+	RPM_REGULATOR_PARAM_FLOOR_LEVEL,
+	RPM_REGULATOR_PARAM_MODE_BOB,
+	RPM_REGULATOR_PARAM_PIN_CTRL_VOLTAGE1,
+	RPM_REGULATOR_PARAM_PIN_CTRL_VOLTAGE2,
+	RPM_REGULATOR_PARAM_PIN_CTRL_VOLTAGE3,
+	RPM_REGULATOR_PARAM_MAX,
+};
+
+enum rpm_regulator_smps_mode {
+	RPM_REGULATOR_SMPS_MODE_AUTO	= 0,
+	RPM_REGULATOR_SMPS_MODE_IPEAK	= 1,
+	RPM_REGULATOR_SMPS_MODE_PWM	= 2,
+};
+
+enum rpm_regulator_ldo_mode {
+	RPM_REGULATOR_LDO_MODE_IPEAK	= 0,
+	RPM_REGULATOR_LDO_MODE_HPM	= 1,
+};
+
+enum rpm_regulator_bob_mode {
+	RPM_REGULATOR_BOB_MODE_PASS	= 0,
+	RPM_REGULATOR_BOB_MODE_PFM	= 1,
+	RPM_REGULATOR_BOB_MODE_AUTO	= 2,
+	RPM_REGULATOR_BOB_MODE_PWM	= 3,
+};
+
+#define RPM_SET_CONFIG_ACTIVE			BIT(0)
+#define RPM_SET_CONFIG_SLEEP			BIT(1)
+#define RPM_SET_CONFIG_BOTH			(RPM_SET_CONFIG_ACTIVE \
+						 | RPM_SET_CONFIG_SLEEP)
+struct rpm_regulator_param {
+	char	*name;
+	char	*property_name;
+	u32	key;
+	u32	min;
+	u32	max;
+	u32	supported_regulator_types;
+};
+
+#define PARAM(_idx, _support_ldo, _support_smps, _support_vs, _support_ncp, \
+		_support_bob, _name, _min, _max, _property_name)	\
+	[RPM_REGULATOR_PARAM_##_idx] = { \
+		.name = _name, \
+		.property_name = _property_name, \
+		.min = _min, \
+		.max = _max, \
+		.supported_regulator_types = \
+			_support_ldo << RPM_REGULATOR_TYPE_LDO | \
+			_support_smps << RPM_REGULATOR_TYPE_SMPS | \
+			_support_vs << RPM_REGULATOR_TYPE_VS | \
+			_support_ncp << RPM_REGULATOR_TYPE_NCP | \
+			_support_bob << RPM_REGULATOR_TYPE_BOB, \
+	}
+
+static struct rpm_regulator_param params[RPM_REGULATOR_PARAM_MAX] = {
+	/*    ID               LDO SMPS VS  NCP BOB  name  min max          property-name */
+	PARAM(ENABLE,            1,  1,  1,  1,  1, "swen", 0, 1,          "qcom,init-enable"),
+	PARAM(VOLTAGE,           1,  1,  0,  1,  1, "uv",   0, 0x7FFFFFF,  "qcom,init-voltage"),
+	PARAM(CURRENT,           0,  1,  0,  0,  0, "ma",   0, 0x1FFF,     "qcom,init-current"),
+	PARAM(MODE_LDO,          1,  0,  0,  0,  0, "lsmd", 0, 1,          "qcom,init-ldo-mode"),
+	PARAM(MODE_SMPS,         0,  1,  0,  0,  0, "ssmd", 0, 2,          "qcom,init-smps-mode"),
+	PARAM(PIN_CTRL_ENABLE,   1,  1,  1,  0,  0, "pcen", 0, 0xF,        "qcom,init-pin-ctrl-enable"),
+	PARAM(PIN_CTRL_MODE,     0,  1,  1,  0,  0, "pcmd", 0, 0x1F,       "qcom,init-pin-ctrl-mode"),
+	PARAM(FREQUENCY,         0,  1,  0,  1,  0, "freq", 0, 31,         "qcom,init-frequency"),
+	PARAM(HEAD_ROOM,         0,  0,  0,  1,  0, "hr",   0, 0x7FFFFFFF, "qcom,init-head-room"),
+	PARAM(QUIET_MODE,        0,  1,  0,  0,  0, "qm",   0, 2,          "qcom,init-quiet-mode"),
+	PARAM(FREQ_REASON,       0,  1,  0,  1,  0, "resn", 0, 8,          "qcom,init-freq-reason"),
+	PARAM(CORNER,            0,  1,  0,  0,  0, "corn", 0, 6,          "qcom,init-voltage-corner"),
+	PARAM(BYPASS,            0,  0,  0,  0,  0, "bypa", 0, 1,          "qcom,init-disallow-bypass"),
+	PARAM(FLOOR_CORNER,      0,  1,  0,  0,  0, "vfc",  0, 6,          "qcom,init-voltage-floor-corner"),
+	PARAM(LEVEL,             0,  1,  0,  0,  0, "vlvl", 0, 0xFFFF,     "qcom,init-voltage-level"),
+	PARAM(FLOOR_LEVEL,       0,  1,  0,  0,  0, "vfl",  0, 0xFFFF,     "qcom,init-voltage-floor-level"),
+	PARAM(MODE_BOB,          0,  0,  0,  0,  1, "bobm", 0, 3,          "qcom,init-bob-mode"),
+	PARAM(PIN_CTRL_VOLTAGE1, 0,  0,  0,  0,  1, "pcv1", 0, 0x7FFFFFF,  "qcom,init-pin-ctrl-voltage1"),
+	PARAM(PIN_CTRL_VOLTAGE2, 0,  0,  0,  0,  1, "pcv2", 0, 0x7FFFFFF,  "qcom,init-pin-ctrl-voltage2"),
+	PARAM(PIN_CTRL_VOLTAGE3, 0,  0,  0,  0,  1, "pcv3", 0, 0x7FFFFFF,  "qcom,init-pin-ctrl-voltage3"),
+};
+
+struct rpm_regulator_mode_map {
+	int			ldo_mode;
+	int			smps_mode;
+};
+
+static struct rpm_regulator_mode_map mode_mapping[] = {
+	[RPM_REGULATOR_MODE_AUTO]
+		= {-1,				 RPM_REGULATOR_SMPS_MODE_AUTO},
+	[RPM_REGULATOR_MODE_IPEAK]
+		= {RPM_REGULATOR_LDO_MODE_IPEAK, RPM_REGULATOR_SMPS_MODE_IPEAK},
+	[RPM_REGULATOR_MODE_HPM]
+		= {RPM_REGULATOR_LDO_MODE_HPM,   RPM_REGULATOR_SMPS_MODE_PWM},
+};
+
+/* Indices for use with pin control enable via enable/disable feature. */
+#define RPM_VREG_PIN_CTRL_STATE_DISABLE	0
+#define RPM_VREG_PIN_CTRL_STATE_ENABLE	1
+#define RPM_VREG_PIN_CTRL_STATE_COUNT	2
+
+struct rpm_vreg_request {
+	u32			param[RPM_REGULATOR_PARAM_MAX];
+	u32			valid;
+	u32			modified;
+};
+
+struct rpm_vreg {
+	struct rpm_vreg_request	aggr_req_active;
+	struct rpm_vreg_request	aggr_req_sleep;
+	struct list_head	reg_list;
+	const char		*resource_name;
+	u32			resource_id;
+	bool			allow_atomic;
+	int			regulator_type;
+	int			hpm_min_load;
+	int			enable_time;
+	spinlock_t		slock;
+	struct mutex		mlock;
+	unsigned long		flags;
+	bool			sleep_request_sent;
+	bool			wait_for_ack_active;
+	bool			wait_for_ack_sleep;
+	bool			always_wait_for_ack;
+	bool			apps_only;
+	struct msm_rpm_request	*handle_active;
+	struct msm_rpm_request	*handle_sleep;
+	enum rpm_regulator_hw_type	regulator_hw_type;
+};
+
+struct rpm_regulator {
+	struct regulator_desc	rdesc;
+	struct regulator_dev	*rdev;
+	struct rpm_vreg		*rpm_vreg;
+	struct dentry		*dfs_root;
+	struct list_head	list;
+	bool			set_active;
+	bool			set_sleep;
+	bool			always_send_voltage;
+	bool			always_send_current;
+	bool			use_pin_ctrl_for_enable;
+	struct rpm_vreg_request	req;
+	int			system_load;
+	int			min_uV;
+	int			max_uV;
+	u32			pin_ctrl_mask[RPM_VREG_PIN_CTRL_STATE_COUNT];
+	enum rpm_regulator_param_index voltage_index;
+	int			voltage_offset;
+};
+
+/*
+ * This voltage in uV is returned by get_voltage functions when there is no way
+ * to determine the current voltage level.  It is needed because the regulator
+ * framework treats a 0 uV voltage as an error.
+ */
+#define VOLTAGE_UNKNOWN 1
+
+/*
+ * Regulator requests sent in the active set take effect immediately.  Requests
+ * sent in the sleep set take effect when the Apps processor transitions into
+ * RPM assisted power collapse.  For any given regulator, if an active set
+ * request is present, but not a sleep set request, then the active set request
+ * is used at all times, even when the Apps processor is power collapsed.
+ *
+ * The rpm-regulator-smd takes advantage of this default usage of the active set
+ * request by only sending a sleep set request if it differs from the
+ * corresponding active set request.
+ */
+#define RPM_SET_ACTIVE	MSM_RPM_CTX_ACTIVE_SET
+#define RPM_SET_SLEEP	MSM_RPM_CTX_SLEEP_SET
+
+static u32 rpm_vreg_string_to_int(const u8 *str)
+{
+	int i, len;
+	u32 output = 0;
+
+	len = strnlen(str, sizeof(u32));
+	for (i = 0; i < len; i++)
+		output |= str[i] << (i * 8);
+
+	return output;
+}
+
+static inline void rpm_vreg_lock(struct rpm_vreg *rpm_vreg)
+{
+	if (rpm_vreg->allow_atomic)
+		spin_lock_irqsave(&rpm_vreg->slock, rpm_vreg->flags);
+	else
+		mutex_lock(&rpm_vreg->mlock);
+}
+
+static inline void rpm_vreg_unlock(struct rpm_vreg *rpm_vreg)
+{
+	if (rpm_vreg->allow_atomic)
+		spin_unlock_irqrestore(&rpm_vreg->slock, rpm_vreg->flags);
+	else
+		mutex_unlock(&rpm_vreg->mlock);
+}
+
+static inline bool rpm_vreg_active_or_sleep_enabled(struct rpm_vreg *rpm_vreg)
+{
+	return (rpm_vreg->aggr_req_active.param[RPM_REGULATOR_PARAM_ENABLE]
+			&& (rpm_vreg->aggr_req_active.valid
+				& BIT(RPM_REGULATOR_PARAM_ENABLE)))
+	    || ((rpm_vreg->aggr_req_sleep.param[RPM_REGULATOR_PARAM_ENABLE])
+				&& (rpm_vreg->aggr_req_sleep.valid
+					& BIT(RPM_REGULATOR_PARAM_ENABLE)));
+}
+
+static inline bool rpm_vreg_shared_active_or_sleep_enabled_valid
+						(struct rpm_vreg *rpm_vreg)
+{
+	return !rpm_vreg->apps_only &&
+		((rpm_vreg->aggr_req_active.valid
+					& BIT(RPM_REGULATOR_PARAM_ENABLE))
+		 || (rpm_vreg->aggr_req_sleep.valid
+					& BIT(RPM_REGULATOR_PARAM_ENABLE)));
+}
+
+static const u32 power_level_params =
+	BIT(RPM_REGULATOR_PARAM_ENABLE) |
+	BIT(RPM_REGULATOR_PARAM_VOLTAGE) |
+	BIT(RPM_REGULATOR_PARAM_CURRENT) |
+	BIT(RPM_REGULATOR_PARAM_CORNER) |
+	BIT(RPM_REGULATOR_PARAM_BYPASS) |
+	BIT(RPM_REGULATOR_PARAM_FLOOR_CORNER) |
+	BIT(RPM_REGULATOR_PARAM_LEVEL) |
+	BIT(RPM_REGULATOR_PARAM_FLOOR_LEVEL);
+
+static bool rpm_vreg_ack_required(struct rpm_vreg *rpm_vreg, u32 set,
+				const u32 *prev_param, const u32 *param,
+				u32 prev_valid, u32 modified)
+{
+	u32 mask;
+	int i;
+
+	if (rpm_vreg->always_wait_for_ack
+	    || (set == RPM_SET_ACTIVE && rpm_vreg->wait_for_ack_active)
+	    || (set == RPM_SET_SLEEP && rpm_vreg->wait_for_ack_sleep))
+		return true;
+
+	for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+		mask = BIT(i);
+		if (modified & mask) {
+			if ((prev_valid & mask) && (power_level_params & mask)
+			    && (param[i] <= prev_param[i]))
+				continue;
+			else
+				return true;
+		}
+	}
+
+	return false;
+}
+
+static void rpm_vreg_check_param_max(struct rpm_regulator *regulator, int index,
+					u32 new_max)
+{
+	struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+
+	if (regulator->set_active
+	    && (rpm_vreg->aggr_req_active.valid & BIT(index))
+	    && rpm_vreg->aggr_req_active.param[index] > new_max)
+		rpm_vreg->wait_for_ack_active = true;
+
+	if (regulator->set_sleep
+	    && (rpm_vreg->aggr_req_sleep.valid & BIT(index))
+	    && rpm_vreg->aggr_req_sleep.param[index] > new_max)
+		rpm_vreg->wait_for_ack_sleep = true;
+}
+
+/*
+ * This is used when voting for LPM or HPM by subtracting or adding to the
+ * hpm_min_load of a regulator.  It has units of uA.
+ */
+#define LOAD_THRESHOLD_STEP	1000
+
+static inline int rpm_vreg_hpm_min_uA(struct rpm_vreg *rpm_vreg)
+{
+	return rpm_vreg->hpm_min_load;
+}
+
+static inline int rpm_vreg_lpm_max_uA(struct rpm_vreg *rpm_vreg)
+{
+	return rpm_vreg->hpm_min_load - LOAD_THRESHOLD_STEP;
+}
+
+#define MICRO_TO_MILLI(uV)	((uV) / 1000)
+#define MILLI_TO_MICRO(uV)	((uV) * 1000)
+
+#define DEBUG_PRINT_BUFFER_SIZE 512
+#define REQ_SENT	0
+#define REQ_PREV	1
+#define REQ_CACHED	2
+#define REQ_TYPES	3
+
+static void rpm_regulator_req(struct rpm_regulator *regulator, int set,
+				bool sent)
+{
+	char buf[DEBUG_PRINT_BUFFER_SIZE];
+	size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
+	struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+	struct rpm_vreg_request *aggr;
+	bool first;
+	u32 mask[REQ_TYPES] = {0, 0, 0};
+	const char *req_names[REQ_TYPES] = {"sent", "prev", "cached"};
+	int pos = 0;
+	int i, j;
+
+	aggr = (set == RPM_SET_ACTIVE)
+		? &rpm_vreg->aggr_req_active : &rpm_vreg->aggr_req_sleep;
+
+	if (rpm_vreg_debug_mask & RPM_VREG_DEBUG_DUPLICATE) {
+		mask[REQ_SENT] = aggr->modified;
+		mask[REQ_PREV] = aggr->valid & ~aggr->modified;
+	} else if (sent
+		   && (rpm_vreg_debug_mask & RPM_VREG_DEBUG_FULL_REQUEST)) {
+		mask[REQ_SENT] = aggr->modified;
+		mask[REQ_PREV] = aggr->valid & ~aggr->modified;
+	} else if (sent && (rpm_vreg_debug_mask & RPM_VREG_DEBUG_REQUEST)) {
+		mask[REQ_SENT] = aggr->modified;
+	}
+
+	if (!(mask[REQ_SENT] | mask[REQ_PREV]))
+		return;
+
+	if (set == RPM_SET_SLEEP && !rpm_vreg->sleep_request_sent) {
+		mask[REQ_CACHED] = mask[REQ_SENT] | mask[REQ_PREV];
+		mask[REQ_SENT] = 0;
+		mask[REQ_PREV] = 0;
+	}
+
+	pos += scnprintf(buf + pos, buflen - pos, "%s%s: ",
+			KERN_INFO, __func__);
+
+	pos += scnprintf(buf + pos, buflen - pos, "%s %u (%s): s=%s",
+			rpm_vreg->resource_name, rpm_vreg->resource_id,
+			regulator->rdesc.name,
+			(set == RPM_SET_ACTIVE ? "act" : "slp"));
+
+	for (i = 0; i < REQ_TYPES; i++) {
+		if (mask[i])
+			pos += scnprintf(buf + pos, buflen - pos, "; %s: ",
+					req_names[i]);
+
+		first = true;
+		for (j = 0; j < RPM_REGULATOR_PARAM_MAX; j++) {
+			if (mask[i] & BIT(j)) {
+				pos += scnprintf(buf + pos, buflen - pos,
+					"%s%s=%u", (first ? "" : ", "),
+					params[j].name, aggr->param[j]);
+				first = false;
+			}
+		}
+	}
+
+	pos += scnprintf(buf + pos, buflen - pos, "\n");
+	pr_info("%s\n", buf);
+}
+
+#define RPM_VREG_SET_PARAM(_regulator, _param, _val) \
+{ \
+	(_regulator)->req.param[RPM_REGULATOR_PARAM_##_param] = _val; \
+	(_regulator)->req.modified |= BIT(RPM_REGULATOR_PARAM_##_param); \
+}
+
+static int rpm_vreg_add_kvp_to_request(struct rpm_vreg *rpm_vreg,
+				       const u32 *param, int idx, u32 set)
+{
+	struct msm_rpm_request *handle;
+
+	handle = (set == RPM_SET_ACTIVE	? rpm_vreg->handle_active
+					: rpm_vreg->handle_sleep);
+
+	if (rpm_vreg->allow_atomic)
+		return msm_rpm_add_kvp_data_noirq(handle, params[idx].key,
+						  (u8 *)&param[idx], 4);
+	else
+		return msm_rpm_add_kvp_data(handle, params[idx].key,
+					    (u8 *)&param[idx], 4);
+}
+
+static void rpm_vreg_check_modified_requests(const u32 *prev_param,
+		const u32 *param, u32 prev_valid, u32 *modified)
+{
+	u32 value_changed = 0;
+	int i;
+
+	for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+		if (param[i] != prev_param[i])
+			value_changed |= BIT(i);
+	}
+
+	/*
+	 * Only keep bits that are for changed parameters or previously
+	 * invalid parameters.
+	 */
+	*modified &= value_changed | ~prev_valid;
+}
+
+static int rpm_vreg_add_modified_requests(struct rpm_regulator *regulator,
+		u32 set, const u32 *param, u32 modified)
+{
+	struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+	int rc = 0;
+	int i;
+
+	for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+		/* Only send requests for modified parameters. */
+		if (modified & BIT(i)) {
+			rc = rpm_vreg_add_kvp_to_request(rpm_vreg, param, i,
+							set);
+			if (rc) {
+				vreg_err(regulator,
+					"add KVP failed: %s %u; %s, rc=%d\n",
+					rpm_vreg->resource_name,
+					rpm_vreg->resource_id, params[i].name,
+					rc);
+				return rc;
+			}
+		}
+	}
+
+	return rc;
+}
+
+static int rpm_vreg_send_request(struct rpm_regulator *regulator, u32 set,
+				bool wait_for_ack)
+{
+	struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+	struct msm_rpm_request *handle
+		= (set == RPM_SET_ACTIVE ? rpm_vreg->handle_active
+					: rpm_vreg->handle_sleep);
+	int rc = 0;
+	void *temp;
+
+	if (unlikely(rpm_vreg->allow_atomic)) {
+		rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(
+						  handle));
+	} else if (wait_for_ack) {
+		rc = msm_rpm_wait_for_ack(msm_rpm_send_request(handle));
+	} else {
+		temp = msm_rpm_send_request_noack(handle);
+		if (IS_ERR(temp))
+			rc = PTR_ERR(temp);
+	}
+
+	if (rc)
+		vreg_err(regulator,
+			"msm rpm send failed: %s %u; set=%s, rc=%d\n",
+			rpm_vreg->resource_name,
+			rpm_vreg->resource_id,
+			(set == RPM_SET_ACTIVE ? "act" : "slp"), rc);
+
+	return rc;
+}
+
+#define RPM_VREG_AGGR_MIN(_idx, _param_aggr, _param_reg) \
+{ \
+	_param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+	 = min(_param_aggr[RPM_REGULATOR_PARAM_##_idx], \
+		_param_reg[RPM_REGULATOR_PARAM_##_idx]); \
+}
+
+#define RPM_VREG_AGGR_MAX(_idx, _param_aggr, _param_reg) \
+{ \
+	_param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+	 = max(_param_aggr[RPM_REGULATOR_PARAM_##_idx], \
+		_param_reg[RPM_REGULATOR_PARAM_##_idx]); \
+}
+
+#define RPM_VREG_AGGR_SUM(_idx, _param_aggr, _param_reg) \
+{ \
+	_param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+		 += _param_reg[RPM_REGULATOR_PARAM_##_idx]; \
+}
+
+#define RPM_VREG_AGGR_OR(_idx, _param_aggr, _param_reg) \
+{ \
+	_param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+		|= _param_reg[RPM_REGULATOR_PARAM_##_idx]; \
+}
+
+/*
+ * Aggregation is performed on each parameter based on the way that the RPM
+ * aggregates that type internally between RPM masters.
+ */
+static void rpm_vreg_aggregate_params(u32 *param_aggr, const u32 *param_reg)
+{
+	RPM_VREG_AGGR_MAX(ENABLE, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(VOLTAGE, param_aggr, param_reg);
+	RPM_VREG_AGGR_SUM(CURRENT, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(MODE_LDO, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(MODE_SMPS, param_aggr, param_reg);
+	RPM_VREG_AGGR_OR(PIN_CTRL_ENABLE, param_aggr, param_reg);
+	RPM_VREG_AGGR_OR(PIN_CTRL_MODE, param_aggr, param_reg);
+	RPM_VREG_AGGR_MIN(FREQUENCY, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(HEAD_ROOM, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(QUIET_MODE, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(FREQ_REASON, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(CORNER, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(BYPASS, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(FLOOR_CORNER, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(LEVEL, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(FLOOR_LEVEL, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(MODE_BOB, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(PIN_CTRL_VOLTAGE1, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(PIN_CTRL_VOLTAGE2, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(PIN_CTRL_VOLTAGE3, param_aggr, param_reg);
+}
+
+static int rpm_vreg_aggregate_requests(struct rpm_regulator *regulator)
+{
+	struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+	u32 param_active[RPM_REGULATOR_PARAM_MAX];
+	u32 param_sleep[RPM_REGULATOR_PARAM_MAX];
+	u32 modified_active, modified_sleep;
+	struct rpm_regulator *reg;
+	bool sleep_set_differs = false;
+	bool send_active = false;
+	bool send_sleep = false;
+	bool wait_for_ack;
+	int rc = 0;
+	int i;
+
+	memset(param_active, 0, sizeof(param_active));
+	memset(param_sleep, 0, sizeof(param_sleep));
+	modified_active = rpm_vreg->aggr_req_active.modified;
+	modified_sleep = rpm_vreg->aggr_req_sleep.modified;
+
+	/*
+	 * Aggregate all of the requests for this regulator in both active
+	 * and sleep sets.
+	 */
+	list_for_each_entry(reg, &rpm_vreg->reg_list, list) {
+		if (reg->set_active) {
+			rpm_vreg_aggregate_params(param_active, reg->req.param);
+			modified_active |= reg->req.modified;
+		}
+		if (reg->set_sleep) {
+			rpm_vreg_aggregate_params(param_sleep, reg->req.param);
+			modified_sleep |= reg->req.modified;
+		}
+	}
+
+	/*
+	 * Check if the aggregated sleep set parameter values differ from the
+	 * aggregated active set parameter values.
+	 */
+	if (!rpm_vreg->sleep_request_sent) {
+		for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+			if ((param_active[i] != param_sleep[i])
+			    && (modified_sleep & BIT(i))) {
+				sleep_set_differs = true;
+				break;
+			}
+		}
+	}
+
+	/* Add KVPs to the active set RPM request if they have new values. */
+	rpm_vreg_check_modified_requests(rpm_vreg->aggr_req_active.param,
+		param_active, rpm_vreg->aggr_req_active.valid,
+		&modified_active);
+	rc = rpm_vreg_add_modified_requests(regulator, RPM_SET_ACTIVE,
+		param_active, modified_active);
+	if (rc)
+		return rc;
+	send_active = modified_active;
+
+	/*
+	 * Sleep set configurations are only sent if they differ from the
+	 * active set values.  This is because the active set values will take
+	 * effect during rpm assisted power collapse in the absence of sleep set
+	 * values.
+	 *
+	 * However, once a sleep set request is sent for a given regulator,
+	 * additional sleep set requests must be sent in the future even if they
+	 * match the corresponding active set requests.
+	 */
+	if (rpm_vreg->sleep_request_sent || sleep_set_differs) {
+		/* Add KVPs to the sleep set RPM request if they are new. */
+		rpm_vreg_check_modified_requests(rpm_vreg->aggr_req_sleep.param,
+			param_sleep, rpm_vreg->aggr_req_sleep.valid,
+			&modified_sleep);
+		rc = rpm_vreg_add_modified_requests(regulator, RPM_SET_SLEEP,
+			param_sleep, modified_sleep);
+		if (rc)
+			return rc;
+		send_sleep = modified_sleep;
+	}
+
+	/* Send active set request to the RPM if it contains new KVPs. */
+	if (send_active) {
+		wait_for_ack = rpm_vreg_ack_required(rpm_vreg, RPM_SET_ACTIVE,
+					rpm_vreg->aggr_req_active.param,
+					param_active,
+					rpm_vreg->aggr_req_active.valid,
+					modified_active);
+		rc = rpm_vreg_send_request(regulator, RPM_SET_ACTIVE,
+						wait_for_ack);
+		if (rc)
+			return rc;
+		rpm_vreg->aggr_req_active.valid |= modified_active;
+		rpm_vreg->wait_for_ack_active = false;
+	}
+	/* Store the results of the aggregation. */
+	rpm_vreg->aggr_req_active.modified = modified_active;
+	memcpy(rpm_vreg->aggr_req_active.param, param_active,
+		sizeof(param_active));
+
+	/* Handle debug printing of the active set request. */
+	rpm_regulator_req(regulator, RPM_SET_ACTIVE, send_active);
+	if (send_active)
+		rpm_vreg->aggr_req_active.modified = 0;
+
+	/* Send sleep set request to the RPM if it contains new KVPs. */
+	if (send_sleep) {
+		wait_for_ack = rpm_vreg_ack_required(rpm_vreg, RPM_SET_SLEEP,
+					rpm_vreg->aggr_req_sleep.param,
+					param_sleep,
+					rpm_vreg->aggr_req_sleep.valid,
+					modified_sleep);
+		rc = rpm_vreg_send_request(regulator, RPM_SET_SLEEP,
+						wait_for_ack);
+		if (rc)
+			return rc;
+
+		rpm_vreg->sleep_request_sent = true;
+		rpm_vreg->aggr_req_sleep.valid |= modified_sleep;
+		rpm_vreg->wait_for_ack_sleep = false;
+	}
+	/* Store the results of the aggregation. */
+	rpm_vreg->aggr_req_sleep.modified = modified_sleep;
+	memcpy(rpm_vreg->aggr_req_sleep.param, param_sleep,
+		sizeof(param_sleep));
+
+	/* Handle debug printing of the sleep set request. */
+	rpm_regulator_req(regulator, RPM_SET_SLEEP, send_sleep);
+	if (send_sleep)
+		rpm_vreg->aggr_req_sleep.modified = 0;
+
+	/*
+	 * Loop over all requests for this regulator to update the valid and
+	 * modified values for use in future aggregation.
+	 */
+	list_for_each_entry(reg, &rpm_vreg->reg_list, list) {
+		reg->req.valid |= reg->req.modified;
+		reg->req.modified = 0;
+	}
+
+	return rc;
+}
+
+static int rpm_vreg_is_enabled(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+	if (likely(!reg->use_pin_ctrl_for_enable))
+		return reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+	else
+		return reg->req.param[RPM_REGULATOR_PARAM_PIN_CTRL_ENABLE]
+			== reg->pin_ctrl_mask[RPM_VREG_PIN_CTRL_STATE_ENABLE];
+}
+
+static int rpm_vreg_enable(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int rc;
+	u32 prev_enable;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	if (likely(!reg->use_pin_ctrl_for_enable)) {
+		/* Enable using swen KVP. */
+		prev_enable = reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+		RPM_VREG_SET_PARAM(reg, ENABLE, 1);
+		rc = rpm_vreg_aggregate_requests(reg);
+		if (rc) {
+			vreg_err(reg, "enable failed, rc=%d\n", rc);
+			RPM_VREG_SET_PARAM(reg, ENABLE, prev_enable);
+		}
+	} else {
+		/* Enable using pcen KVP. */
+		prev_enable
+			= reg->req.param[RPM_REGULATOR_PARAM_PIN_CTRL_ENABLE];
+		RPM_VREG_SET_PARAM(reg, PIN_CTRL_ENABLE,
+			reg->pin_ctrl_mask[RPM_VREG_PIN_CTRL_STATE_ENABLE]);
+		rc = rpm_vreg_aggregate_requests(reg);
+		if (rc) {
+			vreg_err(reg, "enable failed, rc=%d\n", rc);
+			RPM_VREG_SET_PARAM(reg, PIN_CTRL_ENABLE, prev_enable);
+		}
+	}
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static int rpm_vreg_disable(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int rc;
+	u32 prev_enable;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	if (likely(!reg->use_pin_ctrl_for_enable)) {
+		/* Disable using swen KVP. */
+		prev_enable = reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+		RPM_VREG_SET_PARAM(reg, ENABLE, 0);
+		rc = rpm_vreg_aggregate_requests(reg);
+		if (rc) {
+			vreg_err(reg, "disable failed, rc=%d\n", rc);
+			RPM_VREG_SET_PARAM(reg, ENABLE, prev_enable);
+		}
+	} else {
+		/* Disable using pcen KVP. */
+		prev_enable
+			= reg->req.param[RPM_REGULATOR_PARAM_PIN_CTRL_ENABLE];
+		RPM_VREG_SET_PARAM(reg, PIN_CTRL_ENABLE,
+			reg->pin_ctrl_mask[RPM_VREG_PIN_CTRL_STATE_DISABLE]);
+		rc = rpm_vreg_aggregate_requests(reg);
+		if (rc) {
+			vreg_err(reg, "disable failed, rc=%d\n", rc);
+			RPM_VREG_SET_PARAM(reg, PIN_CTRL_ENABLE, prev_enable);
+		}
+	}
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+#define RPM_VREG_SET_VOLTAGE(_regulator, _val) \
+{ \
+	(_regulator)->req.param[(_regulator)->voltage_index] = _val; \
+	(_regulator)->req.modified |= BIT((_regulator)->voltage_index); \
+}
+
+static int rpm_vreg_set_voltage(struct regulator_dev *rdev, int min_uV,
+				int max_uV, unsigned int *selector)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int rc = 0;
+	int voltage;
+	u32 prev_voltage;
+
+	voltage = min_uV - reg->voltage_offset;
+
+	if (voltage < params[reg->voltage_index].min
+	    || voltage > params[reg->voltage_index].max) {
+		vreg_err(reg, "voltage=%d for key=%s is not within allowed range: [%u, %u]\n",
+			voltage, params[reg->voltage_index].name,
+			params[reg->voltage_index].min,
+			params[reg->voltage_index].max);
+		return -EINVAL;
+	}
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	prev_voltage = reg->req.param[reg->voltage_index];
+	RPM_VREG_SET_VOLTAGE(reg, voltage);
+
+	rpm_vreg_check_param_max(reg, reg->voltage_index,
+				max_uV - reg->voltage_offset);
+
+	/*
+	 * Only send a new voltage if the regulator is currently enabled or
+	 * if the regulator has been configured to always send voltage updates.
+	 */
+	if (reg->always_send_voltage
+	    || rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg)
+	    || rpm_vreg_shared_active_or_sleep_enabled_valid(reg->rpm_vreg))
+		rc = rpm_vreg_aggregate_requests(reg);
+
+	if (rc) {
+		vreg_err(reg, "set voltage for key=%s failed, rc=%d\n",
+			params[reg->voltage_index].name, rc);
+		RPM_VREG_SET_VOLTAGE(reg, prev_voltage);
+	}
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static int rpm_vreg_get_voltage(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int uV;
+
+	uV = reg->req.param[reg->voltage_index] + reg->voltage_offset;
+	if (uV == 0)
+		uV = VOLTAGE_UNKNOWN;
+
+	return uV;
+}
+
+#define REGULATOR_MODE_PMIC4_LDO_LPM	0
+#define REGULATOR_MODE_PMIC4_LDO_HPM	1
+#define REGULATOR_MODE_PMIC5_LDO_LPM	4
+#define REGULATOR_MODE_PMIC5_LDO_HPM	7
+
+static int _rpm_vreg_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	u32 hw_mode;
+	int rc = 0;
+
+	if (mode == REGULATOR_MODE_NORMAL) {
+		switch (reg->rpm_vreg->regulator_hw_type) {
+		case RPM_REGULATOR_HW_TYPE_PMIC4_LDO:
+			hw_mode = REGULATOR_MODE_PMIC4_LDO_HPM;
+			break;
+
+		case RPM_REGULATOR_HW_TYPE_PMIC5_LDO:
+			hw_mode = REGULATOR_MODE_PMIC5_LDO_HPM;
+			break;
+
+		default:
+			vreg_err(reg, "unsupported ldo hw type: %d\n",
+					reg->rpm_vreg->regulator_hw_type);
+			return -EINVAL;
+		}
+	} else if (mode == REGULATOR_MODE_IDLE) {
+		switch (reg->rpm_vreg->regulator_hw_type) {
+		case RPM_REGULATOR_HW_TYPE_PMIC4_LDO:
+			hw_mode = REGULATOR_MODE_PMIC4_LDO_LPM;
+			break;
+
+		case RPM_REGULATOR_HW_TYPE_PMIC5_LDO:
+			hw_mode = REGULATOR_MODE_PMIC5_LDO_LPM;
+			break;
+
+		default:
+			vreg_err(reg, "unsupported ldo hw type: %d\n",
+					reg->rpm_vreg->regulator_hw_type);
+			return -EINVAL;
+		}
+	} else {
+		vreg_err(reg, "invalid mode: %u\n", mode);
+		return -EINVAL;
+	}
+
+	RPM_VREG_SET_PARAM(reg, MODE_LDO, hw_mode);
+
+	/*
+	 * Only send the mode if the regulator is currently enabled or if the
+	 * regulator has been configured to always send current updates.
+	 */
+	if (reg->always_send_current
+	    || rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg)
+	    || rpm_vreg_shared_active_or_sleep_enabled_valid(reg->rpm_vreg))
+		rc = rpm_vreg_aggregate_requests(reg);
+
+	if (rc)
+		vreg_err(reg, "set mode failed, rc=%d\n", rc);
+
+	return rc;
+}
+
+static int rpm_vreg_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	rc = _rpm_vreg_ldo_set_mode(rdev, mode);
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static unsigned int rpm_vreg_ldo_get_mode(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	u32 hw_mode;
+
+	hw_mode = reg->req.param[RPM_REGULATOR_PARAM_MODE_LDO];
+	if (hw_mode == REGULATOR_MODE_PMIC4_LDO_HPM ||
+			hw_mode == REGULATOR_MODE_PMIC5_LDO_HPM)
+		return REGULATOR_MODE_NORMAL;
+
+	return REGULATOR_MODE_IDLE;
+}
+
+static int rpm_vreg_ldo_set_load(struct regulator_dev *rdev, int load_uA)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	unsigned int mode;
+	int rc = 0;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	mode = (load_uA + reg->system_load >= reg->rpm_vreg->hpm_min_load)
+		? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+	rc = _rpm_vreg_ldo_set_mode(rdev, mode);
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static int rpm_vreg_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int rc = 0;
+	u32 prev_current;
+	int prev_uA;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	prev_current = reg->req.param[RPM_REGULATOR_PARAM_CURRENT];
+	prev_uA = MILLI_TO_MICRO(prev_current);
+
+	if (mode == REGULATOR_MODE_NORMAL) {
+		/* Make sure that request current is in HPM range. */
+		if (prev_uA < rpm_vreg_hpm_min_uA(reg->rpm_vreg))
+			RPM_VREG_SET_PARAM(reg, CURRENT,
+			    MICRO_TO_MILLI(rpm_vreg_hpm_min_uA(reg->rpm_vreg)));
+	} else if (REGULATOR_MODE_IDLE) {
+		/* Make sure that request current is in LPM range. */
+		if (prev_uA > rpm_vreg_lpm_max_uA(reg->rpm_vreg))
+			RPM_VREG_SET_PARAM(reg, CURRENT,
+			    MICRO_TO_MILLI(rpm_vreg_lpm_max_uA(reg->rpm_vreg)));
+	} else {
+		vreg_err(reg, "invalid mode: %u\n", mode);
+		rpm_vreg_unlock(reg->rpm_vreg);
+		return -EINVAL;
+	}
+
+	/*
+	 * Only send a new load current value if the regulator is currently
+	 * enabled or if the regulator has been configured to always send
+	 * current updates.
+	 */
+	if (reg->always_send_current
+	    || rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg)
+	    || rpm_vreg_shared_active_or_sleep_enabled_valid(reg->rpm_vreg))
+		rc = rpm_vreg_aggregate_requests(reg);
+
+	if (rc) {
+		vreg_err(reg, "set mode failed, rc=%d\n", rc);
+		RPM_VREG_SET_PARAM(reg, CURRENT, prev_current);
+	}
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static unsigned int rpm_vreg_get_mode(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+	return (reg->req.param[RPM_REGULATOR_PARAM_CURRENT]
+			>= MICRO_TO_MILLI(reg->rpm_vreg->hpm_min_load))
+		? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
+static unsigned int rpm_vreg_get_optimum_mode(struct regulator_dev *rdev,
+			int input_uV, int output_uV, int load_uA)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	u32 load_mA;
+
+	load_uA += reg->system_load;
+
+	load_mA = MICRO_TO_MILLI(load_uA);
+	if (load_mA > params[RPM_REGULATOR_PARAM_CURRENT].max)
+		load_mA = params[RPM_REGULATOR_PARAM_CURRENT].max;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+	RPM_VREG_SET_PARAM(reg, CURRENT, load_mA);
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return (load_uA >= reg->rpm_vreg->hpm_min_load)
+		? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
+static int rpm_vreg_set_bob_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int rc;
+	u32 prev_mode;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	prev_mode = reg->req.param[RPM_REGULATOR_PARAM_MODE_BOB];
+
+	switch (mode) {
+	case REGULATOR_MODE_FAST:
+		RPM_VREG_SET_PARAM(reg, MODE_BOB, RPM_REGULATOR_BOB_MODE_PWM);
+		break;
+	case REGULATOR_MODE_NORMAL:
+		RPM_VREG_SET_PARAM(reg, MODE_BOB, RPM_REGULATOR_BOB_MODE_AUTO);
+		break;
+	case REGULATOR_MODE_IDLE:
+		RPM_VREG_SET_PARAM(reg, MODE_BOB, RPM_REGULATOR_BOB_MODE_PFM);
+		break;
+	case REGULATOR_MODE_STANDBY:
+		RPM_VREG_SET_PARAM(reg, MODE_BOB, RPM_REGULATOR_BOB_MODE_PASS);
+		break;
+	default:
+		vreg_err(reg, "invalid mode: %u\n", mode);
+		rpm_vreg_unlock(reg->rpm_vreg);
+		return -EINVAL;
+	}
+
+	rc = rpm_vreg_aggregate_requests(reg);
+	if (rc) {
+		vreg_err(reg, "set BoB mode failed, rc=%d\n", rc);
+		RPM_VREG_SET_PARAM(reg, MODE_BOB, prev_mode);
+	}
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static unsigned int rpm_vreg_get_bob_mode(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	unsigned int mode;
+
+	switch (reg->req.param[RPM_REGULATOR_PARAM_MODE_BOB]) {
+	case RPM_REGULATOR_BOB_MODE_PWM:
+		mode = REGULATOR_MODE_FAST;
+		break;
+	case RPM_REGULATOR_BOB_MODE_AUTO:
+		mode = REGULATOR_MODE_NORMAL;
+		break;
+	case RPM_REGULATOR_BOB_MODE_PFM:
+		mode = REGULATOR_MODE_IDLE;
+		break;
+	case RPM_REGULATOR_BOB_MODE_PASS:
+		mode = REGULATOR_MODE_STANDBY;
+		break;
+	default:
+		vreg_err(reg, "BoB mode unknown\n");
+		mode = REGULATOR_MODE_NORMAL;
+	}
+
+	return mode;
+}
+
+static int rpm_vreg_enable_time(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+	return reg->rpm_vreg->enable_time;
+}
+
+static int rpm_vreg_send_defaults(struct rpm_regulator *reg)
+{
+	int rc;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+	rc = rpm_vreg_aggregate_requests(reg);
+	if (rc)
+		vreg_err(reg, "RPM request failed, rc=%d", rc);
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static int rpm_vreg_configure_pin_control_enable(struct rpm_regulator *reg,
+		struct device_node *node)
+{
+	struct rpm_regulator_param *pcen_param =
+			&params[RPM_REGULATOR_PARAM_PIN_CTRL_ENABLE];
+	int rc, i;
+
+	if (!of_find_property(node, "qcom,enable-with-pin-ctrl", NULL))
+		return 0;
+
+	if (pcen_param->supported_regulator_types
+			& BIT(reg->rpm_vreg->regulator_type)) {
+		rc = of_property_read_u32_array(node,
+			"qcom,enable-with-pin-ctrl", reg->pin_ctrl_mask,
+			RPM_VREG_PIN_CTRL_STATE_COUNT);
+		if (rc) {
+			vreg_err(reg, "could not read qcom,enable-with-pin-ctrl, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* Verify that the mask values are valid. */
+		for (i = 0; i < RPM_VREG_PIN_CTRL_STATE_COUNT; i++) {
+			if (reg->pin_ctrl_mask[i] < pcen_param->min
+			    || reg->pin_ctrl_mask[i] > pcen_param->max) {
+				vreg_err(reg, "device tree property: qcom,enable-with-pin-ctrl[%d]=%u is outside allowed range [%u, %u]\n",
+					i, reg->pin_ctrl_mask[i],
+					pcen_param->min, pcen_param->max);
+				return -EINVAL;
+			}
+		}
+
+		reg->use_pin_ctrl_for_enable = true;
+	} else {
+		pr_warn("%s: regulator type=%d does not support device tree property: qcom,enable-with-pin-ctrl\n",
+			reg->rdesc.name, reg->rpm_vreg->regulator_type);
+	}
+
+	return 0;
+}
+
+/**
+ * rpm_regulator_get() - lookup and obtain a handle to an RPM regulator
+ * @dev: device for regulator consumer
+ * @supply: supply name
+ *
+ * Returns a struct rpm_regulator corresponding to the regulator producer,
+ * or ERR_PTR() containing errno.
+ *
+ * This function may only be called from nonatomic context.
+ */
+struct rpm_regulator *rpm_regulator_get(struct device *dev, const char *supply)
+{
+	struct rpm_regulator *framework_reg;
+	struct rpm_regulator *priv_reg = NULL;
+	struct regulator *regulator;
+	struct rpm_vreg *rpm_vreg;
+
+	regulator = regulator_get(dev, supply);
+	if (IS_ERR(regulator)) {
+		pr_err("could not find regulator for: dev=%s, supply=%s, rc=%ld\n",
+			(dev ? dev_name(dev) : ""), (supply ? supply : ""),
+			PTR_ERR(regulator));
+		return ERR_CAST(regulator);
+	}
+
+	framework_reg = regulator_get_drvdata(regulator);
+	if (framework_reg == NULL) {
+		pr_err("regulator structure not found\n");
+		regulator_put(regulator);
+		return ERR_PTR(-ENODEV);
+	}
+	regulator_put(regulator);
+
+	rpm_vreg = framework_reg->rpm_vreg;
+
+	priv_reg = kzalloc(sizeof(*priv_reg), GFP_KERNEL);
+	if (priv_reg == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	/*
+	 * Allocate a regulator_dev struct so that framework callback functions
+	 * can be called from the private API functions.
+	 */
+	priv_reg->rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL);
+	if (priv_reg->rdev == NULL) {
+		kfree(priv_reg);
+		return ERR_PTR(-ENOMEM);
+	}
+	priv_reg->rdev->reg_data	= priv_reg;
+	priv_reg->rpm_vreg		= rpm_vreg;
+	priv_reg->rdesc.name		= framework_reg->rdesc.name;
+	priv_reg->rdesc.ops		= framework_reg->rdesc.ops;
+	priv_reg->set_active		= framework_reg->set_active;
+	priv_reg->set_sleep		= framework_reg->set_sleep;
+	priv_reg->min_uV		= framework_reg->min_uV;
+	priv_reg->max_uV		= framework_reg->max_uV;
+	priv_reg->system_load		= framework_reg->system_load;
+
+	might_sleep_if(!rpm_vreg->allow_atomic);
+	rpm_vreg_lock(rpm_vreg);
+	list_add(&priv_reg->list, &rpm_vreg->reg_list);
+	rpm_vreg_unlock(rpm_vreg);
+
+	return priv_reg;
+}
+EXPORT_SYMBOL(rpm_regulator_get);
+
+static int rpm_regulator_check_input(struct rpm_regulator *regulator)
+{
+	if (IS_ERR_OR_NULL(regulator) || regulator->rpm_vreg == NULL) {
+		pr_err("invalid rpm_regulator pointer\n");
+		return -EINVAL;
+	}
+
+	might_sleep_if(!regulator->rpm_vreg->allow_atomic);
+
+	return 0;
+}
+
+/**
+ * rpm_regulator_put() - free the RPM regulator handle
+ * @regulator: RPM regulator handle
+ *
+ * Parameter reaggregation does not take place when rpm_regulator_put is called.
+ * Therefore, regulator enable state and voltage must be configured
+ * appropriately before calling rpm_regulator_put.
+ *
+ * This function may be called from either atomic or nonatomic context.  If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+void rpm_regulator_put(struct rpm_regulator *regulator)
+{
+	struct rpm_vreg *rpm_vreg;
+	int rc = rpm_regulator_check_input(regulator);
+
+	if (rc)
+		return;
+
+	rpm_vreg = regulator->rpm_vreg;
+
+	might_sleep_if(!rpm_vreg->allow_atomic);
+	rpm_vreg_lock(rpm_vreg);
+	list_del(&regulator->list);
+	rpm_vreg_unlock(rpm_vreg);
+
+	kfree(regulator->rdev);
+	kfree(regulator);
+}
+EXPORT_SYMBOL(rpm_regulator_put);
+
+/**
+ * rpm_regulator_enable() - enable regulator output
+ * @regulator: RPM regulator handle
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * This function may be called from either atomic or nonatomic context.  If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_enable(struct rpm_regulator *regulator)
+{
+	int rc = rpm_regulator_check_input(regulator);
+
+	if (rc)
+		return rc;
+
+	return rpm_vreg_enable(regulator->rdev);
+}
+EXPORT_SYMBOL(rpm_regulator_enable);
+
+/**
+ * rpm_regulator_disable() - disable regulator output
+ * @regulator: RPM regulator handle
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * The enable state of the regulator is determined by aggregating the requests
+ * of all consumers.  Therefore, it is possible that the regulator will remain
+ * enabled even after rpm_regulator_disable is called.
+ *
+ * This function may be called from either atomic or nonatomic context.  If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_disable(struct rpm_regulator *regulator)
+{
+	int rc = rpm_regulator_check_input(regulator);
+
+	if (rc)
+		return rc;
+
+	return rpm_vreg_disable(regulator->rdev);
+}
+EXPORT_SYMBOL(rpm_regulator_disable);
+
+/**
+ * rpm_regulator_set_voltage() - set regulator output voltage
+ * @regulator: RPM regulator handle
+ * @min_uV: minimum required voltage in uV
+ * @max_uV: maximum acceptable voltage in uV
+ *
+ * Sets a voltage regulator to the desired output voltage. This can be set
+ * while the regulator is disabled or enabled.  If the regulator is enabled then
+ * the voltage will change to the new value immediately; otherwise, if the
+ * regulator is disabled, then the regulator will output at the new voltage when
+ * enabled.
+ *
+ * The min_uV to max_uV voltage range requested must intersect with the
+ * voltage constraint range configured for the regulator.
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * The final voltage value that is sent to the RPM is aggregated based upon the
+ * values requested by all consumers of the regulator.  This corresponds to the
+ * maximum min_uV value.
+ *
+ * This function may be called from either atomic or nonatomic context.  If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_set_voltage(struct rpm_regulator *regulator, int min_uV,
+			      int max_uV)
+{
+	int rc = rpm_regulator_check_input(regulator);
+	int uV = min_uV;
+
+	if (rc)
+		return rc;
+
+	if (regulator->rpm_vreg->regulator_type == RPM_REGULATOR_TYPE_VS) {
+		vreg_err(regulator, "unsupported regulator type: %d\n",
+			regulator->rpm_vreg->regulator_type);
+		return -EINVAL;
+	}
+
+	if (min_uV > max_uV) {
+		vreg_err(regulator, "min_uV=%d must be less than max_uV=%d\n",
+			min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	if (uV < regulator->min_uV && max_uV >= regulator->min_uV)
+		uV = regulator->min_uV;
+
+	if (uV < regulator->min_uV || uV > regulator->max_uV) {
+		vreg_err(regulator,
+			"request v=[%d, %d] is outside allowed v=[%d, %d]\n",
+			min_uV, max_uV, regulator->min_uV, regulator->max_uV);
+		return -EINVAL;
+	}
+
+	return regulator->rdesc.ops->set_voltage(regulator->rdev, uV, uV, NULL);
+}
+EXPORT_SYMBOL(rpm_regulator_set_voltage);
+
+/**
+ * rpm_regulator_set_mode() - set regulator operating mode
+ * @regulator: RPM regulator handle
+ * @mode: operating mode requested for the regulator
+ *
+ * Requests that the mode of the regulator be set to the mode specified.  This
+ * parameter is aggregated using a max function such that AUTO < IPEAK < HPM.
+ *
+ * Returns 0 on success or errno on failure.
+ */
+int rpm_regulator_set_mode(struct rpm_regulator *regulator,
+				enum rpm_regulator_mode mode)
+{
+	int index = 0;
+	u32 new_mode, prev_mode;
+	int rc;
+
+	rc = rpm_regulator_check_input(regulator);
+	if (rc)
+		return rc;
+
+	if (mode < 0 || mode >= ARRAY_SIZE(mode_mapping)) {
+		vreg_err(regulator, "invalid mode requested: %d\n", mode);
+		return -EINVAL;
+	}
+
+	switch (regulator->rpm_vreg->regulator_type) {
+	case RPM_REGULATOR_TYPE_SMPS:
+		index = RPM_REGULATOR_PARAM_MODE_SMPS;
+		new_mode = mode_mapping[mode].smps_mode;
+		break;
+	case RPM_REGULATOR_TYPE_LDO:
+		index = RPM_REGULATOR_PARAM_MODE_LDO;
+		new_mode = mode_mapping[mode].ldo_mode;
+		break;
+	default:
+		vreg_err(regulator, "unsupported regulator type: %d\n",
+			regulator->rpm_vreg->regulator_type);
+		return -EINVAL;
+	}
+
+	if (new_mode < params[index].min || new_mode > params[index].max) {
+		vreg_err(regulator, "invalid mode requested: %d for type: %d\n",
+			mode, regulator->rpm_vreg->regulator_type);
+		return -EINVAL;
+	}
+
+	rpm_vreg_lock(regulator->rpm_vreg);
+
+	prev_mode = regulator->req.param[index];
+	regulator->req.param[index] = new_mode;
+	regulator->req.modified |= BIT(index);
+
+	rc = rpm_vreg_aggregate_requests(regulator);
+	if (rc) {
+		vreg_err(regulator, "set mode failed, rc=%d", rc);
+		regulator->req.param[index] = prev_mode;
+	}
+
+	rpm_vreg_unlock(regulator->rpm_vreg);
+
+	return rc;
+}
+EXPORT_SYMBOL(rpm_regulator_set_mode);
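+
+/*
+ * Illustrative usage of the private API above (a sketch only; the "dev"
+ * pointer and the "vdd-example" supply name are hypothetical and not
+ * defined by this driver):
+ *
+ *	struct rpm_regulator *vreg;
+ *	int rc;
+ *
+ *	vreg = rpm_regulator_get(dev, "vdd-example");
+ *	if (IS_ERR(vreg))
+ *		return PTR_ERR(vreg);
+ *
+ *	rc = rpm_regulator_set_voltage(vreg, 1800000, 1800000);
+ *	if (!rc)
+ *		rc = rpm_regulator_enable(vreg);
+ *
+ *	...
+ *
+ *	rpm_regulator_disable(vreg);
+ *	rpm_regulator_put(vreg);
+ */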
+
+static struct regulator_ops ldo_ops = {
+	.enable			= rpm_vreg_enable,
+	.disable		= rpm_vreg_disable,
+	.is_enabled		= rpm_vreg_is_enabled,
+	.set_voltage		= rpm_vreg_set_voltage,
+	.get_voltage		= rpm_vreg_get_voltage,
+	.set_load		= rpm_vreg_ldo_set_load,
+	.set_mode		= rpm_vreg_ldo_set_mode,
+	.get_mode		= rpm_vreg_ldo_get_mode,
+	.enable_time		= rpm_vreg_enable_time,
+};
+
+static struct regulator_ops smps_ops = {
+	.enable			= rpm_vreg_enable,
+	.disable		= rpm_vreg_disable,
+	.is_enabled		= rpm_vreg_is_enabled,
+	.set_voltage		= rpm_vreg_set_voltage,
+	.get_voltage		= rpm_vreg_get_voltage,
+	.set_mode		= rpm_vreg_set_mode,
+	.get_mode		= rpm_vreg_get_mode,
+	.get_optimum_mode	= rpm_vreg_get_optimum_mode,
+	.enable_time		= rpm_vreg_enable_time,
+};
+
+static struct regulator_ops switch_ops = {
+	.enable			= rpm_vreg_enable,
+	.disable		= rpm_vreg_disable,
+	.is_enabled		= rpm_vreg_is_enabled,
+	.enable_time		= rpm_vreg_enable_time,
+};
+
+static struct regulator_ops ncp_ops = {
+	.enable			= rpm_vreg_enable,
+	.disable		= rpm_vreg_disable,
+	.is_enabled		= rpm_vreg_is_enabled,
+	.set_voltage		= rpm_vreg_set_voltage,
+	.get_voltage		= rpm_vreg_get_voltage,
+	.enable_time		= rpm_vreg_enable_time,
+};
+
+static struct regulator_ops bob_ops = {
+	.enable			= rpm_vreg_enable,
+	.disable		= rpm_vreg_disable,
+	.is_enabled		= rpm_vreg_is_enabled,
+	.set_voltage		= rpm_vreg_set_voltage,
+	.get_voltage		= rpm_vreg_get_voltage,
+	.set_mode		= rpm_vreg_set_bob_mode,
+	.get_mode		= rpm_vreg_get_bob_mode,
+	.enable_time		= rpm_vreg_enable_time,
+};
+
+static struct regulator_ops *vreg_ops[] = {
+	[RPM_REGULATOR_TYPE_LDO]	= &ldo_ops,
+	[RPM_REGULATOR_TYPE_SMPS]	= &smps_ops,
+	[RPM_REGULATOR_TYPE_VS]		= &switch_ops,
+	[RPM_REGULATOR_TYPE_NCP]	= &ncp_ops,
+	[RPM_REGULATOR_TYPE_BOB]	= &bob_ops,
+};
+
+static int rpm_vreg_device_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rpm_regulator *reg;
+	struct rpm_vreg *rpm_vreg;
+
+	reg = platform_get_drvdata(pdev);
+	if (reg) {
+		rpm_vreg = reg->rpm_vreg;
+		rpm_vreg_lock(rpm_vreg);
+		regulator_unregister(reg->rdev);
+		list_del(&reg->list);
+		kfree(reg);
+		rpm_vreg_unlock(rpm_vreg);
+	} else {
+		dev_err(dev, "%s: drvdata missing\n", __func__);
+		return -EINVAL;
+	}
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static int rpm_vreg_resource_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rpm_regulator *reg, *reg_temp;
+	struct rpm_vreg *rpm_vreg;
+
+	rpm_vreg = platform_get_drvdata(pdev);
+	if (rpm_vreg) {
+		rpm_vreg_lock(rpm_vreg);
+		list_for_each_entry_safe(reg, reg_temp, &rpm_vreg->reg_list,
+				list) {
+			/* Only touch data for private consumers. */
+			if (reg->rdev->desc == NULL) {
+				list_del(&reg->list);
+				kfree(reg->rdev);
+				kfree(reg);
+			} else {
+				dev_err(dev, "%s: not all child devices have been removed\n",
+					__func__);
+			}
+		}
+		rpm_vreg_unlock(rpm_vreg);
+
+		msm_rpm_free_request(rpm_vreg->handle_active);
+		msm_rpm_free_request(rpm_vreg->handle_sleep);
+
+		kfree(rpm_vreg);
+	} else {
+		dev_err(dev, "%s: drvdata missing\n", __func__);
+		return -EINVAL;
+	}
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static int rpm_vreg_set_smps_ldo_voltage_index(struct device *dev,
+					struct rpm_regulator *reg)
+{
+	struct device_node *node = dev->of_node;
+	int chosen = 0;
+
+	if (of_property_read_bool(node, "qcom,use-voltage-corner")) {
+		reg->voltage_index = RPM_REGULATOR_PARAM_CORNER;
+		reg->voltage_offset = RPM_REGULATOR_CORNER_NONE;
+		chosen++;
+	}
+
+	if (of_property_read_bool(node, "qcom,use-voltage-floor-corner")) {
+		reg->voltage_index = RPM_REGULATOR_PARAM_FLOOR_CORNER;
+		reg->voltage_offset = RPM_REGULATOR_CORNER_NONE;
+		chosen++;
+	}
+
+	if (of_property_read_bool(node, "qcom,use-voltage-level")) {
+		reg->voltage_index = RPM_REGULATOR_PARAM_LEVEL;
+		chosen++;
+	}
+
+	if (of_property_read_bool(node, "qcom,use-voltage-floor-level")) {
+		reg->voltage_index = RPM_REGULATOR_PARAM_FLOOR_LEVEL;
+		chosen++;
+	}
+
+	if (chosen > 1) {
+		dev_err(dev, "only one qcom,use-voltage-* may be specified\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int rpm_vreg_set_bob_voltage_index(struct device *dev,
+					struct rpm_regulator *reg)
+{
+	struct device_node *node = dev->of_node;
+	int chosen = 0;
+
+	if (of_property_read_bool(node, "qcom,use-pin-ctrl-voltage1")) {
+		reg->voltage_index = RPM_REGULATOR_PARAM_PIN_CTRL_VOLTAGE1;
+		chosen++;
+	}
+
+	if (of_property_read_bool(node, "qcom,use-pin-ctrl-voltage2")) {
+		reg->voltage_index = RPM_REGULATOR_PARAM_PIN_CTRL_VOLTAGE2;
+		chosen++;
+	}
+
+	if (of_property_read_bool(node, "qcom,use-pin-ctrl-voltage3")) {
+		reg->voltage_index = RPM_REGULATOR_PARAM_PIN_CTRL_VOLTAGE3;
+		chosen++;
+	}
+
+	if (chosen > 1) {
+		dev_err(dev, "only one qcom,use-pin-ctrl-voltage* may be specified\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int rpm_vreg_device_set_voltage_index(struct device *dev,
+					struct rpm_regulator *reg, int type)
+{
+	int rc = 0;
+
+	reg->voltage_index = RPM_REGULATOR_PARAM_VOLTAGE;
+
+	switch (type) {
+	case RPM_REGULATOR_TYPE_SMPS:
+	case RPM_REGULATOR_TYPE_LDO:
+		rc = rpm_vreg_set_smps_ldo_voltage_index(dev, reg);
+		break;
+	case RPM_REGULATOR_TYPE_BOB:
+		rc = rpm_vreg_set_bob_voltage_index(dev, reg);
+		break;
+	}
+
+	return rc;
+}
+
+static void rpm_vreg_create_debugfs(struct rpm_regulator *reg)
+{
+	struct dentry *entry;
+
+	if (!is_debugfs_created) {
+		reg->dfs_root = debugfs_create_dir("rpm_vreg_debugfs", NULL);
+		if (IS_ERR_OR_NULL(reg->dfs_root)) {
+			pr_err("Failed to create debugfs directory rc=%ld\n",
+				(long)reg->dfs_root);
+			return;
+		}
+		entry = debugfs_create_u32("debug_mask", 0600, reg->dfs_root,
+						&rpm_vreg_debug_mask);
+		if (IS_ERR_OR_NULL(entry)) {
+			pr_err("Failed to create debug_mask rc=%ld\n",
+							(long)entry);
+			debugfs_remove_recursive(reg->dfs_root);
+			return;
+		}
+		is_debugfs_created = true;
+	}
+}
+
+/*
+ * This probe is called for child rpm-regulator devices, whose properties
+ * are used to configure individual regulator framework regulators for a
+ * given RPM regulator resource.
+ */
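+/*
+ * Illustrative device tree layout consumed by this probe and by
+ * rpm_vreg_resource_probe() below (node names and values are
+ * hypothetical, shown only to document the expected structure):
+ *
+ *	rpm-regulator-ldoa2 {
+ *		compatible = "qcom,rpm-smd-regulator-resource";
+ *		qcom,resource-name = "ldoa";
+ *		qcom,resource-id = <2>;
+ *		qcom,regulator-type = <0>;
+ *		qcom,regulator-hw-type = "pmic4-ldo";
+ *
+ *		regulator-l2 {
+ *			compatible = "qcom,rpm-smd-regulator";
+ *			regulator-name = "pm_l2";
+ *			qcom,set = <3>;
+ *			regulator-min-microvolt = <1200000>;
+ *			regulator-max-microvolt = <1200000>;
+ *		};
+ *	};
+ */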
+static int rpm_vreg_device_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct regulator_init_data *init_data;
+	struct rpm_vreg *rpm_vreg;
+	struct rpm_regulator *reg;
+	struct regulator_config reg_config = {};
+	int rc = 0;
+	int i, regulator_type;
+	u32 val;
+
+	if (pdev->dev.parent == NULL) {
+		dev_err(dev, "%s: parent device missing\n", __func__);
+		return -ENODEV;
+	}
+
+	rpm_vreg = dev_get_drvdata(pdev->dev.parent);
+	if (rpm_vreg == NULL) {
+		dev_err(dev, "%s: rpm_vreg not found in parent device\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	reg = kzalloc(sizeof(*reg), GFP_KERNEL);
+	if (reg == NULL)
+		return -ENOMEM;
+
+	regulator_type		= rpm_vreg->regulator_type;
+	reg->rpm_vreg		= rpm_vreg;
+	reg->rdesc.owner	= THIS_MODULE;
+	reg->rdesc.type		= REGULATOR_VOLTAGE;
+	reg->rdesc.ops		= vreg_ops[regulator_type];
+
+	rc = rpm_vreg_device_set_voltage_index(dev, reg, regulator_type);
+	if (rc)
+		goto fail_free_reg;
+
+	reg->always_send_voltage
+		= of_property_read_bool(node, "qcom,always-send-voltage");
+	reg->always_send_current
+		= of_property_read_bool(node, "qcom,always-send-current");
+
+	if (regulator_type == RPM_REGULATOR_TYPE_VS)
+		reg->rdesc.n_voltages = 0;
+	else
+		reg->rdesc.n_voltages = 2;
+
+	rc = of_property_read_u32(node, "qcom,set", &val);
+	if (rc) {
+		dev_err(dev, "%s: sleep set and/or active set must be configured via qcom,set property, rc=%d\n",
+			__func__, rc);
+		goto fail_free_reg;
+	} else if (!(val & RPM_SET_CONFIG_BOTH)) {
+		dev_err(dev, "%s: qcom,set=%u property is invalid\n", __func__,
+			val);
+		rc = -EINVAL;
+		goto fail_free_reg;
+	}
+
+	reg->set_active = !!(val & RPM_SET_CONFIG_ACTIVE);
+	reg->set_sleep = !!(val & RPM_SET_CONFIG_SLEEP);
+
+	init_data = of_get_regulator_init_data(dev, node, &reg->rdesc);
+	if (init_data == NULL) {
+		dev_err(dev, "%s: failed to populate regulator_init_data\n",
+			__func__);
+		rc = -ENOMEM;
+		goto fail_free_reg;
+	}
+	if (init_data->constraints.name == NULL) {
+		dev_err(dev, "%s: regulator name not specified\n", __func__);
+		rc = -EINVAL;
+		goto fail_free_reg;
+	}
+
+	init_data->constraints.input_uV	= init_data->constraints.max_uV;
+
+	if (of_get_property(node, "parent-supply", NULL))
+		init_data->supply_regulator = "parent";
+
+	/*
+	 * Fill in ops and mode masks based on callbacks specified for
+	 * this type of regulator.
+	 */
+	if (reg->rdesc.ops->enable)
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_STATUS;
+	if (reg->rdesc.ops->get_voltage)
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_VOLTAGE;
+	if (reg->rdesc.ops->get_mode) {
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_DRMS;
+		init_data->constraints.valid_modes_mask
+			|= REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+	}
+
+	reg->rdesc.name		= init_data->constraints.name;
+	reg->min_uV		= init_data->constraints.min_uV;
+	reg->max_uV		= init_data->constraints.max_uV;
+
+	/* Initialize the param array based on optional properties. */
+	for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+		rc = of_property_read_u32(node, params[i].property_name, &val);
+		if (rc == 0) {
+			if (params[i].supported_regulator_types
+					& BIT(regulator_type)) {
+				if (val < params[i].min
+						|| val > params[i].max) {
+					pr_warn("%s: device tree property: %s=%u is outsided allowed range [%u, %u]\n",
+						reg->rdesc.name,
+						params[i].property_name, val,
+						params[i].min, params[i].max);
+					continue;
+				}
+				reg->req.param[i] = val;
+				reg->req.modified |= BIT(i);
+			} else {
+				pr_warn("%s: regulator type=%d does not support device tree property: %s\n",
+					reg->rdesc.name, regulator_type,
+					params[i].property_name);
+			}
+		}
+	}
+
+	of_property_read_u32(node, "qcom,system-load", &reg->system_load);
+
+	rc = rpm_vreg_configure_pin_control_enable(reg, node);
+	if (rc) {
+		vreg_err(reg, "could not configure pin control enable, rc=%d\n",
+			rc);
+		goto fail_free_reg;
+	}
+
+	rpm_vreg_lock(rpm_vreg);
+	list_add(&reg->list, &rpm_vreg->reg_list);
+	rpm_vreg_unlock(rpm_vreg);
+
+	if (of_property_read_bool(node, "qcom,send-defaults")) {
+		rc = rpm_vreg_send_defaults(reg);
+		if (rc) {
+			vreg_err(reg, "could not send defaults, rc=%d\n", rc);
+			goto fail_remove_from_list;
+		}
+	}
+
+	reg_config.dev = dev;
+	reg_config.init_data = init_data;
+	reg_config.of_node = node;
+	reg_config.driver_data = reg;
+	reg->rdev = regulator_register(&reg->rdesc, &reg_config);
+	if (IS_ERR(reg->rdev)) {
+		rc = PTR_ERR(reg->rdev);
+		reg->rdev = NULL;
+		pr_err("regulator_register failed: %s, rc=%d\n",
+			reg->rdesc.name, rc);
+		goto fail_remove_from_list;
+	}
+
+	platform_set_drvdata(pdev, reg);
+
+	rpm_vreg_create_debugfs(reg);
+
+	pr_debug("successfully probed: %s\n", reg->rdesc.name);
+
+	return 0;
+
+fail_remove_from_list:
+	rpm_vreg_lock(rpm_vreg);
+	list_del(&reg->list);
+	rpm_vreg_unlock(rpm_vreg);
+
+fail_free_reg:
+	kfree(reg);
+	return rc;
+}
+
+/*
+ * This probe is called for parent rpm-regulator devices, whose properties
+ * are required to identify a given RPM resource.
+ */
+static int rpm_vreg_resource_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct rpm_vreg *rpm_vreg;
+	const char *type = "";
+	const char *prop;
+	int val = 0;
+	u32 resource_type;
+	int rc;
+
+	/* Create new rpm_vreg entry. */
+	rpm_vreg = kzalloc(sizeof(*rpm_vreg), GFP_KERNEL);
+	if (rpm_vreg == NULL)
+		return -ENOMEM;
+
+	/* Required device tree properties: */
+	rc = of_property_read_string(node, "qcom,resource-name",
+			&rpm_vreg->resource_name);
+	if (rc) {
+		dev_err(dev, "%s: qcom,resource-name missing in DT node\n",
+			__func__);
+		goto fail_free_vreg;
+	}
+	resource_type = rpm_vreg_string_to_int(rpm_vreg->resource_name);
+
+	rc = of_property_read_u32(node, "qcom,resource-id",
+			&rpm_vreg->resource_id);
+	if (rc) {
+		dev_err(dev, "%s: qcom,resource-id missing in DT node\n",
+			__func__);
+		goto fail_free_vreg;
+	}
+
+	rc = of_property_read_u32(node, "qcom,regulator-type",
+			&rpm_vreg->regulator_type);
+	if (rc) {
+		dev_err(dev, "%s: qcom,regulator-type missing in DT node\n",
+			__func__);
+		goto fail_free_vreg;
+	}
+
+	if ((rpm_vreg->regulator_type < 0)
+	    || (rpm_vreg->regulator_type >= RPM_REGULATOR_TYPE_MAX)) {
+		dev_err(dev, "%s: invalid regulator type: %d\n", __func__,
+			rpm_vreg->regulator_type);
+		rc = -EINVAL;
+		goto fail_free_vreg;
+	}
+
+	if (rpm_vreg->regulator_type == RPM_REGULATOR_TYPE_LDO) {
+		prop = "qcom,regulator-hw-type";
+		rpm_vreg->regulator_hw_type = RPM_REGULATOR_HW_TYPE_UNKNOWN;
+		rc = of_property_read_string(node, prop, &type);
+		if (rc) {
+			dev_err(dev, "%s is missing in DT node rc=%d\n",
+				prop, rc);
+			goto fail_free_vreg;
+		}
+
+		if (!strcmp(type, "pmic4-ldo")) {
+			rpm_vreg->regulator_hw_type
+				= RPM_REGULATOR_HW_TYPE_PMIC4_LDO;
+		} else if (!strcmp(type, "pmic5-ldo")) {
+			rpm_vreg->regulator_hw_type
+				= RPM_REGULATOR_HW_TYPE_PMIC5_LDO;
+		} else {
+			dev_err(dev, "unknown %s = %s\n", prop, type);
+			rc = -EINVAL;
+			goto fail_free_vreg;
+		}
+	}
+
+	/* Optional device tree properties: */
+	of_property_read_u32(node, "qcom,allow-atomic", &val);
+	rpm_vreg->allow_atomic = !!val;
+	of_property_read_u32(node, "qcom,enable-time", &rpm_vreg->enable_time);
+	of_property_read_u32(node, "qcom,hpm-min-load",
+		&rpm_vreg->hpm_min_load);
+	rpm_vreg->apps_only = of_property_read_bool(node, "qcom,apps-only");
+	rpm_vreg->always_wait_for_ack
+		= of_property_read_bool(node, "qcom,always-wait-for-ack");
+
+	rpm_vreg->handle_active = msm_rpm_create_request(RPM_SET_ACTIVE,
+		resource_type, rpm_vreg->resource_id, RPM_REGULATOR_PARAM_MAX);
+	if (IS_ERR_OR_NULL(rpm_vreg->handle_active)) {
+		rc = rpm_vreg->handle_active ?
+			PTR_ERR(rpm_vreg->handle_active) : -ENOMEM;
+		if (rc != -EPROBE_DEFER)
+			dev_err(dev, "%s: failed to create active RPM handle, rc=%d\n",
+				__func__, rc);
+		goto fail_free_vreg;
+	}
+
+	rpm_vreg->handle_sleep = msm_rpm_create_request(RPM_SET_SLEEP,
+		resource_type, rpm_vreg->resource_id, RPM_REGULATOR_PARAM_MAX);
+	if (IS_ERR_OR_NULL(rpm_vreg->handle_sleep)) {
+		rc = rpm_vreg->handle_sleep ?
+			PTR_ERR(rpm_vreg->handle_sleep) : -ENOMEM;
+		if (rc != -EPROBE_DEFER)
+			dev_err(dev, "%s: failed to create sleep RPM handle, rc=%d\n",
+				__func__, rc);
+		goto fail_free_handle_active;
+	}
+
+	INIT_LIST_HEAD(&rpm_vreg->reg_list);
+
+	if (rpm_vreg->allow_atomic)
+		spin_lock_init(&rpm_vreg->slock);
+	else
+		mutex_init(&rpm_vreg->mlock);
+
+	platform_set_drvdata(pdev, rpm_vreg);
+
+	rc = of_platform_populate(node, NULL, NULL, dev);
+	if (rc) {
+		dev_err(dev, "%s: failed to add child nodes, rc=%d\n", __func__,
+			rc);
+		goto fail_unset_drvdata;
+	}
+
+	pr_debug("successfully probed: %s (%08X) %u\n", rpm_vreg->resource_name,
+		resource_type, rpm_vreg->resource_id);
+
+	return rc;
+
+fail_unset_drvdata:
+	platform_set_drvdata(pdev, NULL);
+	msm_rpm_free_request(rpm_vreg->handle_sleep);
+
+fail_free_handle_active:
+	msm_rpm_free_request(rpm_vreg->handle_active);
+
+fail_free_vreg:
+	kfree(rpm_vreg);
+
+	return rc;
+}
+
+static const struct of_device_id rpm_vreg_match_table_device[] = {
+	{ .compatible = "qcom,rpm-smd-regulator", },
+	{}
+};
+
+static const struct of_device_id rpm_vreg_match_table_resource[] = {
+	{ .compatible = "qcom,rpm-smd-regulator-resource", },
+	{}
+};
+
+static struct platform_driver rpm_vreg_device_driver = {
+	.probe = rpm_vreg_device_probe,
+	.remove = rpm_vreg_device_remove,
+	.driver = {
+		.name = "qcom,rpm-smd-regulator",
+		.of_match_table = rpm_vreg_match_table_device,
+	},
+};
+
+static struct platform_driver rpm_vreg_resource_driver = {
+	.probe = rpm_vreg_resource_probe,
+	.remove = rpm_vreg_resource_remove,
+	.driver = {
+		.name = "qcom,rpm-smd-regulator-resource",
+		.of_match_table = rpm_vreg_match_table_resource,
+	},
+};
+
+/**
+ * rpm_smd_regulator_driver_init() - initialize the RPM SMD regulator drivers
+ *
+ * This function registers the RPM SMD regulator platform drivers.
+ *
+ * Returns 0 on success or errno on failure.
+ */
+int __init rpm_smd_regulator_driver_init(void)
+{
+	static bool initialized;
+	int i, rc;
+
+	if (initialized)
+		return 0;
+
+	initialized = true;
+
+	/* Store parameter string names as integers */
+	for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++)
+		params[i].key = rpm_vreg_string_to_int(params[i].name);
+
+	rc = platform_driver_register(&rpm_vreg_device_driver);
+	if (rc)
+		return rc;
+
+	return platform_driver_register(&rpm_vreg_resource_driver);
+}
+EXPORT_SYMBOL(rpm_smd_regulator_driver_init);
+
+static void __exit rpm_vreg_exit(void)
+{
+	platform_driver_unregister(&rpm_vreg_device_driver);
+	platform_driver_unregister(&rpm_vreg_resource_driver);
+}
+
+arch_initcall(rpm_smd_regulator_driver_init);
+module_exit(rpm_vreg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM RPM SMD regulator driver");
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index e9ab90c..602af83 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -188,6 +188,14 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
 	init_completion(&q6v5->stop_done);
 
 	q6v5->wdog_irq = platform_get_irq_byname(pdev, "wdog");
+	if (q6v5->wdog_irq < 0) {
+		if (q6v5->wdog_irq != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"failed to retrieve wdog IRQ: %d\n",
+				q6v5->wdog_irq);
+		return q6v5->wdog_irq;
+	}
+
 	ret = devm_request_threaded_irq(&pdev->dev, q6v5->wdog_irq,
 					NULL, q6v5_wdog_interrupt,
 					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
@@ -198,8 +206,13 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
 	}
 
 	q6v5->fatal_irq = platform_get_irq_byname(pdev, "fatal");
-	if (q6v5->fatal_irq == -EPROBE_DEFER)
-		return -EPROBE_DEFER;
+	if (q6v5->fatal_irq < 0) {
+		if (q6v5->fatal_irq != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"failed to retrieve fatal IRQ: %d\n",
+				q6v5->fatal_irq);
+		return q6v5->fatal_irq;
+	}
 
 	ret = devm_request_threaded_irq(&pdev->dev, q6v5->fatal_irq,
 					NULL, q6v5_fatal_interrupt,
@@ -211,8 +224,13 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
 	}
 
 	q6v5->ready_irq = platform_get_irq_byname(pdev, "ready");
-	if (q6v5->ready_irq == -EPROBE_DEFER)
-		return -EPROBE_DEFER;
+	if (q6v5->ready_irq < 0) {
+		if (q6v5->ready_irq != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"failed to retrieve ready IRQ: %d\n",
+				q6v5->ready_irq);
+		return q6v5->ready_irq;
+	}
 
 	ret = devm_request_threaded_irq(&pdev->dev, q6v5->ready_irq,
 					NULL, q6v5_ready_interrupt,
@@ -224,8 +242,13 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
 	}
 
 	q6v5->handover_irq = platform_get_irq_byname(pdev, "handover");
-	if (q6v5->handover_irq == -EPROBE_DEFER)
-		return -EPROBE_DEFER;
+	if (q6v5->handover_irq < 0) {
+		if (q6v5->handover_irq != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"failed to retrieve handover IRQ: %d\n",
+				q6v5->handover_irq);
+		return q6v5->handover_irq;
+	}
 
 	ret = devm_request_threaded_irq(&pdev->dev, q6v5->handover_irq,
 					NULL, q6v5_handover_interrupt,
@@ -238,8 +261,13 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
 	disable_irq(q6v5->handover_irq);
 
 	q6v5->stop_irq = platform_get_irq_byname(pdev, "stop-ack");
-	if (q6v5->stop_irq == -EPROBE_DEFER)
-		return -EPROBE_DEFER;
+	if (q6v5->stop_irq < 0) {
+		if (q6v5->stop_irq != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"failed to retrieve stop-ack IRQ: %d\n",
+				q6v5->stop_irq);
+		return q6v5->stop_irq;
+	}
 
 	ret = devm_request_threaded_irq(&pdev->dev, q6v5->stop_irq,
 					NULL, q6v5_stop_interrupt,
diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
index d7a4b9e..6a84b63 100644
--- a/drivers/remoteproc/qcom_q6v5_pil.c
+++ b/drivers/remoteproc/qcom_q6v5_pil.c
@@ -1132,6 +1132,9 @@ static int q6v5_probe(struct platform_device *pdev)
 	if (!desc)
 		return -EINVAL;
 
+	if (desc->need_mem_protection && !qcom_scm_is_available())
+		return -EPROBE_DEFER;
+
 	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
 			    desc->hexagon_mba_image, sizeof(*qproc));
 	if (!rproc) {
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index c1b34cd..5fccffc 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -77,4 +77,13 @@
 	select RPMSG
 	select VIRTIO
 
+config MSM_RPM_SMD
+	bool "RPM driver using SMD protocol"
+	help
+	  RPM is the dedicated hardware engine for managing shared SoC
+	  resources. This config adds driver support for using SMD as the
+	  transport layer for communication with the RPM hardware. It also
+	  selects the MSM_MPM config that programs the MPM module to monitor
+	  interrupts during sleep modes.
+
 endmenu
diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile
index 1680763..8220b8d 100644
--- a/drivers/rpmsg/Makefile
+++ b/drivers/rpmsg/Makefile
@@ -3,6 +3,7 @@
 obj-$(CONFIG_RPMSG_CHAR)	+= rpmsg_char.o
 obj-$(CONFIG_RPMSG_QCOM_GLINK_RPM) += qcom_glink_rpm.o
 obj-$(CONFIG_RPMSG_QCOM_GLINK_NATIVE) += qcom_glink_native.o
+obj-$(CONFIG_MSM_RPM_SMD)	+= rpm-smd.o
 obj-$(CONFIG_RPMSG_QCOM_GLINK_SMEM) += qcom_glink_smem.o
 obj-$(CONFIG_RPMSG_QCOM_GLINK_SPSS) += qcom_glink_spss.o
 obj-$(CONFIG_RPMSG_QCOM_GLINK_SPI) += qcom_glink_spi.o
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index d859315..c996be2 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -1607,9 +1607,7 @@ static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
 static void qcom_glink_rpdev_release(struct device *dev)
 {
 	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
-	struct glink_channel *channel = to_glink_channel(rpdev->ept);
 
-	channel->rpdev = NULL;
 	kfree(rpdev);
 }
 
diff --git a/drivers/rpmsg/qcom_glink_smem.c b/drivers/rpmsg/qcom_glink_smem.c
index 51265663..4938a2d 100644
--- a/drivers/rpmsg/qcom_glink_smem.c
+++ b/drivers/rpmsg/qcom_glink_smem.c
@@ -47,6 +47,12 @@ struct glink_smem_pipe {
 
 #define to_smem_pipe(p) container_of(p, struct glink_smem_pipe, native)
 
+static void glink_smem_rx_reset(struct qcom_glink_pipe *np)
+{
+	struct glink_smem_pipe *pipe = to_smem_pipe(np);
+	*pipe->tail = 0;
+}
+
 static size_t glink_smem_rx_avail(struct qcom_glink_pipe *np)
 {
 	struct glink_smem_pipe *pipe = to_smem_pipe(np);
@@ -123,6 +129,12 @@ static void glink_smem_rx_advance(struct qcom_glink_pipe *np,
 	*pipe->tail = cpu_to_le32(tail);
 }
 
+static void glink_smem_tx_reset(struct qcom_glink_pipe *np)
+{
+	struct glink_smem_pipe *pipe = to_smem_pipe(np);
+	*pipe->head = 0;
+}
+
 static size_t glink_smem_tx_avail(struct qcom_glink_pipe *np)
 {
 	struct glink_smem_pipe *pipe = to_smem_pipe(np);
@@ -282,11 +294,13 @@ struct qcom_glink *qcom_glink_smem_register(struct device *parent,
 		goto err_put_dev;
 	}
 
+	rx_pipe->native.reset = glink_smem_rx_reset;
 	rx_pipe->native.avail = glink_smem_rx_avail;
 	rx_pipe->native.peak = glink_smem_rx_peak;
 	rx_pipe->native.advance = glink_smem_rx_advance;
 	rx_pipe->remote_pid = remote_pid;
 
+	tx_pipe->native.reset = glink_smem_tx_reset;
 	tx_pipe->native.avail = glink_smem_tx_avail;
 	tx_pipe->native.write = glink_smem_tx_write;
 	tx_pipe->remote_pid = remote_pid;
diff --git a/drivers/rpmsg/rpm-smd.c b/drivers/rpmsg/rpm-smd.c
new file mode 100644
index 0000000..5550068
--- /dev/null
+++ b/drivers/rpmsg/rpm-smd.c
@@ -0,0 +1,1645 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/rbtree.h>
+#include <soc/qcom/rpm-notifier.h>
+#include <soc/qcom/rpm-smd.h>
+#include <linux/rpmsg.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_rpm_smd.h>
+
+#define DEFAULT_BUFFER_SIZE 256
+#define DEBUG_PRINT_BUFFER_SIZE 512
+#define MAX_SLEEP_BUFFER 128
+#define INV_RSC "resource does not exist"
+#define ERR "err\0"
+#define MAX_ERR_BUFFER_SIZE 128
+#define MAX_WAIT_ON_ACK 24
+#define INIT_ERROR 1
+#define V1_PROTOCOL_VERSION 0x31726576 /* 'ver1' in little-endian ASCII */
+#define V0_PROTOCOL_VERSION 0 /* rev0 */
+#define RPM_MSG_TYPE_OFFSET 16
+#define RPM_MSG_TYPE_SIZE 8
+#define RPM_SET_TYPE_OFFSET 28
+#define RPM_SET_TYPE_SIZE 4
+#define RPM_REQ_LEN_OFFSET 0
+#define RPM_REQ_LEN_SIZE 16
+#define RPM_MSG_VERSION_OFFSET 24
+#define RPM_MSG_VERSION_SIZE 8
+#define RPM_MSG_VERSION 1
+#define RPM_MSG_SET_OFFSET 28
+#define RPM_MSG_SET_SIZE 4
+#define RPM_RSC_ID_OFFSET 16
+#define RPM_RSC_ID_SIZE 12
+#define RPM_DATA_LEN_OFFSET 0
+#define RPM_DATA_LEN_SIZE 16
+#define RPM_HDR_SIZE ((rpm_msg_fmt_ver == RPM_MSG_V0_FMT) ?\
+		sizeof(struct rpm_v0_hdr) : sizeof(struct rpm_v1_hdr))
+#define CLEAR_FIELD(offset, size) (~GENMASK(offset + size - 1, offset))
+
+#define for_each_kvp(buf, k) \
+	for (k = (struct kvp *)get_first_kvp(buf); \
+		((void *)k - (void *)get_first_kvp(buf)) < \
+		 get_data_len(buf);\
+		k = get_next_kvp(k))
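+
+/*
+ * A request buffer walked by for_each_kvp() is laid out as a message
+ * header (v0 or v1, depending on rpm_msg_fmt_ver) followed by packed
+ * key-value entries, each a struct kvp {k, s} immediately followed by
+ * s bytes of payload:
+ *
+ *	| header | kvp0 | s0 bytes | kvp1 | s1 bytes | ...
+ */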
+
+/* Debug Definitions */
+enum {
+	MSM_RPM_LOG_REQUEST_PRETTY	= BIT(0),
+	MSM_RPM_LOG_REQUEST_RAW		= BIT(1),
+	MSM_RPM_LOG_REQUEST_SHOW_MSG_ID	= BIT(2),
+};
+
+static int msm_rpm_debug_mask;
+module_param_named(
+	debug_mask, msm_rpm_debug_mask, int, 0644
+);
+
+static uint32_t rpm_msg_fmt_ver;
+module_param_named(
+	rpm_msg_fmt_ver, rpm_msg_fmt_ver, uint, 0444
+);
+
+struct msm_rpm_driver_data {
+	const char *ch_name;
+	uint32_t ch_type;
+	struct smd_channel *ch_info;
+	struct work_struct work;
+	spinlock_t smd_lock_write;
+	spinlock_t smd_lock_read;
+	struct completion smd_open;
+};
+
+struct qcom_smd_rpm {
+	struct rpmsg_endpoint *rpm_channel;
+	struct device *dev;
+	int irq;
+	struct completion ack;
+	struct mutex lock;
+	int ack_status;
+};
+
+struct qcom_smd_rpm *rpm;
+struct qcom_smd_rpm priv_rpm;
+
+static ATOMIC_NOTIFIER_HEAD(msm_rpm_sleep_notifier);
+static bool standalone;
+static int probe_status = -EPROBE_DEFER;
+static void msm_rpm_process_ack(uint32_t msg_id, int errno);
+
+int msm_rpm_register_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&msm_rpm_sleep_notifier, nb);
+}
+
+int msm_rpm_unregister_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&msm_rpm_sleep_notifier, nb);
+}
+
+enum {
+	MSM_RPM_MSG_REQUEST_TYPE = 0,
+	MSM_RPM_MSG_TYPE_NR,
+};
+
+static const uint32_t msm_rpm_request_service_v1[MSM_RPM_MSG_TYPE_NR] = {
+	0x716572, /* 'req\0' */
+};
+
+enum {
+	RPM_V1_REQUEST_SERVICE,
+	RPM_V1_SYSTEMDB_SERVICE,
+	RPM_V1_COMMAND_SERVICE,
+	RPM_V1_ACK_SERVICE,
+	RPM_V1_NACK_SERVICE,
+} msm_rpm_request_service_v2;
+
+struct rpm_v0_hdr {
+	uint32_t service_type;
+	uint32_t request_len;
+};
+
+struct rpm_v1_hdr {
+	uint32_t request_hdr;
+};
+
+struct rpm_message_header_v0 {
+	struct rpm_v0_hdr hdr;
+	uint32_t msg_id;
+	enum msm_rpm_set set;
+	uint32_t resource_type;
+	uint32_t resource_id;
+	uint32_t data_len;
+};
+
+struct rpm_message_header_v1 {
+	struct rpm_v1_hdr hdr;
+	uint32_t msg_id;
+	uint32_t resource_type;
+	uint32_t request_details;
+};
+
+struct msm_rpm_ack_msg_v0 {
+	uint32_t req;
+	uint32_t req_len;
+	uint32_t rsc_id;
+	uint32_t msg_len;
+	uint32_t id_ack;
+};
+
+struct msm_rpm_ack_msg_v1 {
+	uint32_t request_hdr;
+	uint32_t id_ack;
+};
+
+struct kvp {
+	unsigned int k;
+	unsigned int s;
+};
+
+struct msm_rpm_kvp_data {
+	uint32_t key;
+	uint32_t nbytes; /* number of bytes */
+	uint8_t *value;
+	bool valid;
+};
+
+struct slp_buf {
+	struct rb_node node;
+	char ubuf[MAX_SLEEP_BUFFER];
+	char *buf;
+	bool valid;
+};
+
+enum rpm_msg_fmts {
+	RPM_MSG_V0_FMT,
+	RPM_MSG_V1_FMT
+};
+
+static struct rb_root tr_root = RB_ROOT;
+static int msm_rpm_send_smd_buffer(char *buf, uint32_t size);
+static uint32_t msm_rpm_get_next_msg_id(void);
+
+static inline uint32_t get_offset_value(uint32_t val, uint32_t offset,
+		uint32_t size)
+{
+	return (((val) & GENMASK(offset + size - 1, offset))
+		>> offset);
+}
+
+static inline void change_offset_value(uint32_t *val, uint32_t offset,
+		uint32_t size, int32_t val1)
+{
+	uint32_t member = *val;
+	uint32_t offset_val = get_offset_value(member, offset, size);
+	uint32_t mask = (1 << size) - 1;
+
+	offset_val += val1;
+	*val &= CLEAR_FIELD(offset, size);
+	*val |= ((offset_val & mask) << offset);
+}
+
+static inline void set_offset_value(uint32_t *val, uint32_t offset,
+		uint32_t size, uint32_t val1)
+{
+	uint32_t mask = (1 << size) - 1;
+
+	*val &= CLEAR_FIELD(offset, size);
+	*val |= ((val1 & mask) << offset);
+}
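+
+/*
+ * For example (an illustrative sketch, not called anywhere in this
+ * driver), a v1 request header for a 12 byte request could be packed
+ * with the helpers above as:
+ *
+ *	uint32_t hdr = 0;
+ *
+ *	set_offset_value(&hdr, RPM_MSG_TYPE_OFFSET, RPM_MSG_TYPE_SIZE,
+ *			 RPM_V1_REQUEST_SERVICE);
+ *	set_offset_value(&hdr, RPM_MSG_VERSION_OFFSET, RPM_MSG_VERSION_SIZE,
+ *			 RPM_MSG_VERSION);
+ *	set_offset_value(&hdr, RPM_REQ_LEN_OFFSET, RPM_REQ_LEN_SIZE, 12);
+ */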
+
+static uint32_t get_msg_id(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct rpm_message_header_v0 *)buf)->msg_id;
+
+	return ((struct rpm_message_header_v1 *)buf)->msg_id;
+}
+
+static uint32_t get_ack_msg_id(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct msm_rpm_ack_msg_v0 *)buf)->id_ack;
+
+	return ((struct msm_rpm_ack_msg_v1 *)buf)->id_ack;
+}
+
+static uint32_t get_rsc_type(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct rpm_message_header_v0 *)buf)->resource_type;
+
+	return ((struct rpm_message_header_v1 *)buf)->resource_type;
+}
+
+static uint32_t get_set_type(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct rpm_message_header_v0 *)buf)->set;
+
+	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_SET_TYPE_OFFSET,
+			RPM_SET_TYPE_SIZE);
+}
+
+static uint32_t get_data_len(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct rpm_message_header_v0 *)buf)->data_len;
+
+	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_DATA_LEN_OFFSET,
+			RPM_DATA_LEN_SIZE);
+}
+
+static uint32_t get_rsc_id(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct rpm_message_header_v0 *)buf)->resource_id;
+
+	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_RSC_ID_OFFSET,
+			RPM_RSC_ID_SIZE);
+}
+
+static uint32_t get_ack_req_len(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct msm_rpm_ack_msg_v0 *)buf)->req_len;
+
+	return get_offset_value(((struct msm_rpm_ack_msg_v1 *)buf)->
+			request_hdr, RPM_REQ_LEN_OFFSET,
+			RPM_REQ_LEN_SIZE);
+}
+
+static uint32_t get_ack_msg_type(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct msm_rpm_ack_msg_v0 *)buf)->req;
+
+	return get_offset_value(((struct msm_rpm_ack_msg_v1 *)buf)->
+			request_hdr, RPM_MSG_TYPE_OFFSET,
+			RPM_MSG_TYPE_SIZE);
+}
+
+static uint32_t get_req_len(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct rpm_message_header_v0 *)buf)->hdr.request_len;
+
+	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
+			hdr.request_hdr, RPM_REQ_LEN_OFFSET,
+			RPM_REQ_LEN_SIZE);
+}
+
+static void set_msg_ver(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver) {
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			hdr.request_hdr, RPM_MSG_VERSION_OFFSET,
+			RPM_MSG_VERSION_SIZE, val);
+	} else {
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			hdr.request_hdr, RPM_MSG_VERSION_OFFSET,
+			RPM_MSG_VERSION_SIZE, 0);
+	}
+}
+
+static void set_req_len(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->hdr.request_len = val;
+	else {
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			hdr.request_hdr, RPM_REQ_LEN_OFFSET,
+			RPM_REQ_LEN_SIZE, val);
+	}
+}
+
+static void change_req_len(char *buf, int32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->hdr.request_len += val;
+	else {
+		change_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			hdr.request_hdr, RPM_REQ_LEN_OFFSET,
+			RPM_REQ_LEN_SIZE, val);
+	}
+}
+
+static void set_msg_type(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->hdr.service_type =
+			msm_rpm_request_service_v1[val];
+	else {
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			hdr.request_hdr, RPM_MSG_TYPE_OFFSET,
+			RPM_MSG_TYPE_SIZE, RPM_V1_REQUEST_SERVICE);
+	}
+}
+
+static void set_rsc_id(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->resource_id = val;
+	else
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_RSC_ID_OFFSET,
+			RPM_RSC_ID_SIZE, val);
+}
+
+static void set_data_len(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->data_len = val;
+	else
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_DATA_LEN_OFFSET,
+			RPM_DATA_LEN_SIZE, val);
+}
+
+static void change_data_len(char *buf, int32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->data_len += val;
+	else
+		change_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_DATA_LEN_OFFSET,
+			RPM_DATA_LEN_SIZE, val);
+}
+
+static void set_set_type(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->set = val;
+	else
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_SET_TYPE_OFFSET,
+			RPM_SET_TYPE_SIZE, val);
+}
+
+static void set_msg_id(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->msg_id = val;
+	else
+		((struct rpm_message_header_v1 *)buf)->msg_id = val;
+}
+
+static void set_rsc_type(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->resource_type = val;
+	else
+		((struct rpm_message_header_v1 *)buf)->resource_type = val;
+}
+
+static inline int get_buf_len(char *buf)
+{
+	return get_req_len(buf) + RPM_HDR_SIZE;
+}
+
+static inline struct kvp *get_first_kvp(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return (struct kvp *)(buf +
+				sizeof(struct rpm_message_header_v0));
+	else
+		return (struct kvp *)(buf +
+				sizeof(struct rpm_message_header_v1));
+}
+
+static inline struct kvp *get_next_kvp(struct kvp *k)
+{
+	return (struct kvp *)((void *)k + sizeof(*k) + k->s);
+}
+
+static inline void *get_data(struct kvp *k)
+{
+	return (void *)k + sizeof(*k);
+}
+
+static void delete_kvp(char *buf, struct kvp *d)
+{
+	struct kvp *n;
+	int dec;
+	uint32_t size;
+
+	n = get_next_kvp(d);
+	dec = (void *)n - (void *)d;
+	size = get_data_len(buf) -
+		((void *)n - (void *)get_first_kvp(buf));
+
+	memcpy((void *)d, (void *)n, size);
+
+	change_data_len(buf, -dec);
+	change_req_len(buf, -dec);
+}
+
+static inline void update_kvp_data(struct kvp *dest, struct kvp *src)
+{
+	memcpy(get_data(dest), get_data(src), src->s);
+}
+
+static void add_kvp(char *buf, struct kvp *n)
+{
+	int32_t inc = sizeof(*n) + n->s;
+
+	if (WARN_ON(get_req_len(buf) + inc > MAX_SLEEP_BUFFER))
+		return;
+
+	memcpy(buf + get_buf_len(buf), n, inc);
+
+	change_data_len(buf, inc);
+	change_req_len(buf, inc);
+}
+
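+/*
+ * Sleep set requests are cached in an rb-tree (tr_root) keyed by
+ * (resource type, resource id). Only entries whose cached data has
+ * actually changed are marked valid, so that msm_rpm_flush_requests()
+ * sends just the modified resources to the RPM when entering sleep.
+ */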
+static struct slp_buf *tr_search(struct rb_root *root, char *slp)
+{
+	unsigned int type = get_rsc_type(slp);
+	unsigned int id = get_rsc_id(slp);
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct slp_buf *cur = rb_entry(node, struct slp_buf, node);
+		unsigned int ctype = get_rsc_type(cur->buf);
+		unsigned int cid = get_rsc_id(cur->buf);
+
+		if (type < ctype)
+			node = node->rb_left;
+		else if (type > ctype)
+			node = node->rb_right;
+		else if (id < cid)
+			node = node->rb_left;
+		else if (id > cid)
+			node = node->rb_right;
+		else
+			return cur;
+	}
+	return NULL;
+}
+
+static int tr_insert(struct rb_root *root, struct slp_buf *slp)
+{
+	unsigned int type = get_rsc_type(slp->buf);
+	unsigned int id = get_rsc_id(slp->buf);
+	struct rb_node **node = &(root->rb_node), *parent = NULL;
+
+	while (*node) {
+		struct slp_buf *curr = rb_entry(*node, struct slp_buf, node);
+		unsigned int ctype = get_rsc_type(curr->buf);
+		unsigned int cid = get_rsc_id(curr->buf);
+
+		parent = *node;
+
+		if (type < ctype)
+			node = &((*node)->rb_left);
+		else if (type > ctype)
+			node = &((*node)->rb_right);
+		else if (id < cid)
+			node = &((*node)->rb_left);
+		else if (id > cid)
+			node = &((*node)->rb_right);
+		else
+			return -EINVAL;
+	}
+
+	rb_link_node(&slp->node, parent, node);
+	rb_insert_color(&slp->node, root);
+	slp->valid = true;
+	return 0;
+}
+
+static void tr_update(struct slp_buf *s, char *buf)
+{
+	struct kvp *e, *n;
+
+	for_each_kvp(buf, n) {
+		bool found = false;
+
+		for_each_kvp(s->buf, e) {
+			if (n->k == e->k) {
+				found = true;
+				if (n->s == e->s) {
+					void *e_data = get_data(e);
+					void *n_data = get_data(n);
+
+					if (memcmp(e_data, n_data, n->s)) {
+						update_kvp_data(e, n);
+						s->valid = true;
+					}
+				} else {
+					delete_kvp(s->buf, e);
+					add_kvp(s->buf, n);
+					s->valid = true;
+				}
+				break;
+			}
+
+		}
+		if (!found) {
+			add_kvp(s->buf, n);
+			s->valid = true;
+		}
+	}
+}
+
+static atomic_t msm_rpm_msg_id = ATOMIC_INIT(0);
+
+struct msm_rpm_request {
+	uint8_t *client_buf;
+	struct msm_rpm_kvp_data *kvp;
+	uint32_t num_elements;
+	uint32_t write_idx;
+	uint8_t *buf;
+	uint32_t numbytes;
+};
+
+/*
+ * Data related to message acknowledgment
+ */
+
+LIST_HEAD(msm_rpm_wait_list);
+
+struct msm_rpm_wait_data {
+	struct list_head list;
+	uint32_t msg_id;
+	bool ack_recd;
+	int errno;
+	struct completion ack;
+	bool delete_on_ack;
+};
+
+DEFINE_SPINLOCK(msm_rpm_list_lock);
+LIST_HEAD(msm_rpm_ack_list);
+
+static inline uint32_t msm_rpm_get_msg_id_from_ack(uint8_t *buf)
+{
+	return get_ack_msg_id(buf);
+}
+
+static inline int msm_rpm_get_error_from_ack(uint8_t *buf)
+{
+	uint8_t *tmp;
+	uint32_t req_len = get_ack_req_len(buf);
+	uint32_t msg_type = get_ack_msg_type(buf);
+	int rc = -ENODEV;
+	uint32_t err;
+	uint32_t ack_msg_size = rpm_msg_fmt_ver ?
+			sizeof(struct msm_rpm_ack_msg_v1) :
+			sizeof(struct msm_rpm_ack_msg_v0);
+
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT &&
+			msg_type == RPM_V1_ACK_SERVICE) {
+		return 0;
+	} else if (rpm_msg_fmt_ver && msg_type == RPM_V1_NACK_SERVICE) {
+		err = *(uint32_t *)(buf + sizeof(struct msm_rpm_ack_msg_v1));
+		return err;
+	}
+
+	req_len -= ack_msg_size;
+	req_len += 2 * sizeof(uint32_t);
+	if (!req_len)
+		return 0;
+
+	pr_err("%s:rpm returned error or nack req_len: %d id_ack: %d\n",
+				__func__, req_len, get_ack_msg_id(buf));
+
+	tmp = buf + ack_msg_size;
+
+	if (memcmp(tmp, ERR, sizeof(uint32_t))) {
+		pr_err("%s rpm returned error\n", __func__);
+		WARN_ON(1);
+	}
+
+	tmp += 2 * sizeof(uint32_t);
+
+	if (!(memcmp(tmp, INV_RSC, min_t(uint32_t, req_len,
+						sizeof(INV_RSC))-1))) {
+		pr_err("%s(): RPM NACK Unsupported resource\n", __func__);
+		rc = -EINVAL;
+	} else {
+		pr_err("%s(): RPM NACK Invalid header\n", __func__);
+	}
+
+	return rc;
+}
+
+int msm_rpm_smd_buffer_request(struct msm_rpm_request *cdata,
+		uint32_t size, gfp_t flag)
+{
+	struct slp_buf *slp;
+	static DEFINE_SPINLOCK(slp_buffer_lock);
+	unsigned long flags;
+	char *buf;
+
+	buf = cdata->buf;
+
+	if (size > MAX_SLEEP_BUFFER)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&slp_buffer_lock, flags);
+	slp = tr_search(&tr_root, buf);
+
+	if (!slp) {
+		slp = kzalloc(sizeof(struct slp_buf), GFP_ATOMIC);
+		if (!slp) {
+			spin_unlock_irqrestore(&slp_buffer_lock, flags);
+			return -ENOMEM;
+		}
+		slp->buf = PTR_ALIGN(&slp->ubuf[0], sizeof(u32));
+		memcpy(slp->buf, buf, size);
+		if (tr_insert(&tr_root, slp)) {
+			pr_err("Error updating sleep request\n");
+			kfree(slp);
+			spin_unlock_irqrestore(&slp_buffer_lock, flags);
+			return -EINVAL;
+		}
+	} else {
+		/* handle unsent requests */
+		tr_update(slp, buf);
+	}
+	trace_rpm_smd_sleep_set(get_msg_id(cdata->client_buf),
+			get_rsc_type(cdata->client_buf),
+			get_req_len(cdata->client_buf));
+
+	spin_unlock_irqrestore(&slp_buffer_lock, flags);
+
+	return 0;
+}
+
+static struct msm_rpm_driver_data msm_rpm_data = {
+	.smd_open = COMPLETION_INITIALIZER(msm_rpm_data.smd_open),
+};
+
+static int msm_rpm_flush_requests(bool print)
+{
+	struct rb_node *t;
+	int ret;
+	int count = 0;
+
+	for (t = rb_first(&tr_root); t; t = rb_next(t)) {
+
+		struct slp_buf *s = rb_entry(t, struct slp_buf, node);
+		unsigned int type = get_rsc_type(s->buf);
+		unsigned int id = get_rsc_id(s->buf);
+
+		if (!s->valid)
+			continue;
+
+		set_msg_id(s->buf, msm_rpm_get_next_msg_id());
+
+		ret = msm_rpm_send_smd_buffer(s->buf,
+					get_buf_len(s->buf));
+		WARN_ON(ret != 0);
+		trace_rpm_smd_send_sleep_set(get_msg_id(s->buf), type, id);
+
+		s->valid = false;
+		count++;
+
+		/*
+		 * RPM acks would need to be processed here if we were to
+		 * send more than MAX_WAIT_ON_ACK messages without
+		 * overrunning the SMD buffer. Since we expect only sleep
+		 * sets at this point (RPM power collapse would be disallowed
+		 * if we had pending active requests), we do not process
+		 * these sleep set acks and instead bail out below.
+		 */
+		if (count >= MAX_WAIT_ON_ACK) {
+			pr_err("Error: more than %d requests are buffered\n",
+							MAX_WAIT_ON_ACK);
+			return -ENOSPC;
+		}
+	}
+	return 0;
+}
+
+static void msm_rpm_notify_sleep_chain(char *buf,
+		struct msm_rpm_kvp_data *kvp)
+{
+	struct msm_rpm_notifier_data notif;
+
+	notif.rsc_type = get_rsc_type(buf);
+	notif.rsc_id = get_req_len(buf);
+	notif.key = kvp->key;
+	notif.size = kvp->nbytes;
+	notif.value = kvp->value;
+	atomic_notifier_call_chain(&msm_rpm_sleep_notifier, 0, &notif);
+}
+
+static int msm_rpm_add_kvp_data_common(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size)
+{
+	uint32_t i;
+	uint32_t data_size, msg_size;
+
+	if (probe_status)
+		return probe_status;
+
+	if (!handle || !data) {
+		pr_err("%s(): Invalid handle/data\n", __func__);
+		return -EINVAL;
+	}
+
+	if (size < 0)
+		return  -EINVAL;
+
+	data_size = ALIGN(size, SZ_4);
+	msg_size = data_size + 8;
+
+	for (i = 0; i < handle->write_idx; i++) {
+		if (handle->kvp[i].key != key)
+			continue;
+		if (handle->kvp[i].nbytes != data_size) {
+			kfree(handle->kvp[i].value);
+			handle->kvp[i].value = NULL;
+		} else {
+			if (!memcmp(handle->kvp[i].value, data, data_size))
+				return 0;
+		}
+		break;
+	}
+
+	if (i >= handle->num_elements) {
+		pr_err("Number of resources exceeds max allocated\n");
+		return -ENOMEM;
+	}
+
+	if (i == handle->write_idx)
+		handle->write_idx++;
+
+	if (!handle->kvp[i].value) {
+		handle->kvp[i].value = kzalloc(data_size, GFP_NOIO);
+
+		if (!handle->kvp[i].value)
+			return -ENOMEM;
+	} else {
+		/*
+		 * We enter the else case if the key already exists with the
+		 * same data size but different contents; zero out the stale
+		 * data before copying in the new value.
+		 */
+		memset(handle->kvp[i].value, 0, data_size);
+	}
+
+	if (!handle->kvp[i].valid)
+		change_data_len(handle->client_buf, msg_size);
+	else
+		change_data_len(handle->client_buf,
+			(data_size - handle->kvp[i].nbytes));
+
+	handle->kvp[i].nbytes = data_size;
+	handle->kvp[i].key = key;
+	memcpy(handle->kvp[i].value, data, size);
+	handle->kvp[i].valid = true;
+
+	return 0;
+}
+
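+/*
+ * Allocate a request handle together with its client-visible message
+ * header (v0 or v1 layout, depending on the detected protocol), an
+ * array of num_elements KVP slots and a default-sized transmit buffer.
+ */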
+static struct msm_rpm_request *msm_rpm_create_request_common(
+		enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id,
+		int num_elements)
+{
+	struct msm_rpm_request *cdata;
+	uint32_t buf_size;
+
+	if (probe_status)
+		return ERR_PTR(probe_status);
+
+	cdata = kzalloc(sizeof(struct msm_rpm_request), GFP_NOIO);
+
+	if (!cdata)
+		goto cdata_alloc_fail;
+
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		buf_size = sizeof(struct rpm_message_header_v0);
+	else
+		buf_size = sizeof(struct rpm_message_header_v1);
+
+	cdata->client_buf = kzalloc(buf_size, GFP_NOIO);
+
+	if (!cdata->client_buf)
+		goto client_buf_alloc_fail;
+
+	set_set_type(cdata->client_buf, set);
+	set_rsc_type(cdata->client_buf, rsc_type);
+	set_rsc_id(cdata->client_buf, rsc_id);
+
+	cdata->num_elements = num_elements;
+	cdata->write_idx = 0;
+
+	cdata->kvp = kcalloc(num_elements, sizeof(struct msm_rpm_kvp_data),
+				GFP_NOIO);
+
+	if (!cdata->kvp) {
+		pr_warn("%s(): Cannot allocate memory for key value data\n",
+				__func__);
+		goto kvp_alloc_fail;
+	}
+
+	cdata->buf = kzalloc(DEFAULT_BUFFER_SIZE, GFP_NOIO);
+
+	if (!cdata->buf)
+		goto buf_alloc_fail;
+
+	cdata->numbytes = DEFAULT_BUFFER_SIZE;
+	return cdata;
+
+buf_alloc_fail:
+	kfree(cdata->kvp);
+kvp_alloc_fail:
+	kfree(cdata->client_buf);
+client_buf_alloc_fail:
+	kfree(cdata);
+cdata_alloc_fail:
+	return NULL;
+}
+
+void msm_rpm_free_request(struct msm_rpm_request *handle)
+{
+	int i;
+
+	if (!handle)
+		return;
+	for (i = 0; i < handle->num_elements; i++)
+		kfree(handle->kvp[i].value);
+	kfree(handle->kvp);
+	kfree(handle->client_buf);
+	kfree(handle->buf);
+	kfree(handle);
+}
+EXPORT_SYMBOL(msm_rpm_free_request);
+
+struct msm_rpm_request *msm_rpm_create_request(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements)
+{
+	return msm_rpm_create_request_common(set, rsc_type, rsc_id,
+			num_elements);
+}
+EXPORT_SYMBOL(msm_rpm_create_request);
+
+int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size)
+{
+	return msm_rpm_add_kvp_data_common(handle, key, data, size);
+}
+EXPORT_SYMBOL(msm_rpm_add_kvp_data);
+
+int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size)
+{
+	return msm_rpm_add_kvp_data_common(handle, key, data, size);
+}
+EXPORT_SYMBOL(msm_rpm_add_kvp_data_noirq);
+
+bool msm_rpm_waiting_for_ack(void)
+{
+	bool ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+	ret = list_empty(&msm_rpm_wait_list);
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+
+	return !ret;
+}
+
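+/*
+ * Look up the wait-list entry for a message id. Returns NULL when no
+ * caller is waiting on that id, e.g. for sleep-set requests.
+ */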
+static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
+{
+	struct list_head *ptr;
+	struct msm_rpm_wait_data *elem = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+
+	list_for_each(ptr, &msm_rpm_wait_list) {
+		elem = list_entry(ptr, struct msm_rpm_wait_data, list);
+		if (elem && (elem->msg_id == msg_id))
+			break;
+		elem = NULL;
+	}
+
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+	return elem;
+}
+
+static uint32_t msm_rpm_get_next_msg_id(void)
+{
+	uint32_t id;
+
+	/*
+	 * A message id of 0 is used by the driver to indicate an error
+	 * condition. The RPM driver uses an id of 1 to indicate unsent
+	 * data when the data being sent hasn't been modified. This isn't
+	 * an error scenario, and wait-for-ack returns success when the
+	 * message id is 1.
+	 */
+
+	do {
+		id = atomic_inc_return(&msm_rpm_msg_id);
+	} while ((id == 0) || (id == 1) || msm_rpm_get_entry_from_msg_id(id));
+
+	return id;
+}
+
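+/*
+ * Queue a wait-list entry for an outgoing message. Entries that are
+ * freed on ack are queued at the tail, while entries a caller will
+ * block on go to the head so the ack handler finds them first.
+ */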
+static int msm_rpm_add_wait_list(uint32_t msg_id, bool delete_on_ack)
+{
+	unsigned long flags;
+	struct msm_rpm_wait_data *data =
+		kzalloc(sizeof(struct msm_rpm_wait_data), GFP_ATOMIC);
+
+	if (!data)
+		return -ENOMEM;
+
+	init_completion(&data->ack);
+	data->ack_recd = false;
+	data->msg_id = msg_id;
+	data->errno = INIT_ERROR;
+	data->delete_on_ack = delete_on_ack;
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+	if (delete_on_ack)
+		list_add_tail(&data->list, &msm_rpm_wait_list);
+	else
+		list_add(&data->list, &msm_rpm_wait_list);
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+
+	return 0;
+}
+
+static void msm_rpm_free_list_entry(struct msm_rpm_wait_data *elem)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+	list_del(&elem->list);
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+	kfree(elem);
+}
+
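+/*
+ * Match an incoming ack against the wait list, record its error code
+ * and complete the waiter. The entry is freed here only if the sender
+ * asked for delete-on-ack; otherwise the waiter frees it.
+ */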
+static void msm_rpm_process_ack(uint32_t msg_id, int errno)
+{
+	struct list_head *ptr, *next;
+	struct msm_rpm_wait_data *elem = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+
+	list_for_each_safe(ptr, next, &msm_rpm_wait_list) {
+		elem = list_entry(ptr, struct msm_rpm_wait_data, list);
+		if (elem->msg_id == msg_id) {
+			elem->errno = errno;
+			elem->ack_recd = true;
+			complete(&elem->ack);
+			if (elem->delete_on_ack) {
+				list_del(&elem->list);
+				kfree(elem);
+			}
+			break;
+		}
+	}
+	/*
+	 * Special case where the sleep driver doesn't wait for ACKs.
+	 * This decreases the latency involved in entering RPM-assisted
+	 * power collapse.
+	 */
+
+	if (!elem)
+		trace_rpm_smd_ack_recvd(0, msg_id, 0xDEADBEEF);
+
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+}
+
+struct msm_rpm_kvp_packet {
+	uint32_t id;
+	uint32_t len;
+	uint32_t val;
+};
+
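+/*
+ * Format a request into a single log line. Depending on the debug
+ * mask, keys and resource types are decoded from their four-character
+ * codes (pretty), dumped as hex (raw), or both.
+ */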
+static void msm_rpm_log_request(struct msm_rpm_request *cdata)
+{
+	char buf[DEBUG_PRINT_BUFFER_SIZE];
+	size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
+	char name[5];
+	u32 value;
+	uint32_t i;
+	int j, prev_valid;
+	int valid_count = 0;
+	int pos = 0;
+	uint32_t res_type, rsc_id;
+
+	name[4] = 0;
+
+	for (i = 0; i < cdata->write_idx; i++)
+		if (cdata->kvp[i].valid)
+			valid_count++;
+
+	pos += scnprintf(buf + pos, buflen - pos, "%sRPM req: ", KERN_INFO);
+	if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_SHOW_MSG_ID)
+		pos += scnprintf(buf + pos, buflen - pos, "msg_id=%u, ",
+				get_msg_id(cdata->client_buf));
+	pos += scnprintf(buf + pos, buflen - pos, "s=%s",
+		(get_set_type(cdata->client_buf) ==
+				MSM_RPM_CTX_ACTIVE_SET ? "act" : "slp"));
+
+	res_type = get_rsc_type(cdata->client_buf);
+	rsc_id = get_rsc_id(cdata->client_buf);
+	if ((msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY)
+	    && (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_RAW)) {
+		/* Both pretty and raw formatting */
+		memcpy(name, &res_type, sizeof(uint32_t));
+		pos += scnprintf(buf + pos, buflen - pos,
+			", rsc_type=0x%08X (%s), rsc_id=%u; ",
+			res_type, name, rsc_id);
+
+		for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+			if (!cdata->kvp[i].valid)
+				continue;
+
+			memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
+			pos += scnprintf(buf + pos, buflen - pos,
+					"[key=0x%08X (%s), value=%s",
+					cdata->kvp[i].key, name,
+					(cdata->kvp[i].nbytes ? "0x" : "null"));
+
+			for (j = 0; j < cdata->kvp[i].nbytes; j++)
+				pos += scnprintf(buf + pos, buflen - pos,
+						"%02X ",
+						cdata->kvp[i].value[j]);
+
+			if (cdata->kvp[i].nbytes)
+				pos += scnprintf(buf + pos, buflen - pos, "(");
+
+			for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
+				value = 0;
+				memcpy(&value, &cdata->kvp[i].value[j],
+					min_t(uint32_t, sizeof(uint32_t),
+						cdata->kvp[i].nbytes - j));
+				pos += scnprintf(buf + pos, buflen - pos, "%u",
+						value);
+				if (j + 4 < cdata->kvp[i].nbytes)
+					pos += scnprintf(buf + pos,
+						buflen - pos, " ");
+			}
+			if (cdata->kvp[i].nbytes)
+				pos += scnprintf(buf + pos, buflen - pos, ")");
+			pos += scnprintf(buf + pos, buflen - pos, "]");
+			if (prev_valid + 1 < valid_count)
+				pos += scnprintf(buf + pos, buflen - pos, ", ");
+			prev_valid++;
+		}
+	} else if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY) {
+		/* Pretty formatting only */
+		memcpy(name, &res_type, sizeof(uint32_t));
+		pos += scnprintf(buf + pos, buflen - pos, " %s %u; ", name,
+			rsc_id);
+
+		for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+			if (!cdata->kvp[i].valid)
+				continue;
+
+			memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
+			pos += scnprintf(buf + pos, buflen - pos, "%s=%s",
+				name, (cdata->kvp[i].nbytes ? "" : "null"));
+
+			for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
+				value = 0;
+				memcpy(&value, &cdata->kvp[i].value[j],
+					min_t(uint32_t, sizeof(uint32_t),
+						cdata->kvp[i].nbytes - j));
+				pos += scnprintf(buf + pos, buflen - pos, "%u",
+						value);
+
+				if (j + 4 < cdata->kvp[i].nbytes)
+					pos += scnprintf(buf + pos,
+						buflen - pos, " ");
+			}
+			if (prev_valid + 1 < valid_count)
+				pos += scnprintf(buf + pos, buflen - pos, ", ");
+			prev_valid++;
+		}
+	} else {
+		/* Raw formatting only */
+		pos += scnprintf(buf + pos, buflen - pos,
+			", rsc_type=0x%08X, rsc_id=%u; ", res_type, rsc_id);
+
+		for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+			if (!cdata->kvp[i].valid)
+				continue;
+
+			pos += scnprintf(buf + pos, buflen - pos,
+					"[key=0x%08X, value=%s",
+					cdata->kvp[i].key,
+					(cdata->kvp[i].nbytes ? "0x" : "null"));
+			for (j = 0; j < cdata->kvp[i].nbytes; j++) {
+				pos += scnprintf(buf + pos, buflen - pos,
+						"%02X",
+						cdata->kvp[i].value[j]);
+				if (j + 1 < cdata->kvp[i].nbytes)
+					pos += scnprintf(buf + pos,
+							buflen - pos, " ");
+			}
+			pos += scnprintf(buf + pos, buflen - pos, "]");
+			if (prev_valid + 1 < valid_count)
+				pos += scnprintf(buf + pos, buflen - pos, ", ");
+			prev_valid++;
+		}
+	}
+
+	pos += scnprintf(buf + pos, buflen - pos, "\n");
+	pr_info("request info %s\n", buf);
+}
+
+static int msm_rpm_send_smd_buffer(char *buf, uint32_t size)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
+	ret = rpmsg_send(rpm->rpm_channel, buf, size);
+	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
+	return ret;
+}
+
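+/*
+ * Marshal the message header and all valid KVPs into cdata->buf and
+ * hand the result to the transport. Sleep-set requests are cached via
+ * msm_rpm_smd_buffer_request() instead of being sent immediately;
+ * active-set requests get a fresh message id and a wait-list entry.
+ */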
+static int msm_rpm_send_data(struct msm_rpm_request *cdata,
+		int msg_type, bool noack)
+{
+	uint8_t *tmpbuff;
+	int ret;
+	uint32_t i;
+	uint32_t msg_size;
+	int msg_hdr_sz, req_hdr_sz;
+	uint32_t data_len = get_data_len(cdata->client_buf);
+	uint32_t set = get_set_type(cdata->client_buf);
+	uint32_t msg_id;
+
+	if (probe_status) {
+		pr_err("probe failed\n");
+		return probe_status;
+	}
+	if (!data_len) {
+		pr_err("no data len\n");
+		return 1;
+	}
+
+	msg_hdr_sz = rpm_msg_fmt_ver ? sizeof(struct rpm_message_header_v1) :
+			sizeof(struct rpm_message_header_v0);
+
+	req_hdr_sz = RPM_HDR_SIZE;
+	set_msg_type(cdata->client_buf, msg_type);
+
+	set_req_len(cdata->client_buf, data_len + msg_hdr_sz - req_hdr_sz);
+	msg_size = get_req_len(cdata->client_buf) + req_hdr_sz;
+
+	/* grow the transmit buffer if the message no longer fits */
+	if (msg_size > cdata->numbytes) {
+		kfree(cdata->buf);
+		cdata->numbytes = msg_size;
+		cdata->buf = kzalloc(msg_size, GFP_NOIO);
+	}
+
+	if (!cdata->buf) {
+		pr_err("Failed malloc\n");
+		return 0;
+	}
+
+	tmpbuff = cdata->buf;
+
+	tmpbuff += msg_hdr_sz;
+	for (i = 0; (i < cdata->write_idx); i++) {
+		/* Sanity check */
+		WARN_ON((tmpbuff - cdata->buf) > cdata->numbytes);
+
+		if (!cdata->kvp[i].valid)
+			continue;
+
+		memcpy(tmpbuff, &cdata->kvp[i].key, sizeof(uint32_t));
+		tmpbuff += sizeof(uint32_t);
+
+		memcpy(tmpbuff, &cdata->kvp[i].nbytes, sizeof(uint32_t));
+		tmpbuff += sizeof(uint32_t);
+
+		memcpy(tmpbuff, cdata->kvp[i].value, cdata->kvp[i].nbytes);
+		tmpbuff += cdata->kvp[i].nbytes;
+
+		if (set == MSM_RPM_CTX_SLEEP_SET)
+			msm_rpm_notify_sleep_chain(cdata->client_buf,
+					&cdata->kvp[i]);
+
+	}
+
+	memcpy(cdata->buf, cdata->client_buf, msg_hdr_sz);
+	if ((set == MSM_RPM_CTX_SLEEP_SET) &&
+		!msm_rpm_smd_buffer_request(cdata, msg_size, GFP_NOIO)) {
+		return 1;
+	}
+
+	msg_id = msm_rpm_get_next_msg_id();
+	/* Set the version bit for new protocol */
+	set_msg_ver(cdata->buf, rpm_msg_fmt_ver);
+	set_msg_id(cdata->buf, msg_id);
+	set_msg_id(cdata->client_buf, msg_id);
+
+	if (msm_rpm_debug_mask
+	    & (MSM_RPM_LOG_REQUEST_PRETTY | MSM_RPM_LOG_REQUEST_RAW))
+		msm_rpm_log_request(cdata);
+
+	if (standalone) {
+		for (i = 0; (i < cdata->write_idx); i++)
+			cdata->kvp[i].valid = false;
+
+		set_data_len(cdata->client_buf, 0);
+		ret = msg_id;
+		return ret;
+	}
+
+	msm_rpm_add_wait_list(msg_id, noack);
+
+	ret = msm_rpm_send_smd_buffer(&cdata->buf[0], msg_size);
+
+	if (!ret) {
+		for (i = 0; (i < cdata->write_idx); i++)
+			cdata->kvp[i].valid = false;
+		set_data_len(cdata->client_buf, 0);
+		ret = msg_id;
+		trace_rpm_smd_send_active_set(msg_id,
+			get_rsc_type(cdata->client_buf),
+			get_rsc_id(cdata->client_buf));
+	} else if (ret < 0) {
+		struct msm_rpm_wait_data *rc;
+
+		pr_err("Failed to write data msg_size:%d ret:%d msg_id:%d\n",
+				msg_size, ret, msg_id);
+		ret = 0;
+		rc = msm_rpm_get_entry_from_msg_id(msg_id);
+		if (rc)
+			msm_rpm_free_list_entry(rc);
+	}
+	return ret;
+}
+
+static int _msm_rpm_send_request(struct msm_rpm_request *handle, bool noack)
+{
+	int ret;
+	static DEFINE_MUTEX(send_mtx);
+
+	mutex_lock(&send_mtx);
+	ret = msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, noack);
+	mutex_unlock(&send_mtx);
+
+	return ret;
+}
+
+int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
+{
+	return _msm_rpm_send_request(handle, false);
+}
+EXPORT_SYMBOL(msm_rpm_send_request_noirq);
+
+int msm_rpm_send_request(struct msm_rpm_request *handle)
+{
+	return _msm_rpm_send_request(handle, false);
+}
+EXPORT_SYMBOL(msm_rpm_send_request);
+
+void *msm_rpm_send_request_noack(struct msm_rpm_request *handle)
+{
+	int ret;
+
+	ret = _msm_rpm_send_request(handle, true);
+
+	return ret < 0 ? ERR_PTR(ret) : NULL;
+}
+EXPORT_SYMBOL(msm_rpm_send_request_noack);
+
+int msm_rpm_wait_for_ack(uint32_t msg_id)
+{
+	struct msm_rpm_wait_data *elem;
+	int rc = 0;
+
+	if (!msg_id) {
+		pr_err("Invalid msg id\n");
+		return -EINVAL;
+	}
+
+	if (msg_id == 1)
+		return rc;
+
+	if (standalone)
+		return rc;
+
+	elem = msm_rpm_get_entry_from_msg_id(msg_id);
+	if (!elem)
+		return rc;
+
+	wait_for_completion(&elem->ack);
+	trace_rpm_smd_ack_recvd(0, msg_id, 0xDEADFEED);
+
+	rc = elem->errno;
+	msm_rpm_free_list_entry(elem);
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_rpm_wait_for_ack);
+
+int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
+{
+	return msm_rpm_wait_for_ack(msg_id);
+}
+EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq);
+
+void *msm_rpm_send_message_noack(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+	int i, rc;
+	struct msm_rpm_request *req =
+		msm_rpm_create_request_common(set, rsc_type, rsc_id, nelems);
+
+	if (IS_ERR(req))
+		return req;
+
+	if (!req)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < nelems; i++) {
+		rc = msm_rpm_add_kvp_data(req, kvp[i].key,
+				kvp[i].data, kvp[i].length);
+		if (rc)
+			goto bail;
+	}
+
+	rc = PTR_ERR(msm_rpm_send_request_noack(req));
+bail:
+	msm_rpm_free_request(req);
+	return rc < 0 ? ERR_PTR(rc) : NULL;
+}
+EXPORT_SYMBOL(msm_rpm_send_message_noack);
+
+int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+	int i, rc;
+	struct msm_rpm_request *req =
+		msm_rpm_create_request(set, rsc_type, rsc_id, nelems);
+
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
+	if (!req)
+		return -ENOMEM;
+
+	for (i = 0; i < nelems; i++) {
+		rc = msm_rpm_add_kvp_data(req, kvp[i].key,
+				kvp[i].data, kvp[i].length);
+		if (rc)
+			goto bail;
+	}
+
+	rc = msm_rpm_wait_for_ack(msm_rpm_send_request(req));
+bail:
+	msm_rpm_free_request(req);
+	return rc;
+}
+EXPORT_SYMBOL(msm_rpm_send_message);
+
+int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
+			uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+	return msm_rpm_send_message(set, rsc_type, rsc_id, kvp, nelems);
+}
+EXPORT_SYMBOL(msm_rpm_send_message_noirq);
+
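+/*
+ * Mask or unmask the SMD receive interrupt. While masking, the
+ * interrupt affinity can optionally be moved to the given cpumask.
+ */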
+static int smd_mask_receive_interrupt(bool mask,
+		const struct cpumask *cpumask)
+{
+	struct irq_chip *irq_chip;
+	struct irq_data *irq_data;
+
+	irq_data = irq_get_irq_data(rpm->irq);
+	if (!irq_data)
+		return -ENODEV;
+
+	irq_chip = irq_data->chip;
+	if (!irq_chip)
+		return -ENODEV;
+
+	if (mask) {
+		irq_chip->irq_mask(irq_data);
+		if (cpumask)
+			irq_set_affinity(rpm->irq, cpumask);
+	} else {
+		irq_chip->irq_unmask(irq_data);
+	}
+
+	return 0;
+}
+
+/*
+ * During power collapse, the rpm driver disables the SMD interrupts to
+ * make sure that the interrupt doesn't wake us from sleep.
+ */
+int msm_rpm_enter_sleep(bool print, const struct cpumask *cpumask)
+{
+	int ret = 0;
+
+	if (standalone)
+		return 0;
+
+	ret = smd_mask_receive_interrupt(true, cpumask);
+	if (!ret) {
+		ret = msm_rpm_flush_requests(print);
+		if (ret)
+			smd_mask_receive_interrupt(false, NULL);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(msm_rpm_enter_sleep);
+
+/*
+ * When the system resumes from power collapse, the SMD interrupt that was
+ * disabled on entry has to be re-enabled to continue processing SMD
+ * messages.
+ */
+void msm_rpm_exit_sleep(void)
+{
+	if (standalone)
+		return;
+
+	smd_mask_receive_interrupt(false, NULL);
+}
+EXPORT_SYMBOL(msm_rpm_exit_sleep);
+
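+/*
+ * rpmsg receive callback: decode the message id and error code from
+ * the ack and complete the matching waiter, if there is one.
+ */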
+static int qcom_smd_rpm_callback(struct rpmsg_device *rpdev, void *ptr,
+				int size, void *priv, u32 addr)
+{
+	uint32_t msg_id;
+	int errno;
+	char buf[MAX_ERR_BUFFER_SIZE] = {0};
+	struct msm_rpm_wait_data *elem;
+	static DEFINE_SPINLOCK(rx_notify_lock);
+	unsigned long flags;
+
+	if (!size)
+		return -EINVAL;
+
+	WARN_ON(size > MAX_ERR_BUFFER_SIZE);
+
+	spin_lock_irqsave(&rx_notify_lock, flags);
+	memcpy(buf, ptr, size);
+	msg_id = msm_rpm_get_msg_id_from_ack(buf);
+	errno = msm_rpm_get_error_from_ack(buf);
+	elem = msm_rpm_get_entry_from_msg_id(msg_id);
+
+	/*
+	 * This is the case for sleep-set requests, which are not added
+	 * to the wait list. Without this check we would run into a NULL
+	 * pointer dereference below.
+	 */
+	if (!elem) {
+		spin_unlock_irqrestore(&rx_notify_lock, flags);
+		return 0;
+	}
+
+	msm_rpm_process_ack(msg_id, errno);
+	spin_unlock_irqrestore(&rx_notify_lock, flags);
+
+	return 0;
+}
+
+static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev)
+{
+	char *key = NULL;
+	struct device_node *p;
+	int ret = 0;
+	int irq;
+	void __iomem *reg_base;
+	uint32_t version = V0_PROTOCOL_VERSION; /* set to default v0 format */
+
+	p = of_find_compatible_node(NULL, NULL, "qcom,rpm-smd");
+	if (!p) {
+		pr_err("Unable to find rpm-smd\n");
+		probe_status = -ENODEV;
+		goto fail;
+	}
+
+	key = "rpm-standalone";
+	standalone = of_property_read_bool(p, key);
+	if (standalone) {
+		probe_status = ret;
+		goto skip_init;
+	}
+
+	reg_base = of_iomap(p, 0);
+	if (reg_base) {
+		version = readq_relaxed(reg_base);
+		iounmap(reg_base);
+	}
+
+	if (version == V1_PROTOCOL_VERSION)
+		rpm_msg_fmt_ver = RPM_MSG_V1_FMT;
+
+	pr_info("RPM-SMD running version %d\n", rpm_msg_fmt_ver);
+
+	irq = of_irq_get(p, 0);
+	if (irq <= 0) {
+		pr_err("Unable to get rpm-smd interrupt number\n");
+		probe_status = -ENODEV;
+		goto fail;
+	}
+
+	rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL);
+	if (!rpm) {
+		probe_status = -ENOMEM;
+		goto fail;
+	}
+
+	rpm->dev = &rpdev->dev;
+	rpm->rpm_channel = rpdev->ept;
+	dev_set_drvdata(&rpdev->dev, rpm);
+	priv_rpm = *rpm;
+	rpm->irq = irq;
+
+	mutex_init(&rpm->lock);
+	init_completion(&rpm->ack);
+	spin_lock_init(&msm_rpm_data.smd_lock_write);
+	spin_lock_init(&msm_rpm_data.smd_lock_read);
+
+skip_init:
+	probe_status = of_platform_populate(p, NULL, NULL, &rpdev->dev);
+
+	if (standalone)
+		pr_info("RPM running in standalone mode\n");
+fail:
+	return probe_status;
+}
+
+static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev)
+{
+	of_platform_depopulate(&rpdev->dev);
+}
+
+static struct rpmsg_device_id rpmsg_driver_rpm_id_table[] = {
+	{ .name	= "rpm_requests" },
+	{ },
+};
+
+static struct rpmsg_driver qcom_smd_rpm_driver = {
+	.probe = qcom_smd_rpm_probe,
+	.remove = qcom_smd_rpm_remove,
+	.callback = qcom_smd_rpm_callback,
+	.id_table = rpmsg_driver_rpm_id_table,
+	.drv  = {
+		.name  = "qcom_rpm_smd",
+		.owner = THIS_MODULE,
+	},
+};
+
+int __init msm_rpm_driver_init(void)
+{
+	int ret;
+
+	ret = register_rpmsg_driver(&qcom_smd_rpm_driver);
+	if (ret)
+		pr_err("register_rpmsg_driver: failed with err %d\n", ret);
+
+	return ret;
+}
+postcore_initcall_sync(msm_rpm_driver_init);
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
index c04a1ed..c370268 100644
--- a/drivers/rtc/rtc-pcf85363.c
+++ b/drivers/rtc/rtc-pcf85363.c
@@ -169,7 +169,12 @@ static int pcf85363_rtc_set_time(struct device *dev, struct rtc_time *tm)
 	buf[DT_YEARS] = bin2bcd(tm->tm_year % 100);
 
 	ret = regmap_bulk_write(pcf85363->regmap, CTRL_STOP_EN,
-				tmp, sizeof(tmp));
+				tmp, 2);
+	if (ret)
+		return ret;
+
+	ret = regmap_bulk_write(pcf85363->regmap, DT_100THS,
+				buf, sizeof(tmp) - 2);
 	if (ret)
 		return ret;
 
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index b2483a7..3cf011e 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -273,6 +273,10 @@ static int snvs_rtc_probe(struct platform_device *pdev)
 	if (!data)
 		return -ENOMEM;
 
+	data->rtc = devm_rtc_allocate_device(&pdev->dev);
+	if (IS_ERR(data->rtc))
+		return PTR_ERR(data->rtc);
+
 	data->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "regmap");
 
 	if (IS_ERR(data->regmap)) {
@@ -335,10 +339,9 @@ static int snvs_rtc_probe(struct platform_device *pdev)
 		goto error_rtc_device_register;
 	}
 
-	data->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
-					&snvs_rtc_ops, THIS_MODULE);
-	if (IS_ERR(data->rtc)) {
-		ret = PTR_ERR(data->rtc);
+	data->rtc->ops = &snvs_rtc_ops;
+	ret = rtc_register_device(data->rtc);
+	if (ret) {
 		dev_err(&pdev->dev, "failed to register rtc: %d\n", ret);
 		goto error_rtc_device_register;
 	}
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 93b2862..674d848 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -372,7 +372,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
 		goto error;
 	}
 	/* Check for trailing stuff. */
-	if (i == num_devices && strlen(buf) > 0) {
+	if (i == num_devices && buf && strlen(buf) > 0) {
 		rc = -EINVAL;
 		goto error;
 	}
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index aea5029..df09ed5 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1213,6 +1213,8 @@ device_initcall(cio_settle_init);
 
 int sch_is_pseudo_sch(struct subchannel *sch)
 {
+	if (!sch->dev.parent)
+		return 0;
 	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
 }
 
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index a57b969..3be5465 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -777,6 +777,8 @@ static int ap_device_probe(struct device *dev)
 		drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
 		if (!!devres != !!drvres)
 			return -ENODEV;
+		/* (re-)init queue's state machine */
+		ap_queue_reinit_state(to_ap_queue(dev));
 	}
 
 	/* Add queue/card to list of active queues/cards */
@@ -809,6 +811,8 @@ static int ap_device_remove(struct device *dev)
 	struct ap_device *ap_dev = to_ap_dev(dev);
 	struct ap_driver *ap_drv = ap_dev->drv;
 
+	if (is_queue_dev(dev))
+		ap_queue_remove(to_ap_queue(dev));
 	if (ap_drv->remove)
 		ap_drv->remove(ap_dev);
 
@@ -1446,10 +1450,6 @@ static void ap_scan_bus(struct work_struct *unused)
 			aq->ap_dev.device.parent = &ac->ap_dev.device;
 			dev_set_name(&aq->ap_dev.device,
 				     "%02x.%04x", id, dom);
-			/* Start with a device reset */
-			spin_lock_bh(&aq->lock);
-			ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
-			spin_unlock_bh(&aq->lock);
 			/* Register device */
 			rc = device_register(&aq->ap_dev.device);
 			if (rc) {
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 5246cd8..7e85d23 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -253,6 +253,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
 void ap_queue_remove(struct ap_queue *aq);
 void ap_queue_suspend(struct ap_device *ap_dev);
 void ap_queue_resume(struct ap_device *ap_dev);
+void ap_queue_reinit_state(struct ap_queue *aq);
 
 struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
 			       int comp_device_type, unsigned int functions);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 66f7334..0aa4b3c 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -718,5 +718,20 @@ void ap_queue_remove(struct ap_queue *aq)
 {
 	ap_flush_queue(aq);
 	del_timer_sync(&aq->timeout);
+
+	/* reset with zero, also clears irq registration */
+	spin_lock_bh(&aq->lock);
+	ap_zapq(aq->qid);
+	aq->state = AP_STATE_BORKED;
+	spin_unlock_bh(&aq->lock);
 }
 EXPORT_SYMBOL(ap_queue_remove);
+
+void ap_queue_reinit_state(struct ap_queue *aq)
+{
+	spin_lock_bh(&aq->lock);
+	aq->state = AP_STATE_RESET_START;
+	ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+	spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_queue_reinit_state);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index f4ae5fa..ff17a00 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -198,7 +198,6 @@ static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
 	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
 	struct zcrypt_queue *zq = aq->private;
 
-	ap_queue_remove(aq);
 	if (zq)
 		zcrypt_queue_unregister(zq);
 }
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 35d58db..2a42e59 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -273,7 +273,6 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
 	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
 	struct zcrypt_queue *zq = aq->private;
 
-	ap_queue_remove(aq);
 	if (zq)
 		zcrypt_queue_unregister(zq);
 }
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 94d9f72..baa683c 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -276,7 +276,6 @@ static void zcrypt_pcixcc_queue_remove(struct ap_device *ap_dev)
 	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
 	struct zcrypt_queue *zq = aq->private;
 
-	ap_queue_remove(aq);
 	if (zq)
 		zcrypt_queue_unregister(zq);
 }
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 3c86e27f..df888506 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -21,6 +21,11 @@
 
 struct kmem_cache *zfcp_fsf_qtcb_cache;
 
+static bool ber_stop = true;
+module_param(ber_stop, bool, 0600);
+MODULE_PARM_DESC(ber_stop,
+		 "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)");
+
 static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
 {
 	struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
@@ -230,10 +235,15 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 	case FSF_STATUS_READ_SENSE_DATA_AVAIL:
 		break;
 	case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
-		dev_warn(&adapter->ccw_device->dev,
-			 "The error threshold for checksum statistics "
-			 "has been exceeded\n");
 		zfcp_dbf_hba_bit_err("fssrh_3", req);
+		if (ber_stop) {
+			dev_warn(&adapter->ccw_device->dev,
+				 "All paths over this FCP device are disused because of excessive bit errors\n");
+			zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
+		} else {
+			dev_warn(&adapter->ccw_device->dev,
+				 "The error threshold for checksum statistics has been exceeded\n");
+		}
 		break;
 	case FSF_STATUS_READ_LINK_DOWN:
 		zfcp_fsf_status_read_link_down(req);
@@ -1594,6 +1604,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
 	struct zfcp_fsf_req *req;
+	unsigned long req_id = 0;
 	int retval = -EIO;
 
 	spin_lock_irq(&qdio->req_q_lock);
@@ -1616,6 +1627,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
 	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
 	req->data = wka_port;
 
+	req_id = req->req_id;
+
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 	retval = zfcp_fsf_req_send(req);
 	if (retval)
@@ -1623,7 +1636,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
 out:
 	spin_unlock_irq(&qdio->req_q_lock);
 	if (!retval)
-		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
+		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
 	return retval;
 }
 
@@ -1649,6 +1662,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
 	struct zfcp_fsf_req *req;
+	unsigned long req_id = 0;
 	int retval = -EIO;
 
 	spin_lock_irq(&qdio->req_q_lock);
@@ -1671,6 +1685,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
 	req->data = wka_port;
 	req->qtcb->header.port_handle = wka_port->handle;
 
+	req_id = req->req_id;
+
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 	retval = zfcp_fsf_req_send(req);
 	if (retval)
@@ -1678,7 +1694,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
 out:
 	spin_unlock_irq(&qdio->req_q_lock);
 	if (!retval)
-		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
+		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
 	return retval;
 }
 
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index ec54538..67efdf2 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -132,6 +132,7 @@ struct airq_info {
 	struct airq_iv *aiv;
 };
 static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
+static DEFINE_MUTEX(airq_areas_lock);
 
 #define CCW_CMD_SET_VQ 0x13
 #define CCW_CMD_VDEV_RESET 0x33
@@ -244,9 +245,11 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
 	unsigned long bit, flags;
 
 	for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
+		mutex_lock(&airq_areas_lock);
 		if (!airq_areas[i])
 			airq_areas[i] = new_airq_info();
 		info = airq_areas[i];
+		mutex_unlock(&airq_areas_lock);
 		if (!info)
 			return 0;
 		write_lock_irqsave(&info->lock, flags);
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 1c5051b..9e28792 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -578,7 +578,6 @@ ch_release(struct inode *inode, struct file *file)
 	scsi_changer *ch = file->private_data;
 
 	scsi_device_put(ch->device);
-	ch->device = NULL;
 	file->private_data = NULL;
 	kref_put(&ch->ref, ch_destroy);
 	return 0;
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index d27fabae..6c629ef 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -546,6 +546,8 @@ static void send_mode_select(struct work_struct *work)
 	spin_unlock(&ctlr->ms_lock);
 
  retry:
+	memset(cdb, 0, sizeof(cdb));
+
 	data_size = rdac_failover_get(ctlr, &list, cdb);
 
 	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c43eccd..f570b8c 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2320,6 +2320,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
 	case IOACCEL2_SERV_RESPONSE_COMPLETE:
 		switch (c2->error_data.status) {
 		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
+			if (cmd)
+				cmd->result = 0;
 			break;
 		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
 			cmd->result |= SAM_STAT_CHECK_CONDITION;
@@ -2479,8 +2481,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
 
 	/* check for good status */
 	if (likely(c2->error_data.serv_response == 0 &&
-			c2->error_data.status == 0))
+			c2->error_data.status == 0)) {
+		cmd->result = 0;
 		return hpsa_cmd_free_and_done(h, c, cmd);
+	}
 
 	/*
 	 * Any RAID offload error results in retry which will use
@@ -5618,6 +5622,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 	c = cmd_tagged_alloc(h, cmd);
 
 	/*
+	 * This is necessary because the SML doesn't zero out this field during
+	 * error recovery.
+	 */
+	cmd->result = 0;
+
+	/*
 	 * Call alternate submit routine for I/O accelerated commands.
 	 * Retries always go down the normal I/O path.
 	 */
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 8c71541..a84878f 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4189,11 +4189,11 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		 */
 		if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
 		    pdev->subsystem_device == 0xC000)
-		   	return -ENODEV;
+			goto out_disable_device;
 		/* Now check the magic signature byte */
 		pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
 		if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
-			return -ENODEV;
+			goto out_disable_device;
 		/* Ok it is probably a megaraid */
 	}
 
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 806ceab..bc37666f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -5218,7 +5218,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 {
 	u32 max_sectors_1;
 	u32 max_sectors_2, tmp_sectors, msix_enable;
-	u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
+	u32 scratch_pad_2, scratch_pad_3, scratch_pad_4, status_reg;
 	resource_size_t base_addr;
 	struct megasas_register_set __iomem *reg_set;
 	struct megasas_ctrl_info *ctrl_info = NULL;
@@ -5226,6 +5226,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 	int i, j, loop, fw_msix_count = 0;
 	struct IOV_111 *iovPtr;
 	struct fusion_context *fusion;
+	bool do_adp_reset = true;
 
 	fusion = instance->ctrl_context;
 
@@ -5274,19 +5275,29 @@ static int megasas_init_fw(struct megasas_instance *instance)
 	}
 
 	if (megasas_transition_to_ready(instance, 0)) {
-		atomic_set(&instance->fw_reset_no_pci_access, 1);
-		instance->instancet->adp_reset
-			(instance, instance->reg_set);
-		atomic_set(&instance->fw_reset_no_pci_access, 0);
-		dev_info(&instance->pdev->dev,
-			"FW restarted successfully from %s!\n",
-			__func__);
+		if (instance->adapter_type >= INVADER_SERIES) {
+			status_reg = instance->instancet->read_fw_status_reg(
+					instance->reg_set);
+			do_adp_reset = status_reg & MFI_RESET_ADAPTER;
+		}
 
-		/*waitting for about 30 second before retry*/
-		ssleep(30);
+		if (do_adp_reset) {
+			atomic_set(&instance->fw_reset_no_pci_access, 1);
+			instance->instancet->adp_reset
+				(instance, instance->reg_set);
+			atomic_set(&instance->fw_reset_no_pci_access, 0);
+			dev_info(&instance->pdev->dev,
+				 "FW restarted successfully from %s!\n",
+				 __func__);
 
-		if (megasas_transition_to_ready(instance, 0))
+			/* waiting for about 30 seconds before retry */
+			ssleep(30);
+
+			if (megasas_transition_to_ready(instance, 0))
+				goto fail_ready_state;
+		} else {
 			goto fail_ready_state;
+		}
 	}
 
 	megasas_init_ctrl_params(instance);
@@ -5325,12 +5336,29 @@ static int megasas_init_fw(struct megasas_instance *instance)
 				instance->msix_vectors = (scratch_pad_2
 					& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
 				fw_msix_count = instance->msix_vectors;
-			} else { /* Invader series supports more than 8 MSI-x vectors*/
+			} else {
 				instance->msix_vectors = ((scratch_pad_2
 					& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
 					>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
-				if (instance->msix_vectors > 16)
-					instance->msix_combined = true;
+
+				/*
+				 * For Invader series, > 8 MSI-x vectors
+				 * supported by FW/HW implies combined
+				 * reply queue mode is enabled.
+				 * For Ventura series, > 16 MSI-x vectors
+				 * supported by FW/HW implies combined
+				 * reply queue mode is enabled.
+				 */
+				switch (instance->adapter_type) {
+				case INVADER_SERIES:
+					if (instance->msix_vectors > 8)
+						instance->msix_combined = true;
+					break;
+				case VENTURA_SERIES:
+					if (instance->msix_vectors > 16)
+						instance->msix_combined = true;
+					break;
+				}
 
 				if (rdpq_enable)
 					instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
@@ -6028,13 +6056,13 @@ static int megasas_io_attach(struct megasas_instance *instance)
  * @instance:		Adapter soft state
  * Description:
  *
- * For Ventura, driver/FW will operate in 64bit DMA addresses.
+ * For Ventura, driver/FW will operate in 63bit DMA addresses.
  *
  * For invader-
  *	By default, driver/FW will operate in 32bit DMA addresses
  *	for consistent DMA mapping but if 32 bit consistent
- *	DMA mask fails, driver will try with 64 bit consistent
- *	mask provided FW is true 64bit DMA capable
+ *	DMA mask fails, driver will try with 63 bit consistent
+ *	mask provided FW is true 63bit DMA capable
  *
  * For older controllers(Thunderbolt and MFI based adapters)-
  *	driver/FW will operate in 32 bit consistent DMA addresses.
@@ -6047,15 +6075,15 @@ megasas_set_dma_mask(struct megasas_instance *instance)
 	u32 scratch_pad_2;
 
 	pdev = instance->pdev;
-	consistent_mask = (instance->adapter_type == VENTURA_SERIES) ?
-				DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
+	consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
+				DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
 
 	if (IS_DMA64) {
-		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
 		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 			goto fail_set_dma_mask;
 
-		if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) &&
+		if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
 		    (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
 		     dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
 			/*
@@ -6068,7 +6096,7 @@ megasas_set_dma_mask(struct megasas_instance *instance)
 			if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
 				goto fail_set_dma_mask;
 			else if (dma_set_mask_and_coherent(&pdev->dev,
-							   DMA_BIT_MASK(64)))
+							   DMA_BIT_MASK(63)))
 				goto fail_set_dma_mask;
 		}
 	} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
@@ -6080,8 +6108,8 @@ megasas_set_dma_mask(struct megasas_instance *instance)
 		instance->consistent_mask_64bit = true;
 
 	dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
-		 ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"),
-		 (instance->consistent_mask_64bit ? "64" : "32"));
+		 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
+		 (instance->consistent_mask_64bit ? "63" : "32"));
 
 	return 0;
 
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index f8f4d3ea..15d493f3 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2191,6 +2191,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 	dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
 	    vha->gnl.ldma);
 
+	vha->gnl.l = NULL;
+
 	vfree(vha->scan.l);
 
 	if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 1f1a05a..34ff4bbc 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -3360,15 +3360,15 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
 	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
 	sp->done = qla24xx_async_gpsc_sp_done;
 
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
-
 	ql_dbg(ql_dbg_disc, vha, 0x205e,
 	    "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
 	    sp->name, fcport->port_name, sp->handle,
 	    fcport->loop_id, fcport->d_id.b.domain,
 	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
 	return rval;
 
 done_free_sp:
@@ -3729,13 +3729,14 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
 	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
 	sp->done = qla2x00_async_gpnid_sp_done;
 
+	ql_dbg(ql_dbg_disc, vha, 0x2067,
+	    "Async-%s hdl=%x ID %3phC.\n", sp->name,
+	    sp->handle, ct_req->req.port_id.port_id);
+
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS)
 		goto done_free_sp;
 
-	ql_dbg(ql_dbg_disc, vha, 0x2067,
-	    "Async-%s hdl=%x ID %3phC.\n", sp->name,
-	    sp->handle, ct_req->req.port_id.port_id);
 	return rval;
 
 done_free_sp:
@@ -4044,6 +4045,41 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
 	}
 }
 
+static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
+    srb_t *sp, int cmd)
+{
+	struct qla_work_evt *e;
+
+	if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
+		return QLA_PARAMETER_ERROR;
+
+	e = qla2x00_alloc_work(vha, cmd);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.iosb.sp = sp;
+
+	return qla2x00_post_work(vha, e);
+}
+
+static int qla2x00_post_nvme_gpnft_done_work(struct scsi_qla_host *vha,
+    srb_t *sp, int cmd)
+{
+	struct qla_work_evt *e;
+
+	if (cmd != QLA_EVT_GPNFT)
+		return QLA_PARAMETER_ERROR;
+
+	e = qla2x00_alloc_work(vha, cmd);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.gpnft.fc4_type = FC4_TYPE_NVME;
+	e->u.gpnft.sp = sp;
+
+	return qla2x00_post_work(vha, e);
+}
+
 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
 	struct srb *sp)
 {
@@ -4144,22 +4180,36 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
 {
 	struct srb *sp = s;
 	struct scsi_qla_host *vha = sp->vha;
-	struct qla_work_evt *e;
 	struct ct_sns_req *ct_req =
 		(struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
 	u16 cmd = be16_to_cpu(ct_req->command);
 	u8 fc4_type = sp->gen2;
 	unsigned long flags;
+	int rc;
 
 	/* gen2 field is holding the fc4type */
 	ql_dbg(ql_dbg_disc, vha, 0xffff,
 	    "Async done-%s res %x FC4Type %x\n",
 	    sp->name, res, sp->gen2);
 
+	sp->rc = res;
 	if (res) {
 		unsigned long flags;
+		const char *name = sp->name;
 
-		sp->free(sp);
+		/*
+		 * We are in an interrupt context; queue up this
+		 * sp for GNNFT_DONE work so that all of its
+		 * resources can be freed.
+		 */
+		rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+		    QLA_EVT_GNNFT_DONE);
+		if (rc) {
+			/* Cleanup here to prevent memory leak */
+			qla24xx_sp_unmap(vha, sp);
+			sp->free(sp);
+		}
+
 		spin_lock_irqsave(&vha->work_lock, flags);
 		vha->scan.scan_flags &= ~SF_SCANNING;
 		vha->scan.scan_retry++;
@@ -4170,9 +4220,9 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
 			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 			qla2xxx_wake_dpc(vha);
 		} else {
-			ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
-			    "Async done-%s rescan failed on all retries\n",
-			    sp->name);
+			ql_dbg(ql_dbg_disc, vha, 0xffff,
+			    "Async done-%s rescan failed on all retries.\n",
+			    name);
 		}
 		return;
 	}
@@ -4187,77 +4237,31 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
 		vha->scan.scan_flags &= ~SF_SCANNING;
 		spin_unlock_irqrestore(&vha->work_lock, flags);
 
-		e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT);
-		if (!e) {
-			/*
-			 * please ignore kernel warning. Otherwise,
-			 * we have mem leak.
-			 */
-			if (sp->u.iocb_cmd.u.ctarg.req) {
-				dma_free_coherent(&vha->hw->pdev->dev,
-				    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
-				    sp->u.iocb_cmd.u.ctarg.req,
-				    sp->u.iocb_cmd.u.ctarg.req_dma);
-				sp->u.iocb_cmd.u.ctarg.req = NULL;
-			}
-			if (sp->u.iocb_cmd.u.ctarg.rsp) {
-				dma_free_coherent(&vha->hw->pdev->dev,
-				    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
-				    sp->u.iocb_cmd.u.ctarg.rsp,
-				    sp->u.iocb_cmd.u.ctarg.rsp_dma);
-				sp->u.iocb_cmd.u.ctarg.rsp = NULL;
-			}
-
-			ql_dbg(ql_dbg_disc, vha, 0xffff,
-			    "Async done-%s unable to alloc work element\n",
-			    sp->name);
-			sp->free(sp);
+		sp->rc = res;
+		rc = qla2x00_post_nvme_gpnft_done_work(vha, sp, QLA_EVT_GPNFT);
+		if (!rc) {
+			qla24xx_sp_unmap(vha, sp);
 			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
 			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 			return;
 		}
-		e->u.gpnft.fc4_type = FC4_TYPE_NVME;
-		sp->rc = res;
-		e->u.gpnft.sp = sp;
-
-		qla2x00_post_work(vha, e);
-		return;
 	}
 
-	if (cmd == GPN_FT_CMD)
-		e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT_DONE);
-	else
-		e = qla2x00_alloc_work(vha, QLA_EVT_GNNFT_DONE);
-	if (!e) {
-		/* please ignore kernel warning. Otherwise, we have mem leak. */
-		if (sp->u.iocb_cmd.u.ctarg.req) {
-			dma_free_coherent(&vha->hw->pdev->dev,
-			    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
-			    sp->u.iocb_cmd.u.ctarg.req,
-			    sp->u.iocb_cmd.u.ctarg.req_dma);
-			sp->u.iocb_cmd.u.ctarg.req = NULL;
-		}
-		if (sp->u.iocb_cmd.u.ctarg.rsp) {
-			dma_free_coherent(&vha->hw->pdev->dev,
-			    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
-			    sp->u.iocb_cmd.u.ctarg.rsp,
-			    sp->u.iocb_cmd.u.ctarg.rsp_dma);
-			sp->u.iocb_cmd.u.ctarg.rsp = NULL;
-		}
+	if (cmd == GPN_FT_CMD) {
+		del_timer(&sp->u.iocb_cmd.timer);
+		rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+		    QLA_EVT_GPNFT_DONE);
+	} else {
+		rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+		    QLA_EVT_GNNFT_DONE);
+	}
 
-		ql_dbg(ql_dbg_disc, vha, 0xffff,
-		    "Async done-%s unable to alloc work element\n",
-		    sp->name);
-		sp->free(sp);
+	if (rc) {
+		qla24xx_sp_unmap(vha, sp);
 		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
 		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 		return;
 	}
-
-	sp->rc = res;
-	e->u.iosb.sp = sp;
-
-	qla2x00_post_work(vha, e);
 }
 
 /*
@@ -4356,7 +4360,6 @@ void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
 {
 	ql_dbg(ql_dbg_disc, vha, 0xffff,
 	    "%s enter\n", __func__);
-	del_timer(&sp->u.iocb_cmd.timer);
 	qla24xx_async_gnnft(vha, sp, sp->gen2);
 }
 
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f84f9bf..bee9cfb 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -54,7 +54,7 @@ qla2x00_sp_timeout(struct timer_list *t)
 	unsigned long flags;
 	struct qla_hw_data *ha = sp->vha->hw;
 
-	WARN_ON(irqs_disabled());
+	WARN_ON_ONCE(irqs_disabled());
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	req = sp->qpair->req;
 	req->outstanding_cmds[sp->handle] = NULL;
@@ -216,8 +216,13 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
 	struct srb_iocb *lio;
 	int rval = QLA_FUNCTION_FAILED;
 
-	if (!vha->flags.online)
-		goto done;
+	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
+	    fcport->loop_id == FC_NO_LOOP_ID) {
+		ql_log(ql_log_warn, vha, 0xffff,
+		    "%s: %8phC - not sending command.\n",
+		    __func__, fcport->port_name);
+		return rval;
+	}
 
 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
@@ -247,6 +252,12 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
 
 	}
 
+	ql_dbg(ql_dbg_disc, vha, 0x2072,
+	    "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
+		"retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
+	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+	    fcport->login_retry);
+
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS) {
 		fcport->flags |= FCF_LOGIN_NEEDED;
@@ -254,11 +265,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
 		goto done_free_sp;
 	}
 
-	ql_dbg(ql_dbg_disc, vha, 0x2072,
-	    "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
-		"retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
-	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
-	    fcport->login_retry);
 	return rval;
 
 done_free_sp:
@@ -303,15 +309,16 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
 
 	sp->done = qla2x00_async_logout_sp_done;
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
 
 	ql_dbg(ql_dbg_disc, vha, 0x2070,
 	    "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
 	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
 		fcport->d_id.b.area, fcport->d_id.b.al_pa,
 		fcport->port_name);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
 	return rval;
 
 done_free_sp:
@@ -489,13 +496,15 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
 	sp->done = qla2x00_async_adisc_sp_done;
 	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
 		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
 
 	ql_dbg(ql_dbg_disc, vha, 0x206f,
 	    "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
 	    sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
 	return rval;
 
 done_free_sp:
@@ -792,6 +801,9 @@ qla24xx_async_gnl_sp_done(void *s, int res)
 	    sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
 	    sp->u.iocb_cmd.u.mbx.in_mb[2]);
 
+	if (res == QLA_FUNCTION_TIMEOUT)
+		return;
+
 	memset(&ea, 0, sizeof(ea));
 	ea.sp = sp;
 	ea.rc = res;
@@ -975,17 +987,13 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
 	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
 	    sp->name, res, fcport->port_name, mb[1], mb[2]);
 
-	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-
-	if (res == QLA_FUNCTION_TIMEOUT)
-		return;
-
 	if (res == QLA_FUNCTION_TIMEOUT) {
 		dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
 			sp->u.iocb_cmd.u.mbx.in_dma);
 		return;
 	}
 
+	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
 	memset(&ea, 0, sizeof(ea));
 	ea.event = FCME_GPDB_DONE;
 	ea.fcport = fcport;
@@ -1120,8 +1128,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
 	struct port_database_24xx *pd;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
+	    fcport->loop_id == FC_NO_LOOP_ID) {
+		ql_log(ql_log_warn, vha, 0xffff,
+		    "%s: %8phC - not sending command.\n",
+		    __func__, fcport->port_name);
 		return rval;
+	}
 
 	fcport->disc_state = DSC_GPDB;
 
@@ -1161,14 +1174,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
 
 	sp->done = qla24xx_async_gpdb_sp_done;
 
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
-
 	ql_dbg(ql_dbg_disc, vha, 0x20dc,
 	    "Async-%s %8phC hndl %x opt %x\n",
 	    sp->name, fcport->port_name, sp->handle, opt);
 
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
 	return rval;
 
 done_free_sp:
@@ -1698,15 +1710,14 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 	tm_iocb->u.tmf.data = tag;
 	sp->done = qla2x00_tmf_sp_done;
 
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
-
 	ql_dbg(ql_dbg_taskm, vha, 0x802f,
 	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
 	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
 	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
 
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
 	wait_for_completion(&tm_iocb->u.tmf.comp);
 
 	rval = tm_iocb->u.tmf.data;
@@ -1790,14 +1801,14 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
 
 	sp->done = qla24xx_abort_sp_done;
 
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
-
 	ql_dbg(ql_dbg_async, vha, 0x507c,
 	    "Abort command issued - hdl=%x, target_id=%x\n",
 	    cmd_sp->handle, fcport->tgt_id);
 
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
 	if (wait) {
 		wait_for_completion(&abt_iocb->u.abt.comp);
 		rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
@@ -1903,8 +1914,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
 		return;
 	}
 
-	if (fcport->disc_state == DSC_DELETE_PEND)
+	if ((fcport->disc_state == DSC_DELETE_PEND) ||
+	    (fcport->disc_state == DSC_DELETED)) {
+		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 		return;
+	}
 
 	if (ea->sp->gen2 != fcport->login_gen) {
 		/* target side must have changed it. */
@@ -4732,7 +4746,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
 		ql_log(ql_log_warn, vha, 0xd049,
 		    "Failed to allocate ct_sns request.\n");
 		kfree(fcport);
-		fcport = NULL;
+		return NULL;
 	}
 	INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
 	INIT_LIST_HEAD(&fcport->gnl_entry);
@@ -6556,8 +6570,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 	}
 
 	/* Clear all async request states across all VPs. */
-	list_for_each_entry(fcport, &vha->vp_fcports, list)
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
 		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+		fcport->scan_state = 0;
+	}
 	spin_lock_irqsave(&ha->vport_slock, flags);
 	list_for_each_entry(vp, &ha->vp_list, list) {
 		atomic_inc(&vp->vref_count);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 42b8f0d..60b6019 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3395,6 +3395,12 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	return 0;
 
 probe_failed:
+	if (base_vha->gnl.l) {
+		dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
+				base_vha->gnl.l, base_vha->gnl.ldma);
+		base_vha->gnl.l = NULL;
+	}
+
 	if (base_vha->timer_active)
 		qla2x00_stop_timer(base_vha);
 	base_vha->flags.online = 0;
@@ -3624,7 +3630,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
 	if (!atomic_read(&pdev->enable_cnt)) {
 		dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
 		    base_vha->gnl.l, base_vha->gnl.ldma);
-
+		base_vha->gnl.l = NULL;
 		scsi_host_put(base_vha->host);
 		kfree(ha);
 		pci_set_drvdata(pdev, NULL);
@@ -3663,6 +3669,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
 	dma_free_coherent(&ha->pdev->dev,
 		base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
 
+	base_vha->gnl.l = NULL;
+
 	vfree(base_vha->scan.l);
 
 	if (IS_QLAFX00(ha))
@@ -4602,6 +4610,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
 		    "Alloc failed for scan database.\n");
 		dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
 		    vha->gnl.l, vha->gnl.ldma);
+		vha->gnl.l = NULL;
 		scsi_remove_host(vha->host);
 		return NULL;
 	}
@@ -4855,6 +4864,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
 	if (fcport) {
 		fcport->id_changed = 1;
 		fcport->scan_state = QLA_FCPORT_FOUND;
+		fcport->chip_reset = vha->hw->base_qpair->chip_reset;
 		memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
 
 		if (pla) {
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 9d7feb0..d6dc320 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1023,6 +1023,7 @@ void qlt_free_session_done(struct work_struct *work)
 
 	if (logout_started) {
 		bool traced = false;
+		u16 cnt = 0;
 
 		while (!READ_ONCE(sess->logout_completed)) {
 			if (!traced) {
@@ -1032,6 +1033,9 @@ void qlt_free_session_done(struct work_struct *work)
 				traced = true;
 			}
 			msleep(100);
+			cnt++;
+			if (cnt > 200)
+				break;
 		}
 
 		ql_dbg(ql_dbg_disc, vha, 0xf087,
@@ -1216,7 +1220,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
 		sess->logout_on_delete = 0;
 		sess->logo_ack_needed = 0;
 		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
-		sess->scan_state = 0;
 	}
 }
 
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index b7a8fdfe..e731af5 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -970,6 +970,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
 	ses->sdb = scmd->sdb;
 	ses->next_rq = scmd->request->next_rq;
 	ses->result = scmd->result;
+	ses->resid_len = scmd->req.resid_len;
 	ses->underflow = scmd->underflow;
 	ses->prot_op = scmd->prot_op;
 	ses->eh_eflags = scmd->eh_eflags;
@@ -981,6 +982,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
 	memset(&scmd->sdb, 0, sizeof(scmd->sdb));
 	scmd->request->next_rq = NULL;
 	scmd->result = 0;
+	scmd->req.resid_len = 0;
 
 	if (sense_bytes) {
 		scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
@@ -1034,6 +1036,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
 	scmd->sdb = ses->sdb;
 	scmd->request->next_rq = ses->next_rq;
 	scmd->result = ses->result;
+	scmd->req.resid_len = ses->resid_len;
 	scmd->underflow = ses->underflow;
 	scmd->prot_op = ses->prot_op;
 	scmd->eh_eflags = ses->eh_eflags;
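
The scsi_error.c change carries resid_len through the error-handler save area using the usual save, zero, restore sequence, so a stale residual from the failed command cannot leak into or out of the sense request. A minimal sketch of that three-step pattern (the structs stand in for scsi_cmnd and scsi_eh_save; nothing here is the real API):

struct cmd_state { unsigned int resid_len; };
struct save_area { unsigned int resid_len; };

static void eh_prep(struct cmd_state *s, struct save_area *sv)
{
	sv->resid_len = s->resid_len;	/* stash the live value */
	s->resid_len = 0;		/* start the EH command clean */
}

static void eh_restore(struct cmd_state *s, const struct save_area *sv)
{
	s->resid_len = sv->resid_len;	/* put the original value back */
}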
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 648c717..af349d1 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1256,6 +1256,18 @@ static void scsi_initialize_rq(struct request *rq)
 	cmd->retries = 0;
 }
 
+/*
+ * Only called when the request isn't completed or freed by the SCSI
+ * layer.
+ */
+static void scsi_cleanup_rq(struct request *rq)
+{
+	if (rq->rq_flags & RQF_DONTPREP) {
+		scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
+		rq->rq_flags &= ~RQF_DONTPREP;
+	}
+}
+
 /* Add a command to the list used by the aacraid and dpt_i2o drivers */
 void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
 {
@@ -2345,6 +2357,7 @@ static const struct blk_mq_ops scsi_mq_ops = {
 	.init_request	= scsi_mq_init_request,
 	.exit_request	= scsi_mq_exit_request,
 	.initialize_rq_fn = scsi_initialize_rq,
+	.cleanup_rq	= scsi_cleanup_rq,
 	.map_queues	= scsi_map_queues,
 };
 
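scsi_cleanup_rq() gives blk-mq a hook to undo driver-side preparation when a request is torn down without ever being completed by SCSI; testing and clearing RQF_DONTPREP makes the cleanup idempotent. A small sketch of the same check-then-clear idiom (names are illustrative, not blk-mq's API):

#include <stdbool.h>
#include <stdlib.h>

/* Illustrative request: "prepared" stands in for RQF_DONTPREP and
 * prep_data for driver-private state created during prep. */
struct req {
	bool prepared;
	void *prep_data;
};

static void req_cleanup(struct req *rq)
{
	if (rq->prepared) {		/* only undo what prep really did */
		free(rq->prep_data);	/* release driver-side state */
		rq->prep_data = NULL;
		rq->prepared = false;	/* a second call becomes a no-op */
	}
}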
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index bd70339..03d9855 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -16,57 +16,15 @@
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_dbg.h>
 
-#define SCSI_LOG_SPOOLSIZE 4096
-
-#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG
-#warning SCSI logging bitmask too large
-#endif
-
-struct scsi_log_buf {
-	char buffer[SCSI_LOG_SPOOLSIZE];
-	unsigned long map;
-};
-
-static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log);
-
 static char *scsi_log_reserve_buffer(size_t *len)
 {
-	struct scsi_log_buf *buf;
-	unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE;
-	unsigned long idx = 0;
-
-	preempt_disable();
-	buf = this_cpu_ptr(&scsi_format_log);
-	idx = find_first_zero_bit(&buf->map, map_bits);
-	if (likely(idx < map_bits)) {
-		while (test_and_set_bit(idx, &buf->map)) {
-			idx = find_next_zero_bit(&buf->map, map_bits, idx);
-			if (idx >= map_bits)
-				break;
-		}
-	}
-	if (WARN_ON(idx >= map_bits)) {
-		preempt_enable();
-		return NULL;
-	}
-	*len = SCSI_LOG_BUFSIZE;
-	return buf->buffer + idx * SCSI_LOG_BUFSIZE;
+	*len = 128;
+	return kmalloc(*len, GFP_ATOMIC);
 }
 
 static void scsi_log_release_buffer(char *bufptr)
 {
-	struct scsi_log_buf *buf;
-	unsigned long idx;
-	int ret;
-
-	buf = this_cpu_ptr(&scsi_format_log);
-	if (bufptr >= buf->buffer &&
-	    bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) {
-		idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE;
-		ret = test_and_clear_bit(idx, &buf->map);
-		WARN_ON(!ret);
-	}
-	preempt_enable();
+	kfree(bufptr);
 }
 
 static inline const char *scmd_name(const struct scsi_cmnd *scmd)
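The logging rework trades the per-CPU bitmap pool for a plain kmalloc(GFP_ATOMIC), which can fail under memory pressure, so callers must tolerate a NULL buffer just as they had to when the old pool was exhausted. A userspace sketch of the simplified reserve/release pair and a NULL-tolerant caller (assumed shapes, not the real call sites):

#include <stdio.h>
#include <stdlib.h>

static char *log_reserve(size_t *len)
{
	*len = 128;
	return malloc(*len);	/* kmalloc(*len, GFP_ATOMIC) in the driver */
}

static void log_release(char *buf)
{
	free(buf);		/* kfree(); NULL is a harmless no-op */
}

static void log_event(const char *msg)
{
	size_t len;
	char *buf = log_reserve(&len);

	if (!buf)
		return;		/* drop the message under memory pressure */
	snprintf(buf, len, "scsi: %s", msg);
	puts(buf);
	log_release(buf);
}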
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 45e771d..8856b16 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -723,6 +723,14 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
 		  const char *buf, size_t count)
 {
 	struct kernfs_node *kn;
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	/*
+	 * Take a reference on the device (and its module) so the module
+	 * cannot be removed while the delete is in progress.
+	 */
+	if (scsi_device_get(sdev))
+		return -ENODEV;
 
 	kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
 	WARN_ON_ONCE(!kn);
@@ -737,9 +745,10 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
 	 * state into SDEV_DEL.
 	 */
 	device_remove_file(dev, attr);
-	scsi_remove_device(to_scsi_device(dev));
+	scsi_remove_device(sdev);
 	if (kn)
 		sysfs_unbreak_active_protection(kn);
+	scsi_device_put(sdev);
 	return count;
 };
 static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
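sdev_store_delete() now pins the scsi_device, and with it the owning module, before starting the removal, and every exit path drops the reference. A toy sketch of the get/put bracket (the refcounting here is illustrative, not the SCSI midlayer's):

#include <errno.h>
#include <stdbool.h>

/* Toy device standing in for scsi_device_get()/scsi_device_put(). */
struct dev { int refs; bool deleted; };

static int dev_get(struct dev *d)
{
	if (d->deleted)
		return -ENODEV;	/* too late, device already going away */
	d->refs++;
	return 0;
}

static void dev_put(struct dev *d) { d->refs--; }

static int delete_store(struct dev *d)
{
	if (dev_get(d))		/* pin device (and owning module) first */
		return -ENODEV;
	d->deleted = true;	/* the destructive step runs safely */
	dev_put(d);		/* drop the pin we took */
	return 0;
}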
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e102edf..745ebb0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1572,7 +1572,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
 		/* we need to evaluate the error return  */
 		if (scsi_sense_valid(sshdr) &&
 			(sshdr->asc == 0x3a ||	/* medium not present */
-			 sshdr->asc == 0x20))	/* invalid command */
+			 sshdr->asc == 0x20 ||	/* invalid command */
+			 (sshdr->asc == 0x74 && sshdr->ascq == 0x71)))	/* drive is password locked */
 				/* this is no error here */
 				return 0;
 
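sd_sync_cache() now also treats ASC 0x74 / ASCQ 0x71 (drive password locked) as a benign outcome, alongside medium-not-present and invalid-command. The filter condenses to a small predicate, sketched here with the values taken from the hunk:

#include <stdbool.h>
#include <stdint.h>

/* 0x3a = medium not present, 0x20 = invalid command,
 * 0x74/0x71 = drive is password locked. */
static bool sync_cache_error_is_benign(uint8_t asc, uint8_t ascq)
{
	return asc == 0x3a || asc == 0x20 ||
	       (asc == 0x74 && ascq == 0x71);
}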
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 85db36c3..2e5f185 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -93,7 +93,7 @@
 	depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
 	select PHY_QCOM_UFS
 	select EXTCON
-	select EXTCON_GPIO
+	select EXTCON_STORAGE_CD_GPIO
 	help
 	  This selects the QCOM specific additions to UFSHCD platform driver.
 	  UFS host on QCOM needs some vendor specific configuration before
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 2ce111e..63918b0 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -382,7 +382,8 @@ static void ufs_qcom_force_mem_config(struct ufs_hba *hba)
 	 * non-ICE RAMs of host controller.
 	 */
 	list_for_each_entry(clki, &hba->clk_list_head, list) {
-		if (!strcmp(clki->name, "core_clk_ice"))
+		if (!strcmp(clki->name, "core_clk_ice") ||
+			!strcmp(clki->name, "core_clk_ice_hw_ctl"))
 			clk_set_flags(clki->clk, CLKFLAG_RETAIN_MEM);
 		else
 			clk_set_flags(clki->clk, CLKFLAG_NORETAIN_MEM);
@@ -772,13 +773,14 @@ static int ufs_qcom_config_vreg(struct device *dev,
 		ret = regulator_set_load(vreg->reg, uA_load);
 		if (ret)
 			goto out;
-
-		min_uV = on ? vreg->min_uV : 0;
-		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
-		if (ret) {
-			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+		if (vreg->min_uV && vreg->max_uV) {
+			min_uV = on ? vreg->min_uV : 0;
+			ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+			if (ret) {
+				dev_err(dev, "%s: %s failed, err=%d\n",
 					__func__, vreg->name, ret);
-			goto out;
+				goto out;
+			}
 		}
 	}
 out:
@@ -845,6 +847,10 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		if (host->vddp_ref_clk && ufs_qcom_is_link_off(hba))
 			ret = ufs_qcom_disable_vreg(hba->dev,
 					host->vddp_ref_clk);
+		if (host->vccq_parent && !hba->auto_bkops_enabled)
+			ufs_qcom_config_vreg(hba->dev,
+					host->vccq_parent, false);
+
 		ufs_qcom_ice_suspend(host);
 		if (ufs_qcom_is_link_off(hba)) {
 			/* Assert PHY soft reset */
@@ -878,6 +884,8 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 				   hba->spm_lvl > UFS_PM_LVL_3))
 		ufs_qcom_enable_vreg(hba->dev,
 				      host->vddp_ref_clk);
+	if (host->vccq_parent)
+		ufs_qcom_config_vreg(hba->dev, host->vccq_parent, true);
 
 	err = ufs_qcom_enable_lane_clks(host);
 	if (err)
@@ -1394,6 +1402,9 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 	int ret = 0;
 	int res = 0;
 
+	hba->ufs_stats.clk_hold.ctx = PWR_CHG_NOTIFY;
+	ufshcd_hold(hba, false);
+
 	if (!dev_req_params) {
 		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
 		ret = -EINVAL;
@@ -1484,6 +1495,8 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 		break;
 	}
 out:
+	hba->ufs_stats.clk_rel.ctx = PWR_CHG_NOTIFY;
+	ufshcd_release(hba, false);
 	return ret;
 }
 
@@ -1609,6 +1622,8 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	int err = 0;
+	struct list_head *head = &hba->clk_list_head;
+	struct ufs_clk_info *clki;
 
 	/*
 	 * In case ufs_qcom_init() is not yet done, simply ignore.
@@ -1650,6 +1665,31 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
 				host->is_phy_pwr_on = false;
 			}
 		}
+
+		if (list_empty(head))
+			goto out;
+		/*
+		 * As per the latest hardware programming guide, during
+		 * Hibern8 enter with power collapse, SW should disable
+		 * HW clock control for the UFS ICE clock
+		 * (GCC_UFS_ICE_CORE_CBCR.HW_CTL = 0) before
+		 * ufs_ice_core_clk is turned off. In the device tree,
+		 * the UFS ICE clocks must be listed in this fixed order:
+		 * clock-names = "core_clk_ice", "core_clk_ice_hw_ctl";
+		 * This way no extra check is required in the UFS clock
+		 * enable path, as the clock enable order is already
+		 * taken care of in ufshcd_setup_clocks().
+		 */
+		list_for_each_entry(clki, head, list) {
+			if (!IS_ERR_OR_NULL(clki->clk) &&
+				!strcmp(clki->name, "core_clk_ice_hw_ctl")) {
+				clk_disable_unprepare(clki->clk);
+				clki->enabled = on;
+			}
+		}
 	}
 
 out:
@@ -2117,7 +2157,10 @@ static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name,
 	if (ret) {
 		dev_dbg(dev, "%s: unable to find %s err %d, using default\n",
 			__func__, prop_name, ret);
-		vreg->min_uV = VDDP_REF_CLK_MIN_UV;
+		if (!strcmp(name, "qcom,vddp-ref-clk"))
+			vreg->min_uV = VDDP_REF_CLK_MIN_UV;
+		else if (!strcmp(name, "qcom,vccq-parent"))
+			vreg->min_uV = 0;
 		ret = 0;
 	}
 
@@ -2126,7 +2169,10 @@ static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name,
 	if (ret) {
 		dev_dbg(dev, "%s: unable to find %s err %d, using default\n",
 			__func__, prop_name, ret);
-		vreg->max_uV = VDDP_REF_CLK_MAX_UV;
+		if (!strcmp(name, "qcom,vddp-ref-clk"))
+			vreg->max_uV = VDDP_REF_CLK_MAX_UV;
+		else if (!strcmp(name, "qcom,vccq-parent"))
+			vreg->max_uV = 0;
 		ret = 0;
 	}
 
@@ -2284,9 +2330,20 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 		}
 	}
 
+	err = ufs_qcom_parse_reg_info(host, "qcom,vccq-parent",
+				      &host->vccq_parent);
+	if (host->vccq_parent) {
+		err = ufs_qcom_config_vreg(hba->dev, host->vccq_parent, true);
+		if (err) {
+			dev_err(dev, "%s: failed vccq-parent set load: %d\n",
+				__func__, err);
+			goto out_disable_vddp;
+		}
+	}
+
 	err = ufs_qcom_init_lane_clks(host);
 	if (err)
-		goto out_disable_vddp;
+		goto out_set_load_vccq_parent;
 
 	ufs_qcom_parse_lpm(host);
 	if (host->disable_lpm)
@@ -2311,6 +2368,9 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 
 	goto out;
 
+out_set_load_vccq_parent:
+	if (host->vccq_parent)
+		ufs_qcom_config_vreg(hba->dev, host->vccq_parent, false);
 out_disable_vddp:
 	if (host->vddp_ref_clk)
 		ufs_qcom_disable_vreg(dev, host->vddp_ref_clk);
@@ -2775,6 +2835,11 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep)
 	ufs_qcom_ice_print_regs(host);
 }
 
+static u32 ufs_qcom_get_user_cap_mode(struct ufs_hba *hba)
+{
+	return UFS_WB_BUFF_PRESERVE_USER_SPACE;
+}
+
 /**
  * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
  *
@@ -2801,6 +2866,7 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
 #ifdef CONFIG_DEBUG_FS
 	.add_debugfs		= ufs_qcom_dbg_add_debugfs,
 #endif
+	.get_user_cap_mode	= ufs_qcom_get_user_cap_mode,
 };
 
 static struct ufs_hba_crypto_variant_ops ufs_hba_crypto_variant_ops = {
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 011697a5..9197742 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -386,6 +386,7 @@ struct ufs_qcom_host {
 	bool is_ice_cfg_work_set;
 	struct request *req_pending;
 	struct ufs_vreg *vddp_ref_clk;
+	struct ufs_vreg *vccq_parent;
 	bool work_pending;
 	bool is_phy_pwr_on;
 };
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 4da79b9..371f160 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -137,7 +137,7 @@ enum desc_header_offset {
 };
 
 enum ufs_desc_def_size {
-	QUERY_DESC_DEVICE_DEF_SIZE		= 0x40,
+	QUERY_DESC_DEVICE_DEF_SIZE		= 0x59,
 	QUERY_DESC_CONFIGURATION_DEF_SIZE	= 0x90,
 	QUERY_DESC_UNIT_DEF_SIZE		= 0x23,
 	QUERY_DESC_INTERCONNECT_DEF_SIZE	= 0x06,
@@ -204,6 +204,7 @@ enum device_desc_param {
 	DEVICE_DESC_PARAM_PSA_MAX_DATA		= 0x25,
 	DEVICE_DESC_PARAM_PSA_TMT		= 0x29,
 	DEVICE_DESC_PARAM_PRDCT_REV		= 0x2A,
+	DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP	= 0x4F,
 };
 
 /* Interconnect descriptor parameters offsets in bytes*/
@@ -374,6 +375,38 @@ enum ufs_dev_pwr_mode {
 	UFS_POWERDOWN_PWR_MODE	= 3,
 };
 
+enum ufs_dev_wb_buf_avail_size {
+	UFS_WB_0_PERCENT_BUF_REMAIN = 0x0,
+	UFS_WB_10_PERCENT_BUF_REMAIN = 0x1,
+	UFS_WB_20_PERCENT_BUF_REMAIN = 0x2,
+	UFS_WB_30_PERCENT_BUF_REMAIN = 0x3,
+	UFS_WB_40_PERCENT_BUF_REMAIN = 0x4,
+	UFS_WB_50_PERCENT_BUF_REMAIN = 0x5,
+	UFS_WB_60_PERCENT_BUF_REMAIN = 0x6,
+	UFS_WB_70_PERCENT_BUF_REMAIN = 0x7,
+	UFS_WB_80_PERCENT_BUF_REMAIN = 0x8,
+	UFS_WB_90_PERCENT_BUF_REMAIN = 0x9,
+	UFS_WB_100_PERCENT_BUF_REMAIN = 0xA,
+};
+
+enum ufs_dev_wb_buf_life_time_est {
+	UFS_WB_0_10_PERCENT_USED = 0x1,
+	UFS_WB_10_20_PERCENT_USED = 0x2,
+	UFS_WB_20_30_PERCENT_USED = 0x3,
+	UFS_WB_30_40_PERCENT_USED = 0x4,
+	UFS_WB_40_50_PERCENT_USED = 0x5,
+	UFS_WB_50_60_PERCENT_USED = 0x6,
+	UFS_WB_60_70_PERCENT_USED = 0x7,
+	UFS_WB_70_80_PERCENT_USED = 0x8,
+	UFS_WB_80_90_PERCENT_USED = 0x9,
+	UFS_WB_90_100_PERCENT_USED = 0xA,
+	UFS_WB_MAX_USED = 0xB,
+};
+
+enum ufs_dev_wb_buf_user_cap_config {
+	UFS_WB_BUFF_PRESERVE_USER_SPACE = 1,
+	UFS_WB_BUFF_USER_SPACE_RED_EN = 2,
+};
+
 /**
  * struct utp_upiu_header - UPIU header structure
  * @dword_0: UPIU header DW-0
@@ -535,6 +568,7 @@ struct ufs_vreg {
 	int max_uV;
 	bool low_voltage_sup;
 	bool low_voltage_active;
+	bool sys_suspend_pwr_off;
 	int min_uA;
 	int max_uA;
 };
@@ -554,12 +588,18 @@ enum {
 	UFS_DEV_REMOVABLE_NON_BOOTABLE	= 0x03,
 };
 
+/* Possible values for dExtendedUFSFeaturesSupport */
+enum {
+	UFS_DEV_WRITE_BOOSTER_SUP	= BIT(8),
+};
+
 struct ufs_dev_info {
 	/* device descriptor info */
 	u8	b_device_sub_class;
 	u16	w_manufacturer_id;
 	u8	i_product_name;
 	u16	w_spec_version;
+	u32	d_ext_ufs_feature_sup;
 
 	/* query flags */
 	bool f_power_on_wp_en;
@@ -571,6 +611,8 @@ struct ufs_dev_info {
 
 	/* Device deviations from standard UFS device spec. */
 	unsigned int quirks;
+
+	bool keep_vcc_on;
 };
 
 #define MAX_MODEL_LEN 16
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index b6f1cf5..60169a3 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,7 @@
 #define UFS_VENDOR_TOSHIBA     0x198
 #define UFS_VENDOR_SAMSUNG     0x1CE
 #define UFS_VENDOR_SKHYNIX     0x1AD
+#define UFS_VENDOR_WDC         0x145
 
 /**
  * ufs_dev_fix - ufs device quirk info
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 382e923..bd0415c 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -217,9 +217,24 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
 	} else if (!strcmp(name, "vccq")) {
 		vreg->min_uV = UFS_VREG_VCCQ_MIN_UV;
 		vreg->max_uV = UFS_VREG_VCCQ_MAX_UV;
+		/*
+		 * Only if the SoC supports turning off the VCCQ or VCCQ2
+		 * power supply during power collapse, set a flag to turn
+		 * off that supply and reduce system power consumption
+		 * during system suspend events. The tradeoffs are:
+		 *   - System resume time increases because the UFS
+		 *     device must fully re-initialize.
+		 *   - UFS device life may be affected by the additional
+		 *     power on/off cycles.
+		 * Weigh the benefits against these tradeoffs carefully.
+		 */
+		if (of_property_read_bool(np, "vccq-pwr-collapse-sup"))
+			vreg->sys_suspend_pwr_off = true;
 	} else if (!strcmp(name, "vccq2")) {
 		vreg->min_uV = UFS_VREG_VCCQ2_MIN_UV;
 		vreg->max_uV = UFS_VREG_VCCQ2_MAX_UV;
+		if (of_property_read_bool(np, "vccq2-pwr-collapse-sup"))
+			vreg->sys_suspend_pwr_off = true;
 	}
 
 	goto out;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index ec0a589..22c2174 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -42,6 +42,7 @@
 #include <linux/nls.h>
 #include <linux/of.h>
 #include <linux/bitfield.h>
+#include <linux/blkdev.h>
 #include <linux/suspend.h>
 #include "ufshcd.h"
 #include "ufs_quirks.h"
@@ -50,6 +51,13 @@
 #include "ufs-debugfs.h"
 #include "ufs-qcom.h"
 
+static bool ufshcd_wb_sup(struct ufs_hba *hba);
+static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
+static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
+static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
+static bool ufshcd_wb_is_buf_flush_needed(struct ufs_hba *hba);
+static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
+
 #ifdef CONFIG_DEBUG_FS
 
 static int ufshcd_tag_req_type(struct request *rq)
@@ -376,23 +384,44 @@ static inline bool ufshcd_is_card_offline(struct ufs_hba *hba)
 	return (atomic_read(&hba->card_state) == UFS_CARD_STATE_OFFLINE);
 }
 
-static bool ufshcd_is_card_present(struct ufs_hba *hba)
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba)
 {
-	if (ufshcd_is_card_online(hba))
-		/*
-		 * TODO: need better way to ensure that this delay is
-		 * more than extcon's debounce-ms
-		 */
-		msleep(300);
-
 	/*
-	 * Check if card was online and offline/removed now or
-	 * card was already offline.
+	 * Query the dAvailableWriteBoosterBufferSize attribute and enable
+	 * the WriteBooster buffer flush when 30% or less of the buffer
+	 * remains available; in the user-space reduction case, flush
+	 * only when 10% or less remains.
 	 */
-	if (ufshcd_is_card_offline(hba))
-		return false;
+	if (ufshcd_wb_is_buf_flush_needed(hba))
+		ufshcd_wb_buf_flush_enable(hba);
+	else
+		ufshcd_wb_buf_flush_disable(hba);
+}
 
-	return true;
+static inline void ufshcd_wb_config(struct ufs_hba *hba)
+{
+	int ret;
+
+	if (!ufshcd_wb_sup(hba))
+		return;
+
+	ret = ufshcd_wb_ctrl(hba, true);
+	if (ret)
+		dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
+	else
+		dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
+	ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
+	if (ret)
+		dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
+			__func__, ret);
+}
+
+static inline bool ufshcd_is_device_offline(struct ufs_hba *hba)
+{
+	if (hba->extcon && ufshcd_is_card_offline(hba))
+		return true;
+	else
+		return false;
 }
 
 static int ufshcd_card_get_extcon_state(struct ufs_hba *hba)
@@ -441,6 +470,8 @@ static struct ufs_dev_fix ufs_fixups[] = {
 		UFS_DEVICE_NO_FASTAUTO),
 	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
 		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
+	UFS_FIX(UFS_VENDOR_WDC, UFS_ANY_MODEL,
+		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
 	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
 	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
 		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
@@ -469,6 +500,8 @@ static struct ufs_dev_fix ufs_fixups[] = {
 		UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
 	UFS_FIX(UFS_VENDOR_SKHYNIX, "hC8HL1",
 		UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+	UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ16",
+		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
 
 	END_FIX
 };
@@ -512,6 +545,9 @@ static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg);
 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg);
 static void ufshcd_register_pm_notifier(struct ufs_hba *hba);
 static void ufshcd_unregister_pm_notifier(struct ufs_hba *hba);
+static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
+static void ufshcd_remove_scsi_devices(struct ufs_hba *hba);
+static void ufshcd_detect_card(struct ufs_hba *hba, unsigned long delay);
 
 #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
 static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
@@ -1776,7 +1812,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 {
 	int ret = 0;
 
-	if (hba->extcon && ufshcd_is_card_offline(hba))
+	if (ufshcd_is_device_offline(hba))
 		return 0;
 
 	/* let's not get into low power until clock scaling is completed */
@@ -1797,31 +1833,27 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 		ufshcd_custom_cmd_log(hba, "Gear-scaled-down");
 	}
 
-	/*
-	 * If auto hibern8 is enabled then put the link in
-	 * hibern8 manually, this is to avoid auto hibern8
-	 * racing during clock frequency scaling sequence.
-	 */
-	if (ufshcd_is_auto_hibern8_enabled(hba)) {
-		ret = ufshcd_uic_hibern8_enter(hba);
-		if (ret)
-			/* link will be bad state so no need to scale_up_gear */
-			goto clk_scaling_unprepare;
-		ufshcd_custom_cmd_log(hba, "Hibern8-entered");
-	}
+	/* Enter hibern8 before scaling clocks */
+	ret = ufshcd_uic_hibern8_enter(hba);
+	if (ret)
+		/* link will be bad state so no need to scale_up_gear */
+		goto clk_scaling_unprepare;
+	ufshcd_custom_cmd_log(hba, "Hibern8-entered");
 
 	ret = ufshcd_scale_clks(hba, scale_up);
-	if (ret)
+	if (ret) {
+		dev_err(hba->dev, "%s: scaling %s clks failed %d\n", __func__,
+			scale_up ? "up" : "down", ret);
 		goto scale_up_gear;
+	}
 	ufshcd_custom_cmd_log(hba, "Clk-freq-switched");
 
-	if (ufshcd_is_auto_hibern8_enabled(hba)) {
-		ret = ufshcd_uic_hibern8_exit(hba);
-		if (ret)
-			/* link will be bad state so no need to scale_up_gear */
-			goto clk_scaling_unprepare;
-		ufshcd_custom_cmd_log(hba, "Hibern8-Exited");
-	}
+	/* Exit hibern8 after scaling clocks */
+	ret = ufshcd_uic_hibern8_exit(hba);
+	if (ret)
+		/* link will be bad state so no need to scale_up_gear */
+		goto clk_scaling_unprepare;
+	ufshcd_custom_cmd_log(hba, "Hibern8-Exited");
 
 	/* scale up the gear after scaling up clocks */
 	if (scale_up) {
@@ -1843,9 +1875,15 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 				hba->clk_gating.delay_ms_pwr_save;
 	}
 
+	/* Enable Write Booster if we have scaled up else disable it */
+	up_write(&hba->lock);
+	ufshcd_wb_ctrl(hba, scale_up);
+	down_write(&hba->lock);
 	goto clk_scaling_unprepare;
 
 scale_up_gear:
+	if (ufshcd_uic_hibern8_exit(hba))
+		goto clk_scaling_unprepare;
 	if (!scale_up)
 		ufshcd_scale_gear(hba, true);
 clk_scaling_unprepare:
@@ -2158,6 +2196,9 @@ static void ufshcd_ungate_work(struct work_struct *work)
 	ufshcd_hba_vreg_set_hpm(hba);
 	ufshcd_enable_clocks(hba);
 
+	if (ufshcd_is_device_offline(hba))
+		goto unblock_reqs;
+
 	/* Exit from hibern8 */
 	if (ufshcd_can_hibern8_during_gating(hba)) {
 		/* Prevent gating in this path */
@@ -2200,7 +2241,7 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
 start:
 	switch (hba->clk_gating.state) {
 	case CLKS_ON:
-		if (hba->extcon && ufshcd_is_card_offline(hba))
+		if (ufshcd_is_device_offline(hba))
 			break;
 		/*
 		 * Wait for the ungate work to complete if in progress.
@@ -2305,6 +2346,9 @@ static void ufshcd_gate_work(struct work_struct *work)
 
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+	if (ufshcd_is_device_offline(hba))
+		goto disable_clocks;
+
 	if (ufshcd_is_hibern8_on_idle_allowed(hba) &&
 	    hba->hibern8_on_idle.is_enabled)
 		/*
@@ -2324,6 +2368,7 @@ static void ufshcd_gate_work(struct work_struct *work)
 		ufshcd_set_link_hibern8(hba);
 	}
 
+disable_clocks:
 	/*
 	 * If auto hibern8 is enabled then the link will already
 	 * be in hibern8 state and the ref clock can be gated.
@@ -3027,6 +3072,8 @@ int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 {
 	hba->lrb[task_tag].issue_time_stamp = ktime_get();
 	hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
+	if (ufshcd_is_device_offline(hba))
+		return -ENOLINK;
 	ufshcd_clk_scaling_start_busy(hba);
 	__set_bit(task_tag, &hba->outstanding_reqs);
 	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -3160,6 +3207,8 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 	/* Write UIC Cmd */
 	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
 		      REG_UIC_COMMAND);
+	/* Make sure that UIC command is committed immediately */
+	wmb();
 }
 
 /**
@@ -3214,6 +3263,9 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
 		return -EIO;
 	}
 
+	if (ufshcd_is_device_offline(hba))
+		return -ENOLINK;
+
 	if (completion)
 		init_completion(&uic_cmd->done);
 
@@ -3704,7 +3756,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 		goto out_unlock;
 	}
 
-	if (hba->extcon && ufshcd_is_card_offline(hba)) {
+	if (ufshcd_is_device_offline(hba)) {
 		set_host_byte(cmd, DID_BAD_TARGET);
 		cmd->scsi_done(cmd);
 		goto out_unlock;
@@ -4040,6 +4092,9 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 	unsigned long flags;
 	bool has_read_lock = false;
 
+	if (ufshcd_is_device_offline(hba))
+		return -ENOLINK;
+
 	/*
 	 * May get invoked from shutdown and IOCTL contexts.
 	 * In shutdown context, it comes in with lock acquired.
@@ -5118,6 +5173,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 	u8 status;
 	int ret;
 	bool reenable_intr = false;
+	int wait_retries = 6; /* Allows 3secs max wait time */
 
 	mutex_lock(&hba->uic_cmd_mutex);
 	init_completion(&uic_async_done);
@@ -5144,11 +5200,42 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 		goto out;
 	}
 
+more_wait:
 	if (!wait_for_completion_timeout(hba->uic_async_done,
 					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+		u32 intr_status = 0;
+		s64 ts_since_last_intr;
+
 		dev_err(hba->dev,
 			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
 			cmd->command, cmd->argument3);
+		/*
+		 * Either the controller triggered the interrupt but the
+		 * ISR could not run due to interrupt starvation, or the
+		 * ISR ran just after the timeout (which clears the IS
+		 * register). In either case, wait a little longer for
+		 * the completion.
+		 */
+		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+		ts_since_last_intr = ktime_ms_delta(ktime_get(),
+						hba->ufs_stats.last_intr_ts);
+
+		if ((intr_status & UFSHCD_UIC_PWR_MASK) ||
+		    ((hba->ufs_stats.last_intr_status & UFSHCD_UIC_PWR_MASK) &&
+		     (ts_since_last_intr < (s64)UIC_CMD_TIMEOUT))) {
+			dev_info(hba->dev, "IS:0x%08x last_intr_sts:0x%08x last_intr_ts:%lld, retry-cnt:%d\n",
+				intr_status, hba->ufs_stats.last_intr_status,
+				hba->ufs_stats.last_intr_ts, wait_retries);
+			if (wait_retries--)
+				goto more_wait;
+
+			/*
+			 * If the same state persists even after the extra
+			 * wait time, something must be hogging the CPU.
+			 */
+			BUG_ON(hba->crash_on_err);
+		}
 		ret = -ETIMEDOUT;
 		goto out;
 	}
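
The timeout path above distinguishes a genuinely hung power-mode command from an interrupt that fired but was not yet serviced, re-waiting a bounded number of times before declaring -ETIMEDOUT (or hitting the BUG_ON when crash_on_err is set). A condensed sketch of the decision; the macro values and helper are stand-ins for the real register masks and timestamps:

#include <stdbool.h>
#include <stdint.h>

#define PWR_MASK   0x10		/* stands in for UFSHCD_UIC_PWR_MASK */
#define TIMEOUT_MS 500		/* stands in for UIC_CMD_TIMEOUT */

static bool should_rewait(uint32_t is_reg, uint32_t last_is,
			  int64_t ms_since_irq, int *retries)
{
	/* Interrupt pending now, or it fired just before the timeout? */
	if ((is_reg & PWR_MASK) ||
	    ((last_is & PWR_MASK) && ms_since_irq < TIMEOUT_MS))
		return (*retries)-- > 0;	/* bounded re-wait */
	return false;				/* really timed out */
}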
@@ -5163,9 +5250,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 	ufshcd_dme_cmd_log(hba, "dme_cmpl_2", hba->active_uic_cmd->command);
 
 out:
-	if (ret) {
-		if (hba->extcon && !ufshcd_is_card_present(hba))
-			goto skip_dump;
+	if (ret && !ufshcd_is_device_offline(hba)) {
 		ufsdbg_set_err_state(hba);
 		ufshcd_print_host_state(hba);
 		ufshcd_print_pwr_info(hba);
@@ -5174,7 +5259,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 		BUG_ON(hba->crash_on_err);
 	}
 
-skip_dump:
 	ufshcd_save_tstamp_of_last_dme_cmd(hba);
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->active_uic_cmd = NULL;
@@ -5226,6 +5310,9 @@ static int ufshcd_link_recovery(struct ufs_hba *hba)
 	int ret = 0;
 	unsigned long flags;
 
+	if (ufshcd_is_device_offline(hba))
+		return -ENOLINK;
+
 	/*
 	 * Check if there is any race with fatal error handling.
 	 * If so, wait for it to complete. Even though fatal error
@@ -5329,10 +5416,9 @@ int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 
 	for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
 		ret = __ufshcd_uic_hibern8_enter(hba);
-		if (!ret)
+		if (!ret || ufshcd_is_device_offline(hba))
 			goto out;
-		else if (ret != -EAGAIN &&
-			 !(hba->extcon && ufshcd_is_card_offline(hba)))
+		else if (ret != -EAGAIN)
 			/* Unable to recover the link, so no point proceeding */
 			BUG_ON(1);
 	}
@@ -5362,7 +5448,7 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 			__func__, ret);
 		ret = ufshcd_link_recovery(hba);
 		/* Unable to recover the link, so no point proceeding */
-		if (ret && !(hba->extcon && ufshcd_is_card_offline(hba)))
+		if (ret && !ufshcd_is_device_offline(hba))
 			BUG_ON(1);
 	} else {
 		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
@@ -5567,6 +5653,12 @@ int ufshcd_change_power_mode(struct ufs_hba *hba,
 			DL_TC0ReplayTimeOutVal_Default);
 	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
 			DL_AFC0ReqTimeOutVal_Default);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
+			DL_FC1ProtectionTimeOutVal_Default);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
+			DL_TC1ReplayTimeOutVal_Default);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
+			DL_AFC1ReqTimeOutVal_Default);
 
 	ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
 			DL_FC0ProtectionTimeOutVal_Default);
@@ -6834,6 +6926,163 @@ static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
 				__func__, err);
 }
 
+static bool ufshcd_wb_sup(struct ufs_hba *hba)
+{
+	return !!(hba->dev_info.d_ext_ufs_feature_sup &
+		  UFS_DEV_WRITE_BOOSTER_SUP);
+}
+
+static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
+{
+	int ret;
+	enum query_opcode opcode;
+
+	if (!ufshcd_wb_sup(hba))
+		return 0;
+
+	if (enable)
+		opcode = UPIU_QUERY_OPCODE_SET_FLAG;
+	else
+		opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
+
+	ret = ufshcd_query_flag_retry(hba, opcode,
+				      QUERY_FLAG_IDN_WB_EN, NULL);
+	if (ret) {
+		dev_err(hba->dev, "%s write booster %s failed %d\n",
+			__func__, enable ? "enable" : "disable", ret);
+		return ret;
+	}
+
+	hba->wb_enabled = enable;
+	dev_dbg(hba->dev, "%s write booster %s %d\n",
+			__func__, enable ? "enable" : "disable", ret);
+
+	return ret;
+}
+
+static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
+{
+	int val;
+
+	if (set)
+		val =  UPIU_QUERY_OPCODE_SET_FLAG;
+	else
+		val = UPIU_QUERY_OPCODE_CLEAR_FLAG;
+
+	return ufshcd_query_flag_retry(hba, val,
+			       QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
+				       NULL);
+}
+
+static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
+{
+	int ret;
+
+	if (!ufshcd_wb_sup(hba) || hba->wb_buf_flush_enabled)
+		return 0;
+
+	ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+				      QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN, NULL);
+	if (ret)
+		dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
+			__func__, ret);
+	else
+		hba->wb_buf_flush_enabled = true;
+
+	dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
+	return ret;
+}
+
+static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
+{
+	int ret;
+
+	if (!ufshcd_wb_sup(hba) || !hba->wb_buf_flush_enabled)
+		return 0;
+
+	ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+				      QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN, NULL);
+	if (ret) {
+		dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
+			__func__, ret);
+	} else {
+		hba->wb_buf_flush_enabled = false;
+		dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
+	}
+
+	return ret;
+}
+
+static bool ufshcd_wb_is_buf_flush_needed(struct ufs_hba *hba)
+{
+	int ret;
+	u32 cur_buf, status, avail_buf;
+
+	if (!ufshcd_wb_sup(hba))
+		return false;
+
+	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+				      QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
+				      0, 0, &avail_buf);
+	if (ret) {
+		dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
+			 __func__, ret);
+		return false;
+	}
+
+	ret = ufshcd_vops_get_user_cap_mode(hba);
+	if (ret <= 0) {
+		dev_dbg(hba->dev, "Get user-cap reduction mode: failed: %d\n",
+			ret);
+		/* Most commonly used */
+		ret = UFS_WB_BUFF_PRESERVE_USER_SPACE;
+	}
+
+	hba->dev_info.keep_vcc_on = false;
+	if (ret == UFS_WB_BUFF_USER_SPACE_RED_EN) {
+		if (avail_buf <= UFS_WB_10_PERCENT_BUF_REMAIN) {
+			hba->dev_info.keep_vcc_on = true;
+			return true;
+		}
+		return false;
+	} else if (ret == UFS_WB_BUFF_PRESERVE_USER_SPACE) {
+		ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+					      QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
+					      0, 0, &cur_buf);
+		if (ret) {
+			dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
+				 __func__, ret);
+			return false;
+		}
+
+		if (!cur_buf) {
+			dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
+				 cur_buf);
+			return false;
+		}
+
+		ret = ufshcd_get_ee_status(hba, &status);
+		if (ret) {
+			dev_err(hba->dev, "%s: failed to get exception status %d\n",
+				__func__, ret);
+			if (avail_buf < UFS_WB_40_PERCENT_BUF_REMAIN) {
+				hba->dev_info.keep_vcc_on = true;
+				return true;
+			}
+			return false;
+		}
+
+		status &= hba->ee_ctrl_mask;
+
+		if ((status & MASK_EE_URGENT_BKOPS) ||
+		    (avail_buf < UFS_WB_40_PERCENT_BUF_REMAIN)) {
+			hba->dev_info.keep_vcc_on = true;
+			return true;
+		}
+	}
+	return false;
+}
+
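The new ufshcd_wb_is_buf_flush_needed() boils down to two thresholds keyed off the user-capacity mode: in reduction mode, flush when 10% or less of the buffer remains; in preserve mode, flush when less than 40% remains or BKOPS is urgent. A distilled sketch using the attribute encoding from the enums added in ufs.h (0x0 = 0% left ... 0xA = 100% left):

#include <stdbool.h>

enum { PRESERVE = 1, RED_EN = 2 };	/* user-cap modes, as above */

/* avail is the dAvailableWriteBoosterBufferSize code, in 10% steps. */
static bool wb_flush_needed(int mode, unsigned int avail, bool urgent_bkops)
{
	if (mode == RED_EN)
		return avail <= 0x1;			/* <= 10% remaining */
	if (mode == PRESERVE)
		return urgent_bkops || avail < 0x4;	/* < 40% remaining */
	return false;
}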
 /**
  * ufshcd_exception_event_handler - handle exceptions raised by device
  * @work: pointer to work data
@@ -6992,7 +7241,7 @@ static void ufshcd_err_handler(struct work_struct *work)
 
 	hba = container_of(work, struct ufs_hba, eh_work);
 
-	if (hba->extcon && !ufshcd_is_card_present(hba)) {
+	if (ufshcd_is_device_offline(hba)) {
 		spin_lock_irqsave(hba->host->host_lock, flags);
 		hba->saved_err = 0;
 		hba->saved_uic_err = 0;
@@ -7075,8 +7324,8 @@ static void ufshcd_err_handler(struct work_struct *work)
 
 	/*
 	 * if host reset is required then skip clearing the pending
-	 * transfers forcefully because they will automatically get
-	 * cleared after link startup.
+	 * transfers forcefully because they will get cleared during
+	 * host reset and restore
 	 */
 	if (needs_reset)
 		goto skip_pending_xfer_clear;
@@ -7192,7 +7441,7 @@ static void ufshcd_rls_handler(struct work_struct *work)
 
 	hba = container_of(work, struct ufs_hba, rls_work);
 
-	if (hba->extcon && !ufshcd_is_card_present(hba))
+	if (ufshcd_is_device_offline(hba))
 		return;
 
 	pm_runtime_get_sync(hba->dev);
@@ -7364,7 +7613,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
 			queue_eh_work = true;
 	}
 
-	if (hba->extcon && ufshcd_is_card_offline(hba)) {
+	if (ufshcd_is_device_offline(hba)) {
 		/* ignore UIC errors if card is offline */
 		retval |= IRQ_HANDLED;
 	} else if (queue_eh_work) {
@@ -7494,7 +7743,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
 		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 	} while (intr_status && --retries);
 
-	if (retval == IRQ_NONE) {
+	if (retval == IRQ_NONE && !ufshcd_is_device_offline(hba)) {
 		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
 					__func__, intr_status);
 		ufshcd_hex_dump(hba, "host regs: ", hba->mmio_base,
@@ -7647,7 +7896,6 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	hba = shost_priv(host);
 	tag = cmd->request->tag;
 
-	ufshcd_print_cmd_log(hba);
 	lrbp = &hba->lrb[tag];
 	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
 	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
@@ -7891,9 +8139,15 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
 	int err;
 	unsigned long flags;
 
-	/* Reset the host controller */
+	/*
+	 * Stop the host controller and complete the requests
+	 * cleared by h/w
+	 */
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	ufshcd_hba_stop(hba, false);
+	hba->silence_err_logs = true;
+	ufshcd_complete_requests(hba);
+	hba->silence_err_logs = false;
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	/* scale up clocks to max frequency before full reinitialization */
@@ -7968,7 +8222,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
 	 * There is no point proceeding even after failing
 	 * to recover after multiple retries.
 	 */
-	BUG_ON(err && ufshcd_is_embedded_dev(hba));
+	BUG_ON(err && ufshcd_is_embedded_dev(hba) && !hba->extcon);
 
 	/*
 	 * After reset the door-bell might be cleared, complete
@@ -8302,6 +8556,21 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
 
 	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
 
+	/* Enable WB for UFS 3.1+, or for Toshiba UFS 3.0 devices whose
+	 * descriptor is long enough (>= 0x59) to carry the extended
+	 * feature word. */
+	if ((dev_desc->wspecversion >= 0x310) ||
+	    (dev_desc->wmanufacturerid == UFS_VENDOR_TOSHIBA &&
+	     dev_desc->wspecversion >= 0x300 &&
+	     hba->desc_size.dev_desc >= 0x59))
+		hba->dev_info.d_ext_ufs_feature_sup =
+			desc_buf[DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP]
+								<< 24 |
+			desc_buf[DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 1]
+								<< 16 |
+			desc_buf[DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 2]
+								<< 8 |
+			desc_buf[DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 3];
+
 	/* Zero-pad entire buffer for string termination. */
 	memset(desc_buf, 0, buff_len);
 
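The four descriptor bytes starting at DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP (0x4F) are assembled big-endian into the 32-bit d_ext_ufs_feature_sup word. A minimal standalone equivalent of the shift-and-or above:

#include <stdint.h>

static uint32_t ext_features(const uint8_t *desc)
{
	return (uint32_t)desc[0x4F] << 24 |
	       (uint32_t)desc[0x50] << 16 |
	       (uint32_t)desc[0x51] << 8  |
	       (uint32_t)desc[0x52];
}
/* Bit 8 of the result (UFS_DEV_WRITE_BOOSTER_SUP) gates WriteBooster. */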
@@ -8743,11 +9012,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 	ktime_t start = ktime_get();
 
 reinit:
-	if (hba->extcon && (ufshcd_card_get_extcon_state(hba) <= 0)) {
-		ret = -ENOLINK;
-		goto out;
-	}
-
 	ret = ufshcd_link_startup(hba);
 	if (ret)
 		goto out;
@@ -8825,6 +9089,22 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 			goto out;
 	}
 
+	/*
+	 * UFS 3.0 and newer devices use Vcc and Vccq (1.2 V), while
+	 * UFS 2.1 devices use Vcc and Vccq2 (1.8 V) power supplies.
+	 * If the system allows turning off these regulators during a
+	 * power collapse event, also turn them off during system
+	 * suspend events. This causes the UFS device to fully
+	 * re-initialize upon system resume.
+	 */
+	if ((hba->dev_info.w_spec_version >= 0x300 && hba->vreg_info.vccq &&
+		hba->vreg_info.vccq->sys_suspend_pwr_off) ||
+		(hba->dev_info.w_spec_version < 0x300 &&
+		hba->vreg_info.vccq2->sys_suspend_pwr_off))
+		hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+				UFS_POWERDOWN_PWR_MODE,
+				UIC_LINK_OFF_STATE);
+
 	/* UFS device is also active now */
 	ufshcd_set_ufs_dev_active(hba);
 	ufshcd_force_reset_auto_bkops(hba);
@@ -8860,6 +9140,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 	/* set the state as operational after switching to desired gear */
 	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
 
+	ufshcd_wb_config(hba);
+
 	/*
 	 * If we are in error handling context or in power management callbacks
 	 * context, no need to scan the host
@@ -8891,10 +9173,13 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 					goto out;
 			}
 			hba->clk_scaling.is_allowed = true;
+			hba->clk_scaling.is_suspended = false;
 		}
 
 		scsi_scan_host(hba->host);
 		pm_runtime_put_sync(hba->dev);
+		if (hba->extcon)
+			hba->card_rpm_paired = true;
 	}
 
 	/*
@@ -8907,21 +9192,17 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 	if (ret) {
 		ufshcd_set_ufs_dev_poweroff(hba);
 		ufshcd_set_link_off(hba);
-		if (hba->extcon) {
-			if (!ufshcd_is_card_online(hba))
-				ufsdbg_clr_err_state(hba);
-			ufshcd_set_card_offline(hba);
-		}
-	} else if (hba->extcon) {
-		ufshcd_set_card_online(hba);
 	}
 
 	/*
 	 * If we failed to initialize the device or the device is not
 	 * present, turn off the power/clocks etc.
 	 */
-	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress)
+	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
 		pm_runtime_put_sync(hba->dev);
+		if (hba->extcon)
+			hba->card_rpm_paired = true;
+	}
 
 	trace_ufshcd_init(dev_name(hba->dev), ret,
 		ktime_to_us(ktime_sub(ktime_get(), start)),
@@ -8929,92 +9210,160 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 	return ret;
 }
 
-static void ufshcd_remove_device(struct ufs_hba *hba)
+static void ufshcd_remove_scsi_devices(struct ufs_hba *hba)
 {
+	struct Scsi_Host *shost = hba->host;
 	struct scsi_device *sdev;
-	struct scsi_device *sdev_cache[UFS_MAX_LUS];
-	int sdev_count = 0, i;
 	unsigned long flags;
 
-	hba->card_removal_in_progress = 1;
-	ufshcd_hold_all(hba);
-	/* Reset the host controller */
+	spin_lock_irqsave(shost->host_lock, flags);
+restart:
+	list_for_each_entry(sdev, &shost->__devices, siblings) {
+		if (sdev->sdev_state == SDEV_DEL ||
+		    sdev->sdev_state == SDEV_CANCEL ||
+		    !get_device(&sdev->sdev_gendev))
+			continue;
+		spin_unlock_irqrestore(shost->host_lock, flags);
+		scsi_remove_device(sdev);
+		put_device(&sdev->sdev_gendev);
+		spin_lock_irqsave(shost->host_lock, flags);
+		goto restart;
+	}
+	spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
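ufshcd_remove_scsi_devices() cannot hold the host lock across scsi_remove_device(), so it drops the lock for each removal and restarts the walk from the list head, skipping devices already in SDEV_DEL/SDEV_CANCEL. A toy sketch of the drop-and-restart iteration (a plain linked list stands in for the shost device list):

struct node { struct node *next; int dead; };

static void remove_all(struct node **head /* lock held by caller */)
{
restart:
	for (struct node *n = *head; n; n = n->next) {
		if (n->dead)
			continue;	/* already torn down, skip it */
		n->dead = 1;
		/* drop the lock, tear down n, re-take the lock: the
		 * list may have changed under us in the meantime */
		goto restart;		/* resume from a known-good state */
	}
}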
+static void ufshcd_remove_card(struct ufs_hba *hba)
+{
+	unsigned long flags;
+
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	hba->silence_err_logs = true;
-	ufshcd_hba_stop(hba, false);
+	ufshcd_set_card_removal_ongoing(hba);
+	ufshcd_set_card_offline(hba);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-	ufshcd_set_ufs_dev_poweroff(hba);
-	ufshcd_set_link_off(hba);
-	__ufshcd_shutdown_clkscaling(hba);
-
+	/* Turn on host vreg and clocks */
+	ufshcd_setup_hba_vreg(hba, true);
+	ufshcd_enable_clocks(hba);
+	/* Make sure clocks are stable */
+	usleep_range(50, 60);
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_hba_stop(hba, false);
+	/* Clear interrupt status and disable interrupts */
+	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
+		      REG_INTERRUPT_STATUS);
+	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
+	/*
+	 * Make sure that UFS interrupts are disabled and
+	 * any pending interrupt status is cleared.
+	 */
+	mb();
+	hba->silence_err_logs = true;
 	/* Complete requests that have door-bell cleared by h/w */
 	ufshcd_complete_requests(hba);
-
-	/* remove all scsi devices */
-	list_for_each_entry(sdev, &hba->host->__devices, siblings) {
-		if (sdev_count < UFS_MAX_LUS) {
-			sdev_cache[sdev_count] = sdev;
-			sdev_count++;
-		}
-	}
-
-	for (i = 0; i < sdev_count; i++)
-		scsi_remove_device(sdev_cache[i]);
-
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	/* Complete the flying async UIC command if there is one */
+	/* Complete the sync/async UIC command if there is one */
 	if (hba->uic_async_done)
 		complete(hba->uic_async_done);
-	hba->silence_err_logs = false;
+	else if (hba->active_uic_cmd)
+		complete(&hba->active_uic_cmd->done);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-	ufshcd_release_all(hba);
-	hba->card_removal_in_progress = 0;
+	cancel_delayed_work_sync(&hba->card_detect_work);
+	/* Flush runtime PM events */
+	pm_runtime_get_sync(hba->dev);
+	/* Clear runtime PM errors if any */
+	pm_runtime_set_active(hba->dev);
+	cancel_work_sync(&hba->rls_work);
+	cancel_work_sync(&hba->eh_work);
+	cancel_work_sync(&hba->eeh_work);
+	hba->auto_bkops_enabled = false;
+	__ufshcd_shutdown_clkscaling(hba);
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_clear_eh_in_progress(hba);
+	hba->saved_err = 0;
+	hba->saved_uic_err = 0;
+	hba->saved_ce_err = 0;
+	hba->auto_h8_err = false;
+	hba->force_host_reset = false;
+	hba->ufshcd_state = UFSHCD_STATE_RESET;
+	hba->silence_err_logs = false;
+	ufsdbg_clr_err_state(hba);
+	ufshcd_set_ufs_dev_poweroff(hba);
+	ufshcd_set_link_off(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	/*
+	 * Remove scsi devices only when we are not in middle
+	 * of system resume events.
+	 */
+	if (!down_trylock(&hba->sdev_sema)) {
+		ufshcd_remove_scsi_devices(hba);
+		up(&hba->sdev_sema);
+	}
+	ufshcd_clear_card_removal_ongoing(hba);
+	pm_runtime_put_sync(hba->dev);
 }
 
 static void ufshcd_card_detect_handler(struct work_struct *work)
 {
 	struct ufs_hba *hba;
+	unsigned long flags;
+	int ret;
 
-	hba = container_of(work, struct ufs_hba, card_detect_work);
+	hba = container_of(to_delayed_work(work), struct ufs_hba,
+			   card_detect_work);
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (!ufshcd_is_card_removal_ongoing(hba))
+		ufshcd_set_card_online(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	if (ufshcd_is_card_online(hba) && !hba->sdev_ufs_device) {
 		pm_runtime_get_sync(hba->dev);
-		ufshcd_detect_device(hba);
-		/* ufshcd_probe_hba() calls pm_runtime_put_sync() on exit */
-	} else if (ufshcd_is_card_offline(hba) && hba->sdev_ufs_device) {
-		pm_runtime_get_sync(hba->dev);
-		ufshcd_remove_device(hba);
-		pm_runtime_put_sync(hba->dev);
-		ufsdbg_clr_err_state(hba);
+		if (ufshcd_is_clkgating_allowed(hba)) {
+			spin_lock_irqsave(hba->host->host_lock, flags);
+			hba->clk_gating.active_reqs = 0;
+			spin_unlock_irqrestore(hba->host->host_lock, flags);
+		}
+		hba->card_rpm_paired = false;
+		ret = ufshcd_detect_device(hba);
+		if (ret) {
+			ufshcd_set_card_offline(hba);
+			ufsdbg_clr_err_state(hba);
+			dev_err(hba->dev, "%s: device detect failed: %d\n",
+				__func__, ret);
+		}
+
+		/*
+		 * pm_runtime_put_sync() may not have been called if a
+		 * failure happens before or inside ufshcd_probe_hba().
+		 */
+		if (!hba->card_rpm_paired) {
+			cancel_work_sync(&hba->eh_work);
+			pm_runtime_put_sync(hba->dev);
+		}
 	}
 }
 
+static void ufshcd_detect_card(struct ufs_hba *hba, unsigned long delay)
+{
+	if (hba->extcon && !hba->card_detect_disabled)
+		schedule_delayed_work(&hba->card_detect_work, delay);
+}
+
 static int ufshcd_card_detect_notifier(struct notifier_block *nb,
 				       unsigned long event, void *ptr)
 {
 	struct ufs_hba *hba = container_of(nb, struct ufs_hba, card_detect_nb);
 
-	if (event) {
-		if (hba->card_removal_in_progress)
-			goto out;
-		ufshcd_set_card_online(hba);
-	} else
-		ufshcd_set_card_offline(hba);
-
-	if (ufshcd_is_card_offline(hba) && !hba->sdev_ufs_device)
-		goto out;
-
 	/*
-	 * card insertion/removal are very infrequent events and having this
+	 * card insertion/removal are not frequent events and having this
 	 * message helps if there is some issue with card detection/removal.
 	 */
 	dev_info(hba->dev, "%s: card %s notification rcvd\n",
-		__func__, ufshcd_is_card_online(hba) ? "inserted" : "removed");
+		__func__, event ? "inserted" : "removed");
 
-	schedule_work(&hba->card_detect_work);
-out:
+	if (event)
+		ufshcd_detect_card(hba, msecs_to_jiffies(200));
+	else
+		ufshcd_remove_card(hba);
+
 	return NOTIFY_DONE;
 }
 
@@ -9061,15 +9410,24 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
 {
 	struct ufs_hba *hba = (struct ufs_hba *)data;
 
-	/*
-	 * Don't allow clock gating and hibern8 enter for faster device
-	 * detection.
-	 */
-	ufshcd_hold_all(hba);
-	ufshcd_probe_hba(hba);
-	ufshcd_release_all(hba);
-
-	ufshcd_extcon_register(hba);
+	if (hba->extcon) {
+		ufshcd_hba_stop(hba, true);
+		ufshcd_set_ufs_dev_poweroff(hba);
+		ufshcd_set_link_off(hba);
+		ufshcd_set_card_offline(hba);
+		pm_runtime_put_sync(hba->dev);
+		ufshcd_extcon_register(hba);
+		if (ufshcd_card_get_extcon_state(hba) > 0)
+			ufshcd_detect_card(hba, 0);
+	} else {
+		/*
+		 * Don't allow clock gating and hibern8 enter for faster device
+		 * detection.
+		 */
+		ufshcd_hold_all(hba);
+		ufshcd_probe_hba(hba);
+		ufshcd_release_all(hba);
+	}
 }
 
 static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
@@ -9551,6 +9909,12 @@ static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
 	struct ufs_vreg_info *info = &hba->vreg_info;
 	int ret = 0;
 
+	if (hba->extcon)
+		mutex_lock(&hba->card_mutex);
+
+	if (!on && ufshcd_is_card_removal_ongoing(hba))
+		goto out;
+
 	if (info->vdd_hba) {
 		ret = ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
 
@@ -9558,6 +9922,9 @@ static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
 			ufshcd_vops_update_sec_cfg(hba, on);
 	}
 
+out:
+	if (hba->extcon)
+		mutex_unlock(&hba->card_mutex);
 	return ret;
 }
 
@@ -9651,13 +10018,19 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 	bool clk_state_changed = false;
 
 	if (list_empty(head))
-		goto out;
+		return ret;
+
+	if (hba->extcon)
+		mutex_lock(&hba->card_mutex);
+
+	if (!on && ufshcd_is_card_removal_ongoing(hba))
+		goto out_unlock;
 
 	/* call vendor specific bus vote before enabling the clocks */
 	if (on) {
 		ret = ufshcd_vops_set_bus_vote(hba, on);
 		if (ret)
-			return ret;
+			goto out_unlock;
 	}
 
 	/*
@@ -9668,7 +10041,7 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 	if (!on) {
 		ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
 		if (ret)
-			return ret;
+			goto out_unlock;
 	}
 
 	list_for_each_entry(clki, head, list) {
@@ -9740,6 +10113,9 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 		trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
 			(on ? "on" : "off"),
 			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+out_unlock:
+	if (hba->extcon)
+		mutex_unlock(&hba->card_mutex);
 	return ret;
 }
 
@@ -10067,12 +10443,30 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
 	 *
 	 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
 	 * in low power state which would save some power.
+	 *
+	 * If Write Booster is enabled and the device needs to flush the WB
+	 * buffer OR if bkops status is urgent for WB, keep Vcc on.
 	 */
 	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
 	    !hba->dev_info.is_lu_power_on_wp) {
-		ufshcd_setup_vreg(hba, false);
-	} else if (!ufshcd_is_ufs_dev_active(hba)) {
 		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+		if (hba->dev_info.w_spec_version >= 0x300 &&
+			hba->vreg_info.vccq &&
+			hba->vreg_info.vccq->sys_suspend_pwr_off)
+			ufshcd_toggle_vreg(hba->dev,
+				hba->vreg_info.vccq, false);
+		else
+			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+
+		if (hba->dev_info.w_spec_version < 0x300 &&
+			hba->vreg_info.vccq2->sys_suspend_pwr_off)
+			ufshcd_toggle_vreg(hba->dev,
+				hba->vreg_info.vccq2, false);
+		else
+			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
+	} else if (!ufshcd_is_ufs_dev_active(hba)) {
+		if (!hba->dev_info.keep_vcc_on)
+			ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
 		if (!ufshcd_is_link_active(hba)) {
 			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
 			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
@@ -10085,23 +10479,41 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
 	int ret = 0;
 
 	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
-	    !hba->dev_info.is_lu_power_on_wp) {
-		ret = ufshcd_setup_vreg(hba, true);
-	} else if (!ufshcd_is_ufs_dev_active(hba)) {
-		if (!ret && !ufshcd_is_link_active(hba)) {
+		!hba->dev_info.is_lu_power_on_wp) {
+		if (hba->dev_info.w_spec_version < 0x300 &&
+			hba->vreg_info.vccq2->sys_suspend_pwr_off)
+			ret = ufshcd_toggle_vreg(hba->dev,
+				hba->vreg_info.vccq2, true);
+		else
+			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
+		if (ret)
+			goto vcc_disable;
+
+		if (hba->dev_info.w_spec_version >= 0x300 &&
+			hba->vreg_info.vccq &&
+			hba->vreg_info.vccq->sys_suspend_pwr_off)
+			ret = ufshcd_toggle_vreg(hba->dev,
+				hba->vreg_info.vccq, true);
+		else
 			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
-			if (ret)
-				goto vcc_disable;
+		if (ret)
+			goto vccq2_lpm;
+		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
+	} else if (!ufshcd_is_ufs_dev_active(hba)) {
+		if (!ufshcd_is_link_active(hba)) {
 			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
 			if (ret)
-				goto vccq_lpm;
+				goto vcc_disable;
+			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
+			if (ret)
+				goto vccq2_lpm;
 		}
 		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
 	}
 	goto out;
 
-vccq_lpm:
-	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+vccq2_lpm:
+	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
 vcc_disable:
 	ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
 out:
@@ -10130,26 +10542,27 @@ static int ufshcd_pm_notify(struct notifier_block *notify_block,
 {
 	struct ufs_hba *hba = container_of(
 		notify_block, struct ufs_hba, pm_notify);
-	int ret = 0;
 
 	if (!hba->extcon)
-		return ret;
+		return 0;
 
 	switch (mode) {
 	case PM_SUSPEND_PREPARE:
-		ret = ufshcd_extcon_unregister(hba);
-		if (ret)
-			break;
-		cancel_work_sync(&hba->card_detect_work);
+		hba->card_detect_disabled = true;
+		cancel_delayed_work_sync(&hba->card_detect_work);
+		down(&hba->sdev_sema);
 		break;
 	case PM_POST_SUSPEND:
-		ret = ufshcd_extcon_register(hba);
-		if (ret)
-			break;
-		extcon_sync(hba->extcon, EXTCON_MECHANICAL);
+		if (ufshcd_is_card_offline(hba) && hba->sdev_ufs_device)
+			ufshcd_remove_scsi_devices(hba);
+		up(&hba->sdev_sema);
+		hba->card_detect_disabled = false;
+		if (ufshcd_card_get_extcon_state(hba) > 0 &&
+		    !hba->sdev_ufs_device)
+			ufshcd_detect_card(hba, 0);
 	}
 
-	return ret;
+	return 0;
 }
 
 static void ufshcd_register_pm_notifier(struct ufs_hba *hba)
@@ -10249,11 +10662,16 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 			/* make sure that auto bkops is disabled */
 			ufshcd_disable_auto_bkops(hba);
 		}
+		ufshcd_wb_toggle_flush(hba);
+	} else if (!ufshcd_is_runtime_pm(pm_op)) {
+		ufshcd_wb_buf_flush_disable(hba);
+		hba->dev_info.keep_vcc_on = false;
 	}
 
 	if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
-	     ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
-	       !ufshcd_is_runtime_pm(pm_op))) {
+	    ((ufshcd_is_runtime_pm(pm_op) && (!hba->auto_bkops_enabled)
+	      && !hba->wb_buf_flush_enabled) ||
+	     !ufshcd_is_runtime_pm(pm_op))) {
 		/* ensure that bkops is disabled */
 		ufshcd_disable_auto_bkops(hba);
 		ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
@@ -10270,8 +10688,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		hba->hibern8_on_idle.state = HIBERN8_ENTERED;
 
 set_vreg_lpm:
-	ufshcd_vreg_set_lpm(hba);
-
+	if (!hba->auto_bkops_enabled)
+		ufshcd_vreg_set_lpm(hba);
 disable_clks:
 	/*
 	 * Call vendor specific suspend callback. As these callbacks may access
@@ -10282,6 +10700,12 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	if (ret)
 		goto set_link_active;
 
+	/*
+	 * Disable the host irq, as no host controller transactions are
+	 * expected until resume.
+	 */
+	ufshcd_disable_irq(hba);
+
 	if (!ufshcd_is_link_active(hba))
 		ret = ufshcd_disable_clocks(hba, false);
 	else
@@ -10298,16 +10722,13 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);
 	}
-	/*
-	 * Disable the host irq as host controller as there won't be any
-	 * host controller transaction expected till resume.
-	 */
-	ufshcd_disable_irq(hba);
+
 	/* Put the host controller in low power mode if possible */
 	ufshcd_hba_vreg_set_lpm(hba);
 	goto out;
 
 set_link_active:
+	ufshcd_enable_irq(hba);
 	if (hba->clk_scaling.is_allowed)
 		ufshcd_resume_clkscaling(hba);
 	ufshcd_vreg_set_hpm(hba);
@@ -10377,8 +10798,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 
 	if (hba->extcon &&
 	    (ufshcd_is_card_offline(hba) ||
-	     (ufshcd_is_card_online(hba) && !hba->sdev_ufs_device) ||
-	     !ufshcd_card_get_extcon_state(hba)))
+	     (ufshcd_is_card_online(hba) && !hba->sdev_ufs_device)))
 		goto skip_dev_ops;
 
 	if (ufshcd_is_link_hibern8(hba)) {
@@ -10407,6 +10827,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 			hba->hibern8_on_idle.state = HIBERN8_EXITED;
 	}
 
+	ufshcd_wb_buf_flush_disable(hba);
 	if (!ufshcd_is_ufs_dev_active(hba)) {
 		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
 		if (ret)
@@ -10664,9 +11085,17 @@ int ufshcd_shutdown(struct ufs_hba *hba)
 {
 	int ret = 0;
 
+	if (!hba->is_powered)
+		goto out;
+
 	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
 		goto out;
 
+	if (hba->extcon) {
+		hba->card_detect_disabled = true;
+		cancel_delayed_work_sync(&hba->card_detect_work);
+	}
+
 	pm_runtime_get_sync(hba->dev);
 	ufshcd_hold_all(hba);
 	ufshcd_mark_shutdown_ongoing(hba);
@@ -10877,9 +11306,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	/* Initialize work queues */
 	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
 	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
-	INIT_WORK(&hba->card_detect_work, ufshcd_card_detect_handler);
+	INIT_DELAYED_WORK(&hba->card_detect_work, ufshcd_card_detect_handler);
 	INIT_WORK(&hba->rls_work, ufshcd_rls_handler);
 
+	sema_init(&hba->sdev_sema, 1);
+	mutex_init(&hba->card_mutex);
+
 	/* Initialize UIC command mutex */
 	mutex_init(&hba->uic_cmd_mutex);
 
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 15122de..17b9f7d 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -363,6 +363,7 @@ struct ufs_hba_variant_ops {
 	u32	(*get_scale_down_gear)(struct ufs_hba *hba);
 	int	(*set_bus_vote)(struct ufs_hba *hba, bool on);
 	int	(*phy_initialization)(struct ufs_hba *);
+	u32	(*get_user_cap_mode)(struct ufs_hba *hba);
 #ifdef CONFIG_DEBUG_FS
 	void	(*add_debugfs)(struct ufs_hba *hba, struct dentry *root);
 	void	(*remove_debugfs)(struct ufs_hba *hba);
@@ -614,6 +615,7 @@ enum ufshcd_ctx {
 	H8_EXIT_WORK,
 	UIC_CMD_SEND,
 	PWRCTL_CMD_SEND,
+	PWR_CHG_NOTIFY,
 	TM_CMD_SEND,
 	XFR_REQ_COMPL,
 	CLK_SCALE_WORK,
@@ -766,8 +768,13 @@ enum ufshcd_card_state {
  * @card_detect_nb: card detector notifier registered with @extcon
 * @card_detect_work: work to execute the card detect function
  * @card_state: card state event, enum ufshcd_card_state defines possible states
- * @card_removal_in_progress: to track card removal progress
+ * @card_removal_in_prog: flag to track card removal progress
  * @pm_notify: used to register for PM events
+ * @sdev_sema: semaphore to protect scsi devices from being removed
+ * @card_mutex: mutex to serialize ON/OFF sequences of hba vregs and clocks
+ * @card_rpm_paired: indicates whether runtime PM events are paired after card
+ *  detection is finished
+ * @card_detect_disabled: when true, card detect handling is disabled
  * @vreg_info: UFS device voltage regulator information
  * @clk_list_head: UFS host controller clocks list node head
  * @pwr_info: holds current power mode
@@ -974,6 +981,7 @@ struct ufs_hba {
 	/* Keeps information of the UFS device connected to this host */
 	struct ufs_dev_info dev_info;
 	bool auto_bkops_enabled;
+	bool wb_buf_flush_enabled;
 
 #ifdef CONFIG_DEBUG_FS
 	struct debugfs_files debugfs_files;
@@ -1003,10 +1011,14 @@ struct ufs_hba {
 
 	struct extcon_dev *extcon;
 	struct notifier_block card_detect_nb;
-	struct work_struct card_detect_work;
+	struct delayed_work card_detect_work;
 	atomic_t card_state;
-	int card_removal_in_progress;
+	unsigned long card_removal_in_prog;
 	struct notifier_block pm_notify;
+	struct semaphore sdev_sema;
+	struct mutex card_mutex;
+	bool card_rpm_paired;
+	bool card_detect_disabled;
 
 	struct ufs_pa_layer_attr pwr_info;
 	struct ufs_pwr_mode_info max_pwr_info;
@@ -1073,8 +1085,24 @@ struct ufs_hba {
 
 	bool phy_init_g4;
 	bool force_g4;
+	bool wb_enabled;
 };
 
+static inline void ufshcd_set_card_removal_ongoing(struct ufs_hba *hba)
+{
+	set_bit(0, &hba->card_removal_in_prog);
+}
+
+static inline void ufshcd_clear_card_removal_ongoing(struct ufs_hba *hba)
+{
+	clear_bit(0, &hba->card_removal_in_prog);
+}
+
+static inline bool ufshcd_is_card_removal_ongoing(struct ufs_hba *hba)
+{
+	return !!(test_bit(0, &hba->card_removal_in_prog));
+}
+
 static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
 {
 	set_bit(0, &hba->shutdown_in_prog);
@@ -1620,4 +1648,10 @@ static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
 		     const char *prefix);
 
+static inline unsigned int ufshcd_vops_get_user_cap_mode(struct ufs_hba *hba)
+{
+	if (hba->var && hba->var->vops->get_user_cap_mode)
+		return hba->var->vops->get_user_cap_mode(hba);
+	return 0;
+}
 #endif /* End of Header */
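
ufshcd_vops_get_user_cap_mode() follows this header's usual optional-vop convention: check hba->var and the callback, dispatch if present, otherwise return a neutral default (0 here). A variant host driver opting in would wire it up roughly as below; the names and the returned value are hypothetical:

    static u32 my_host_get_user_cap_mode(struct ufs_hba *hba)
    {
    	/* report this platform's user capability mode; 0 means none */
    	return 1;	/* placeholder value for illustration */
    }

    static struct ufs_hba_variant_ops my_host_vops = {
    	.get_user_cap_mode	= my_host_get_user_cap_mode,
    	/* other vops as needed */
    };
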
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index 9acbbfd..62aa8fc 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -54,7 +54,7 @@
 #define RX_HS_UNTERMINATED_ENABLE		0x00A6
 #define RX_ENTER_HIBERN8			0x00A7
 #define RX_BYPASS_8B10B_ENABLE			0x00A8
-#define RX_TERMINATION_FORCE_ENABLE		0x0089
+#define RX_TERMINATION_FORCE_ENABLE		0x00A9
 #define RX_MIN_ACTIVATETIME_CAPABILITY		0x008F
 #define RX_HIBERN8TIME_CAPABILITY		0x0092
 #define RX_HS_ADAPT_INITIAL_CAPABILITY		0x009F
@@ -185,6 +185,9 @@
 #define DL_FC0ProtectionTimeOutVal_Default	8191
 #define DL_TC0ReplayTimeOutVal_Default		65535
 #define DL_AFC0ReqTimeOutVal_Default		32767
+#define DL_FC1ProtectionTimeOutVal_Default	8191
+#define DL_TC1ReplayTimeOutVal_Default		65535
+#define DL_AFC1ReqTimeOutVal_Default		32767
 
 #define DME_LocalFC0ProtectionTimeOutVal	0xD041
 #define DME_LocalTC0ReplayTimeOutVal		0xD042
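
The RX_TERMINATION_FORCE_ENABLE change corrects the attribute ID to 0x00A9, consistent with the neighbouring 0x00A6-0x00A8 RX attributes, and the new DL_*1 defaults mirror the existing traffic-class-0 values for traffic class 1. Host code typically programs such data-link timeouts through ufshcd_dme_set(); a sketch, assuming corresponding DME_LocalFC1ProtectionTimeOutVal-style local attribute IDs exist (they are not defined in this header):

    /* Sketch only: the DME_Local*1* attribute IDs below are assumed. */
    ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC1ProtectionTimeOutVal),
    	       DL_FC1ProtectionTimeOutVal_Default);
    ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC1ReplayTimeOutVal),
    	       DL_TC1ReplayTimeOutVal_Default);
    ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC1ReqTimeOutVal),
    	       DL_AFC1ReqTimeOutVal_Default);
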
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index 0e754ff..8652230 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -170,6 +170,10 @@ static int ngd_slim_qmi_new_server(struct qmi_handle *hdl,
 	qmi->svc_info.sq_family = AF_QIPCRTR;
 	qmi->svc_info.sq_node = service->node;
 	qmi->svc_info.sq_port = service->port;
+	if (dev->lpass_mem_usage) {
+		dev->lpass_mem->start = dev->lpass_phy_base;
+		dev->lpass.base = dev->lpass_virt_base;
+	}
 	atomic_set(&dev->ssr_in_progress, 0);
 	schedule_work(&dev->dsp.dom_up);
 
@@ -692,8 +696,12 @@ static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
 		*(puc++) = (txn->ec & 0xFF);
 		*(puc++) = (txn->ec >> 8)&0xFF;
 	}
-	if (txn->wbuf)
-		memcpy(puc, txn->wbuf, txn->len);
+	if (txn->wbuf) {
+		if (dev->lpass_mem_usage)
+			memcpy_toio(puc, txn->wbuf, txn->len);
+		else
+			memcpy(puc, txn->wbuf, txn->len);
+	}
 	if (txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER &&
 		(txn->mc == SLIM_USR_MC_CONNECT_SRC ||
 		 txn->mc == SLIM_USR_MC_CONNECT_SINK ||
@@ -1750,6 +1758,7 @@ static int ngd_slim_probe(struct platform_device *pdev)
 	int ret;
 	struct resource		*bam_mem;
 	struct resource		*slim_mem;
+	struct resource		*lpass_mem;
 	struct resource		*irq, *bam_irq;
 	bool			rxreg_access = false;
 	bool			slim_mdm = false;
@@ -1790,6 +1799,16 @@ static int ngd_slim_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
 		return PTR_ERR(dev);
 	}
+
+	dev->lpass_mem_usage = false;
+	lpass_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"slimbus_lpass_mem");
+	if (lpass_mem) {
+		dev_dbg(&pdev->dev, "Slimbus lpass memory is used\n");
+		dev->lpass_mem_usage = true;
+		dev->lpass_phy_base = (unsigned long long)lpass_mem->start;
+	}
+
 	dev->wr_comp = kzalloc(sizeof(struct completion *) * MSM_TX_BUFS,
 				GFP_KERNEL);
 	if (!dev->wr_comp) {
@@ -1849,26 +1868,40 @@ static int ngd_slim_probe(struct platform_device *pdev)
 	} else
 		dev->sysfs_created = true;
 
-	dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
+	dev->base = devm_ioremap(&pdev->dev, slim_mem->start,
+					resource_size(slim_mem));
 	if (!dev->base) {
 		dev_err(&pdev->dev, "IOremap failed\n");
 		ret = -ENOMEM;
 		goto err_ioremap_failed;
 	}
-	dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
+	dev->bam.base = devm_ioremap(&pdev->dev, bam_mem->start,
+					resource_size(bam_mem));
 	if (!dev->bam.base) {
 		dev_err(&pdev->dev, "BAM IOremap failed\n");
 		ret = -ENOMEM;
-		goto err_ioremap_bam_failed;
+		goto err_ioremap_failed;
 	}
+
+	if (lpass_mem) {
+		dev->lpass.base = devm_ioremap(&pdev->dev, lpass_mem->start,
+					resource_size(lpass_mem));
+		if (!dev->lpass.base) {
+			dev_err(&pdev->dev, "LPASS IOremap failed\n");
+			ret = -ENOMEM;
+			goto err_ioremap_failed;
+		}
+		dev->lpass_virt_base = dev->lpass.base;
+	}
+
 	if (pdev->dev.of_node) {
 
 		ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
 					&dev->ctrl.nr);
 		if (ret) {
-			dev_err(&pdev->dev, "Cell index not specified:%d\n",
-								ret);
-			goto err_ctrl_failed;
+			dev_err(&pdev->dev,
+					"Cell index not specified:%d\n", ret);
+			goto err_ioremap_failed;
 		}
 		rxreg_access = of_property_read_bool(pdev->dev.of_node,
 					"qcom,rxreg-access");
@@ -1889,7 +1922,7 @@ static int ngd_slim_probe(struct platform_device *pdev)
 		if (ret) {
 			dev_err(dev->dev, "%s: Failed to of_platform_populate %d\n",
 				__func__, ret);
-			goto err_ctrl_failed;
+			goto err_ioremap_failed;
 		}
 	} else {
 		dev->ctrl.nr = pdev->id;
@@ -1918,6 +1951,7 @@ static int ngd_slim_probe(struct platform_device *pdev)
 	dev->ctrl.port_xfer = msm_slim_port_xfer;
 	dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
 	dev->bam_mem = bam_mem;
+	dev->lpass_mem = lpass_mem;
 	dev->rx_slim = ngd_slim_rx;
 
 	init_completion(&dev->reconf);
@@ -1946,7 +1980,7 @@ static int ngd_slim_probe(struct platform_device *pdev)
 	ret = slim_add_numbered_controller(&dev->ctrl);
 	if (ret) {
 		dev_err(dev->dev, "error adding controller\n");
-		goto err_ctrl_failed;
+		goto err_ioremap_failed;
 	}
 
 	dev->ctrl.dev.parent = &pdev->dev;
@@ -1968,7 +2002,7 @@ static int ngd_slim_probe(struct platform_device *pdev)
 
 	if (ret) {
 		dev_err(&pdev->dev, "request IRQ failed\n");
-		goto err_request_irq_failed;
+		goto err_ioremap_failed;
 	}
 
 	init_completion(&dev->qmi.qmi_comp);
@@ -2015,11 +2049,6 @@ static int ngd_slim_probe(struct platform_device *pdev)
 	kthread_stop(dev->rx_msgq_thread);
 err_rx_thread_create_failed:
 	free_irq(dev->irq, dev);
-err_request_irq_failed:
-err_ctrl_failed:
-	iounmap(dev->bam.base);
-err_ioremap_bam_failed:
-	iounmap(dev->base);
 err_ioremap_failed:
 	if (dev->sysfs_created)
 		sysfs_remove_file(&dev->dev->kobj,
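
Two themes run through this file: the error paths collapse onto err_ioremap_failed because devm_ioremap() mappings are released automatically on probe failure, and writes into the LPASS region must go through the I/O-memory copy helpers. memcpy() on an __iomem pointer is undefined (and sparse warns about it); memcpy_toio() performs proper MMIO stores. A minimal sketch of the rule:

    /* Sketch: copying a message into an ioremap()ed region. */
    static void copy_msg_to_device(void __iomem *dst, const void *src,
    			       size_t len)
    {
    	/* plain memcpy(dst, src, len) would be wrong for device memory */
    	memcpy_toio(dst, src, len);
    }
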
diff --git a/drivers/slimbus/slim-msm.c b/drivers/slimbus/slim-msm.c
index d6798d0..b2192a6 100644
--- a/drivers/slimbus/slim-msm.c
+++ b/drivers/slimbus/slim-msm.c
@@ -170,26 +170,37 @@ int msm_slim_sps_mem_alloc(
 
 	mem->size = len;
 	mem->min_size = 0;
-	mem->base = dma_alloc_coherent(dma_dev, mem->size, &phys, GFP_KERNEL);
+	mem->base = dev->lpass_mem_usage ? dev->lpass.base :
+		dma_alloc_coherent(dma_dev, mem->size, &phys, GFP_KERNEL);
 
 	if (!mem->base) {
-		dev_err(dma_dev, "dma_alloc_coherent(%d) failed\n", len);
+		dev_err(dma_dev, "dma_alloc_coherent (%d) failed\n", len);
 		return -ENOMEM;
 	}
 
-	mem->phys_base = phys;
-	memset(mem->base, 0x00, mem->size);
+	mem->phys_base = dev->lpass_mem_usage ?
+			(unsigned long long)dev->lpass_mem->start : phys;
+	if (dev->lpass_mem_usage) {
+		memset_io(mem->base, 0x00, mem->size);
+		dev->lpass.base = dev->lpass.base + mem->size;
+		dev->lpass_mem->start = dev->lpass_mem->start + mem->size;
+	} else {
+		memset(mem->base, 0x00, mem->size);
+	}
 	return 0;
 }
 
 void
 msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
 {
-	if (mem->base && mem->phys_base)
-		dma_free_coherent(dev->dev, mem->size, mem->base,
+	if (!dev->lpass_mem_usage) {
+		if (mem->base && mem->phys_base)
+			dma_free_coherent(dev->dev, mem->size, mem->base,
 							mem->phys_base);
-	else
-		dev_err(dev->dev, "cant dma free. they are NULL\n");
+		else
+			dev_err(dev->dev, "Cannot free DMA as it is NULL\n");
+	}
+
 	mem->size = 0;
 	mem->base = NULL;
 	mem->phys_base = 0;
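
In lpass_mem_usage mode, msm_slim_sps_mem_alloc() no longer calls dma_alloc_coherent(); it carves each buffer out of the pre-mapped LPASS region and advances both the virtual and physical cursors by the allocation size, and msm_slim_sps_mem_free() correspondingly skips dma_free_coherent() since the carve-out is never returned. In effect it is a simple bump allocator; a sketch of the idea, with hypothetical names:

    struct carve {
    	void __iomem	*virt;	/* next free virtual address */
    	phys_addr_t	phys;	/* matching physical address */
    };

    /* Hand out size bytes from the region and advance both cursors. */
    static void __iomem *carve_alloc(struct carve *c, size_t size,
    				 phys_addr_t *phys_out)
    {
    	void __iomem *p = c->virt;

    	*phys_out = c->phys;
    	c->virt += size;	/* linear hand-out, no free */
    	c->phys += size;
    	return p;
    }
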
diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h
index cd5b1ca..5fed0aa 100644
--- a/drivers/slimbus/slim-msm.h
+++ b/drivers/slimbus/slim-msm.h
@@ -257,6 +257,7 @@ struct msm_slim_bulk_wr {
 
 struct msm_slim_iommu {
 	struct device			*cb_dev;
+	struct dma_iommu_mapping	*iommu_map;
 	bool				s1_bypass;
 };
 
@@ -266,6 +267,11 @@ struct msm_slim_ctrl {
 	struct device		*dev;
 	struct msm_slim_iommu	iommu_desc;
 	void __iomem		*base;
+	struct msm_slim_sps_bam	lpass;
+	struct resource		*lpass_mem;
+	u32			lpass_phy_base;
+	void __iomem		*lpass_virt_base;
+	bool			lpass_mem_usage;
 	struct resource		*slew_mem;
 	struct resource		*bam_mem;
 	u32			curr_bw;
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 473e29d..06615db 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -517,6 +517,25 @@
 	  gladiator hang detection and collects the context for the
 	  gladiator hang.
 
+config MSM_GLADIATOR_ERP
+	tristate "GLADIATOR coherency interconnect error reporting driver"
+	help
+	  Support dumping debug information for the GLADIATOR
+	  cache interconnect in the error interrupt handler.
+	  Meant to be used for debug scenarios only.
+
+	  If unsure, say N.
+
+config PANIC_ON_GLADIATOR_ERROR
+	depends on MSM_GLADIATOR_ERP
+	bool "Panic on GLADIATOR error report"
+	help
+	  Panic upon detection of a Gladiator coherency interconnect error
+	  in order to support dumping debug information.
+	  Meant to be used for debug scenarios only.
+
+	  If unsure, say N.
+
 config QCOM_FSA4480_I2C
 	bool "Fairchild FSA4480 chip with I2C"
 	select REGMAP_I2C
@@ -544,6 +563,18 @@
 	  kernel panic. On certain MSM SoCs, this provides us
 	  additional debugging information.
 
+
+config QCOM_WDOG_IPI_ENABLE
+	bool "Qcom WDT pet optimization"
+	depends on QCOM_WATCHDOG_V2
+	default n
+	help
+	  When this option is enabled, the watchdog sends an IPI even to cores
+	  in low power mode. For power reasons, the watchdog does not by
+	  default ping cores that are in low power mode at pet time.
+
+	  Enable this to track CPU health in LPM, or on debug builds.
+
 config QCOM_BUS_SCALING
 	bool "Bus scaling driver"
 	help
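
The new symbols are selected like any other Kconfig options; a defconfig fragment for a debug build might read as follows (MSM_GLADIATOR_ERP is tristate, so =m also works, and QCOM_WDOG_IPI_ENABLE additionally depends on QCOM_WATCHDOG_V2):

    CONFIG_MSM_GLADIATOR_ERP=y
    CONFIG_PANIC_ON_GLADIATOR_ERROR=y
    CONFIG_QCOM_WDOG_IPI_ENABLE=y
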
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index ef36b9c..45ecf84 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -36,6 +36,7 @@
 obj-$(CONFIG_MSM_BOOT_STATS) += boot_stats.o
 obj-$(CONFIG_MSM_CORE_HANG_DETECT) += core_hang_detect.o
 obj-$(CONFIG_MSM_GLADIATOR_HANG_DETECT) += gladiator_hang_detect.o
+obj-$(CONFIG_MSM_GLADIATOR_ERP) += gladiator_erp.o
 obj-$(CONFIG_QCOM_MINIDUMP) += msm_minidump.o minidump_log.o
 obj-$(CONFIG_QCOM_MEMORY_DUMP_V2) += memory_dump_v2.o
 obj-$(CONFIG_QCOM_DCC_V2) += dcc_v2.o
@@ -56,6 +57,7 @@
 	obj-y += subsystem_notif.o
 	obj-y += subsystem_restart.o
 	obj-y += ramdump.o
+	obj-y += microdump_collector.o
 endif
 obj-$(CONFIG_QCOM_EUD) += eud.o
 obj-$(CONFIG_SOC_BUS) += socinfo.o
@@ -76,13 +78,20 @@
 obj-$(CONFIG_MSM_REMOTEQDSS) += remoteqdss.o
 obj-$(CONFIG_MSM_EVENT_TIMER) += event_timer.o
 obj-$(CONFIG_MSM_IDLE_STATS)	+= lpm-stats.o
-obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpmh_master_stat.o
+ifdef CONFIG_QCOM_RPMH
+       obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpmh_master_stat.o
+else
+       obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpm_master_stat.o
+endif
 obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpm_stats.o
 obj-$(CONFIG_QCOM_MEM_OFFLINE) += mem-offline.o
 obj-$(CONFIG_QTI_DDR_STATS_LOG) += ddr_stats.o
 obj-$(CONFIG_QMP_DEBUGFS_CLIENT) += qmp-debugfs-client.o
 obj-$(CONFIG_QCOM_HYP_CORE_CTL) += hyp_core_ctl.o
 obj-$(CONFIG_MSM_PERFORMANCE) += msm_performance.o
+ifdef CONFIG_DEBUG_FS
+obj-$(CONFIG_MSM_RPM_SMD)   +=  rpm-smd-debug.o
+endif
 obj-$(CONFIG_QCOM_CDSP_RM) += cdsprm.o
 obj-$(CONFIG_ICNSS) += icnss.o
 obj-$(CONFIG_ICNSS_QMI) += icnss_qmi.o wlan_firmware_service_v01.o
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index eb0c41c..53376fb 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -157,6 +157,47 @@ static int dcc_sram_writel(struct dcc_drvdata *drvdata,
 	return 0;
 }
 
+static void dcc_sram_memset(const struct device *dev, void __iomem *dst,
+			    int c, size_t count)
+{
+	u64 qc = (u8)c;
+
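+	/* replicate the fill byte across each byte lane of the word */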
+	qc |= qc << 8;
+	qc |= qc << 16;
+
+	if (!count || !IS_ALIGNED((unsigned long)dst, 4)
+	    || !IS_ALIGNED((unsigned long)count, 4)) {
+		dev_err(dev,
+			"Target address or size not aligned with 4 bytes\n");
+		return;
+	}
+
+	while (count >= 4) {
+		__raw_writel_no_log(qc, dst);
+		dst += 4;
+		count -= 4;
+	}
+}
+
+static int dcc_sram_memcpy(void *to, const void __iomem *from,
+							size_t count)
+{
+	if (!count || (!IS_ALIGNED((unsigned long)from, 4) ||
+			!IS_ALIGNED((unsigned long)to, 4) ||
+			!IS_ALIGNED((unsigned long)count, 4))) {
+		return -EINVAL;
+	}
+
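+	/* copy a word at a time to honour the 4-byte access requirement */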
+	while (count >= 4) {
+		*(unsigned int *)to = __raw_readl_no_log(from);
+		to += 4;
+		from += 4;
+		count -= 4;
+	}
+
+	return 0;
+}
+
 static bool dcc_ready(struct dcc_drvdata *drvdata)
 {
 	uint32_t val;
@@ -528,7 +569,7 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list)
 	return 0;
 overstep:
 	ret = -EINVAL;
-	memset_io(drvdata->ram_base, 0, drvdata->ram_size);
+	dcc_sram_memset(drvdata->dev, drvdata->ram_base, 0, drvdata->ram_size);
 	dev_err(drvdata->dev, "DCC SRAM oversteps, 0x%x (0x%x)\n",
 		sram_offset, drvdata->ram_size);
 err:
@@ -604,7 +645,8 @@ static int dcc_enable(struct dcc_drvdata *drvdata)
 	mutex_lock(&drvdata->mutex);
 
 	if (!is_dcc_enabled(drvdata)) {
-		memset_io(drvdata->ram_base, 0xDE, drvdata->ram_size);
+		dcc_sram_memset(drvdata->dev, drvdata->ram_base, 0xDE,
+			drvdata->ram_size);
 	}
 
 	for (list = 0; list < DCC_MAX_LINK_LIST; list++) {
@@ -680,7 +722,7 @@ static void dcc_disable(struct dcc_drvdata *drvdata)
 		dcc_writel(drvdata, 0, DCC_LL_LOCK(curr_list));
 		drvdata->enable[curr_list] = false;
 	}
-	memset_io(drvdata->ram_base, 0, drvdata->ram_size);
+	dcc_sram_memset(drvdata->dev, drvdata->ram_base, 0, drvdata->ram_size);
 	drvdata->ram_cfg = 0;
 	drvdata->ram_start = 0;
 	mutex_unlock(&drvdata->mutex);
@@ -1491,6 +1533,7 @@ static ssize_t dcc_sram_read(struct file *file, char __user *data,
 {
 	unsigned char *buf;
 	struct dcc_drvdata *drvdata = file->private_data;
+	int ret;
 
 	/* EOF check */
 	if (drvdata->ram_size <= *ppos)
@@ -1503,7 +1546,13 @@ static ssize_t dcc_sram_read(struct file *file, char __user *data,
 	if (!buf)
 		return -ENOMEM;
 
-	memcpy_fromio(buf, (drvdata->ram_base + *ppos), len);
+	ret = dcc_sram_memcpy(buf, (drvdata->ram_base + *ppos), len);
+	if (ret) {
+		dev_err(drvdata->dev,
+			"Target address or size not aligned with 4 bytes\n");
+		kfree(buf);
+		return ret;
+	}
 
 	if (copy_to_user(data, buf, len)) {
 		dev_err(drvdata->dev,
@@ -1748,7 +1797,7 @@ static int dcc_probe(struct platform_device *pdev)
 		drvdata->nr_config[i] = 0;
 	}
 
-	memset_io(drvdata->ram_base, 0, drvdata->ram_size);
+	dcc_sram_memset(drvdata->dev, drvdata->ram_base, 0, drvdata->ram_size);
 
 	drvdata->curr_list = DCC_INVALID_LINK_LIST;
 
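dcc_sram_memset() and dcc_sram_memcpy() replace memset_io()/memcpy_fromio() with explicitly word-sized accesses and reject any buffer whose address or length is not a multiple of four, which implies the DCC SRAM only tolerates aligned 32-bit transactions. Callers therefore need word-aligned lengths; a sketch of a hypothetical caller that truncates to whole words:

    /* Sketch: dcc_sram_memcpy() returns -EINVAL on unaligned requests,
     * so round the length down to a whole number of words first.
     */
    static int read_dcc_words(void *buf, const void __iomem *sram, size_t len)
    {
    	size_t aligned = round_down(len, 4);

    	if (!aligned)
    		return -EINVAL;
    	return dcc_sram_memcpy(buf, sram, aligned);
    }
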
diff --git a/drivers/soc/qcom/dfc_qmap.c b/drivers/soc/qcom/dfc_qmap.c
index a4b2095..5e26739 100644
--- a/drivers/soc/qcom/dfc_qmap.c
+++ b/drivers/soc/qcom/dfc_qmap.c
@@ -148,6 +148,7 @@ static void dfc_qmap_send_inband_ack(struct dfc_qmi_data *dfc,
 	skb->protocol = htons(ETH_P_MAP);
 	skb->dev = rmnet_get_real_dev(dfc->rmnet_port);
 
+	rmnet_ctl_log_debug("TXI", skb->data, skb->len);
 	trace_dfc_qmap(skb->data, skb->len, false);
 	dev_queue_xmit(skb);
 }
@@ -433,6 +434,7 @@ static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
 	skb->dev = qos->real_dev;
 
 	/* This cmd needs to be sent in-band */
+	rmnet_ctl_log_info("TXI", skb->data, skb->len);
 	trace_dfc_qmap(skb->data, skb->len, false);
 	rmnet_map_tx_qmap_cmd(skb);
 }
diff --git a/drivers/soc/qcom/dfc_qmi.c b/drivers/soc/qcom/dfc_qmi.c
index f175881..f9c54b6 100644
--- a/drivers/soc/qcom/dfc_qmi.c
+++ b/drivers/soc/qcom/dfc_qmi.c
@@ -911,74 +911,52 @@ int dfc_bearer_flow_ctl(struct net_device *dev,
 			struct rmnet_bearer_map *bearer,
 			struct qos_info *qos)
 {
-	int rc = 0, qlen;
-	int enable;
-	int i;
+	bool enable;
 
-	enable = bearer->grant_size ? 1 : 0;
+	enable = !!bearer->grant_size;
 
-	for (i = 0; i < MAX_MQ_NUM; i++) {
-		if (qos->mq[i].bearer == bearer) {
-			/* Do not flow disable ancillary q in tcp bidir */
-			if (qos->mq[i].ancillary &&
-			    bearer->tcp_bidir && !enable)
-				continue;
+	qmi_rmnet_flow_control(dev, bearer->mq_idx, enable);
+	trace_dfc_qmi_tc(dev->name, bearer->bearer_id,
+			 bearer->grant_size,
+			 0, bearer->mq_idx, enable);
 
-			qlen = qmi_rmnet_flow_control(dev, i, enable);
-			trace_dfc_qmi_tc(dev->name, bearer->bearer_id,
-					 bearer->grant_size,
-					 qlen, i, enable);
-			rc++;
-		}
+	/* Do not flow-disable the TCP ACK queue in TCP bidir mode */
+	if (bearer->ack_mq_idx != INVALID_MQ &&
+	    (enable || !bearer->tcp_bidir)) {
+		qmi_rmnet_flow_control(dev, bearer->ack_mq_idx, enable);
+		trace_dfc_qmi_tc(dev->name, bearer->bearer_id,
+				 bearer->grant_size,
+				 0, bearer->ack_mq_idx, enable);
 	}
 
-	if (enable == 0 && bearer->ack_req)
+	if (!enable && bearer->ack_req)
 		dfc_send_ack(dev, bearer->bearer_id,
 			     bearer->seq, qos->mux_id,
 			     DFC_ACK_TYPE_DISABLE);
 
-	return rc;
+	return 0;
 }
 
 static int dfc_all_bearer_flow_ctl(struct net_device *dev,
 				struct qos_info *qos, u8 ack_req, u32 ancillary,
 				struct dfc_flow_status_info_type_v01 *fc_info)
 {
-	struct rmnet_bearer_map *bearer_itm;
-	int rc = 0, qlen;
-	bool enable;
-	int i;
+	struct rmnet_bearer_map *bearer;
 
-	enable = fc_info->num_bytes > 0 ? 1 : 0;
+	list_for_each_entry(bearer, &qos->bearer_head, list) {
+		bearer->grant_size = fc_info->num_bytes;
+		bearer->grant_thresh =
+			qmi_rmnet_grant_per(bearer->grant_size);
+		bearer->seq = fc_info->seq_num;
+		bearer->ack_req = ack_req;
+		bearer->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
+		bearer->last_grant = fc_info->num_bytes;
+		bearer->last_seq = fc_info->seq_num;
 
-	list_for_each_entry(bearer_itm, &qos->bearer_head, list) {
-		bearer_itm->grant_size = fc_info->num_bytes;
-		bearer_itm->grant_thresh =
-			qmi_rmnet_grant_per(bearer_itm->grant_size);
-		bearer_itm->seq = fc_info->seq_num;
-		bearer_itm->ack_req = ack_req;
-		bearer_itm->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
-		bearer_itm->last_grant = fc_info->num_bytes;
-		bearer_itm->last_seq = fc_info->seq_num;
+		dfc_bearer_flow_ctl(dev, bearer, qos);
 	}
 
-	for (i = 0; i < MAX_MQ_NUM; i++) {
-		bearer_itm = qos->mq[i].bearer;
-		if (!bearer_itm)
-			continue;
-		qlen = qmi_rmnet_flow_control(dev, i, enable);
-		trace_dfc_qmi_tc(dev->name, bearer_itm->bearer_id,
-				 fc_info->num_bytes,
-				 qlen, i, enable);
-		rc++;
-	}
-
-	if (enable == 0 && ack_req)
-		dfc_send_ack(dev, fc_info->bearer_id,
-			     fc_info->seq_num, fc_info->mux_id,
-			     DFC_ACK_TYPE_DISABLE);
-
-	return rc;
+	return 0;
 }
 
 static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
@@ -1023,9 +1001,8 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
 
 		if (action)
 			rc = dfc_bearer_flow_ctl(dev, itm, qos);
-	} else {
-		qos->default_grant = fc_info->num_bytes;
 	}
+
 	return rc;
 }
 
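The flow-control rework drops the O(MAX_MQ_NUM) scans: instead of walking every mark queue to find the ones owned by a bearer, dfc_bearer_flow_ctl() addresses the bearer's default and TCP-ACK queues directly, and dfc_all_bearer_flow_ctl() simply reuses it per bearer. This assumes struct rmnet_bearer_map (declared elsewhere) now records its queue indices; roughly, the fields relied on here, shown only for orientation:

    struct rmnet_bearer_map {
    	/* ... */
    	u32	grant_size;	/* remaining grant; 0 means flow off */
    	int	mq_idx;		/* default mark queue of this bearer */
    	int	ack_mq_idx;	/* TCP ACK queue, INVALID_MQ if unused */
    	bool	tcp_bidir;	/* TCP bidirectional mode active */
    	u8	ack_req;	/* peer requested an ack on disable */
    	/* ... */
    };
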
diff --git a/drivers/soc/qcom/eud.c b/drivers/soc/qcom/eud.c
index 8c701af..a8e21ab 100644
--- a/drivers/soc/qcom/eud.c
+++ b/drivers/soc/qcom/eud.c
@@ -93,65 +93,33 @@ static struct platform_device *eud_private;
 static void enable_eud(struct platform_device *pdev)
 {
 	struct eud_chip *priv = platform_get_drvdata(pdev);
-	struct power_supply *usb_psy = NULL;
-	union power_supply_propval pval = {0};
-	union power_supply_propval tval = {0};
 	int ret;
 
-	usb_psy = power_supply_get_by_name("usb");
-	if (!usb_psy) {
-		dev_warn(&pdev->dev, "%s: Could not get usb power_supply\n",
-					__func__);
-		return;
+	/* write into CSR to enable EUD */
+	writel_relaxed(BIT(0), priv->eud_reg_base + EUD_REG_CSR_EUD_EN);
+
+	/* Enable vbus, chgr & safe mode warning interrupts */
+	writel_relaxed(EUD_INT_VBUS | EUD_INT_CHGR | EUD_INT_SAFE_MODE,
+			priv->eud_reg_base + EUD_REG_INT1_EN_MASK);
+
+	/* Enable secure eud if supported */
+	if (priv->secure_eud_en) {
+		ret = scm_io_write(priv->eud_mode_mgr2_phys_base +
+				   EUD_REG_EUD_EN2, EUD_ENABLE_CMD);
+		if (ret)
+			dev_err(&pdev->dev,
+			"scm_io_write failed with rc:%d\n", ret);
 	}
 
-	ret = power_supply_get_property(usb_psy,
-			POWER_SUPPLY_PROP_PRESENT, &pval);
-	if (ret) {
-		dev_err(&pdev->dev, "%s: Unable to read USB PRESENT: %d\n",
-					__func__, ret);
-		return;
-	}
+	/* Ensure Register Writes Complete */
+	wmb();
 
-	ret = power_supply_get_property(usb_psy,
-			POWER_SUPPLY_PROP_REAL_TYPE, &tval);
-	if (ret) {
-		dev_err(&pdev->dev, "%s: Unable to read USB TYPE: %d\n",
-					__func__, ret);
-		return;
-	}
-
-	if (pval.intval && (tval.intval == POWER_SUPPLY_TYPE_USB ||
-	    tval.intval == POWER_SUPPLY_TYPE_USB_CDP)) {
-		/* write into CSR to enable EUD */
-		writel_relaxed(BIT(0), priv->eud_reg_base + EUD_REG_CSR_EUD_EN);
-		/* Enable vbus, chgr & safe mode warning interrupts */
-		writel_relaxed(EUD_INT_VBUS | EUD_INT_CHGR | EUD_INT_SAFE_MODE,
-				priv->eud_reg_base + EUD_REG_INT1_EN_MASK);
-		/* Enable secure eud if supported */
-		if (priv->secure_eud_en) {
-			ret = scm_io_write(priv->eud_mode_mgr2_phys_base +
-					   EUD_REG_EUD_EN2, EUD_ENABLE_CMD);
-			if (ret)
-				dev_err(&pdev->dev,
-				"scm_io_write failed with rc:%d\n", ret);
-		}
-
-		/* Ensure Register Writes Complete */
-		wmb();
-
-		/*
-		 * Set the default cable state to usb connect and charger
-		 * enable
-		 */
-		extcon_set_state_sync(priv->extcon, EXTCON_USB, true);
-		extcon_set_state_sync(priv->extcon, EXTCON_CHG_USB_SDP, true);
-	} else {
-		dev_warn(&pdev->dev,
-			"%s: Connect USB cable before enabling EUD\n",
-			__func__);
-		return;
-	}
+	/*
+	 * Set the default cable state to USB connected and charger
+	 * enabled.
+	 */
+	extcon_set_state_sync(priv->extcon, EXTCON_USB, true);
+	extcon_set_state_sync(priv->extcon, EXTCON_CHG_USB_SDP, true);
 
 	dev_dbg(&pdev->dev, "%s: EUD is Enabled\n", __func__);
 }
@@ -549,6 +517,7 @@ static int msm_eud_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, chip);
 
+	chip->dev = &pdev->dev;
 	chip->extcon = devm_extcon_dev_allocate(&pdev->dev, eud_extcon_cable);
 	if (IS_ERR(chip->extcon)) {
 		dev_err(chip->dev, "%s: failed to allocate extcon device\n",
diff --git a/drivers/soc/qcom/gladiator_erp.c b/drivers/soc/qcom/gladiator_erp.c
new file mode 100644
index 0000000..91a51e3
--- /dev/null
+++ b/drivers/soc/qcom/gladiator_erp.c
@@ -0,0 +1,1130 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2017,2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/cpu_pm.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/scm.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+
+#define MODULE_NAME "gladiator_error_reporting"
+
+#define INVALID_NUM	0xDEADBEEF
+
+struct reg_off {
+	unsigned int gladiator_id_coreid;
+	unsigned int gladiator_id_revisionid;
+	unsigned int gladiator_faulten;
+	unsigned int gladiator_errvld;
+	unsigned int gladiator_errclr;
+	unsigned int gladiator_errlog0;
+	unsigned int gladiator_errlog1;
+	unsigned int gladiator_errlog2;
+	unsigned int gladiator_errlog3;
+	unsigned int gladiator_errlog4;
+	unsigned int gladiator_errlog5;
+	unsigned int gladiator_errlog6;
+	unsigned int gladiator_errlog7;
+	unsigned int gladiator_errlog8;
+	unsigned int observer_0_id_coreid;
+	unsigned int observer_0_id_revisionid;
+	unsigned int observer_0_faulten;
+	unsigned int observer_0_errvld;
+	unsigned int observer_0_errclr;
+	unsigned int observer_0_errlog0;
+	unsigned int observer_0_errlog1;
+	unsigned int observer_0_errlog2;
+	unsigned int observer_0_errlog3;
+	unsigned int observer_0_errlog4;
+	unsigned int observer_0_errlog5;
+	unsigned int observer_0_errlog6;
+	unsigned int observer_0_errlog7;
+	unsigned int observer_0_errlog8;
+	unsigned int observer_0_stallen;
+};
+
+struct reg_masks_shift {
+	unsigned int gld_trans_opcode_mask;
+	unsigned int gld_trans_opcode_shift;
+	unsigned int gld_error_type_mask;
+	unsigned int gld_error_type_shift;
+	unsigned int gld_len1_mask;
+	unsigned int gld_len1_shift;
+	unsigned int gld_trans_sourceid_mask;
+	unsigned int gld_trans_sourceid_shift;
+	unsigned int gld_trans_targetid_mask;
+	unsigned int gld_trans_targetid_shift;
+	unsigned int gld_errlog_error;
+	unsigned int gld_errlog5_error_type_mask;
+	unsigned int gld_errlog5_error_type_shift;
+	unsigned int gld_ace_port_parity_mask;
+	unsigned int gld_ace_port_parity_shift;
+	unsigned int gld_ace_port_disconnect_mask;
+	unsigned int gld_ace_port_disconnect_shift;
+	unsigned int gld_ace_port_directory_mask;
+	unsigned int gld_ace_port_directory_shift;
+	unsigned int gld_index_parity_mask;
+	unsigned int gld_index_parity_shift;
+	unsigned int obs_trans_opcode_mask;
+	unsigned int obs_trans_opcode_shift;
+	unsigned int obs_error_type_mask;
+	unsigned int obs_error_type_shift;
+	unsigned int obs_len1_mask;
+	unsigned int obs_len1_shift;
+};
+
+struct msm_gladiator_data {
+	void __iomem *gladiator_virt_base;
+	int erp_irq;
+	struct notifier_block pm_notifier_block;
+	struct clk *qdss_clk;
+	struct reg_off *reg_offs;
+	struct reg_masks_shift *reg_masks_shifts;
+	bool glad_v2;
+	bool glad_v3;
+};
+
+static int enable_panic_on_error;
+module_param(enable_panic_on_error, int, 0000);
+
+enum gld_trans_opcode {
+	GLD_RD,
+	GLD_RDX,
+	GLD_RDL,
+	GLD_RESERVED,
+	GLD_WR,
+	GLD_WRC,
+	GLD_PRE,
+};
+
+enum obs_trans_opcode {
+	OBS_RD,
+	OBS_RDW,
+	OBS_RDL,
+	OBS_RDX,
+	OBS_WR,
+	OBS_WRW,
+	OBS_WRC,
+	OBS_RESERVED,
+	OBS_PRE,
+	OBS_URG,
+};
+
+enum obs_err_code {
+	OBS_SLV,
+	OBS_DEC,
+	OBS_UNS,
+	OBS_DISC,
+	OBS_SEC,
+	OBS_HIDE,
+	OBS_TMO,
+	OBS_RSV,
+};
+
+enum err_log {
+	ID_COREID,
+	ID_REVISIONID,
+	FAULTEN,
+	ERRVLD,
+	ERRCLR,
+	ERR_LOG0,
+	ERR_LOG1,
+	ERR_LOG2,
+	ERR_LOG3,
+	ERR_LOG4,
+	ERR_LOG5,
+	ERR_LOG6,
+	ERR_LOG7,
+	ERR_LOG8,
+	STALLEN,
+	MAX_NUM,
+};
+
+enum type_logger_error {
+	DATA_TRANSFER_ERROR,
+	DVM_ERROR,
+	TX_ERROR,
+	TXR_ERROR,
+	DISCONNECT_ERROR,
+	DIRECTORY_ERROR,
+	PARITY_ERROR,
+};
+
+static void clear_gladiator_error(void __iomem *gladiator_virt_base,
+				struct reg_off *offs)
+{
+	writel_relaxed(1, gladiator_virt_base + offs->gladiator_errclr);
+	writel_relaxed(1, gladiator_virt_base + offs->observer_0_errclr);
+}
+
+static inline void print_gld_transaction(unsigned int opc)
+{
+	switch (opc) {
+	case GLD_RD:
+		pr_alert("Transaction type: READ\n");
+		break;
+	case GLD_RDX:
+		pr_alert("Transaction type: EXCLUSIVE READ\n");
+		break;
+	case GLD_RDL:
+		pr_alert("Transaction type: LINKED READ\n");
+		break;
+	case GLD_WR:
+		pr_alert("Transaction type: WRITE\n");
+		break;
+	case GLD_WRC:
+		pr_alert("Transaction type: CONDITIONAL WRITE\n");
+		break;
+	case GLD_PRE:
+		pr_alert("Transaction: Preamble packet of linked sequence\n");
+		break;
+	default:
+		pr_alert("Transaction type: Unknown; value:%u\n", opc);
+	}
+}
+
+static inline void print_gld_errtype(unsigned int errtype)
+{
+	if (errtype == 0)
+		pr_alert("Error type: Snoop data transfer\n");
+	else if (errtype == 1)
+		pr_alert("Error type: DVM error\n");
+	else if (errtype == 3)
+		pr_alert("Error type: Disconnect, directory, or parity error\n");
+	else
+		pr_alert("Error type: Unknown; value:%u\n", errtype);
+}
+
+static void decode_gld_errlog0(u32 err_reg,
+			struct reg_masks_shift *mask_shifts)
+{
+	unsigned int opc, errtype, len1;
+
+	opc = (err_reg & mask_shifts->gld_trans_opcode_mask) >>
+					mask_shifts->gld_trans_opcode_shift;
+	errtype = (err_reg & mask_shifts->gld_error_type_mask) >>
+					mask_shifts->gld_error_type_shift;
+	len1 = (err_reg & mask_shifts->gld_len1_mask) >>
+					mask_shifts->gld_len1_shift;
+
+	print_gld_transaction(opc);
+	print_gld_errtype(errtype);
+	pr_alert("number of payload bytes: %d\n", len1 + 1);
+}
+
+static void decode_gld_errlog1(u32 err_reg,
+			struct reg_masks_shift *mask_shifts)
+{
+	if ((err_reg & mask_shifts->gld_errlog_error) ==
+					mask_shifts->gld_errlog_error)
+		pr_alert("Transaction issued on IO target generic interface\n");
+	else
+		pr_alert("Transaction source ID: %d\n",
+				(err_reg & mask_shifts->gld_trans_sourceid_mask)
+				>> mask_shifts->gld_trans_sourceid_shift);
+}
+
+static void decode_gld_errlog2(u32 err_reg,
+			struct reg_masks_shift *mask_shifts)
+{
+	if ((err_reg & mask_shifts->gld_errlog_error) ==
+					mask_shifts->gld_errlog_error)
+		pr_alert("Error response coming from: external DVM network\n");
+	else
+		pr_alert("Error response coming from: Target ID: %d\n",
+				(err_reg & mask_shifts->gld_trans_targetid_mask)
+				>> mask_shifts->gld_trans_targetid_shift);
+}
+
+static void decode_ace_port_index(u32 type, u32 error,
+			struct reg_masks_shift *mask_shifts)
+{
+	unsigned int port;
+
+	switch (type) {
+	case DISCONNECT_ERROR:
+		port = (error & mask_shifts->gld_ace_port_disconnect_mask)
+			>> mask_shifts->gld_ace_port_disconnect_shift;
+		pr_alert("ACE port index: %d\n", port);
+		break;
+	case DIRECTORY_ERROR:
+		port = (error & mask_shifts->gld_ace_port_directory_mask)
+			>> mask_shifts->gld_ace_port_directory_shift;
+		pr_alert("ACE port index: %d\n", port);
+		break;
+	case PARITY_ERROR:
+		port = (error & mask_shifts->gld_ace_port_parity_mask)
+			>> mask_shifts->gld_ace_port_parity_shift;
+		pr_alert("ACE port index: %d\n", port);
+	}
+}
+
+static void decode_index_parity(u32 error, struct reg_masks_shift *mask_shifts)
+{
+	pr_alert("Index: %d\n",
+			(error & mask_shifts->gld_index_parity_mask)
+			>> mask_shifts->gld_index_parity_shift);
+}
+
+static void decode_gld_logged_error(u32 err_reg5,
+				struct reg_masks_shift *mask_shifts)
+{
+	unsigned int log_err_type, i, value;
+
+	log_err_type = (err_reg5 & mask_shifts->gld_errlog5_error_type_mask)
+		>> mask_shifts->gld_errlog5_error_type_shift;
+	for (i = 0 ; i <= 6 ; i++) {
+		value = log_err_type & 0x1;
+		switch (i) {
+		case DATA_TRANSFER_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: Data transfer error\n");
+			break;
+		case DVM_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: DVM error\n");
+			break;
+		case TX_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: Tx error\n");
+			break;
+		case TXR_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: TxR error\n");
+			break;
+		case DISCONNECT_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: Disconnect error\n");
+			decode_ace_port_index(
+					DISCONNECT_ERROR,
+					err_reg5,
+					mask_shifts);
+			break;
+		case DIRECTORY_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: Directory error\n");
+			decode_ace_port_index(
+					DIRECTORY_ERROR,
+					err_reg5,
+					mask_shifts);
+			break;
+		case PARITY_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: Parity error\n");
+			decode_ace_port_index(PARITY_ERROR, err_reg5,
+					mask_shifts);
+			decode_index_parity(err_reg5, mask_shifts);
+			break;
+		}
+		log_err_type = log_err_type >> 1;
+	}
+}
+
+static void decode_gld_errlog(u32 err_reg, unsigned int err_log,
+				struct msm_gladiator_data *msm_gld_data)
+{
+	switch (err_log) {
+	case ERR_LOG0:
+		decode_gld_errlog0(err_reg, msm_gld_data->reg_masks_shifts);
+		break;
+	case ERR_LOG1:
+		decode_gld_errlog1(err_reg, msm_gld_data->reg_masks_shifts);
+		break;
+	case ERR_LOG2:
+		decode_gld_errlog2(err_reg, msm_gld_data->reg_masks_shifts);
+		break;
+	case ERR_LOG3:
+		pr_alert("Lower 32-bits of error address: %08x\n", err_reg);
+		break;
+	case ERR_LOG4:
+		pr_alert("Upper 32-bits of error address: %08x\n", err_reg);
+		break;
+	case ERR_LOG5:
+		pr_alert("Lower 32-bits of user: %08x\n", err_reg);
+		break;
+	case ERR_LOG6:
+		pr_alert("Mid 32-bits (63-32) of user: %08x\n", err_reg);
+		break;
+	case ERR_LOG7:
+		break;
+	case ERR_LOG8:
+		pr_alert("Upper 32-bits (95-64) of user: %08x\n", err_reg);
+		break;
+	default:
+		pr_alert("Invalid error register; reg num:%u\n", err_log);
+	}
+}
+
+static inline void print_obs_transaction(unsigned int opc)
+{
+	switch (opc) {
+	case OBS_RD:
+		pr_alert("Transaction type: READ\n");
+		break;
+	case OBS_RDW:
+		pr_alert("Transaction type: WRAPPED READ\n");
+		break;
+	case OBS_RDL:
+		pr_alert("Transaction type: LINKED READ\n");
+		break;
+	case OBS_RDX:
+		pr_alert("Transaction type: EXCLUSIVE READ\n");
+		break;
+	case OBS_WR:
+		pr_alert("Transaction type: WRITE\n");
+		break;
+	case OBS_WRW:
+		pr_alert("Transaction type: WRAPPED WRITE\n");
+		break;
+	case OBS_WRC:
+		pr_alert("Transaction type: CONDITIONAL WRITE\n");
+		break;
+	case OBS_PRE:
+		pr_alert("Transaction: Preamble packet of linked sequence\n");
+		break;
+	case OBS_URG:
+		pr_alert("Transaction type: Urgency Packet\n");
+		break;
+	default:
+		pr_alert("Transaction type: Unknown; value:%u\n", opc);
+	}
+}
+
+static inline void print_obs_errcode(unsigned int errcode)
+{
+	switch (errcode) {
+	case OBS_SLV:
+		pr_alert("Error code: Target error detected by slave\n");
+		pr_alert("Source: Target\n");
+		break;
+	case OBS_DEC:
+		pr_alert("Error code: Address decode error\n");
+		pr_alert("Source: Initiator NIU\n");
+		break;
+	case OBS_UNS:
+		pr_alert("Error code: Unsupported request\n");
+		pr_alert("Source: Target NIU\n");
+		break;
+	case OBS_DISC:
+		pr_alert("Error code: Disconnected target or domain\n");
+		pr_alert("Source: Power Disconnect\n");
+		break;
+	case OBS_SEC:
+		pr_alert("Error code: Security violation\n");
+		pr_alert("Source: Initiator NIU or Firewall\n");
+		break;
+	case OBS_HIDE:
+		pr_alert("Error: Hidden security violation, reported as OK\n");
+		pr_alert("Source: Firewall\n");
+		break;
+	case OBS_TMO:
+		pr_alert("Error code: Time-out\n");
+		pr_alert("Source: Target NIU\n");
+		break;
+	default:
+		pr_alert("Error code: Unknown; code:%u\n", errcode);
+	}
+}
+
+static void decode_obs_errlog0(u32 err_reg,
+			struct reg_masks_shift *mask_shifts)
+{
+	unsigned int opc, errcode;
+
+	opc = (err_reg & mask_shifts->obs_trans_opcode_mask) >>
+				mask_shifts->obs_trans_opcode_shift;
+	errcode = (err_reg & mask_shifts->obs_error_type_mask) >>
+				mask_shifts->obs_error_type_shift;
+
+	print_obs_transaction(opc);
+	print_obs_errcode(errcode);
+}
+
+static void decode_obs_errlog0_len(u32 err_reg,
+				struct reg_masks_shift *mask_shifts)
+{
+	unsigned int len1;
+
+	len1 = (err_reg & mask_shifts->obs_len1_mask) >>
+				mask_shifts->obs_len1_shift;
+	pr_alert("number of payload bytes: %d\n", len1 + 1);
+}
+
+static void decode_obs_errlog(u32 err_reg, unsigned int err_log,
+		struct msm_gladiator_data *msm_gld_data)
+{
+	switch (err_log) {
+	case ERR_LOG0:
+		decode_obs_errlog0(err_reg, msm_gld_data->reg_masks_shifts);
+		decode_obs_errlog0_len(err_reg, msm_gld_data->reg_masks_shifts);
+		break;
+	case ERR_LOG1:
+		pr_alert("RouteId of the error: %08x\n", err_reg);
+		break;
+	case ERR_LOG2:
+		/* reserved error log register */
+		break;
+	case ERR_LOG3:
+		pr_alert("Lower 32-bits of error address: %08x\n", err_reg);
+		break;
+	case ERR_LOG4:
+		pr_alert("Upper 12-bits of error address: %08x\n", err_reg);
+		break;
+	case ERR_LOG5:
+		pr_alert("Lower 13-bits of user: %08x\n", err_reg);
+		break;
+	case ERR_LOG6:
+		/* reserved error log register */
+		break;
+	case ERR_LOG7:
+		pr_alert("Security field of the logged error: %08x\n", err_reg);
+		break;
+	case ERR_LOG8:
+		/* reserved error log register */
+		break;
+	case STALLEN:
+		pr_alert("stall mode of the error logger: %08x\n",
+				err_reg & 0x1);
+		break;
+	default:
+		pr_alert("Invalid error register; reg num:%u\n", err_log);
+	}
+}
+
+static void decode_obs_errlog_v3(u32 err_reg, unsigned int err_log,
+		struct msm_gladiator_data *msm_gld_data)
+{
+	switch (err_log) {
+	case ERR_LOG0:
+		decode_obs_errlog0(err_reg, msm_gld_data->reg_masks_shifts);
+		break;
+	case ERR_LOG1:
+		decode_obs_errlog0_len(err_reg, msm_gld_data->reg_masks_shifts);
+		break;
+	case ERR_LOG2:
+		pr_alert("Path of the error: %08x\n", err_reg);
+		break;
+	case ERR_LOG3:
+		pr_alert("ExtID of the error: %08x\n", err_reg);
+		break;
+	case ERR_LOG4:
+		pr_alert("ERRLOG2_LSB: %08x\n", err_reg);
+		break;
+	case ERR_LOG5:
+		pr_alert("ERRLOG2_MSB: %08x\n", err_reg);
+		break;
+	case ERR_LOG6:
+		pr_alert("ERRLOG3_LSB: %08x\n", err_reg);
+		break;
+	case ERR_LOG7:
+		pr_alert("ERRLOG3_MSB: %08x\n", err_reg);
+		break;
+	case FAULTEN:
+		pr_alert("stall mode of the error logger: %08x\n",
+				err_reg & 0x3);
+		break;
+	default:
+		pr_alert("Invalid error register; reg num:%u\n", err_log);
+	}
+}
+
+static u32 get_gld_offset(unsigned int err_log, struct reg_off *offs)
+{
+	u32 offset = 0;
+
+	switch (err_log) {
+	case FAULTEN:
+		offset = offs->gladiator_faulten;
+		break;
+	case ERRVLD:
+		offset = offs->gladiator_errvld;
+		break;
+	case ERRCLR:
+		offset = offs->gladiator_errclr;
+		break;
+	case ERR_LOG0:
+		offset = offs->gladiator_errlog0;
+		break;
+	case ERR_LOG1:
+		offset = offs->gladiator_errlog1;
+		break;
+	case ERR_LOG2:
+		offset = offs->gladiator_errlog2;
+		break;
+	case ERR_LOG3:
+		offset = offs->gladiator_errlog3;
+		break;
+	case ERR_LOG4:
+		offset = offs->gladiator_errlog4;
+		break;
+	case ERR_LOG5:
+		offset = offs->gladiator_errlog5;
+		break;
+	case ERR_LOG6:
+		offset = offs->gladiator_errlog6;
+		break;
+	case ERR_LOG7:
+		offset = offs->gladiator_errlog7;
+		break;
+	case ERR_LOG8:
+		offset = offs->gladiator_errlog8;
+		break;
+	default:
+		pr_alert("Invalid gladiator error register; reg num:%u\n",
+				err_log);
+	}
+	return offset;
+}
+
+static u32 get_obs_offset(unsigned int err_log, struct reg_off *offs)
+{
+	u32 offset = 0;
+
+	switch (err_log) {
+	case FAULTEN:
+		offset = offs->observer_0_faulten;
+		break;
+	case ERRVLD:
+		offset = offs->observer_0_errvld;
+		break;
+	case ERRCLR:
+		offset = offs->observer_0_errclr;
+		break;
+	case ERR_LOG0:
+		offset = offs->observer_0_errlog0;
+		break;
+	case ERR_LOG1:
+		offset = offs->observer_0_errlog1;
+		break;
+	case ERR_LOG2:
+		offset = offs->observer_0_errlog2;
+		break;
+	case ERR_LOG3:
+		offset = offs->observer_0_errlog3;
+		break;
+	case ERR_LOG4:
+		offset = offs->observer_0_errlog4;
+		break;
+	case ERR_LOG5:
+		offset = offs->observer_0_errlog5;
+		break;
+	case ERR_LOG6:
+		offset = offs->observer_0_errlog6;
+		break;
+	case ERR_LOG7:
+		offset = offs->observer_0_errlog7;
+		break;
+	case ERR_LOG8:
+		offset = offs->observer_0_errlog8;
+		break;
+	case STALLEN:
+		offset = offs->observer_0_stallen;
+		break;
+	default:
+		pr_alert("Invalid observer error register; reg num:%u\n",
+				err_log);
+	}
+	return offset;
+}
+
+static void decode_gld_errlog5(struct msm_gladiator_data *msm_gld_data)
+{
+	unsigned int errtype;
+	u32 err_reg0, err_reg5;
+	struct reg_masks_shift *mask_shifts = msm_gld_data->reg_masks_shifts;
+
+	err_reg0 = readl_relaxed(msm_gld_data->gladiator_virt_base +
+			get_gld_offset(ERR_LOG0, msm_gld_data->reg_offs));
+	err_reg5 = readl_relaxed(msm_gld_data->gladiator_virt_base +
+			get_gld_offset(ERR_LOG5, msm_gld_data->reg_offs));
+
+	errtype = (err_reg0 & mask_shifts->gld_error_type_mask) >>
+			mask_shifts->gld_error_type_shift;
+	if (errtype == 3)
+		decode_gld_logged_error(err_reg5, mask_shifts);
+	else if (errtype == 0 || errtype == 1)
+		pr_alert("Lower 32-bits of user: %08x\n", err_reg5);
+	else
+		pr_alert("Error type: Unknown; value:%u\n", errtype);
+}
+
+static void dump_gld_err_regs(struct msm_gladiator_data *msm_gld_data,
+			unsigned int err_buf[MAX_NUM])
+{
+	unsigned int err_log;
+	unsigned int start = FAULTEN;
+	unsigned int end = ERR_LOG8;
+
+	pr_alert("Main log register data:\n");
+	for (err_log = start; err_log <= end; err_log++) {
+		err_buf[err_log] = readl_relaxed(
+				msm_gld_data->gladiator_virt_base +
+				get_gld_offset(err_log,
+					msm_gld_data->reg_offs));
+		pr_alert("%08x ", err_buf[err_log]);
+	}
+}
+
+static void dump_obsrv_err_regs(struct msm_gladiator_data *msm_gld_data,
+			unsigned int err_buf[MAX_NUM])
+{
+	unsigned int err_log;
+	unsigned int start = ID_COREID;
+	unsigned int end = STALLEN;
+
+	if (msm_gld_data->glad_v2) {
+		start = ID_COREID;
+		end = STALLEN;
+	} else if (msm_gld_data->glad_v3) {
+		start = FAULTEN;
+		end = ERR_LOG7;
+	}
+
+	pr_alert("Observer log register data:\n");
+	for (err_log = start; err_log <= end; err_log++) {
+		err_buf[err_log] = readl_relaxed(
+				msm_gld_data->gladiator_virt_base +
+				get_obs_offset(
+					err_log,
+					msm_gld_data->reg_offs)
+				);
+		pr_alert("%08x ", err_buf[err_log]);
+	}
+}
+
+static void parse_gld_err_regs(struct msm_gladiator_data *msm_gld_data,
+			unsigned int err_buf[MAX_NUM])
+{
+	unsigned int err_log;
+
+	pr_alert("Main error log register data:\n");
+	for (err_log = ERR_LOG0; err_log <= ERR_LOG8; err_log++) {
+		/* skip log register 7 as it is reserved */
+		if (err_log == ERR_LOG7)
+			continue;
+		if (err_log == ERR_LOG5) {
+			decode_gld_errlog5(msm_gld_data);
+			continue;
+		}
+		decode_gld_errlog(err_buf[err_log], err_log,
+				msm_gld_data);
+	}
+}
+
+static void parse_obsrv_err_regs(struct msm_gladiator_data *msm_gld_data,
+			unsigned int err_buf[MAX_NUM])
+{
+	unsigned int err_log;
+
+	pr_alert("Observer error log register data:\n");
+	if (msm_gld_data->glad_v2) {
+		for (err_log = ERR_LOG0; err_log <= STALLEN; err_log++)	{
+			/* skip log registers 2, 6 and 8 as they are reserved */
+			if ((err_log == ERR_LOG2) || (err_log == ERR_LOG6)
+					|| (err_log == ERR_LOG8))
+				continue;
+			decode_obs_errlog(err_buf[err_log], err_log,
+					msm_gld_data);
+		}
+	} else if (msm_gld_data->glad_v3) {
+		decode_obs_errlog_v3(err_buf[STALLEN], STALLEN,
+					msm_gld_data);
+		for (err_log = ERR_LOG0; err_log <= ERR_LOG7; err_log++) {
+			decode_obs_errlog_v3(err_buf[err_log], err_log,
+					msm_gld_data);
+		}
+	}
+}
+
+static irqreturn_t msm_gladiator_isr(int irq, void *dev_id)
+{
+	unsigned int gld_err_buf[MAX_NUM], obs_err_buf[MAX_NUM];
+
+	struct msm_gladiator_data *msm_gld_data = dev_id;
+
+	/* Check validity */
+	bool gld_err_valid = readl_relaxed(msm_gld_data->gladiator_virt_base +
+			msm_gld_data->reg_offs->gladiator_errvld);
+
+	bool obsrv_err_valid = readl_relaxed(
+			msm_gld_data->gladiator_virt_base +
+			msm_gld_data->reg_offs->observer_0_errvld);
+
+	if (!gld_err_valid && !obsrv_err_valid) {
+		pr_err("%s: Invalid Gladiator error reported, clearing it\n",
+				__func__);
+		/* Clear IRQ */
+		clear_gladiator_error(msm_gld_data->gladiator_virt_base,
+					msm_gld_data->reg_offs);
+		return IRQ_HANDLED;
+	}
+	pr_alert("Gladiator Error Detected:\n");
+	if (gld_err_valid)
+		dump_gld_err_regs(msm_gld_data, gld_err_buf);
+
+	if (obsrv_err_valid)
+		dump_obsrv_err_regs(msm_gld_data, obs_err_buf);
+
+	if (gld_err_valid)
+		parse_gld_err_regs(msm_gld_data, gld_err_buf);
+
+	if (obsrv_err_valid)
+		parse_obsrv_err_regs(msm_gld_data, obs_err_buf);
+
+	/* Clear IRQ */
+	clear_gladiator_error(msm_gld_data->gladiator_virt_base,
+				msm_gld_data->reg_offs);
+	if (enable_panic_on_error)
+		panic("Gladiator Cache Interconnect Error Detected!\n");
+	else
+		WARN(1, "Gladiator Cache Interconnect Error Detected\n");
+
+	return IRQ_HANDLED;
+}
+
+static const struct of_device_id gladiator_erp_match_table[] = {
+	{ .compatible = "qcom,msm-gladiator-v2" },
+	{ .compatible = "qcom,msm-gladiator-v3" },
+	{},
+};
+
+static int parse_dt_node(struct platform_device *pdev,
+		struct msm_gladiator_data *msm_gld_data)
+{
+	int ret = 0;
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev,
+			IORESOURCE_MEM, "gladiator_base");
+	if (!res)
+		return -ENODEV;
+	if (!devm_request_mem_region(&pdev->dev, res->start,
+				resource_size(res),
+				"msm-gladiator-erp")) {
+		dev_err(&pdev->dev, "%s: cannot reserve gladiator erp region\n",
+				__func__);
+		return -ENXIO;
+	}
+	msm_gld_data->gladiator_virt_base = devm_ioremap(&pdev->dev,
+			res->start, resource_size(res));
+	if (!msm_gld_data->gladiator_virt_base) {
+		dev_err(&pdev->dev, "%s: cannot map gladiator register space\n",
+				__func__);
+		return -ENXIO;
+	}
+	msm_gld_data->erp_irq = platform_get_irq(pdev, 0);
+	if (msm_gld_data->erp_irq < 0)
+		return msm_gld_data->erp_irq;
+
+	/* clear existing errors before enabling the interrupt */
+	clear_gladiator_error(msm_gld_data->gladiator_virt_base,
+			msm_gld_data->reg_offs);
+	ret = devm_request_irq(&pdev->dev, msm_gld_data->erp_irq,
+			msm_gladiator_isr, IRQF_TRIGGER_HIGH,
+			"gladiator-error", msm_gld_data);
+	if (ret)
+		dev_err(&pdev->dev, "Failed to register irq handler\n");
+
+	return ret;
+}
+
+static inline void gladiator_irq_init(void __iomem *gladiator_virt_base,
+				struct reg_off *offs)
+{
+	writel_relaxed(1, gladiator_virt_base + offs->gladiator_faulten);
+	writel_relaxed(1, gladiator_virt_base + offs->observer_0_faulten);
+}
+
+#define CCI_LEVEL 2
+static int gladiator_erp_pm_callback(struct notifier_block *nb,
+		unsigned long val, void *data)
+{
+	unsigned int level = (unsigned long) data;
+	struct msm_gladiator_data *msm_gld_data = container_of(nb,
+			struct msm_gladiator_data, pm_notifier_block);
+
+	if (level != CCI_LEVEL)
+		return NOTIFY_DONE;
+
+	switch (val) {
+	case CPU_CLUSTER_PM_EXIT:
+		gladiator_irq_init(msm_gld_data->gladiator_virt_base,
+				msm_gld_data->reg_offs);
+		break;
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+static void init_offsets_and_masks_v2(struct msm_gladiator_data *msm_gld_data)
+{
+	msm_gld_data->reg_offs->gladiator_id_coreid		= 0x0;
+	msm_gld_data->reg_offs->gladiator_id_revisionid		= 0x4;
+	msm_gld_data->reg_offs->gladiator_faulten		= 0x1010;
+	msm_gld_data->reg_offs->gladiator_errvld		= 0x1014;
+	msm_gld_data->reg_offs->gladiator_errclr		= 0x1018;
+	msm_gld_data->reg_offs->gladiator_errlog0		= 0x101C;
+	msm_gld_data->reg_offs->gladiator_errlog1		= 0x1020;
+	msm_gld_data->reg_offs->gladiator_errlog2		= 0x1024;
+	msm_gld_data->reg_offs->gladiator_errlog3		= 0x1028;
+	msm_gld_data->reg_offs->gladiator_errlog4		= 0x102C;
+	msm_gld_data->reg_offs->gladiator_errlog5		= 0x1030;
+	msm_gld_data->reg_offs->gladiator_errlog6		= 0x1034;
+	msm_gld_data->reg_offs->gladiator_errlog7		= 0x1038;
+	msm_gld_data->reg_offs->gladiator_errlog8		= 0x103C;
+	msm_gld_data->reg_offs->observer_0_id_coreid		= 0x8000;
+	msm_gld_data->reg_offs->observer_0_id_revisionid	= 0x8004;
+	msm_gld_data->reg_offs->observer_0_faulten		= 0x8008;
+	msm_gld_data->reg_offs->observer_0_errvld		= 0x800C;
+	msm_gld_data->reg_offs->observer_0_errclr		= 0x8010;
+	msm_gld_data->reg_offs->observer_0_errlog0		= 0x8014;
+	msm_gld_data->reg_offs->observer_0_errlog1		= 0x8018;
+	msm_gld_data->reg_offs->observer_0_errlog2		= 0x801C;
+	msm_gld_data->reg_offs->observer_0_errlog3		= 0x8020;
+	msm_gld_data->reg_offs->observer_0_errlog4		= 0x8024;
+	msm_gld_data->reg_offs->observer_0_errlog5		= 0x8028;
+	msm_gld_data->reg_offs->observer_0_errlog6		= 0x802C;
+	msm_gld_data->reg_offs->observer_0_errlog7		= 0x8030;
+	msm_gld_data->reg_offs->observer_0_errlog8		= 0x8034;
+	msm_gld_data->reg_offs->observer_0_stallen		= 0x8038;
+
+	msm_gld_data->reg_masks_shifts->gld_trans_opcode_mask = 0xE;
+	msm_gld_data->reg_masks_shifts->gld_trans_opcode_shift = 1;
+	msm_gld_data->reg_masks_shifts->gld_error_type_mask = 0x700;
+	msm_gld_data->reg_masks_shifts->gld_error_type_shift = 8;
+	msm_gld_data->reg_masks_shifts->gld_len1_mask = 0xFFF;
+	msm_gld_data->reg_masks_shifts->gld_len1_shift = 16;
+	msm_gld_data->reg_masks_shifts->gld_trans_sourceid_mask = 0x7;
+	msm_gld_data->reg_masks_shifts->gld_trans_sourceid_shift = 0;
+	msm_gld_data->reg_masks_shifts->gld_trans_targetid_mask = 0x7;
+	msm_gld_data->reg_masks_shifts->gld_trans_targetid_shift = 0;
+	msm_gld_data->reg_masks_shifts->gld_errlog_error = 0x7;
+	msm_gld_data->reg_masks_shifts->gld_errlog5_error_type_mask =
+								0xFF000000;
+	msm_gld_data->reg_masks_shifts->gld_errlog5_error_type_shift = 24;
+	msm_gld_data->reg_masks_shifts->gld_ace_port_parity_mask = 0xc000;
+	msm_gld_data->reg_masks_shifts->gld_ace_port_parity_shift = 14;
+	msm_gld_data->reg_masks_shifts->gld_ace_port_disconnect_mask = 0xf0000;
+	msm_gld_data->reg_masks_shifts->gld_ace_port_disconnect_shift = 16;
+	msm_gld_data->reg_masks_shifts->gld_ace_port_directory_mask = 0xf00000;
+	msm_gld_data->reg_masks_shifts->gld_ace_port_directory_shift = 20;
+	msm_gld_data->reg_masks_shifts->gld_index_parity_mask = 0x1FFF;
+	msm_gld_data->reg_masks_shifts->gld_index_parity_shift = 0;
+	msm_gld_data->reg_masks_shifts->obs_trans_opcode_mask = 0x1E;
+	msm_gld_data->reg_masks_shifts->obs_trans_opcode_shift = 1;
+	msm_gld_data->reg_masks_shifts->obs_error_type_mask = 0x700;
+	msm_gld_data->reg_masks_shifts->obs_error_type_shift = 8;
+	msm_gld_data->reg_masks_shifts->obs_len1_mask = 0x7F0;
+	msm_gld_data->reg_masks_shifts->obs_len1_shift = 16;
+}
+
+static void init_offsets_and_masks_v3(struct msm_gladiator_data *msm_gld_data)
+{
+	msm_gld_data->reg_offs->gladiator_id_coreid	= 0x0;
+	msm_gld_data->reg_offs->gladiator_id_revisionid = 0x4;
+	msm_gld_data->reg_offs->gladiator_faulten	= 0x1010;
+	msm_gld_data->reg_offs->gladiator_errvld	= 0x1014;
+	msm_gld_data->reg_offs->gladiator_errclr	= 0x1018;
+	msm_gld_data->reg_offs->gladiator_errlog0	= 0x101C;
+	msm_gld_data->reg_offs->gladiator_errlog1	= 0x1020;
+	msm_gld_data->reg_offs->gladiator_errlog2	= 0x1024;
+	msm_gld_data->reg_offs->gladiator_errlog3	= 0x1028;
+	msm_gld_data->reg_offs->gladiator_errlog4	= 0x102C;
+	msm_gld_data->reg_offs->gladiator_errlog5	= 0x1030;
+	msm_gld_data->reg_offs->gladiator_errlog6	= 0x1034;
+	msm_gld_data->reg_offs->gladiator_errlog7	= 0x1038;
+	msm_gld_data->reg_offs->gladiator_errlog8	= 0x103C;
+	msm_gld_data->reg_offs->observer_0_id_coreid	= INVALID_NUM;
+	msm_gld_data->reg_offs->observer_0_id_revisionid = INVALID_NUM;
+	msm_gld_data->reg_offs->observer_0_faulten	= 0x2008;
+	msm_gld_data->reg_offs->observer_0_errvld	= 0x2010;
+	msm_gld_data->reg_offs->observer_0_errclr	= 0x2018;
+	msm_gld_data->reg_offs->observer_0_errlog0	= 0x2020;
+	msm_gld_data->reg_offs->observer_0_errlog1	= 0x2024;
+	msm_gld_data->reg_offs->observer_0_errlog2	= 0x2028;
+	msm_gld_data->reg_offs->observer_0_errlog3	= 0x202C;
+	msm_gld_data->reg_offs->observer_0_errlog4	= 0x2030;
+	msm_gld_data->reg_offs->observer_0_errlog5	= 0x2034;
+	msm_gld_data->reg_offs->observer_0_errlog6	= 0x2038;
+	msm_gld_data->reg_offs->observer_0_errlog7	= 0x203C;
+	msm_gld_data->reg_offs->observer_0_errlog8	= INVALID_NUM;
+	msm_gld_data->reg_offs->observer_0_stallen	= INVALID_NUM;
+
+	msm_gld_data->reg_masks_shifts->gld_trans_opcode_mask = 0xE;
+	msm_gld_data->reg_masks_shifts->gld_trans_opcode_shift = 1;
+	msm_gld_data->reg_masks_shifts->gld_error_type_mask = 0x700;
+	msm_gld_data->reg_masks_shifts->gld_error_type_shift = 8;
+	msm_gld_data->reg_masks_shifts->gld_len1_mask = 0xFFF0000;
+	msm_gld_data->reg_masks_shifts->gld_len1_shift = 16;
+	msm_gld_data->reg_masks_shifts->gld_trans_sourceid_mask = 0x7;
+	msm_gld_data->reg_masks_shifts->gld_trans_sourceid_shift = 0;
+	msm_gld_data->reg_masks_shifts->gld_trans_targetid_mask = 0x7;
+	msm_gld_data->reg_masks_shifts->gld_trans_targetid_shift = 0;
+	msm_gld_data->reg_masks_shifts->gld_errlog_error = 0x7;
+	msm_gld_data->reg_masks_shifts->gld_errlog5_error_type_mask =
+								0xFF000000;
+	msm_gld_data->reg_masks_shifts->gld_errlog5_error_type_shift = 24;
+	msm_gld_data->reg_masks_shifts->gld_ace_port_parity_mask = 0xc000;
+	msm_gld_data->reg_masks_shifts->gld_ace_port_parity_shift = 14;
+	msm_gld_data->reg_masks_shifts->gld_ace_port_disconnect_mask = 0xf0000;
+	msm_gld_data->reg_masks_shifts->gld_ace_port_disconnect_shift = 16;
+	msm_gld_data->reg_masks_shifts->gld_ace_port_directory_mask = 0xf00000;
+	msm_gld_data->reg_masks_shifts->gld_ace_port_directory_shift = 20;
+	msm_gld_data->reg_masks_shifts->gld_index_parity_mask = 0x1FFF;
+	msm_gld_data->reg_masks_shifts->gld_index_parity_shift = 0;
+	msm_gld_data->reg_masks_shifts->obs_trans_opcode_mask = 0x70;
+	msm_gld_data->reg_masks_shifts->obs_trans_opcode_shift = 4;
+	msm_gld_data->reg_masks_shifts->obs_error_type_mask = 0x700;
+	msm_gld_data->reg_masks_shifts->obs_error_type_shift = 8;
+	msm_gld_data->reg_masks_shifts->obs_len1_mask = 0x1FF;
+	msm_gld_data->reg_masks_shifts->obs_len1_shift = 0;
+}
+
+static int gladiator_erp_probe(struct platform_device *pdev)
+{
+	int ret = -1;
+	struct msm_gladiator_data *msm_gld_data;
+
+	msm_gld_data = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_gladiator_data), GFP_KERNEL);
+	if (!msm_gld_data) {
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	msm_gld_data->reg_offs = devm_kzalloc(&pdev->dev,
+			sizeof(struct reg_off), GFP_KERNEL);
+	msm_gld_data->reg_masks_shifts = devm_kzalloc(&pdev->dev,
+			sizeof(struct reg_masks_shift), GFP_KERNEL);
+
+	if (!msm_gld_data->reg_offs || !msm_gld_data->reg_masks_shifts) {
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	msm_gld_data->glad_v2 = of_device_is_compatible(pdev->dev.of_node,
+					"qcom,msm-gladiator-v2");
+	msm_gld_data->glad_v3 = of_device_is_compatible(pdev->dev.of_node,
+					"qcom,msm-gladiator-v3");
+
+	if (msm_gld_data->glad_v2)
+		init_offsets_and_masks_v2(msm_gld_data);
+	else if (msm_gld_data->glad_v3)
+		init_offsets_and_masks_v3(msm_gld_data);
+
+	if (msm_gld_data->glad_v2) {
+		if (of_property_match_string(pdev->dev.of_node,
+					"clock-names", "atb_clk") >= 0) {
+			msm_gld_data->qdss_clk = devm_clk_get(&pdev->dev,
+								"atb_clk");
+			if (IS_ERR(msm_gld_data->qdss_clk)) {
+				dev_err(&pdev->dev, "Failed to get QDSS ATB clock\n");
+				goto bail;
+			}
+		} else {
+			dev_err(&pdev->dev, "No matching string of QDSS ATB clock\n");
+			goto bail;
+		}
+
+		ret = clk_prepare_enable(msm_gld_data->qdss_clk);
+		if (ret)
+			goto bail;
+	}
+
+	ret = parse_dt_node(pdev, msm_gld_data);
+	if (ret)
+		goto err_atb_clk;
+	msm_gld_data->pm_notifier_block.notifier_call =
+		gladiator_erp_pm_callback;
+
+	gladiator_irq_init(msm_gld_data->gladiator_virt_base,
+			msm_gld_data->reg_offs);
+	platform_set_drvdata(pdev, msm_gld_data);
+	cpu_pm_register_notifier(&msm_gld_data->pm_notifier_block);
+#ifdef CONFIG_PANIC_ON_GLADIATOR_ERROR
+	enable_panic_on_error = 1;
+#endif
+	dev_info(&pdev->dev, "MSM Gladiator Error Reporting Initialized\n");
+	return ret;
+
+err_atb_clk:
+	clk_disable_unprepare(msm_gld_data->qdss_clk);
+
+bail:
+	dev_err(&pdev->dev, "Probe failed, bailing out\n");
+	return ret;
+}
+
+static int gladiator_erp_remove(struct platform_device *pdev)
+{
+	struct msm_gladiator_data *msm_gld_data = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+	cpu_pm_unregister_notifier(&msm_gld_data->pm_notifier_block);
+	clk_disable_unprepare(msm_gld_data->qdss_clk);
+	return 0;
+}
+
+static struct platform_driver gladiator_erp_driver = {
+	.probe = gladiator_erp_probe,
+	.remove = gladiator_erp_remove,
+	.driver = {
+		.name = MODULE_NAME,
+		.of_match_table = gladiator_erp_match_table,
+	},
+};
+
+static int __init init_gladiator_erp(void)
+{
+	int ret;
+
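+	/* Gladiator error reporting is only available on secure devices */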
+	ret = scm_is_secure_device();
+	if (ret == 0) {
+		pr_info("Gladiator Error Reporting not available\n");
+		return -ENODEV;
+	}
+
+	return platform_driver_register(&gladiator_erp_driver);
+}
+module_init(init_gladiator_erp);
+
+static void __exit exit_gladiator_erp(void)
+{
+	return platform_driver_unregister(&gladiator_erp_driver);
+}
+module_exit(exit_gladiator_erp);
+
+MODULE_DESCRIPTION("Gladiator Error Reporting");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/glink_probe.c b/drivers/soc/qcom/glink_probe.c
index 678efeb..622e642 100644
--- a/drivers/soc/qcom/glink_probe.c
+++ b/drivers/soc/qcom/glink_probe.c
@@ -68,6 +68,8 @@ struct glink_ssr {
 
 	u32 seq_num;
 	struct completion completion;
+	struct work_struct unreg_work;
+	struct kref refcount;
 };
 
 struct edge_info {
@@ -85,6 +87,31 @@ struct edge_info {
 };
 LIST_HEAD(edge_infos);
 
+static void glink_ssr_release(struct kref *ref)
+{
+	struct glink_ssr *ssr = container_of(ref, struct glink_ssr,
+					     refcount);
+	struct glink_ssr_nb *nb, *tmp;
+
+	list_for_each_entry_safe(nb, tmp, &ssr->notify_list, list)
+		kfree(nb);
+
+	kfree(ssr);
+}
+
+static void glink_ssr_ssr_unreg_work(struct work_struct *work)
+{
+	struct glink_ssr *ssr = container_of(work, struct glink_ssr,
+					     unreg_work);
+	struct glink_ssr_nb *nb, *tmp;
+
+	list_for_each_entry_safe(nb, tmp, &ssr->notify_list, list) {
+		subsys_notif_unregister_notifier(nb->ssr_register_handle,
+						 &nb->nb);
+	}
+	kref_put(&ssr->refcount, glink_ssr_release);
+}
+
 static int glink_ssr_ssr_cb(struct notifier_block *this,
 			    unsigned long code, void *data)
 {
@@ -94,6 +121,11 @@ static int glink_ssr_ssr_cb(struct notifier_block *this,
 	struct do_cleanup_msg msg;
 	int ret;
 
+	if (!dev || !ssr->ept)
+		return NOTIFY_DONE;
+
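+	/* Hold a reference on ssr for the duration of the callback */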
+	kref_get(&ssr->refcount);
+
 	if (code == SUBSYS_AFTER_SHUTDOWN) {
 		ssr->seq_num++;
 		reinit_completion(&ssr->completion);
@@ -112,6 +144,7 @@ static int glink_ssr_ssr_cb(struct notifier_block *this,
 		if (ret) {
 			GLINK_ERR(dev, "fail to send do cleanup to %s %d\n",
 				  nb->ssr_label, ret);
+			kref_put(&ssr->refcount, glink_ssr_release);
 			return NOTIFY_DONE;
 		}
 
@@ -119,6 +152,7 @@ static int glink_ssr_ssr_cb(struct notifier_block *this,
 		if (!ret)
 			GLINK_ERR(dev, "timeout waiting for cleanup resp\n");
 	}
+	kref_put(&ssr->refcount, glink_ssr_release);
 	return NOTIFY_DONE;
 }
 
@@ -169,7 +203,7 @@ static void glink_ssr_init_notify(struct glink_ssr *ssr)
 		if (!node)
 			break;
 
-		nb = devm_kzalloc(dev, sizeof(*nb), GFP_KERNEL);
+		nb = kzalloc(sizeof(*nb), GFP_KERNEL);
 		if (!nb)
 			return;
 
@@ -182,6 +216,7 @@ static void glink_ssr_init_notify(struct glink_ssr *ssr)
 		if (ret < 0) {
 			GLINK_ERR(dev, "no qcom,glink-label for %s\n",
 				  nb->ssr_label);
+			kfree(nb);
 			continue;
 		}
 
@@ -193,6 +228,7 @@ static void glink_ssr_init_notify(struct glink_ssr *ssr)
 		if (IS_ERR_OR_NULL(handle)) {
 			GLINK_ERR(dev, "register fail for %s SSR notifier\n",
 				  nb->ssr_label);
+			kfree(nb);
 			continue;
 		}
 
@@ -205,12 +241,14 @@ static int glink_ssr_probe(struct rpmsg_device *rpdev)
 {
 	struct glink_ssr *ssr;
 
-	ssr = devm_kzalloc(&rpdev->dev, sizeof(*ssr), GFP_KERNEL);
+	ssr = kzalloc(sizeof(*ssr), GFP_KERNEL);
 	if (!ssr)
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&ssr->notify_list);
 	init_completion(&ssr->completion);
+	INIT_WORK(&ssr->unreg_work, glink_ssr_ssr_unreg_work);
+	kref_init(&ssr->refcount);
 
 	ssr->dev = &rpdev->dev;
 	ssr->ept = rpdev->ept;
@@ -225,14 +263,12 @@ static int glink_ssr_probe(struct rpmsg_device *rpdev)
 static void glink_ssr_remove(struct rpmsg_device *rpdev)
 {
 	struct glink_ssr *ssr = dev_get_drvdata(&rpdev->dev);
-	struct glink_ssr_nb *nb;
 
-	list_for_each_entry(nb, &ssr->notify_list, list) {
-		subsys_notif_unregister_notifier(nb->ssr_register_handle,
-						 &nb->nb);
-	}
-
+	ssr->dev = NULL;
+	ssr->ept = NULL;
 	dev_set_drvdata(&rpdev->dev, NULL);
+
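+	/* Defer unregistration to a worker; the final kref_put frees ssr */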
+	schedule_work(&ssr->unreg_work);
 }
 
 static const struct rpmsg_device_id glink_ssr_match[] = {
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 5f8a6ac..c1e399a 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -83,6 +83,7 @@ static struct icnss_vreg_info icnss_vreg_info[] = {
 	{NULL, "vdd-cx-mx", 752000, 752000, 0, 0, false},
 	{NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
 	{NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
+	{NULL, "vdd-3.3-ch1", 3312000, 3312000, 0, 0, false},
 	{NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
 };
 
@@ -593,8 +594,7 @@ static irqreturn_t fw_crash_indication_handler(int irq, void *ctx)
 		set_bit(ICNSS_FW_DOWN, &priv->state);
 		icnss_ignore_fw_timeout(true);
 
-		if (test_bit(ICNSS_FW_READY, &priv->state) &&
-		    !test_bit(ICNSS_DRIVER_UNLOADING, &priv->state)) {
+		if (test_bit(ICNSS_FW_READY, &priv->state)) {
 			fw_down_data.crashed = true;
 			icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN,
 						 &fw_down_data);
@@ -997,7 +997,6 @@ static int icnss_pd_restart_complete(struct icnss_priv *priv)
 	icnss_call_driver_shutdown(priv);
 
 	clear_bit(ICNSS_PDR, &priv->state);
-	clear_bit(ICNSS_MODEM_CRASHED, &priv->state);
 	clear_bit(ICNSS_REJUVENATE, &priv->state);
 	clear_bit(ICNSS_PD_RESTART, &priv->state);
 	priv->early_crash_ind = false;
@@ -1261,9 +1260,8 @@ static int icnss_driver_event_idle_shutdown(void *data)
 	if (!penv->ops || !penv->ops->idle_shutdown)
 		return 0;
 
-	if (test_bit(ICNSS_MODEM_CRASHED, &penv->state) ||
-			test_bit(ICNSS_PDR, &penv->state) ||
-			test_bit(ICNSS_REJUVENATE, &penv->state)) {
+	if (penv->is_ssr || test_bit(ICNSS_PDR, &penv->state) ||
+	    test_bit(ICNSS_REJUVENATE, &penv->state)) {
 		icnss_pr_err("SSR/PDR is already in-progress during idle shutdown callback\n");
 		ret = -EBUSY;
 	} else {
@@ -1284,9 +1282,8 @@ static int icnss_driver_event_idle_restart(void *data)
 	if (!penv->ops || !penv->ops->idle_restart)
 		return 0;
 
-	if (test_bit(ICNSS_MODEM_CRASHED, &penv->state) ||
-			test_bit(ICNSS_PDR, &penv->state) ||
-			test_bit(ICNSS_REJUVENATE, &penv->state)) {
+	if (penv->is_ssr || test_bit(ICNSS_PDR, &penv->state) ||
+	    test_bit(ICNSS_REJUVENATE, &penv->state)) {
 		icnss_pr_err("SSR/PDR is already in-progress during idle restart callback\n");
 		ret = -EBUSY;
 	} else {
@@ -1391,6 +1388,35 @@ static int icnss_msa0_ramdump(struct icnss_priv *priv)
 	return do_ramdump(priv->msa0_dump_dev, &segment, 1);
 }
 
+static void icnss_update_state_send_modem_shutdown(struct icnss_priv *priv,
+							void *data)
+{
+	struct notif_data *notif = data;
+	int ret = 0;
+
+	if (!notif->crashed) {
+		if (atomic_read(&priv->is_shutdown)) {
+			atomic_set(&priv->is_shutdown, false);
+			if (!test_bit(ICNSS_PD_RESTART, &priv->state) &&
+				!test_bit(ICNSS_SHUTDOWN_DONE, &priv->state)) {
+				icnss_call_driver_remove(priv);
+			}
+		}
+
+		if (test_bit(ICNSS_BLOCK_SHUTDOWN, &priv->state)) {
+			if (!wait_for_completion_timeout(
+					&priv->unblock_shutdown,
+					msecs_to_jiffies(PROBE_TIMEOUT)))
+				icnss_pr_err("modem block shutdown timeout\n");
+		}
+
+		ret = wlfw_send_modem_shutdown_msg(priv);
+		if (ret < 0)
+			icnss_pr_err("Fail to send modem shutdown Indication %d\n",
+				     ret);
+	}
+}
+
 static int icnss_modem_notifier_nb(struct notifier_block *nb,
 				  unsigned long code,
 				  void *data)
@@ -1400,7 +1426,6 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
 	struct icnss_priv *priv = container_of(nb, struct icnss_priv,
 					       modem_ssr_nb);
 	struct icnss_uevent_fw_down_data fw_down_data;
-	int ret = 0;
 
 	icnss_pr_vdbg("Modem-Notify: event %lu\n", code);
 
@@ -1416,39 +1441,14 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
 
 	priv->is_ssr = true;
 
-	if (notif->crashed)
-		set_bit(ICNSS_MODEM_CRASHED, &priv->state);
-
-	if (code == SUBSYS_BEFORE_SHUTDOWN && !notif->crashed &&
-	    atomic_read(&priv->is_shutdown)) {
-		atomic_set(&priv->is_shutdown, false);
-		if (!test_bit(ICNSS_PD_RESTART, &priv->state) &&
-		    !test_bit(ICNSS_SHUTDOWN_DONE, &priv->state)) {
-			icnss_call_driver_remove(priv);
-		}
-	}
-
-	if (code == SUBSYS_BEFORE_SHUTDOWN && !notif->crashed &&
-	    test_bit(ICNSS_BLOCK_SHUTDOWN, &priv->state)) {
-		if (!wait_for_completion_timeout(&priv->unblock_shutdown,
-				msecs_to_jiffies(PROBE_TIMEOUT)))
-			icnss_pr_err("modem block shutdown timeout\n");
-	}
-
-	if (code == SUBSYS_BEFORE_SHUTDOWN && !notif->crashed) {
-		ret = wlfw_send_modem_shutdown_msg(priv);
-		if (ret < 0)
-			icnss_pr_err("Fail to send modem shutdown Indication %d\n",
-				     ret);
-	}
+	icnss_update_state_send_modem_shutdown(priv, data);
 
 	if (test_bit(ICNSS_PDR_REGISTERED, &priv->state)) {
 		set_bit(ICNSS_FW_DOWN, &priv->state);
 		icnss_ignore_fw_timeout(true);
 
 		fw_down_data.crashed = !!notif->crashed;
-		if (test_bit(ICNSS_FW_READY, &priv->state) &&
-		    !test_bit(ICNSS_DRIVER_UNLOADING, &priv->state))
+		if (test_bit(ICNSS_FW_READY, &priv->state))
 			icnss_call_driver_uevent(priv,
 						 ICNSS_UEVENT_FW_DOWN,
 						 &fw_down_data);
@@ -1475,8 +1475,7 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
 	event_data->crashed = notif->crashed;
 
 	fw_down_data.crashed = !!notif->crashed;
-	if (test_bit(ICNSS_FW_READY, &priv->state) &&
-	    !test_bit(ICNSS_DRIVER_UNLOADING, &priv->state))
+	if (test_bit(ICNSS_FW_READY, &priv->state))
 		icnss_call_driver_uevent(priv,
 					 ICNSS_UEVENT_FW_DOWN,
 					 &fw_down_data);
@@ -1598,8 +1597,7 @@ static int icnss_service_notifier_notify(struct notifier_block *nb,
 		icnss_ignore_fw_timeout(true);
 
 		fw_down_data.crashed = event_data->crashed;
-		if (test_bit(ICNSS_FW_READY, &priv->state) &&
-		    !test_bit(ICNSS_DRIVER_UNLOADING, &priv->state))
+		if (test_bit(ICNSS_FW_READY, &priv->state))
 			icnss_call_driver_uevent(priv,
 						 ICNSS_UEVENT_FW_DOWN,
 						 &fw_down_data);
@@ -1623,6 +1621,8 @@ static int icnss_get_service_location_notify(struct notifier_block *nb,
 	int curr_state;
 	int ret;
 	int i;
+	int j;
+	bool duplicate;
 	struct service_notifier_context *notifier;
 
 	icnss_pr_dbg("Get service notify opcode: %lu, state: 0x%lx\n", opcode,
@@ -1648,6 +1648,16 @@ static int icnss_get_service_location_notify(struct notifier_block *nb,
 	priv->service_notifier_nb.notifier_call = icnss_service_notifier_notify;
 
 	for (i = 0; i < pd->total_domains; i++) {
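+		/* Skip this domain if the same name appears again later */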
+		duplicate = false;
+		for (j = i + 1; j < pd->total_domains; j++) {
+			if (!strcmp(pd->domain_list[i].name,
+			    pd->domain_list[j].name))
+				duplicate = true;
+		}
+
+		if (duplicate)
+			continue;
+
 		icnss_pr_dbg("%d: domain_name: %s, instance_id: %d\n", i,
 			     pd->domain_list[i].name,
 			     pd->domain_list[i].instance_id);
@@ -2264,9 +2274,8 @@ int icnss_idle_shutdown(struct device *dev)
 		return -EINVAL;
 	}
 
-	if (test_bit(ICNSS_MODEM_CRASHED, &priv->state) ||
-			test_bit(ICNSS_PDR, &priv->state) ||
-			test_bit(ICNSS_REJUVENATE, &penv->state)) {
+	if (priv->is_ssr || test_bit(ICNSS_PDR, &priv->state) ||
+	    test_bit(ICNSS_REJUVENATE, &priv->state)) {
 		icnss_pr_err("SSR/PDR is already in-progress during idle shutdown\n");
 		return -EBUSY;
 	}
@@ -2285,9 +2294,8 @@ int icnss_idle_restart(struct device *dev)
 		return -EINVAL;
 	}
 
-	if (test_bit(ICNSS_MODEM_CRASHED, &priv->state) ||
-			test_bit(ICNSS_PDR, &priv->state) ||
-			test_bit(ICNSS_REJUVENATE, &penv->state)) {
+	if (priv->is_ssr || test_bit(ICNSS_PDR, &priv->state) ||
+	    test_bit(ICNSS_REJUVENATE, &priv->state)) {
 		icnss_pr_err("SSR/PDR is already in-progress during idle restart\n");
 		return -EBUSY;
 	}
@@ -2711,9 +2719,6 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv)
 			continue;
 		case ICNSS_PDR:
 			seq_puts(s, "PDR TRIGGERED");
-			continue;
-		case ICNSS_MODEM_CRASHED:
-			seq_puts(s, "MODEM CRASHED");
 		}
 
 		seq_printf(s, "UNKNOWN-%d", i);
@@ -2990,23 +2995,16 @@ static int icnss_regread_show(struct seq_file *s, void *data)
 	return 0;
 }
 
-static ssize_t icnss_regread_write(struct file *fp, const char __user *user_buf,
-				size_t count, loff_t *off)
+static ssize_t icnss_reg_parse(const char __user *user_buf, size_t count,
+			       struct icnss_reg_info *reg_info_ptr)
 {
-	struct icnss_priv *priv =
-		((struct seq_file *)fp->private_data)->private;
-	char buf[64];
-	char *sptr, *token;
-	unsigned int len = 0;
-	uint32_t reg_offset, mem_type;
-	uint32_t data_len = 0;
-	uint8_t *reg_buf = NULL;
+	char buf[64] = {0};
+	char *sptr = NULL, *token = NULL;
 	const char *delim = " ";
-	int ret = 0;
+	unsigned int len = 0;
 
-	if (!test_bit(ICNSS_FW_READY, &priv->state) ||
-	    !test_bit(ICNSS_POWER_ON, &priv->state))
-		return -EINVAL;
+	if (!user_buf)
+		return -EFAULT;
 
 	len = min(count, sizeof(buf) - 1);
 	if (copy_from_user(buf, user_buf, len))
@@ -3022,7 +3020,7 @@ static ssize_t icnss_regread_write(struct file *fp, const char __user *user_buf,
 	if (!sptr)
 		return -EINVAL;
 
-	if (kstrtou32(token, 0, &mem_type))
+	if (kstrtou32(token, 0, &reg_info_ptr->mem_type))
 		return -EINVAL;
 
 	token = strsep(&sptr, delim);
@@ -3032,32 +3030,53 @@ static ssize_t icnss_regread_write(struct file *fp, const char __user *user_buf,
 	if (!sptr)
 		return -EINVAL;
 
-	if (kstrtou32(token, 0, &reg_offset))
+	if (kstrtou32(token, 0, &reg_info_ptr->reg_offset))
 		return -EINVAL;
 
 	token = strsep(&sptr, delim);
 	if (!token)
 		return -EINVAL;
 
-	if (kstrtou32(token, 0, &data_len))
+	if (kstrtou32(token, 0, &reg_info_ptr->data_len))
 		return -EINVAL;
 
-	if (data_len == 0 ||
-	    data_len > WLFW_MAX_DATA_SIZE)
+	if (reg_info_ptr->data_len == 0 ||
+	    reg_info_ptr->data_len > WLFW_MAX_DATA_SIZE)
 		return -EINVAL;
 
+	return 0;
+}
+
+static ssize_t icnss_regread_write(struct file *fp, const char __user *user_buf,
+				   size_t count, loff_t *off)
+{
+	struct icnss_priv *priv =
+		((struct seq_file *)fp->private_data)->private;
+	uint8_t *reg_buf = NULL;
+	int ret = 0;
+	struct icnss_reg_info reg_info;
+
+	if (!test_bit(ICNSS_FW_READY, &priv->state) ||
+	    !test_bit(ICNSS_POWER_ON, &priv->state))
+		return -EINVAL;
+
+	ret = icnss_reg_parse(user_buf, count, &reg_info);
+	if (ret)
+		return ret;
+
 	mutex_lock(&priv->dev_lock);
 	kfree(priv->diag_reg_read_buf);
 	priv->diag_reg_read_buf = NULL;
 
-	reg_buf = kzalloc(data_len, GFP_KERNEL);
+	reg_buf = kzalloc(reg_info.data_len, GFP_KERNEL);
 	if (!reg_buf) {
 		mutex_unlock(&priv->dev_lock);
 		return -ENOMEM;
 	}
 
-	ret = wlfw_athdiag_read_send_sync_msg(priv, reg_offset,
-					      mem_type, data_len,
+	ret = wlfw_athdiag_read_send_sync_msg(priv, reg_info.reg_offset,
+					      reg_info.mem_type,
+					      reg_info.data_len,
 					      reg_buf);
 	if (ret) {
 		kfree(reg_buf);
@@ -3065,9 +3084,9 @@ static ssize_t icnss_regread_write(struct file *fp, const char __user *user_buf,
 		return ret;
 	}
 
-	priv->diag_reg_read_addr = reg_offset;
-	priv->diag_reg_read_mem_type = mem_type;
-	priv->diag_reg_read_len = data_len;
+	priv->diag_reg_read_addr = reg_info.reg_offset;
+	priv->diag_reg_read_mem_type = reg_info.mem_type;
+	priv->diag_reg_read_len = reg_info.data_len;
 	priv->diag_reg_read_buf = reg_buf;
 	mutex_unlock(&priv->dev_lock);
 
@@ -3210,37 +3229,12 @@ static int icnss_get_vbatt_info(struct icnss_priv *priv)
 	return 0;
 }
 
-static int icnss_probe(struct platform_device *pdev)
+static int icnss_resource_parse(struct icnss_priv *priv)
 {
-	int ret = 0;
-	struct resource *res;
-	int i;
+	int ret = 0, i = 0;
+	struct platform_device *pdev = priv->pdev;
 	struct device *dev = &pdev->dev;
-	struct icnss_priv *priv;
-	const __be32 *addrp;
-	u64 prop_size = 0;
-	struct device_node *np;
-	u32 addr_win[2];
-
-	if (penv) {
-		icnss_pr_err("Driver is already initialized\n");
-		return -EEXIST;
-	}
-
-	icnss_pr_dbg("Platform driver probe\n");
-
-	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	priv->magic = ICNSS_MAGIC;
-	dev_set_drvdata(dev, priv);
-
-	priv->pdev = pdev;
-
-	priv->vreg_info = icnss_vreg_info;
-
-	icnss_allow_recursive_recovery(dev);
+	struct resource *res;
 
 	if (of_property_read_bool(pdev->dev.of_node, "qcom,icnss-adc_tm")) {
 		ret = icnss_get_vbatt_info(priv);
@@ -3293,6 +3287,21 @@ static int icnss_probe(struct platform_device *pdev)
 		}
 	}
 
+	return 0;
+
+out:
+	return ret;
+}
+
+static int icnss_msa_dt_parse(struct icnss_priv *priv)
+{
+	int ret = 0;
+	struct platform_device *pdev = priv->pdev;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = NULL;
+	u64 prop_size = 0;
+	const __be32 *addrp = NULL;
+
 	np = of_parse_phandle(dev->of_node,
 			      "qcom,wlan-msa-fixed-region", 0);
 	if (np) {
@@ -3341,6 +3350,20 @@ static int icnss_probe(struct platform_device *pdev)
 	icnss_pr_dbg("MSA pa: %pa, MSA va: 0x%pK MSA Memory Size: 0x%x\n",
 		     &priv->msa_pa, (void *)priv->msa_va, priv->msa_mem_size);
 
+	return 0;
+
+out:
+	return ret;
+}
+
+static int icnss_smmu_dt_parse(struct icnss_priv *priv)
+{
+	int ret = 0;
+	struct platform_device *pdev = priv->pdev;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	u32 addr_win[2];
+
 	ret = of_property_read_u32_array(dev->of_node,
 					 "qcom,iommu-dma-addr-pool",
 					 addr_win,
@@ -3366,6 +3389,47 @@ static int icnss_probe(struct platform_device *pdev)
 		}
 	}
 
+	return 0;
+}
+
+static int icnss_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct device *dev = &pdev->dev;
+	struct icnss_priv *priv;
+
+	if (penv) {
+		icnss_pr_err("Driver is already initialized\n");
+		return -EEXIST;
+	}
+
+	icnss_pr_dbg("Platform driver probe\n");
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->magic = ICNSS_MAGIC;
+	dev_set_drvdata(dev, priv);
+
+	priv->pdev = pdev;
+
+	priv->vreg_info = icnss_vreg_info;
+
+	icnss_allow_recursive_recovery(dev);
+
+	ret = icnss_resource_parse(priv);
+	if (ret)
+		goto out;
+
+	ret = icnss_msa_dt_parse(priv);
+	if (ret)
+		goto out;
+
+	ret = icnss_smmu_dt_parse(priv);
+	if (ret)
+		goto out;
+
 	spin_lock_init(&priv->event_lock);
 	spin_lock_init(&priv->on_off_lock);
 	mutex_init(&priv->dev_lock);
diff --git a/drivers/soc/qcom/icnss_private.h b/drivers/soc/qcom/icnss_private.h
index ba44305..1c425d1 100644
--- a/drivers/soc/qcom/icnss_private.h
+++ b/drivers/soc/qcom/icnss_private.h
@@ -157,7 +157,6 @@ enum icnss_driver_state {
 	ICNSS_MODE_ON,
 	ICNSS_BLOCK_SHUTDOWN,
 	ICNSS_PDR,
-	ICNSS_MODEM_CRASHED,
 };
 
 struct ce_irq_list {
@@ -353,6 +352,12 @@ struct icnss_priv {
 
 };
 
+struct icnss_reg_info {
+	uint32_t mem_type;
+	uint32_t reg_offset;
+	uint32_t data_len;
+};
+
 int icnss_call_driver_uevent(struct icnss_priv *priv,
 				    enum icnss_uevent uevent, void *data);
 int icnss_driver_event_post(enum icnss_driver_event_type type,
diff --git a/drivers/soc/qcom/icnss_qmi.c b/drivers/soc/qcom/icnss_qmi.c
index 2bcf3d2..55e0eea 100644
--- a/drivers/soc/qcom/icnss_qmi.c
+++ b/drivers/soc/qcom/icnss_qmi.c
@@ -133,7 +133,7 @@ int wlfw_msa_mem_info_send_sync_msg(struct icnss_priv *priv)
 	for (i = 0; i < resp->mem_region_info_len; i++) {
 
 		if (resp->mem_region_info[i].size > priv->msa_mem_size ||
-		    resp->mem_region_info[i].region_addr > max_mapped_addr ||
+		    resp->mem_region_info[i].region_addr >= max_mapped_addr ||
 		    resp->mem_region_info[i].region_addr < priv->msa_pa ||
 		    resp->mem_region_info[i].size +
 		    resp->mem_region_info[i].region_addr > max_mapped_addr) {
diff --git a/drivers/soc/qcom/llcc-lito.c b/drivers/soc/qcom/llcc-lito.c
index 9919159..e03053c 100644
--- a/drivers/soc/qcom/llcc-lito.c
+++ b/drivers/soc/qcom/llcc-lito.c
@@ -51,16 +51,16 @@
 	}
 
 static struct llcc_slice_config lito_data[] =  {
-	SCT_ENTRY(LLCC_CPUSS,    1, 1024, 1, 1, 0x0FF, 0x0, 0, 0, 0, 0, 1, 1),
-	SCT_ENTRY(LLCC_AUDIO,    6, 1024, 1, 1, 0x0FF, 0x0, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_MDM,      8, 512, 2, 0, 0x0FF, 0x0, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_GPUHTW,   11, 256, 1, 1, 0x0FF, 0x0, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_GPU,      12, 256, 1, 1, 0x0FF, 0x0, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_DISP,     16, 1024, 1, 1, 0x0FF, 0x0, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_MDMPNG,   21, 1024, 1, 1, 0x0FF, 0x0, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_AUDHW,    22, 1024, 1, 1, 0x0FF, 0x0, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_NPU,      23, 512, 2, 1, 0x0,  0xF00, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_MODEMVPE, 29, 128, 1, 1, 0x0FF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_CPUSS,    1, 1536, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 1),
+	SCT_ENTRY(LLCC_AUDIO,    6, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_MDM,      8, 512, 2, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_GPUHTW,   11, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_GPU,      12, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_DISP,     16, 1536, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_MDMPNG,   21, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_AUDHW,    22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_NPU,      23, 512, 2, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
+	SCT_ENTRY(LLCC_MODEMVPE, 29, 128, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
 };
 
 static int lito_qcom_llcc_probe(struct platform_device *pdev)
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c
index f2780b91..a933f37 100644
--- a/drivers/soc/qcom/llcc-slice.c
+++ b/drivers/soc/qcom/llcc-slice.c
@@ -240,28 +240,12 @@ static int qcom_llcc_cfg_program(struct platform_device *pdev)
 	struct llcc_slice_desc desc;
 	bool cap_based_alloc_and_pwr_collapse =
 		drv_data->cap_based_alloc_and_pwr_collapse;
-	uint32_t mask = ~0;
 	int v2_ver = of_device_is_compatible(pdev->dev.of_node,
 							 "qcom,llcc-v2");
 
 	sz = drv_data->cfg_size;
 	llcc_table = drv_data->cfg;
 
-	/* Disable the Cache as Non-Cache override and enable
-	 * the Non-Cache as Cache override
-	 */
-	if (v2_ver) {
-		ret  = regmap_write(drv_data->bcast_regmap,
-						 LLCC_TRP_C_AS_NC, 0);
-		if (ret)
-			return ret;
-
-		ret = regmap_write(drv_data->bcast_regmap,
-						 LLCC_TRP_NC_AS_C, mask);
-		if (ret)
-			return ret;
-	}
-
 	for (i = 0; i < sz; i++) {
 		attr1_cfg = LLCC_TRP_ATTR1_CFGn(llcc_table[i].slice_id);
 		attr0_cfg = LLCC_TRP_ATTR0_CFGn(llcc_table[i].slice_id);
diff --git a/drivers/soc/qcom/mem-offline.c b/drivers/soc/qcom/mem-offline.c
index 82f0717..aefe453 100644
--- a/drivers/soc/qcom/mem-offline.c
+++ b/drivers/soc/qcom/mem-offline.c
@@ -14,6 +14,7 @@
 #include <linux/kobject.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
+#include <linux/bootmem.h>
 #include <linux/mailbox_client.h>
 #include <linux/mailbox/qmp.h>
 #include <asm/tlbflush.h>
@@ -383,7 +384,7 @@ static int mem_online_remaining_blocks(void)
 	start_section_nr = pfn_to_section_nr(memblock_end_pfn);
 	end_section_nr = pfn_to_section_nr(ram_end_pfn);
 
-	if (start_section_nr >= end_section_nr) {
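+	/* No movable memory was set aside if DRAM already extends to the bootloader limit */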
+	if (memblock_end_of_DRAM() >= bootloader_memory_limit) {
 		pr_info("mem-offline: System booted with no zone movable memory blocks. Cannot perform memory offlining\n");
 		return -EINVAL;
 	}
@@ -407,6 +408,8 @@ static int mem_online_remaining_blocks(void)
 			fail = 1;
 		}
 	}
+
+	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
 	return fail;
 }
 
diff --git a/drivers/soc/qcom/memory_dump_v2.c b/drivers/soc/qcom/memory_dump_v2.c
index 459ebea..ab63799 100644
--- a/drivers/soc/qcom/memory_dump_v2.c
+++ b/drivers/soc/qcom/memory_dump_v2.c
@@ -13,6 +13,8 @@
 #include <linux/of_address.h>
 #include <soc/qcom/minidump.h>
 #include <soc/qcom/memory_dump.h>
+#include <soc/qcom/qtee_shmbridge.h>
+#include <soc/qcom/secure_buffer.h>
 #include <soc/qcom/scm.h>
 #include <linux/of_device.h>
 #include <linux/dma-mapping.h>
@@ -22,6 +24,37 @@
 
 #define SCM_CMD_DEBUG_LAR_UNLOCK	0x4
 
+#define CPUSS_REGDUMP			0xEF
+
+#define INPUT_DATA_BY_HLOS		0x00C0FFEE
+#define FORMAT_VERSION_1		0x1
+#define CORE_REG_NUM_DEFAULT		0x1
+
+#define MAGIC_INDEX			0
+#define FORMAT_VERSION_INDEX		1
+#define SYS_REG_INPUT_INDEX		2
+#define OUTPUT_DUMP_INDEX		3
+#define PERCORE_INDEX			4
+#define SYSTEM_REGS_INPUT_INDEX	5
+
+struct cpuss_dump_data {
+	void *dump_vaddr;
+	u32 size;
+	u32 core_reg_num;
+	u32 core_reg_used_num;
+	u32 core_reg_end_index;
+	u32 sys_reg_size;
+	u32 used_memory;
+	struct mutex mutex;
+};
+
+struct reg_dump_data {
+	uint32_t magic;
+	uint32_t version;
+	uint32_t system_regs_input_index;
+	uint32_t regdump_output_byte_offset;
+};
+
 struct msm_dump_table {
 	uint32_t version;
 	uint32_t num_entries;
@@ -35,6 +68,388 @@ struct msm_memory_dump {
 
 static struct msm_memory_dump memdump;
 
+/**
+ * update_reg_dump_table - update the register dump table
+ * @dev: device whose drvdata holds the cpuss dump data
+ * @core_reg_num: the number of per-core registers
+ *
+ * This function calculates system_regs_input_index and
+ * regdump_output_byte_offset to store into the dump memory.
+ * It also updates members of cpudata by the parameter core_reg_num.
+ *
+ * Returns 0 on success, or -ENOMEM if there is not enough memory.
+ */
+static int update_reg_dump_table(struct device *dev, u32 core_reg_num)
+{
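+	/* Each per-core register entry may occupy two words: address + length */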
+	int ret = 0;
+	u32 system_regs_input_index = SYSTEM_REGS_INPUT_INDEX +
+			core_reg_num * 2;
+	u32 regdump_output_byte_offset = (system_regs_input_index + 1)
+			* sizeof(uint32_t);
+	struct reg_dump_data *p;
+	struct cpuss_dump_data *cpudata = dev_get_drvdata(dev);
+
+	mutex_lock(&cpudata->mutex);
+
+	if (regdump_output_byte_offset >= cpudata->size ||
+			regdump_output_byte_offset / sizeof(uint32_t)
+			< system_regs_input_index + 1) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	cpudata->core_reg_num = core_reg_num;
+	cpudata->core_reg_used_num = 0;
+	cpudata->core_reg_end_index = PERCORE_INDEX;
+	cpudata->sys_reg_size = 0;
+	cpudata->used_memory = regdump_output_byte_offset;
+
+	memset(cpudata->dump_vaddr, 0xDE, cpudata->size);
+	p = (struct reg_dump_data *)cpudata->dump_vaddr;
+	p->magic = INPUT_DATA_BY_HLOS;
+	p->version = FORMAT_VERSION_1;
+	p->system_regs_input_index = system_regs_input_index;
+	p->regdump_output_byte_offset = regdump_output_byte_offset;
+	memset((uint32_t *)cpudata->dump_vaddr + PERCORE_INDEX, 0x0,
+			(system_regs_input_index - PERCORE_INDEX + 1)
+			* sizeof(uint32_t));
+
+err:
+	mutex_unlock(&cpudata->mutex);
+	return ret;
+}
+
+static ssize_t core_reg_num_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct cpuss_dump_data *cpudata = dev_get_drvdata(dev);
+
+	if (!cpudata)
+		return -EFAULT;
+
+	mutex_lock(&cpudata->mutex);
+
+	ret = scnprintf(buf, PAGE_SIZE, "%u\n", cpudata->core_reg_num);
+
+	mutex_unlock(&cpudata->mutex);
+	return ret;
+}
+
+static ssize_t core_reg_num_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t size)
+{
+	int ret;
+	unsigned int val;
+	struct cpuss_dump_data *cpudata = dev_get_drvdata(dev);
+
+	if (kstrtouint(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&cpudata->mutex);
+
+	if (cpudata->core_reg_used_num || cpudata->sys_reg_size) {
+		dev_err(dev, "Couldn't set core_reg_num, register available in list\n");
+		ret = -EPERM;
+		goto err;
+	}
+	if (val == cpudata->core_reg_num) {
+		ret = 0;
+		goto err;
+	}
+
+	mutex_unlock(&cpudata->mutex);
+
+	ret = update_reg_dump_table(dev, val);
+	if (ret) {
+		dev_err(dev, "Couldn't set core_reg_num, no enough memory\n");
+		return ret;
+	}
+
+	return size;
+
+err:
+	mutex_unlock(&cpudata->mutex);
+	return ret;
+}
+static DEVICE_ATTR_RW(core_reg_num);
+
+/*
+ * register_config_show - show the configs of per-core and system registers.
+ */
+static ssize_t register_config_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	char local_buf[64];
+	int len = 0, count = 0;
+	int index, system_index_start, index_end;
+	uint32_t register_offset, length_in_bytes;
+	uint32_t length_in_words;
+	uint32_t *p;
+	struct cpuss_dump_data *cpudata = dev_get_drvdata(dev);
+
+	buf[0] = '\0';
+
+	if (!cpudata)
+		return -EFAULT;
+
+	mutex_lock(&cpudata->mutex);
+
+	p = (uint32_t *)cpudata->dump_vaddr;
+
+	/* print per-core & system registers */
+	len = snprintf(local_buf, 64, "per-core registers:\n");
+	strlcat(buf, local_buf, PAGE_SIZE);
+	count += len;
+
+	system_index_start = *(p + SYS_REG_INPUT_INDEX);
+	index_end = system_index_start +
+			cpudata->sys_reg_size / sizeof(uint32_t) + 1;
+	for (index = PERCORE_INDEX; index < index_end;) {
+		if (index == system_index_start) {
+			len = snprintf(local_buf, 64, "system registers:\n");
+			if ((count + len) > PAGE_SIZE) {
+				dev_err(dev, "Couldn't write complete config\n");
+				break;
+			}
+
+			strlcat(buf, local_buf, PAGE_SIZE);
+			count += len;
+		}
+
+		register_offset = *(p + index);
+		if (register_offset == 0) {
+			index++;
+			continue;
+		}
+
+		if (register_offset & 0x3) {
+			length_in_words = register_offset & 0x3;
+			length_in_bytes = length_in_words << 2;
+			len = snprintf(local_buf, 64,
+				"Index: 0x%x, addr: 0x%x\n",
+				index, register_offset);
+			index++;
+		} else {
+			length_in_bytes = *(p + index + 1);
+			len = snprintf(local_buf, 64,
+				"Index: 0x%x, addr: 0x%x, length: 0x%x\n",
+				index, register_offset, length_in_bytes);
+			index += 2;
+		}
+
+		if ((count + len) > PAGE_SIZE) {
+			dev_err(dev, "Couldn't write complete config\n");
+			break;
+		}
+
+		strlcat(buf, local_buf, PAGE_SIZE);
+		count += len;
+	}
+
+	mutex_unlock(&cpudata->mutex);
+	return count;
+}
+
+/*
+ * register_config_store - set the config of a per-core or system register.
+ */
+static ssize_t register_config_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t size)
+{
+	int ret;
+	uint32_t register_offset, length_in_bytes, per_core = 0;
+	uint32_t length_in_words;
+	int nval;
+	uint32_t num_cores;
+	u32 extra_memory;
+	u32 used_memory;
+	u32 system_reg_end_index;
+	uint32_t *p;
+	struct cpuss_dump_data *cpudata = dev_get_drvdata(dev);
+
+	nval = sscanf(buf, "%x %x %u", &register_offset,
+				&length_in_bytes, &per_core);
+	if (nval != 2 && nval != 3)
+		return -EINVAL;
+	if (per_core > 1)
+		return -EINVAL;
+	if (register_offset & 0x3) {
+		dev_err(dev, "Invalid address, must be 4 byte aligned\n");
+		return -EINVAL;
+	}
+	if (length_in_bytes & 0x3) {
+		dev_err(dev, "Invalid length, must be 4 byte aligned\n");
+		return -EINVAL;
+	}
+	if (length_in_bytes == 0) {
+		dev_err(dev, "Invalid length of 0\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&cpudata->mutex);
+
+	p = (uint32_t *)cpudata->dump_vaddr;
+	length_in_words = length_in_bytes >> 2;
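+	/*
+	 * Offsets are 4-byte aligned, so a short length (1-3 words) is
+	 * packed into the low two bits of the offset word itself.
+	 */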
+	if (per_core) { /* per-core register */
+		if (cpudata->core_reg_used_num == cpudata->core_reg_num) {
+			dev_err(dev, "Couldn't add per-core config, out of range\n");
+			ret = -EINVAL;
+			goto err;
+		}
+
+		num_cores = num_possible_cpus();
+		extra_memory = length_in_bytes * num_cores;
+		used_memory = cpudata->used_memory + extra_memory;
+		if (extra_memory / num_cores < length_in_bytes ||
+				used_memory > cpudata->size ||
+				used_memory < cpudata->used_memory) {
+			dev_err(dev, "Couldn't add per-core reg config, no enough memory\n");
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		if (length_in_words > 3) {
+			*(p + cpudata->core_reg_end_index) = register_offset;
+			*(p + cpudata->core_reg_end_index + 1) =
+					length_in_bytes;
+			cpudata->core_reg_end_index += 2;
+		} else {
+			*(p + cpudata->core_reg_end_index) = register_offset |
+					length_in_words;
+			cpudata->core_reg_end_index++;
+		}
+
+		cpudata->core_reg_used_num++;
+		cpudata->used_memory = used_memory;
+	} else { /* system register */
+		system_reg_end_index = *(p + SYS_REG_INPUT_INDEX) +
+				cpudata->sys_reg_size / sizeof(uint32_t);
+
+		if (length_in_words > 3) {
+			extra_memory = sizeof(uint32_t) * 2 + length_in_bytes;
+			used_memory = cpudata->used_memory + extra_memory;
+			if (extra_memory < length_in_bytes ||
+					used_memory > cpudata->size ||
+					used_memory < cpudata->used_memory) {
+				dev_err(dev, "Couldn't add system reg config, no enough memory\n");
+				ret = -ENOMEM;
+				goto err;
+			}
+
+			*(p + system_reg_end_index) = register_offset;
+			*(p + system_reg_end_index + 1) = length_in_bytes;
+			system_reg_end_index += 2;
+			cpudata->sys_reg_size += sizeof(uint32_t) * 2;
+		} else {
+			extra_memory = sizeof(uint32_t) + length_in_bytes;
+			used_memory = cpudata->used_memory + extra_memory;
+			if (extra_memory < length_in_bytes ||
+					used_memory > cpudata->size ||
+					used_memory < cpudata->used_memory) {
+				dev_err(dev, "Couldn't add system reg config, no enough memory\n");
+				ret = -ENOMEM;
+				goto err;
+			}
+
+			*(p + system_reg_end_index) = register_offset |
+					length_in_words;
+			system_reg_end_index++;
+			cpudata->sys_reg_size += sizeof(uint32_t);
+		}
+
+		cpudata->used_memory = used_memory;
+
+		*(p + system_reg_end_index) = 0x0;
+		*(p + OUTPUT_DUMP_INDEX) = (system_reg_end_index + 1)
+				* sizeof(uint32_t);
+	}
+
+	ret = size;
+
+err:
+	mutex_unlock(&cpudata->mutex);
+	return ret;
+}
+static DEVICE_ATTR_RW(register_config);
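+
+/*
+ * Example usage from user space (sysfs paths are illustrative):
+ *   echo 8 > /sys/devices/.../core_reg_num          allow 8 per-core regs
+ *   echo "0xf000 0x8 1" > .../register_config       add a per-core register
+ *   echo "0xa000 0x10 0" > .../register_config      add a system register
+ */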
+
+/*
+ * register_reset_store - reset the register dump table.
+ */
+static ssize_t register_reset_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t size)
+{
+	unsigned int val;
+
+	if (kstrtouint(buf, 16, &val))
+		return -EINVAL;
+	if (val != 1)
+		return -EINVAL;
+
+	update_reg_dump_table(dev, CORE_REG_NUM_DEFAULT);
+
+	return size;
+}
+static DEVICE_ATTR_WO(register_reset);
+
+static const struct device_attribute *register_dump_attrs[] = {
+	&dev_attr_core_reg_num,
+	&dev_attr_register_config,
+	&dev_attr_register_reset,
+	NULL,
+};
+
+static int register_dump_create_files(struct device *dev,
+			const struct device_attribute **attrs)
+{
+	int ret = 0;
+	int i, j;
+
+	for (i = 0; attrs[i] != NULL; i++) {
+		ret = device_create_file(dev, attrs[i]);
+		if (ret) {
+			dev_err(dev, "Couldn't create sysfs attribute: %s\n",
+				attrs[i]->attr.name);
+			for (j = 0; j < i; j++)
+				device_remove_file(dev, attrs[j]);
+			break;
+		}
+	}
+	return ret;
+}
+
+static void cpuss_regdump_init(struct platform_device *pdev,
+		void *dump_vaddr, u32 size)
+{
+	struct cpuss_dump_data *cpudata = NULL;
+	int ret;
+
+	cpudata = devm_kzalloc(&pdev->dev,
+			sizeof(struct cpuss_dump_data), GFP_KERNEL);
+	if (!cpudata)
+		goto fail;
+
+	cpudata->dump_vaddr = dump_vaddr;
+	cpudata->size = size;
+
+	mutex_init(&cpudata->mutex);
+	ret = register_dump_create_files(&pdev->dev,
+			register_dump_attrs);
+	if (ret) {
+		devm_kfree(&pdev->dev, cpudata);
+		goto fail;
+	}
+
+	platform_set_drvdata(pdev, cpudata);
+
+	return;
+
+fail:
+	pr_err("Failed to initialize CPUSS regdump region\n");
+}
+
 uint32_t msm_dump_table_version(void)
 {
 	return MSM_DUMP_TABLE_VERSION;
@@ -167,7 +582,9 @@ int msm_dump_data_register_nominidump(enum msm_dump_table_ids id,
 }
 EXPORT_SYMBOL(msm_dump_data_register_nominidump);
 
-static int __init init_memory_dump(void)
+#define MSM_DUMP_TOTAL_SIZE_OFFSET	0x724
+static int init_memory_dump(void *dump_vaddr, phys_addr_t phys_addr,
+					size_t size)
 {
 	struct msm_dump_table *table;
 	struct msm_dump_entry entry;
@@ -188,47 +605,34 @@ static int __init init_memory_dump(void)
 		return -ENOMEM;
 	}
 
-	memdump.table = kzalloc(sizeof(struct msm_dump_table), GFP_KERNEL);
-	if (!memdump.table) {
-		ret = -ENOMEM;
-		goto err0;
-	}
+	memdump.table = dump_vaddr;
 	memdump.table->version = MSM_DUMP_TABLE_VERSION;
-	memdump.table_phys = virt_to_phys(memdump.table);
-	memcpy_toio(imem_base, &memdump.table_phys, sizeof(memdump.table_phys));
+	memdump.table_phys = phys_addr;
+	memcpy_toio(imem_base, &memdump.table_phys,
+			sizeof(memdump.table_phys));
+	memcpy_toio(imem_base + MSM_DUMP_TOTAL_SIZE_OFFSET,
+			&size, sizeof(size_t));
+
 	/* Ensure write to imem_base is complete before unmapping */
 	mb();
 	pr_info("MSM Memory Dump base table set up\n");
 
 	iounmap(imem_base);
-
-	table = kzalloc(sizeof(struct msm_dump_table), GFP_KERNEL);
-	if (!table) {
-		ret = -ENOMEM;
-		goto err1;
-	}
+	dump_vaddr += sizeof(*table);
+	phys_addr += sizeof(*table);
+	table = dump_vaddr;
 	table->version = MSM_DUMP_TABLE_VERSION;
-
 	entry.id = MSM_DUMP_TABLE_APPS;
-	entry.addr = virt_to_phys(table);
+	entry.addr = phys_addr;
 	ret = msm_dump_table_register(&entry);
 	if (ret) {
-		pr_info("mem dump apps data table register failed\n");
-		goto err2;
+		pr_err("mem dump apps data table register failed\n");
+		return ret;
 	}
 	pr_info("MSM Memory Dump apps data table set up\n");
 
 	return 0;
-err2:
-	kfree(table);
-err1:
-	kfree(memdump.table);
-	return ret;
-err0:
-	iounmap(imem_base);
-	return ret;
 }
-early_initcall(init_memory_dump);
 
 #ifdef CONFIG_MSM_DEBUG_LAR_UNLOCK
 static int __init init_debug_lar_unlock(void)
@@ -253,17 +657,28 @@ static int __init init_debug_lar_unlock(void)
 early_initcall(init_debug_lar_unlock);
 #endif
 
-static int mem_dump_probe(struct platform_device *pdev)
+#define MSM_DUMP_DATA_SIZE sizeof(struct msm_dump_data)
+static int mem_dump_alloc(struct platform_device *pdev)
 {
 	struct device_node *child_node;
 	const struct device_node *node = pdev->dev.of_node;
-	static dma_addr_t dump_addr;
-	static void *dump_vaddr;
 	struct msm_dump_data *dump_data;
 	struct msm_dump_entry dump_entry;
-	int ret;
+	struct md_region md_entry;
+	size_t total_size;
 	u32 size, id;
+	int ret, no_of_nodes;
+	dma_addr_t dma_handle;
+	phys_addr_t phys_addr;
+	struct sg_table mem_dump_sgt;
+	void *dump_vaddr;
+	uint32_t ns_vmids[] = {VMID_HLOS};
+	uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
+	u64 shm_bridge_handle;
 
+	total_size = size = ret = no_of_nodes = 0;
+	/* For dump table registration with IMEM */
+	total_size = sizeof(struct msm_dump_table) * 2;
 	for_each_available_child_of_node(node, child_node) {
 		ret = of_property_read_u32(child_node, "qcom,dump-size", &size);
 		if (ret) {
@@ -272,6 +687,53 @@ static int mem_dump_probe(struct platform_device *pdev)
 			continue;
 		}
 
+		total_size += size;
+		no_of_nodes++;
+	}
+
+	total_size += (MSM_DUMP_DATA_SIZE * no_of_nodes);
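+	/* Each node's dump region is preceded by its msm_dump_data header */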
+	total_size = ALIGN(total_size, SZ_4K);
+	dump_vaddr = dma_alloc_coherent(&pdev->dev, total_size,
+						&dma_handle, GFP_KERNEL);
+	if (!dump_vaddr) {
+		dev_err(&pdev->dev, "Couldn't get memory for dump entries\n");
+		return -ENOMEM;
+	}
+
+	dma_get_sgtable(&pdev->dev, &mem_dump_sgt, dump_vaddr,
+						dma_handle, total_size);
+	phys_addr = page_to_phys(sg_page(mem_dump_sgt.sgl));
+	sg_free_table(&mem_dump_sgt);
+
+	ret = qtee_shmbridge_register(phys_addr, total_size, ns_vmids,
+		ns_vm_perms, 1, PERM_READ|PERM_WRITE, &shm_bridge_handle);
+
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to create shm bridge.ret=%d\n",
+						ret);
+		dma_free_coherent(&pdev->dev, total_size,
+						dump_vaddr, dma_handle);
+		return ret;
+	}
+
+	memset(dump_vaddr, 0x0, total_size);
+
+	ret = init_memory_dump(dump_vaddr, phys_addr, total_size);
+	if (ret) {
+		dev_err(&pdev->dev, "Memory Dump table set up is failed\n");
+		qtee_shmbridge_deregister(shm_bridge_handle);
+		dma_free_coherent(&pdev->dev, total_size,
+						dump_vaddr, dma_handle);
+		return ret;
+	}
+
+	dump_vaddr += (sizeof(struct msm_dump_table) * 2);
+	phys_addr += (sizeof(struct msm_dump_table) * 2);
+	for_each_available_child_of_node(node, child_node) {
+		ret = of_property_read_u32(child_node, "qcom,dump-size", &size);
+		if (ret)
+			continue;
+
 		ret = of_property_read_u32(child_node, "qcom,dump-id", &id);
 		if (ret) {
 			dev_err(&pdev->dev, "Unable to find id for %s\n",
@@ -279,40 +741,45 @@ static int mem_dump_probe(struct platform_device *pdev)
 			continue;
 		}
 
-		dump_vaddr = (void *) dma_alloc_coherent(&pdev->dev, size,
-						&dump_addr, GFP_KERNEL);
-
-		if (!dump_vaddr) {
-			dev_err(&pdev->dev, "Couldn't get memory for dumping\n");
-			continue;
-		}
-
-		memset(dump_vaddr, 0x0, size);
-
-		dump_data = devm_kzalloc(&pdev->dev,
-				sizeof(struct msm_dump_data), GFP_KERNEL);
-		if (!dump_data) {
-			dma_free_coherent(&pdev->dev, size, dump_vaddr,
-					dump_addr);
-			continue;
-		}
-
-		dump_data->addr = dump_addr;
+		dump_data = dump_vaddr;
+		dump_data->addr = phys_addr + MSM_DUMP_DATA_SIZE;
 		dump_data->len = size;
 		dump_entry.id = id;
 		strlcpy(dump_data->name, child_node->name,
 					sizeof(dump_data->name));
-		dump_entry.addr = virt_to_phys(dump_data);
-		ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
-		if (ret) {
+		dump_entry.addr = phys_addr;
+		ret = msm_dump_data_register_nominidump(MSM_DUMP_TABLE_APPS,
+					&dump_entry);
+		if (ret)
 			dev_err(&pdev->dev, "Data dump setup failed, id = %d\n",
 				id);
-			dma_free_coherent(&pdev->dev, size, dump_vaddr,
-					dump_addr);
-			devm_kfree(&pdev->dev, dump_data);
-		}
+
+		md_entry.phys_addr = dump_data->addr;
+		md_entry.virt_addr = (uintptr_t)dump_vaddr + MSM_DUMP_DATA_SIZE;
+		md_entry.size = size;
+		md_entry.id = id;
+		strlcpy(md_entry.name, child_node->name, sizeof(md_entry.name));
+		if (msm_minidump_add_region(&md_entry))
+			dev_err(&pdev->dev, "Mini dump entry failed id = %d\n",
+				id);
+
+		if (id == CPUSS_REGDUMP)
+			cpuss_regdump_init(pdev,
+				(dump_vaddr + MSM_DUMP_DATA_SIZE), size);
+
+		dump_vaddr += (size + MSM_DUMP_DATA_SIZE);
+		phys_addr += (size + MSM_DUMP_DATA_SIZE);
 	}
-	return 0;
+
+	return ret;
+}
+
+static int mem_dump_probe(struct platform_device *pdev)
+{
+	return mem_dump_alloc(pdev);
 }
 
 static const struct of_device_id mem_dump_match_table[] = {
diff --git a/drivers/soc/qcom/microdump_collector.c b/drivers/soc/qcom/microdump_collector.c
new file mode 100644
index 0000000..183f42c
--- /dev/null
+++ b/drivers/soc/qcom/microdump_collector.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/ramdump.h>
+#include <linux/soc/qcom/smem.h>
+
+#define SMEM_SSR_REASON_MSS0	421
+#define SMEM_SSR_DATA_MSS0	611
+#define SMEM_MODEM	1
+
+/*
+ * This driver collects data from the SMEM regions whenever the modem crashes
+ * and stores it in /dev/ramdump_microdump_modem to expose it to user space.
+ */
+
+struct microdump_data {
+	struct ramdump_device *microdump_dev;
+	void *microdump_modem_notify_handler;
+	struct notifier_block microdump_modem_ssr_nb;
+};
+
+static struct microdump_data *drv;
+
+static int microdump_modem_notifier_nb(struct notifier_block *nb,
+		unsigned long code, void *data)
+{
+	int ret = 0;
+	size_t size_reason = 0, size_data = 0;
+	char *crash_reason = NULL;
+	char *crash_data = NULL;
+	struct ramdump_segment segment[2];
+
+	if (code != SUBSYS_RAMDUMP_NOTIFICATION && code != SUBSYS_SOC_RESET)
+		return NOTIFY_OK;
+
+	memset(segment, 0, sizeof(segment));
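+	/* Segment 0 holds the crash reason, segment 1 the crash data */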
+
+	crash_reason = qcom_smem_get(QCOM_SMEM_HOST_ANY,
+				     SMEM_SSR_REASON_MSS0, &size_reason);
+
+	if (IS_ERR_OR_NULL(crash_reason)) {
+		pr_info("%s: smem %d not available\n",
+				__func__, SMEM_SSR_REASON_MSS0);
+		goto out;
+	}
+
+	segment[0].v_address = crash_reason;
+	segment[0].size = size_reason;
+
+	crash_data = qcom_smem_get(SMEM_MODEM,
+				   SMEM_SSR_DATA_MSS0, &size_data);
+
+	if (IS_ERR_OR_NULL(crash_data)) {
+		pr_info("%s: smem %d not available\n",
+				__func__, SMEM_SSR_DATA_MSS0);
+		goto out;
+	}
+
+	segment[1].v_address = crash_data;
+	segment[1].size = size_data;
+
+	ret = do_ramdump(drv->microdump_dev, segment, 2);
+	if (ret)
+		pr_info("%s: do_ramdump() failed\n", __func__);
+
+out:
+	return NOTIFY_OK;
+}
+
+static int microdump_modem_ssr_register_notifier(struct microdump_data *drv)
+{
+	int ret = 0;
+
+	drv->microdump_modem_ssr_nb.notifier_call = microdump_modem_notifier_nb;
+
+	drv->microdump_modem_notify_handler =
+		subsys_notif_register_notifier("modem",
+			&drv->microdump_modem_ssr_nb);
+
+	if (IS_ERR(drv->microdump_modem_notify_handler)) {
+		pr_err("Modem register notifier failed: %ld\n",
+			PTR_ERR(drv->microdump_modem_notify_handler));
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static void microdump_modem_ssr_unregister_notifier(struct microdump_data *drv)
+{
+	subsys_notif_unregister_notifier(drv->microdump_modem_notify_handler,
+					&drv->microdump_modem_ssr_nb);
+	drv->microdump_modem_notify_handler = NULL;
+}
+
+/*
+ * microdump_init() - Registers kernel module for microdump collector
+ *
+ * Creates device file /dev/ramdump_microdump_modem and registers handler for
+ * modem SSR events.
+ *
+ * Returns 0 on success and a negative error code on failure.
+ */
+static int __init microdump_init(void)
+{
+	int ret = -ENOMEM;
+
+	drv = kzalloc(sizeof(struct microdump_data), GFP_KERNEL);
+	if (!drv)
+		goto out;
+
+	drv->microdump_dev = create_ramdump_device("microdump_modem", NULL);
+	if (!drv->microdump_dev) {
+		pr_err("%s: Unable to create a microdump_modem ramdump device\n"
+			, __func__);
+		ret = -ENODEV;
+		goto out_kfree;
+	}
+
+	ret = microdump_modem_ssr_register_notifier(drv);
+	if (ret) {
+		destroy_ramdump_device(drv->microdump_dev);
+		goto out_kfree;
+	}
+	return ret;
+
+out_kfree:
+	pr_err("%s: Failed to register microdump collector\n", __func__);
+	kfree(drv);
+	drv = NULL;
+out:
+	return ret;
+}
+
+static void __exit microdump_exit(void)
+{
+	microdump_modem_ssr_unregister_notifier(drv);
+	destroy_ramdump_device(drv->microdump_dev);
+	kfree(drv);
+}
+
+module_init(microdump_init);
+module_exit(microdump_exit);
+
+MODULE_DESCRIPTION("Microdump Collector");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c
index ee7b42f..520c9c4 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016, 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
@@ -651,6 +651,15 @@ static int get_bus_node_device_data(
 			of_node_put(qos_clk_node);
 		}
 
+		node_device->clk[ACTIVE_CTX].clk = of_clk_get_by_name(dev_node,
+							"node_a_clk");
+
+		if (IS_ERR_OR_NULL(node_device->clk[ACTIVE_CTX].clk))
+			dev_dbg(&pdev->dev,
+				 "%s:Failed to get bus clk for bus%d ctx%d",
+				__func__, node_device->node_info->id,
+								ACTIVE_CTX);
+
 		node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node,
 							"node_clk");
 
diff --git a/drivers/soc/qcom/msm_minidump.c b/drivers/soc/qcom/msm_minidump.c
index d45eb85..56643227 100644
--- a/drivers/soc/qcom/msm_minidump.c
+++ b/drivers/soc/qcom/msm_minidump.c
@@ -86,31 +86,33 @@ static inline unsigned int set_section_name(const char *name)
 	return ret;
 }
 
-static inline bool md_check_name(const char *name)
-{
-	struct md_region *mde = minidump_table.entry;
-	int i, regno = minidump_table.num_regions;
-
-	for (i = 0; i < regno; i++, mde++)
-		if (!strcmp(mde->name, name))
-			return true;
-	return false;
-}
-
-/* Return next seq no, if name already exists in the table */
-static inline int md_get_seq_num(const char *name)
+static inline int md_region_num(const char *name, int *seqno)
 {
 	struct md_ss_region *mde = minidump_table.md_regions;
 	int i, regno = minidump_table.md_ss_toc->ss_region_count;
-	int seqno = 0;
+	int ret = -EINVAL;
 
-	for (i = 0; i < (regno - 1); i++, mde++) {
+	for (i = 0; i < regno; i++, mde++) {
 		if (!strcmp(mde->name, name)) {
-			if (mde->seq_num >= seqno)
-				seqno = mde->seq_num + 1;
+			ret = i;
+			if (mde->seq_num > *seqno)
+				*seqno = mde->seq_num;
 		}
 	}
-	return seqno;
+	return ret;
+}
+
+static inline int md_entry_num(const struct md_region *entry)
+{
+	struct md_region *mdr;
+	int i, regno = minidump_table.num_regions;
+
+	for (i = 0; i < regno; i++) {
+		mdr = &minidump_table.entry[i];
+		if (!strcmp(mdr->name, entry->name))
+			return i;
+	}
+	return -EINVAL;
 }
 
 /* Update Mini dump table in SMEM */
@@ -120,14 +122,15 @@ static void md_update_ss_toc(const struct md_region *entry)
 	struct elfhdr *hdr = minidump_elfheader.ehdr;
 	struct elf_shdr *shdr = elf_section(hdr, hdr->e_shnum++);
 	struct elf_phdr *phdr = elf_program(hdr, hdr->e_phnum++);
-	int reg_cnt = minidump_table.md_ss_toc->ss_region_count++;
+	int seq = 0, reg_cnt = minidump_table.md_ss_toc->ss_region_count;
 
 	mdr = &minidump_table.md_regions[reg_cnt];
 
 	strlcpy(mdr->name, entry->name, sizeof(mdr->name));
 	mdr->region_base_address = entry->phys_addr;
 	mdr->region_size = entry->size;
-	mdr->seq_num = md_get_seq_num(entry->name);
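+	/* A name that already exists continues its sequence numbering */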
+	if (md_region_num(entry->name, &seq) >= 0)
+		mdr->seq_num = seq + 1;
 
 	/* Update elf header */
 	shdr->sh_type = SHT_PROGBITS;
@@ -144,9 +147,9 @@ static void md_update_ss_toc(const struct md_region *entry)
 	phdr->p_paddr = entry->phys_addr;
 	phdr->p_filesz = phdr->p_memsz =  mdr->region_size;
 	phdr->p_flags = PF_R | PF_W;
-
 	minidump_elfheader.elf_offset += shdr->sh_size;
 	mdr->md_valid = MD_REGION_VALID;
+	minidump_table.md_ss_toc->ss_region_count++;
 }
 
 bool msm_minidump_enabled(void)
@@ -167,23 +170,23 @@ int msm_minidump_add_region(const struct md_region *entry)
 {
 	u32 entries;
 	struct md_region *mdr;
-	int ret = 0;
 
 	if (!entry)
 		return -EINVAL;
 
-	if ((strlen(entry->name) > MAX_NAME_LENGTH) ||
-		md_check_name(entry->name) || !entry->virt_addr) {
+	if ((strlen(entry->name) > MAX_NAME_LENGTH) || !entry->virt_addr ||
+		(!IS_ALIGNED(entry->size, 4))) {
 		pr_err("Invalid entry details\n");
 		return -EINVAL;
 	}
 
-	if (!IS_ALIGNED(entry->size, 4)) {
-		pr_err("size should be 4 byte aligned\n");
+	spin_lock(&mdt_lock);
+	if (md_entry_num(entry) >= 0) {
+		pr_err("Entry name already exist.\n");
+		spin_unlock(&mdt_lock);
 		return -EINVAL;
 	}
 
-	spin_lock(&mdt_lock);
 	entries = minidump_table.num_regions;
 	if (entries >= MAX_NUM_ENTRIES) {
 		pr_err("Maximum entries reached.\n");
@@ -209,10 +212,141 @@ int msm_minidump_add_region(const struct md_region *entry)
 
 	spin_unlock(&mdt_lock);
 
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(msm_minidump_add_region);
 
+int msm_minidump_clear_headers(const struct md_region *entry)
+{
+	struct elfhdr *hdr = minidump_elfheader.ehdr;
+	struct elf_shdr *shdr = NULL, *tshdr = NULL;
+	struct elf_phdr *phdr = NULL, *tphdr = NULL;
+	int pidx, shidx, strln, i;
+	char *shname;
+	u64 esize;
+
+	esize = entry->size;
+	for (i = 0; i < hdr->e_phnum; i++) {
+		phdr = elf_program(hdr, i);
+		if ((phdr->p_paddr == entry->phys_addr) &&
+			(phdr->p_memsz == entry->size))
+			break;
+	}
+	if (i == hdr->e_phnum) {
+		pr_err("Cannot find entry in elf\n");
+		return -EINVAL;
+	}
+	pidx = i;
+
+	for (i = 0; i < hdr->e_shnum; i++) {
+		shdr = elf_section(hdr, i);
+		shname = elf_lookup_string(hdr, shdr->sh_name);
+		if (shname && !strcmp(shname, entry->name))
+			if ((shdr->sh_addr == entry->virt_addr) &&
+				(shdr->sh_size == entry->size))
+				break;
+
+	}
+	if (i == hdr->e_shnum) {
+		pr_err("Cannot find entry in elf\n");
+		return -EINVAL;
+	}
+	shidx = i;
+
+	if (shdr->sh_offset != phdr->p_offset) {
+		pr_err("Invalid entry details in elf, Minidump broken..\n");
+		return -EINVAL;
+	}
+
+	/* Clear name in string table */
+	strln = strlen(shname) + 1;
+	memmove(shname, shname + strln,
+		(minidump_elfheader.strtable_idx - shdr->sh_name));
+	minidump_elfheader.strtable_idx -= strln;
+
+	/* Clear program header */
+	tphdr = elf_program(hdr, pidx);
+	for (i = pidx; i < hdr->e_phnum - 1; i++) {
+		tphdr = elf_program(hdr, i + 1);
+		phdr = elf_program(hdr, i);
+		memcpy(phdr, tphdr, sizeof(struct elf_phdr));
+		phdr->p_offset = phdr->p_offset - esize;
+	}
+	memset(tphdr, 0, sizeof(struct elf_phdr));
+	hdr->e_phnum--;
+
+	/* Clear section header */
+	tshdr = elf_section(hdr, shidx);
+	for (i = shidx; i < hdr->e_shnum - 1; i++) {
+		tshdr = elf_section(hdr, i + 1);
+		shdr = elf_section(hdr, i);
+		memcpy(shdr, tshdr, sizeof(struct elf_shdr));
+		shdr->sh_offset -= esize;
+		shdr->sh_name -= strln;
+	}
+	memset(tshdr, 0, sizeof(struct elf_shdr));
+	hdr->e_shnum--;
+
+	minidump_elfheader.elf_offset -= esize;
+	return 0;
+}
+
+int msm_minidump_remove_region(const struct md_region *entry)
+{
+	int rcount, ecount, seq = 0, rgno, ret;
+
+	if (!entry || !minidump_table.md_ss_toc ||
+		(minidump_table.md_ss_toc->md_ss_enable_status !=
+						MD_SS_ENABLED))
+		return -EINVAL;
+
+	spin_lock(&mdt_lock);
+	ecount = minidump_table.num_regions;
+	rcount = minidump_table.md_ss_toc->ss_region_count;
+	rgno = md_entry_num(entry);
+	if (rgno < 0) {
+		pr_err("Not able to find the entry in table\n");
+		goto out;
+	}
+
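+	/* Invalidate the subsystem ToC while the tables are compacted */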
+	minidump_table.md_ss_toc->md_ss_toc_init = 0;
+
+	/* Remove entry from: entry list, ss region list and elf header */
+	memmove(&minidump_table.entry[rgno], &minidump_table.entry[rgno + 1],
+		((ecount - rgno - 1) * sizeof(struct md_region)));
+	memset(&minidump_table.entry[ecount - 1], 0, sizeof(struct md_region));
+
+	rgno = md_region_num(entry->name, &seq);
+	if (rgno < 0) {
+		pr_err("Not able to find region in table\n");
+		goto out;
+	}
+
+	memmove(&minidump_table.md_regions[rgno],
+		&minidump_table.md_regions[rgno + 1],
+		((rcount - rgno - 1) * sizeof(struct md_ss_region)));
+	memset(&minidump_table.md_regions[rcount - 1], 0,
+
+
+	ret = msm_minidump_clear_headers(entry);
+	if (ret)
+		goto out;
+
+	minidump_table.md_ss_toc->ss_region_count--;
+	minidump_table.md_ss_toc->md_ss_toc_init = 1;
+
+	minidump_table.num_regions--;
+	spin_unlock(&mdt_lock);
+	return 0;
+out:
+	spin_unlock(&mdt_lock);
+	pr_err("Minidump is broken..disable Minidump collection\n");
+	return -EINVAL;
+}
+EXPORT_SYMBOL(msm_minidump_remove_region);
+
 static int msm_minidump_add_header(void)
 {
 	struct md_ss_region *mdreg = &minidump_table.md_regions[0];
@@ -226,8 +360,11 @@ static int msm_minidump_add_header(void)
 	 * elf header, MAX_NUM_ENTRIES+4 of section and program elf headers,
 	 * string table section and linux banner.
 	 */
-	elfh_size = sizeof(*ehdr) + MAX_STRTBL_SIZE + (strlen(linux_banner) +
-		1) + ((sizeof(*shdr) + sizeof(*phdr)) * (MAX_NUM_ENTRIES + 4));
+	elfh_size = sizeof(*ehdr) + MAX_STRTBL_SIZE +
+			(strlen(linux_banner) + 1) +
+			((sizeof(*shdr) + sizeof(*phdr))
+			 * (MAX_NUM_ENTRIES + 4));
+
 	elfh_size = ALIGN(elfh_size, 4);
 
 	minidump_elfheader.ehdr = kzalloc(elfh_size, GFP_KERNEL);
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 9007fa3..6fa278f9 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -53,9 +53,20 @@
 #define MAX_LEN 96
 #define NUM_OF_ENCRYPTED_KEY	3
 
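+/* Log PIL boot milestones via IPC logging when available, else via ftrace */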
+#define pil_log(msg, desc)	\
+	do {			\
+		if (pil_ipc_log)		\
+			pil_ipc("[%s]: %s", desc->name, msg); \
+		else		\
+			trace_pil_event(msg, desc);	\
+	} while (0)
+
 static void __iomem *pil_info_base;
 static struct md_global_toc *g_md_toc;
 
+void *pil_ipc_log;
+
 /**
  * proxy_timeout - Override for proxy vote timeouts
  * -1: Use driver-specified timeout
@@ -379,7 +390,11 @@ static int pil_do_minidump(struct pil_desc *desc, void *ramdump_dev)
 					      &ss_valid_seg_cnt,
 					      desc->num_aux_minidump_ids);
 
-	ret = do_minidump(ramdump_dev, ramdump_segs, ss_valid_seg_cnt);
+	if (desc->minidump_as_elf32)
+		ret = do_minidump_elf32(ramdump_dev, ramdump_segs,
+					ss_valid_seg_cnt);
+	else
+		ret = do_minidump(ramdump_dev, ramdump_segs, ss_valid_seg_cnt);
 	if (ret)
 		pil_err(desc, "%s: Minidump collection failed for subsys %s rc:%d\n",
 			__func__, desc->name, ret);
@@ -1241,7 +1256,7 @@ int pil_boot(struct pil_desc *desc)
 		goto release_fw;
 	}
 
-	trace_pil_event("before_init_image", desc);
+	pil_log("before_init_image", desc);
 	if (desc->ops->init_image)
 		ret = desc->ops->init_image(desc, fw->data, fw->size);
 	if (ret) {
@@ -1249,7 +1264,7 @@ int pil_boot(struct pil_desc *desc)
 		goto err_boot;
 	}
 
-	trace_pil_event("before_mem_setup", desc);
+	pil_log("before_mem_setup", desc);
 	if (desc->ops->mem_setup)
 		ret = desc->ops->mem_setup(desc, priv->region_start,
 				priv->region_end - priv->region_start);
@@ -1265,7 +1280,7 @@ int pil_boot(struct pil_desc *desc)
 		 * Also for secure boot devices, modem memory has to be released
 		 * after MBA is booted
 		 */
-		trace_pil_event("before_assign_mem", desc);
+		pil_log("before_assign_mem", desc);
 		if (desc->modem_ssr) {
 			ret = pil_assign_mem_to_linux(desc, priv->region_start,
 				(priv->region_end - priv->region_start));
@@ -1284,7 +1299,7 @@ int pil_boot(struct pil_desc *desc)
 		hyp_assign = true;
 	}
 
-	trace_pil_event("before_load_seg", desc);
+	pil_log("before_load_seg", desc);
 
 	/**
 	 * Fallback to serial loading of blobs if the
@@ -1303,7 +1318,7 @@ int pil_boot(struct pil_desc *desc)
 	}
 
 	if (desc->subsys_vmid > 0) {
-		trace_pil_event("before_reclaim_mem", desc);
+		pil_log("before_reclaim_mem", desc);
 		ret =  pil_reclaim_mem(desc, priv->region_start,
 				(priv->region_end - priv->region_start),
 				desc->subsys_vmid);
@@ -1315,14 +1330,14 @@ int pil_boot(struct pil_desc *desc)
 		hyp_assign = false;
 	}
 
-	trace_pil_event("before_auth_reset", desc);
+	pil_log("before_auth_reset", desc);
 	notify_before_auth_and_reset(desc->dev);
 	ret = desc->ops->auth_and_reset(desc);
 	if (ret) {
 		pil_err(desc, "Failed to bring out of reset(rc:%d)\n", ret);
 		goto err_auth_and_reset;
 	}
-	trace_pil_event("reset_done", desc);
+	pil_log("reset_done", desc);
 	pil_info(desc, "Brought out of reset\n");
 	desc->modem_ssr = false;
 err_auth_and_reset:
@@ -1557,6 +1572,9 @@ int pil_desc_init(struct pil_desc *desc)
 	if (!desc->unmap_fw_mem)
 		desc->unmap_fw_mem = unmap_fw_mem;
 
+	desc->minidump_as_elf32 = of_property_read_bool(
+					ofnode, "qcom,minidump-as-elf32");
+
 	return 0;
 err_parse_dt:
 	ida_simple_remove(&pil_ida, priv->id);
@@ -1644,6 +1662,9 @@ static int __init msm_pil_init(void)
 	if (!pil_wq)
 		pr_warn("pil: Defaulting to sequential firmware loading.\n");
 
+	pil_ipc_log = ipc_log_context_create(2, "PIL-IPC", 0);
+	if (!pil_ipc_log)
+		pr_warn("Failed to set up PIL IPC logging\n");
 out:
 	return register_pm_notifier(&pil_pm_notifier);
 }
diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h
index c265657..c83b038 100644
--- a/drivers/soc/qcom/peripheral-loader.h
+++ b/drivers/soc/qcom/peripheral-loader.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2019, The Linux Foundation. All rights reserved.
  */
 #ifndef __MSM_PERIPHERAL_LOADER_H
 #define __MSM_PERIPHERAL_LOADER_H
@@ -8,11 +8,20 @@
 #include <linux/mailbox_client.h>
 #include <linux/mailbox/qmp.h>
 #include "minidump_private.h"
+#include <linux/ipc_logging.h>
 
 struct device;
 struct module;
 struct pil_priv;
 
+extern void *pil_ipc_log;
+
+#define pil_ipc(__msg, ...) \
+do { \
+	if (pil_ipc_log) \
+		ipc_log_string(pil_ipc_log, \
+			"[%s]: "__msg, __func__,  ##__VA_ARGS__); \
+} while (0)
 /**
  * struct pil_desc - PIL descriptor
  * @name: string used for pil_get()
@@ -34,6 +43,7 @@ struct pil_priv;
  * @modem_ssr: true if modem is restarting, false if booting for first time.
  * @clear_fw_region: Clear fw region on failure in loading.
  * @subsys_vmid: memprot id for the subsystem.
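+ * @minidump_as_elf32: collect the minidump in ELF32 format when set.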
+ * @extra_size: extra memory allocated at the end of the image.
  */
 struct pil_desc {
 	const char *name;
@@ -62,6 +72,8 @@ struct pil_desc {
 	int minidump_id;
 	int *aux_minidump_ids;
 	int num_aux_minidump_ids;
+	bool minidump_as_elf32;
+	u32 extra_size;
 };
 
 /**
diff --git a/drivers/soc/qcom/qbt_handler.c b/drivers/soc/qcom/qbt_handler.c
index dffe543..9550acb 100644
--- a/drivers/soc/qcom/qbt_handler.c
+++ b/drivers/soc/qcom/qbt_handler.c
@@ -966,7 +966,7 @@ static int setup_ipc_irq(struct platform_device *pdev,
 		drvdata->fw_ipc.irq,
 		NULL,
 		qbt_ipc_irq_handler,
-		IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+		IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
 		desc,
 		drvdata);
 
diff --git a/drivers/soc/qcom/qcom_ipcc.c b/drivers/soc/qcom/qcom_ipcc.c
index 83fda192..6633756 100644
--- a/drivers/soc/qcom/qcom_ipcc.c
+++ b/drivers/soc/qcom/qcom_ipcc.c
@@ -7,6 +7,7 @@
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
+#include <linux/syscore_ops.h>
 #include <linux/platform_device.h>
 #include <linux/mailbox_controller.h>
 #include <dt-bindings/soc/qcom,ipcc.h>
@@ -59,6 +60,8 @@ struct ipcc_mbox_chan {
 	struct ipcc_protocol_data *proto_data;
 };
 
+static struct ipcc_protocol_data *ipcc_proto_data;
+
 static inline u32 qcom_ipcc_get_packed_id(u16 client_id, u16 signal_id)
 {
 	return (client_id << IPCC_CLIENT_ID_SHIFT) | signal_id;
@@ -136,7 +139,8 @@ static void qcom_ipcc_unmask_irq(struct irq_data *irqd)
 static struct irq_chip qcom_ipcc_irq_chip = {
 	.name = "ipcc",
 	.irq_mask = qcom_ipcc_mask_irq,
-	.irq_unmask = qcom_ipcc_unmask_irq
+	.irq_unmask = qcom_ipcc_unmask_irq,
+	.flags = IRQCHIP_SKIP_SET_WAKE,
 };
 
 static int qcom_ipcc_domain_map(struct irq_domain *d, unsigned int irq,
@@ -304,6 +308,43 @@ static int qcom_ipcc_setup_mbox(struct ipcc_protocol_data *proto_data,
 	return mbox_controller_register(mbox);
 }
 
+#ifdef CONFIG_PM
+static int msm_ipcc_suspend(void)
+{
+	return 0;
+}
+
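+/*
+ * On resume, peek at the pending-interrupt register and log which IPCC
+ * signal, if any, woke the system; this helps attribute wakeups.
+ */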
+static void msm_ipcc_resume(void)
+{
+	int virq;
+	struct irq_desc *desc;
+	const char *name = "null";
+	u32 packed_id;
+	struct ipcc_protocol_data *proto_data = ipcc_proto_data;
+
+	packed_id = readl_no_log(proto_data->base + IPCC_REG_RECV_ID);
+	if (packed_id == IPCC_NO_PENDING_IRQ)
+		return;
+
+	virq = irq_find_mapping(proto_data->irq_domain, packed_id);
+	desc = irq_to_desc(virq);
+	if (desc == NULL)
+		name = "stray irq";
+	else if (desc->action && desc->action->name)
+		name = desc->action->name;
+
+	pr_warn("%s: %d triggered %s\n", __func__, virq, name);
+}
+#else
+#define msm_ipcc_suspend NULL
+#define msm_ipcc_resume NULL
+#endif
+
+static struct syscore_ops msm_ipcc_pm_ops = {
+	.suspend = msm_ipcc_suspend,
+	.resume = msm_ipcc_resume,
+};
+
 static int qcom_ipcc_probe(struct platform_device *pdev)
 {
 	char *name;
@@ -316,6 +357,7 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
 	if (!proto_data)
 		return -ENOMEM;
 
+	ipcc_proto_data = proto_data;
 	proto_data->dev = &pdev->dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -366,6 +408,7 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
 
 	enable_irq_wake(proto_data->irq);
 	platform_set_drvdata(pdev, proto_data);
+	register_syscore_ops(&msm_ipcc_pm_ops);
 
 	return 0;
 
@@ -380,6 +423,7 @@ static int qcom_ipcc_remove(struct platform_device *pdev)
 {
 	struct ipcc_protocol_data *proto_data = platform_get_drvdata(pdev);
 
+	unregister_syscore_ops(&msm_ipcc_pm_ops);
 	disable_irq_wake(proto_data->irq);
 	mbox_controller_unregister(&proto_data->mbox);
 	irq_domain_remove(proto_data->irq_domain);
diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c
index 65089ad..5218136 100644
--- a/drivers/soc/qcom/qdss_bridge.c
+++ b/drivers/soc/qcom/qdss_bridge.c
@@ -436,11 +436,9 @@ static void mhi_read_done_work_fn(struct work_struct *work)
 static void usb_write_done(struct qdss_bridge_drvdata *drvdata,
 				   struct qdss_request *d_req)
 {
-	if (d_req->status) {
+	if (d_req->status)
 		pr_err_ratelimited("USB write failed err:%d\n", d_req->status);
-		mhi_queue_read(drvdata);
-		return;
-	}
+
 	qdss_buf_tbl_remove(drvdata, d_req->buf);
 	mhi_queue_read(drvdata);
 }
diff --git a/drivers/soc/qcom/qmi_rmnet.c b/drivers/soc/qcom/qmi_rmnet.c
index 46732bf..3366add 100644
--- a/drivers/soc/qcom/qmi_rmnet.c
+++ b/drivers/soc/qcom/qmi_rmnet.c
@@ -26,8 +26,11 @@
 #define FLAG_QMAP_MASK 0x0020
 
 #define FLAG_TO_MODE(f) ((f) & FLAG_DFC_MASK)
+
 #define DFC_SUPPORTED_MODE(m) \
-	((m) == DFC_MODE_FLOW_ID || (m) == DFC_MODE_MQ_NUM)
+	((m) == DFC_MODE_FLOW_ID || (m) == DFC_MODE_MQ_NUM || \
+	 (m) == DFC_MODE_SA)
+
 #define FLAG_TO_QMAP(f) ((f) & FLAG_QMAP_MASK)
 
 int dfc_mode;
@@ -218,6 +221,131 @@ static void qmi_rmnet_reset_txq(struct net_device *dev, unsigned int txq)
 	}
 }
 
+static struct rmnet_bearer_map *__qmi_rmnet_bearer_get(
+				struct qos_info *qos_info, u8 bearer_id)
+{
+	struct rmnet_bearer_map *bearer;
+
+	bearer = qmi_rmnet_get_bearer_map(qos_info, bearer_id);
+	if (bearer) {
+		bearer->flow_ref++;
+	} else {
+		bearer = kzalloc(sizeof(*bearer), GFP_ATOMIC);
+		if (!bearer)
+			return NULL;
+
+		bearer->bearer_id = bearer_id;
+		bearer->flow_ref = 1;
+		bearer->grant_size = DEFAULT_GRANT;
+		bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
+		bearer->mq_idx = INVALID_MQ;
+		bearer->ack_mq_idx = INVALID_MQ;
+		list_add(&bearer->list, &qos_info->bearer_head);
+	}
+
+	return bearer;
+}
+
+static void __qmi_rmnet_bearer_put(struct net_device *dev,
+				   struct qos_info *qos_info,
+				   struct rmnet_bearer_map *bearer,
+				   bool reset)
+{
+	struct mq_map *mq;
+	int i, j;
+
+	if (bearer && --bearer->flow_ref == 0) {
+		for (i = 0; i < MAX_MQ_NUM; i++) {
+			mq = &qos_info->mq[i];
+			if (mq->bearer != bearer)
+				continue;
+
+			mq->bearer = NULL;
+			if (reset) {
+				qmi_rmnet_reset_txq(dev, i);
+				qmi_rmnet_flow_control(dev, i, 1);
+				trace_dfc_qmi_tc(dev->name,
+					bearer->bearer_id, 0, 0, i, 1);
+
+				if (dfc_mode == DFC_MODE_SA) {
+					j = i + ACK_MQ_OFFSET;
+					qmi_rmnet_reset_txq(dev, j);
+					qmi_rmnet_flow_control(dev, j, 1);
+					trace_dfc_qmi_tc(dev->name,
+						bearer->bearer_id, 0, 0, j, 1);
+				}
+			}
+		}
+
+		/* Remove from bearer map */
+		list_del(&bearer->list);
+		kfree(bearer);
+	}
+}
+
+static void __qmi_rmnet_update_mq(struct net_device *dev,
+				  struct qos_info *qos_info,
+				  struct rmnet_bearer_map *bearer,
+				  struct rmnet_flow_map *itm)
+{
+	struct mq_map *mq;
+
+	/* In SA mode the default mq is not associated with any bearer */
+	if (dfc_mode == DFC_MODE_SA && itm->mq_idx == DEFAULT_MQ_NUM)
+		return;
+
+	mq = &qos_info->mq[itm->mq_idx];
+	if (!mq->bearer) {
+		mq->bearer = bearer;
+
+		if (dfc_mode == DFC_MODE_SA) {
+			bearer->mq_idx = itm->mq_idx;
+			bearer->ack_mq_idx = itm->mq_idx + ACK_MQ_OFFSET;
+		} else {
+			if (IS_ANCILLARY(itm->ip_type))
+				bearer->ack_mq_idx = itm->mq_idx;
+			else
+				bearer->mq_idx = itm->mq_idx;
+		}
+
+		qmi_rmnet_flow_control(dev, itm->mq_idx,
+				       bearer->grant_size > 0 ? 1 : 0);
+		trace_dfc_qmi_tc(dev->name, itm->bearer_id,
+				 bearer->grant_size, 0, itm->mq_idx,
+				 bearer->grant_size > 0 ? 1 : 0);
+
+		if (dfc_mode == DFC_MODE_SA) {
+			qmi_rmnet_flow_control(dev, bearer->ack_mq_idx,
+					bearer->grant_size > 0 ? 1 : 0);
+			trace_dfc_qmi_tc(dev->name, itm->bearer_id,
+					bearer->grant_size, 0,
+					bearer->ack_mq_idx,
+					bearer->grant_size > 0 ? 1 : 0);
+		}
+	}
+}
+
+static int __qmi_rmnet_rebind_flow(struct net_device *dev,
+				   struct qos_info *qos_info,
+				   struct rmnet_flow_map *itm,
+				   struct rmnet_flow_map *new_map)
+{
+	struct rmnet_bearer_map *bearer;
+
+	__qmi_rmnet_bearer_put(dev, qos_info, itm->bearer, false);
+
+	bearer = __qmi_rmnet_bearer_get(qos_info, new_map->bearer_id);
+	if (!bearer)
+		return -ENOMEM;
+
+	qmi_rmnet_update_flow_map(itm, new_map);
+	itm->bearer = bearer;
+
+	__qmi_rmnet_update_mq(dev, qos_info, bearer, itm);
+
+	return 0;
+}
+
 static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
 			      struct qmi_info *qmi)
 {
@@ -225,8 +353,7 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
 	struct rmnet_flow_map new_map, *itm;
 	struct rmnet_bearer_map *bearer;
 	struct tcmsg tmp_tcm;
-	struct mq_map *mq;
-	u32 mq_idx;
+	int rc = 0;
 
 	if (!qos_info || !tcm || tcm->tcm_handle >= MAX_MQ_NUM)
 		return -EINVAL;
@@ -251,14 +378,21 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
 	itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id,
 				     new_map.ip_type);
 	if (itm) {
-		pr_debug("%s: stale flow found\n", __func__);
-		tmp_tcm.tcm__pad1 = itm->bearer_id;
-		tmp_tcm.tcm_parent = itm->flow_id;
-		tmp_tcm.tcm_ifindex = itm->ip_type;
-		tmp_tcm.tcm_handle = itm->mq_idx;
-		spin_unlock_bh(&qos_info->qos_lock);
-		qmi_rmnet_del_flow(dev, &tmp_tcm, qmi);
-		goto again;
+		if (itm->bearer_id != new_map.bearer_id) {
+			rc = __qmi_rmnet_rebind_flow(
+				dev, qos_info, itm, &new_map);
+			goto done;
+		} else if (itm->mq_idx != new_map.mq_idx) {
+			tmp_tcm.tcm__pad1 = itm->bearer_id;
+			tmp_tcm.tcm_parent = itm->flow_id;
+			tmp_tcm.tcm_ifindex = itm->ip_type;
+			tmp_tcm.tcm_handle = itm->mq_idx;
+			spin_unlock_bh(&qos_info->qos_lock);
+			qmi_rmnet_del_flow(dev, &tmp_tcm, qmi);
+			goto again;
+		} else {
+			goto done;
+		}
 	}
 
 	/* Create flow map */
@@ -272,45 +406,19 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
 	list_add(&itm->list, &qos_info->flow_head);
 
 	/* Create or update bearer map */
-	bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id);
-	if (bearer) {
-		bearer->flow_ref++;
-	} else {
-		bearer = kzalloc(sizeof(*bearer), GFP_ATOMIC);
-		if (!bearer) {
-			spin_unlock_bh(&qos_info->qos_lock);
-			return -ENOMEM;
-		}
-
-		bearer->bearer_id = new_map.bearer_id;
-		bearer->flow_ref = 1;
-		bearer->grant_size = qos_info->default_grant;
-		bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
-		qos_info->default_grant = DEFAULT_GRANT;
-		list_add(&bearer->list, &qos_info->bearer_head);
+	bearer = __qmi_rmnet_bearer_get(qos_info, new_map.bearer_id);
+	if (!bearer) {
+		rc = -ENOMEM;
+		goto done;
 	}
+
 	itm->bearer = bearer;
 
-	/* Update mq map */
-	mq_idx = tcm->tcm_handle;
-	mq = &qos_info->mq[mq_idx];
-	if (!mq->bearer) {
-		mq->bearer = bearer;
-		mq->ancillary = IS_ANCILLARY(new_map.ip_type);
+	__qmi_rmnet_update_mq(dev, qos_info, bearer, itm);
 
-		qmi_rmnet_flow_control(dev, mq_idx,
-				       bearer->grant_size > 0 ? 1 : 0);
-		trace_dfc_qmi_tc(dev->name, itm->bearer_id,
-				 bearer->grant_size, 0, mq_idx,
-				 bearer->grant_size > 0 ? 1 : 0);
-
-	} else if (mq->bearer->bearer_id != new_map.bearer_id) {
-		pr_debug("%s: un-managered bearer %u\n",
-				__func__, new_map.bearer_id);
-	}
-
+done:
 	spin_unlock_bh(&qos_info->qos_lock);
-	return 0;
+	return rc;
 }
 
 static int
@@ -319,9 +427,6 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
 {
 	struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
 	struct rmnet_flow_map new_map, *itm;
-	struct rmnet_bearer_map *bearer;
-	struct mq_map *mq;
-	u32 mq_idx;
 
 	if (!qos_info)
 		return -EINVAL;
@@ -345,26 +450,7 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
 				    new_map.flow_id, new_map.ip_type,
 				    itm->mq_idx, 0);
 
-		bearer = itm->bearer;
-		if (bearer && --bearer->flow_ref == 0) {
-			/* Remove the bearer from mq map */
-			for (mq_idx = 0; mq_idx < MAX_MQ_NUM; mq_idx++) {
-				mq = &qos_info->mq[mq_idx];
-				if (mq->bearer != bearer)
-					continue;
-
-				mq->bearer = NULL;
-				mq->ancillary = false;
-				qmi_rmnet_reset_txq(dev, mq_idx);
-				qmi_rmnet_flow_control(dev, mq_idx, 1);
-				trace_dfc_qmi_tc(dev->name,
-					new_map.bearer_id, 0, 0, mq_idx, 1);
-			}
-
-			/* Remove from bearer map */
-			list_del(&bearer->list);
-			kfree(bearer);
-		}
+		__qmi_rmnet_bearer_put(dev, qos_info, itm->bearer, true);
 
 		/* Remove from flow map */
 		list_del(&itm->list);
@@ -682,11 +768,69 @@ void qmi_rmnet_burst_fc_check(struct net_device *dev,
 }
 EXPORT_SYMBOL(qmi_rmnet_burst_fc_check);
 
+static bool qmi_rmnet_is_tcp_ack(struct sk_buff *skb)
+{
+	unsigned int len = skb->len;
+
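+	/*
+	 * A pure ACK carries no payload: IPv4 is 20B IP + 20B TCP = 40B
+	 * (52B with the 12B timestamp option); IPv6 is 40B + 20B = 60B
+	 * (72B with timestamps).
+	 */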
+	switch (skb->protocol) {
+	/* TCPv4 ACKs */
+	case htons(ETH_P_IP):
+		if ((ip_hdr(skb)->protocol == IPPROTO_TCP) &&
+		    (ip_hdr(skb)->ihl == 5) &&
+		    (len == 40 || len == 52) &&
+		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
+			return true;
+		break;
+
+	/* TCPv6 ACKs */
+	case htons(ETH_P_IPV6):
+		if ((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) &&
+		    (len == 60 || len == 72) &&
+		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
+			return true;
+		break;
+	}
+
+	return false;
+}
+
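+/*
+ * Select the TX queue in SA mode: flows are looked up by skb->mark,
+ * and TCP ACKs are steered to the bearer's dedicated ACK queue.
+ */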
+static int qmi_rmnet_get_queue_sa(struct qos_info *qos, struct sk_buff *skb)
+{
+	struct rmnet_flow_map *itm;
+	int ip_type;
+	int txq = DEFAULT_MQ_NUM;
+
+	/*
+	 * Put RS/NS in the default mq: Router Solicitation (133) and
+	 * Neighbor Solicitation (135).
+	 */
+	if (skb->protocol == htons(ETH_P_IPV6) &&
+	    ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6 &&
+	    (icmp6_hdr(skb)->icmp6_type == 133 ||
+	     icmp6_hdr(skb)->icmp6_type == 135)) {
+		return DEFAULT_MQ_NUM;
+	}
+
+	ip_type = (skb->protocol == htons(ETH_P_IPV6)) ? AF_INET6 : AF_INET;
+
+	spin_lock_bh(&qos->qos_lock);
+
+	itm = qmi_rmnet_get_flow_map(qos, skb->mark, ip_type);
+	if (unlikely(!itm))
+		goto done;
+
+	/*
+	 * TCP ACKs go to the bearer's ACK mq; all other packets use the
+	 * flow's assigned mq.
+	 */
+	if (likely(itm->bearer) && qmi_rmnet_is_tcp_ack(skb))
+		txq = itm->bearer->ack_mq_idx;
+	else
+		txq = itm->mq_idx;
+
+done:
+	spin_unlock_bh(&qos->qos_lock);
+	return txq;
+}
+
 int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	struct qos_info *qos = rmnet_get_qos_pt(dev);
 	int txq = 0, ip_type = AF_INET;
-	unsigned int len = skb->len;
 	struct rmnet_flow_map *itm;
 	u32 mark = skb->mark;
 
@@ -697,32 +841,18 @@ int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb)
 	if (dfc_mode == DFC_MODE_MQ_NUM)
 		return mark;
 
-	switch (skb->protocol) {
-	/* TCPv4 ACKs */
-	case htons(ETH_P_IP):
-		ip_type = AF_INET;
-		if ((!mark) &&
-		    (ip_hdr(skb)->protocol == IPPROTO_TCP) &&
-		    (len == 40 || len == 52) &&
-		    (ip_hdr(skb)->ihl == 5) &&
-		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
-			return 1;
-		break;
-
-	/* TCPv6 ACKs */
-	case htons(ETH_P_IPV6):
-		ip_type = AF_INET6;
-		if ((!mark) &&
-		    (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) &&
-		    (len == 60 || len == 72) &&
-		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
-			return 1;
-		/* Fall through */
-	}
+	if (dfc_mode == DFC_MODE_SA)
+		return qmi_rmnet_get_queue_sa(qos, skb);
 
 	/* Default flows */
-	if (!mark)
-		return 0;
+	if (!mark) {
+		if (qmi_rmnet_is_tcp_ack(skb))
+			return 1;
+		else
+			return 0;
+	}
+
+	ip_type = (skb->protocol == htons(ETH_P_IPV6)) ? AF_INET6 : AF_INET;
 
 	/* Dedicated flows */
 	spin_lock_bh(&qos->qos_lock);
@@ -755,7 +885,6 @@ void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
 
 	qos->mux_id = mux_id;
 	qos->real_dev = real_dev;
-	qos->default_grant = DEFAULT_GRANT;
 	qos->tran_num = 0;
 	INIT_LIST_HEAD(&qos->flow_head);
 	INIT_LIST_HEAD(&qos->bearer_head);
@@ -976,6 +1105,9 @@ void qmi_rmnet_work_init(void *port)
 	rmnet_ps_wq = alloc_workqueue("rmnet_powersave_work",
 					WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
 
+	if (!rmnet_ps_wq)
+		return;
+
 	rmnet_work = kzalloc(sizeof(*rmnet_work), GFP_ATOMIC);
 	if (!rmnet_work) {
 		destroy_workqueue(rmnet_ps_wq);
diff --git a/drivers/soc/qcom/qmi_rmnet_i.h b/drivers/soc/qcom/qmi_rmnet_i.h
index 15dee7c..d94b5ea 100644
--- a/drivers/soc/qcom/qmi_rmnet_i.h
+++ b/drivers/soc/qcom/qmi_rmnet_i.h
@@ -9,14 +9,19 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 
-#define MAX_MQ_NUM 10
+#define MAX_MQ_NUM 16
 #define MAX_CLIENT_NUM 2
 #define MAX_FLOW_NUM 32
 #define DEFAULT_GRANT 1
 #define DFC_MAX_BEARERS_V01 16
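+/*
+ * In SA mode a bearer's ACK queue index is its data queue index plus
+ * ACK_MQ_OFFSET.
+ */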
+#define DEFAULT_MQ_NUM 0
+#define ACK_MQ_OFFSET (MAX_MQ_NUM - 1)
+#define INVALID_MQ 0xFF
 
 #define DFC_MODE_FLOW_ID 2
 #define DFC_MODE_MQ_NUM 3
+#define DFC_MODE_SA 4
+
 extern int dfc_mode;
 extern int dfc_qmap;
 
@@ -34,6 +39,8 @@ struct rmnet_bearer_map {
 	bool rat_switch;
 	bool tx_off;
 	u32 ack_txid;
+	u32 mq_idx;
+	u32 ack_mq_idx;
 };
 
 struct rmnet_flow_map {
@@ -53,7 +60,6 @@ struct svc_info {
 
 struct mq_map {
 	struct rmnet_bearer_map *bearer;
-	bool ancillary;
 };
 
 struct qos_info {
@@ -62,7 +68,6 @@ struct qos_info {
 	struct list_head flow_head;
 	struct list_head bearer_head;
 	struct mq_map mq[MAX_MQ_NUM];
-	u32 default_grant;
 	u32 tran_num;
 	spinlock_t qos_lock;
 };
diff --git a/drivers/soc/qcom/ramdump.c b/drivers/soc/qcom/ramdump.c
index a4798fe..c2f8993 100644
--- a/drivers/soc/qcom/ramdump.c
+++ b/drivers/soc/qcom/ramdump.c
@@ -406,7 +406,7 @@ void destroy_ramdump_device(void *dev)
 EXPORT_SYMBOL(destroy_ramdump_device);
 
 static int _do_ramdump(void *handle, struct ramdump_segment *segments,
-		int nsegments, bool use_elf)
+		int nsegments, bool use_elf, bool complete_ramdump)
 {
 	int ret, i;
 	struct ramdump_device *rd_dev = (struct ramdump_device *)handle;
@@ -434,7 +434,7 @@ static int _do_ramdump(void *handle, struct ramdump_segment *segments,
 		return -EPIPE;
 	}
 
-	if (rd_dev->complete_ramdump) {
+	if (complete_ramdump) {
 		for (i = 0; i < nsegments-1; i++)
 			segments[i].size =
 				segments[i + 1].address - segments[i].address;
@@ -635,7 +635,10 @@ static int _do_minidump(void *handle, struct ramdump_segment *segments,
 
 int do_ramdump(void *handle, struct ramdump_segment *segments, int nsegments)
 {
-	return _do_ramdump(handle, segments, nsegments, false);
+	struct ramdump_device *rd_dev = (struct ramdump_device *)handle;
+
+	return _do_ramdump(handle, segments, nsegments, false,
+				rd_dev->complete_ramdump);
 }
 EXPORT_SYMBOL(do_ramdump);
 
@@ -645,9 +648,19 @@ int do_minidump(void *handle, struct ramdump_segment *segments, int nsegments)
 }
 EXPORT_SYMBOL(do_minidump);
 
+int do_minidump_elf32(void *handle, struct ramdump_segment *segments,
+		      int nsegments)
+{
+	return _do_ramdump(handle, segments, nsegments, true, false);
+}
+EXPORT_SYMBOL(do_minidump_elf32);
+
 int
 do_elf_ramdump(void *handle, struct ramdump_segment *segments, int nsegments)
 {
-	return _do_ramdump(handle, segments, nsegments, true);
+	struct ramdump_device *rd_dev = (struct ramdump_device *)handle;
+
+	return _do_ramdump(handle, segments, nsegments, true,
+				rd_dev->complete_ramdump);
 }
 EXPORT_SYMBOL(do_elf_ramdump);
diff --git a/drivers/soc/qcom/rmnet_ctl/Kconfig b/drivers/soc/qcom/rmnet_ctl/Kconfig
index bfb91fbd..0085cd2 100644
--- a/drivers/soc/qcom/rmnet_ctl/Kconfig
+++ b/drivers/soc/qcom/rmnet_ctl/Kconfig
@@ -10,3 +10,9 @@
 	  Enable the RMNET CTL module which is used for communicating with
 	  device via map command protocol. This module will receive QMAP
 	  control commands via MHI.
+
+menuconfig RMNET_CTL_DEBUG
+	bool "RmNet Control debug"
+	depends on RMNET_CTL
+	help
+	  Enable RMNET CTL IPC debug logging.
diff --git a/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.c b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.c
index 299b301..17a53ec 100644
--- a/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.c
+++ b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.c
@@ -6,8 +6,14 @@
  */
 
 #include <soc/qcom/rmnet_ctl.h>
+#include <linux/debugfs.h>
+#include <linux/ipc_logging.h>
 #include "rmnet_ctl_client.h"
 
+#define RMNET_CTL_LOG_PAGE 10
+#define RMNET_CTL_LOG_NAME "rmnet_ctl"
+#define RMNET_CTL_LOG_LVL  "ipc_log_lvl"
+
 struct rmnet_ctl_client {
 	struct rmnet_ctl_client_hooks hooks;
 };
@@ -15,14 +21,38 @@ struct rmnet_ctl_client {
 struct rmnet_ctl_endpoint {
 	struct rmnet_ctl_dev __rcu *dev;
 	struct rmnet_ctl_client __rcu *client;
+	struct dentry *dbgfs_dir;
+	struct dentry *dbgfs_loglvl;
+	void *ipc_log;
 };
 
+#ifdef CONFIG_RMNET_CTL_DEBUG
+static u8 ipc_log_lvl = RMNET_CTL_LOG_DEBUG;
+#else
+static u8 ipc_log_lvl = RMNET_CTL_LOG_ERR;
+#endif
+
 static DEFINE_SPINLOCK(client_lock);
 static struct rmnet_ctl_endpoint ctl_ep;
 
 void rmnet_ctl_endpoint_setdev(const struct rmnet_ctl_dev *dev)
 {
 	rcu_assign_pointer(ctl_ep.dev, dev);
+
+	if (dev) {
+		ctl_ep.dbgfs_dir = debugfs_create_dir(
+					RMNET_CTL_LOG_NAME, NULL);
+		if (!IS_ERR_OR_NULL(ctl_ep.dbgfs_dir))
+			ctl_ep.dbgfs_loglvl = debugfs_create_u8(
+				RMNET_CTL_LOG_LVL, 0644, ctl_ep.dbgfs_dir,
+				&ipc_log_lvl);
+
+		if (!ctl_ep.ipc_log)
+			ctl_ep.ipc_log = ipc_log_context_create(
+				RMNET_CTL_LOG_PAGE, RMNET_CTL_LOG_NAME, 0);
+	} else {
+		debugfs_remove_recursive(ctl_ep.dbgfs_dir);
+	}
 }
 
 void rmnet_ctl_endpoint_post(const void *data, size_t len)
@@ -33,6 +63,8 @@ void rmnet_ctl_endpoint_post(const void *data, size_t len)
 	if (unlikely(!data || !len))
 		return;
 
+	rmnet_ctl_log_info("RX", data, len);
+
 	rcu_read_lock();
 
 	client = rcu_dereference(ctl_ep.client);
@@ -109,6 +141,8 @@ int rmnet_ctl_send_client(void *handle, struct sk_buff *skb)
 	if (client != rcu_dereference(ctl_ep.client))
 		return rc;
 
+	rmnet_ctl_log_info("TX", skb->data, skb->len);
+
 	rcu_read_lock();
 
 	dev = rcu_dereference(ctl_ep.dev);
@@ -117,6 +151,23 @@ int rmnet_ctl_send_client(void *handle, struct sk_buff *skb)
 
 	rcu_read_unlock();
 
+	if (rc)
+		rmnet_ctl_log_err("TXE", rc, skb->data, skb->len);
+
 	return rc;
 }
 EXPORT_SYMBOL(rmnet_ctl_send_client);
+
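+/* Log up to the first 32 bytes of a frame as hex, gated by ipc_log_lvl */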
+void rmnet_ctl_log(enum rmnet_ctl_log_lvl lvl, const char *msg,
+		   int rc, const void *data, unsigned int len)
+{
+	if (lvl <= ipc_log_lvl && ctl_ep.ipc_log) {
+		if (data == NULL || len == 0)
+			ipc_log_string(ctl_ep.ipc_log, "%3s(%d): (null)\n",
+				       msg, rc);
+		else
+			ipc_log_string(ctl_ep.ipc_log, "%3s(%d): %*ph\n",
+				       msg, rc, len > 32 ? 32 : len, data);
+	}
+}
+EXPORT_SYMBOL(rmnet_ctl_log);
diff --git a/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_mhi.c b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_mhi.c
index af84e13..17f2528 100644
--- a/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_mhi.c
+++ b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_mhi.c
@@ -10,9 +10,10 @@
 #include <linux/of.h>
 #include <linux/skbuff.h>
 #include <linux/mhi.h>
+#include <soc/qcom/rmnet_ctl.h>
 #include "rmnet_ctl_client.h"
 
-#define RMNET_CTL_DEFAULT_MRU 1024
+#define RMNET_CTL_DEFAULT_MRU 256
 
 struct rmnet_ctl_mhi_dev {
 	struct mhi_device *mhi_dev;
@@ -51,6 +52,12 @@ static void rmnet_ctl_alloc_buffers(struct rmnet_ctl_mhi_dev *ctl_dev,
 	int no_tre, i, rc;
 
 	no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
+
+	if (!no_tre && free_buf) {
+		kfree(free_buf);
+		return;
+	}
+
 	for (i = 0; i < no_tre; i++) {
 		if (free_buf) {
 			buf = free_buf;
@@ -79,7 +86,12 @@ static void rmnet_ctl_dl_callback(struct mhi_device *mhi_dev,
 {
 	struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
 
-	if (mhi_res->transaction_status || !mhi_res->buf_addr) {
+	if (mhi_res->transaction_status == -ENOTCONN) {
+		kfree(mhi_res->buf_addr);
+		return;
+	} else if (mhi_res->transaction_status ||
+		   !mhi_res->buf_addr || !mhi_res->bytes_xferd) {
+		rmnet_ctl_log_err("RXE", mhi_res->transaction_status, NULL, 0);
 		ctl_dev->dev.stats.rx_err++;
 	} else {
 		ctl_dev->dev.stats.rx_pkts++;
@@ -98,7 +110,14 @@ static void rmnet_ctl_ul_callback(struct mhi_device *mhi_dev,
 	struct sk_buff *skb = (struct sk_buff *)mhi_res->buf_addr;
 
 	if (skb) {
-		ctl_dev->dev.stats.tx_complete++;
+		if (mhi_res->transaction_status) {
+			rmnet_ctl_log_err("TXE", mhi_res->transaction_status,
+					  skb->data, skb->len);
+			ctl_dev->dev.stats.tx_err++;
+		} else {
+			rmnet_ctl_log_debug("TXC", skb->data, skb->len);
+			ctl_dev->dev.stats.tx_complete++;
+		}
 		kfree_skb(skb);
 	}
 }
diff --git a/drivers/soc/qcom/rpm-smd-debug.c b/drivers/soc/qcom/rpm-smd-debug.c
new file mode 100644
index 0000000..baa6478
--- /dev/null
+++ b/drivers/soc/qcom/rpm-smd-debug.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "rpm-smd-debug: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <soc/qcom/rpm-smd.h>
+
+#define MAX_MSG_BUFFER 350
+#define MAX_KEY_VALUE_PAIRS 20
+
+static struct dentry *rpm_debugfs_dir;
+
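+/*
+ * Pack up to four ASCII characters into a little-endian u32, the form
+ * in which RPM resource types and keys are encoded.
+ */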
+static u32 string_to_uint(const u8 *str)
+{
+	int i, len;
+	u32 output = 0;
+
+	len = strnlen(str, sizeof(u32));
+	for (i = 0; i < len; i++)
+		output |= str[i] << (i * 8);
+
+	return output;
+}
+
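+/*
+ * Message format (values below are illustrative only):
+ *   echo "active clk1 2 1 swen 1" > /d/rpm_send_msg/message
+ * i.e. <set> <rsc type> <rsc id> <nelems> followed by <key> <value>
+ * pairs; set is "active" or "sleep", types and keys are at most four
+ * characters.
+ */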
+static ssize_t rsc_ops_write(struct file *fp, const char __user *user_buffer,
+						size_t count, loff_t *position)
+{
+	char buf[MAX_MSG_BUFFER], rsc_type_str[6] = {}, rpm_set[8] = {},
+						key_str[6] = {};
+	int i, pos = -1, set = -1, nelems = -1;
+	char *cmp;
+	uint32_t rsc_type = 0, rsc_id = 0, key = 0, data = 0;
+	struct msm_rpm_request *req;
+
+	count = min(count, sizeof(buf) - 1);
+	if (copy_from_user(&buf, user_buffer, count))
+		return -EFAULT;
+	buf[count] = '\0';
+	cmp = strstrip(buf);
+
+	if (sscanf(cmp, "%7s %5s %u %d %n", rpm_set, rsc_type_str,
+				&rsc_id, &nelems, &pos) != 4) {
+		pr_err("Invalid number of arguments passed\n");
+		goto err;
+	}
+
+	if (strlen(rpm_set) > 6 || strlen(rsc_type_str) > 4) {
+		pr_err("Invalid value of set or resource type\n");
+		goto err;
+	}
+
+	if (!strcmp(rpm_set, "active"))
+		set = 0;
+	else if (!strcmp(rpm_set, "sleep"))
+		set = 1;
+
+	rsc_type = string_to_uint(rsc_type_str);
+
+	if (set < 0 || nelems < 0) {
+		pr_err("Invalid value of set or nelems\n");
+		goto err;
+	}
+	if (nelems > MAX_KEY_VALUE_PAIRS) {
+		pr_err("Exceeded max no of key-value entries\n");
+		goto err;
+	}
+
+	req = msm_rpm_create_request(set, rsc_type, rsc_id, nelems);
+	if (!req)
+		return -ENOMEM;
+
+	for (i = 0; i < nelems; i++) {
+		cmp += pos;
+		if (sscanf(cmp, "%5s %n", key_str, &pos) != 1) {
+			pr_err("Invalid number of arguments passed\n");
+			goto err_request;
+		}
+
+		if (strlen(key_str) > 4) {
+			pr_err("Key value cannot be more than 4 characters\n");
+			goto err_request;
+		}
+		key = string_to_uint(key_str);
+		if (!key) {
+			pr_err("Key values entered incorrectly\n");
+			goto err_request;
+		}
+
+		cmp += pos;
+		if (sscanf(cmp, "%u %n", &data, &pos) != 1) {
+			pr_err("Invalid number of arguments passed\n");
+			goto err_request;
+		}
+
+		if (msm_rpm_add_kvp_data(req, key,
+				(void *)&data, sizeof(data)))
+			goto err_request;
+	}
+
+	if (msm_rpm_wait_for_ack(msm_rpm_send_request(req)))
+		pr_err("Sending the RPM message failed\n");
+
+err_request:
+	msm_rpm_free_request(req);
+err:
+	return count;
+}
+
+static const struct file_operations rsc_ops = {
+	.write = rsc_ops_write,
+};
+
+static int __init rpm_smd_debugfs_init(void)
+{
+	rpm_debugfs_dir = debugfs_create_dir("rpm_send_msg", NULL);
+	if (!rpm_debugfs_dir)
+		return -ENOMEM;
+
+	if (!debugfs_create_file("message", 0200, rpm_debugfs_dir, NULL,
+								&rsc_ops))
+		return -ENOMEM;
+
+	return 0;
+}
+late_initcall(rpm_smd_debugfs_init);
+
+static void __exit rpm_smd_debugfs_exit(void)
+{
+	debugfs_remove_recursive(rpm_debugfs_dir);
+}
+module_exit(rpm_smd_debugfs_exit);
+
+MODULE_DESCRIPTION("RPM SMD Debug Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpm_master_stat.c b/drivers/soc/qcom/rpm_master_stat.c
new file mode 100644
index 0000000..6ae297a
--- /dev/null
+++ b/drivers/soc/qcom/rpm_master_stat.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/uaccess.h>
+
+#define RPM_MASTERS_BUF_LEN 400
+
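+/*
+ * Append formatted output to @buf, advancing the cursor and shrinking
+ * the remaining @size; once the buffer is exhausted, later calls are
+ * no-ops.
+ */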
+#define SNPRINTF(buf, size, format, ...) \
+	do { \
+		if (size > 0) { \
+			int ret; \
+			ret = snprintf(buf, size, format, ## __VA_ARGS__); \
+			if (ret > size) { \
+				buf += size; \
+				size = 0; \
+			} else { \
+				buf += ret; \
+				size -= ret; \
+			} \
+		} \
+	} while (0)
+
+#define GET_MASTER_NAME(a, prvdata) \
+	((a >= prvdata->num_masters) ? "Invalid Master Name" : \
+	 prvdata->master_names[a])
+
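+/*
+ * Stringify @a and return the member name after the '.', e.g.
+ * GET_FIELD(record.xo_count) yields "xo_count".
+ */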
+#define GET_FIELD(a) ((strnstr(#a, ".", 80) + 1))
+
+struct msm_rpm_master_stats_platform_data {
+	phys_addr_t phys_addr_base;
+	u32 phys_size;
+	char **masters;
+	/*
+	 * RPM maintains PC stats for each master in MSG RAM and
+	 * allocates 256 bytes for this use. The number of masters
+	 * differs across targets; based on it, the Linux RPM stats
+	 * driver reads (32 * num_masters) bytes to display the
+	 * master stats.
+	 */
+	s32 num_masters;
+	u32 master_offset;
+	u32 version;
+};
+
+static DEFINE_MUTEX(msm_rpm_master_stats_mutex);
+
+struct msm_rpm_master_stats {
+	uint32_t active_cores;
+	uint32_t numshutdowns;
+	uint64_t shutdown_req;
+	uint64_t wakeup_ind;
+	uint64_t bringup_req;
+	uint64_t bringup_ack;
+	uint32_t wakeup_reason; /* 0 = rude wakeup, 1 = scheduled wakeup */
+	uint32_t last_sleep_transition_duration;
+	uint32_t last_wake_transition_duration;
+	uint32_t xo_count;
+	uint64_t xo_last_entered_at;
+	uint64_t xo_last_exited_at;
+	uint64_t xo_accumulated_duration;
+};
+
+struct msm_rpm_master_stats_private_data {
+	void __iomem *reg_base;
+	u32 len;
+	char **master_names;
+	u32 num_masters;
+	char buf[RPM_MASTERS_BUF_LEN];
+	struct msm_rpm_master_stats_platform_data *platform_data;
+};
+
+static int msm_rpm_master_stats_file_close(struct inode *inode,
+		struct file *file)
+{
+	struct msm_rpm_master_stats_private_data *private = file->private_data;
+
+	mutex_lock(&msm_rpm_master_stats_mutex);
+	if (private->reg_base)
+		iounmap(private->reg_base);
+	kfree(file->private_data);
+	mutex_unlock(&msm_rpm_master_stats_mutex);
+
+	return 0;
+}
+
+static int msm_rpm_master_copy_stats(
+		struct msm_rpm_master_stats_private_data *prvdata)
+{
+	struct msm_rpm_master_stats record;
+	struct msm_rpm_master_stats_platform_data *pdata;
+	static int master_cnt;
+	int count, j = 0;
+	char *buf;
+	unsigned long active_cores;
+
+	/* Iterate possible number of masters */
+	/* Wrap around once stats for all masters have been read */
+		master_cnt = 0;
+		return 0;
+	}
+
+	pdata = prvdata->platform_data;
+	count = RPM_MASTERS_BUF_LEN;
+	buf = prvdata->buf;
+
+	if (prvdata->platform_data->version == 2) {
+		SNPRINTF(buf, count, "%s\n",
+				GET_MASTER_NAME(master_cnt, prvdata));
+
+		record.shutdown_req = readq_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset +
+			offsetof(struct msm_rpm_master_stats, shutdown_req)));
+
+		SNPRINTF(buf, count, "\t%s:0x%llX\n",
+			GET_FIELD(record.shutdown_req),
+			record.shutdown_req);
+
+		record.wakeup_ind = readq_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset +
+			offsetof(struct msm_rpm_master_stats, wakeup_ind)));
+
+		SNPRINTF(buf, count, "\t%s:0x%llX\n",
+			GET_FIELD(record.wakeup_ind),
+			record.wakeup_ind);
+
+		record.bringup_req = readq_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset +
+			offsetof(struct msm_rpm_master_stats, bringup_req)));
+
+		SNPRINTF(buf, count, "\t%s:0x%llX\n",
+			GET_FIELD(record.bringup_req),
+			record.bringup_req);
+
+		record.bringup_ack = readq_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset +
+			offsetof(struct msm_rpm_master_stats, bringup_ack)));
+
+		SNPRINTF(buf, count, "\t%s:0x%llX\n",
+			GET_FIELD(record.bringup_ack),
+			record.bringup_ack);
+
+		record.xo_last_entered_at = readq_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset +
+			offsetof(struct msm_rpm_master_stats,
+			xo_last_entered_at)));
+
+		SNPRINTF(buf, count, "\t%s:0x%llX\n",
+			GET_FIELD(record.xo_last_entered_at),
+			record.xo_last_entered_at);
+
+		record.xo_last_exited_at = readq_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset +
+			offsetof(struct msm_rpm_master_stats,
+			xo_last_exited_at)));
+
+		SNPRINTF(buf, count, "\t%s:0x%llX\n",
+			GET_FIELD(record.xo_last_exited_at),
+			record.xo_last_exited_at);
+
+		record.xo_accumulated_duration =
+				readq_relaxed(prvdata->reg_base +
+				(master_cnt * pdata->master_offset +
+				offsetof(struct msm_rpm_master_stats,
+				xo_accumulated_duration)));
+
+		SNPRINTF(buf, count, "\t%s:0x%llX\n",
+			GET_FIELD(record.xo_accumulated_duration),
+			record.xo_accumulated_duration);
+
+		record.last_sleep_transition_duration =
+				readl_relaxed(prvdata->reg_base +
+				(master_cnt * pdata->master_offset +
+				offsetof(struct msm_rpm_master_stats,
+				last_sleep_transition_duration)));
+
+		SNPRINTF(buf, count, "\t%s:0x%x\n",
+			GET_FIELD(record.last_sleep_transition_duration),
+			record.last_sleep_transition_duration);
+
+		record.last_wake_transition_duration =
+				readl_relaxed(prvdata->reg_base +
+				(master_cnt * pdata->master_offset +
+				offsetof(struct msm_rpm_master_stats,
+				last_wake_transition_duration)));
+
+		SNPRINTF(buf, count, "\t%s:0x%x\n",
+			GET_FIELD(record.last_wake_transition_duration),
+			record.last_wake_transition_duration);
+
+		record.xo_count =
+				readl_relaxed(prvdata->reg_base +
+				(master_cnt * pdata->master_offset +
+				offsetof(struct msm_rpm_master_stats,
+				xo_count)));
+
+		SNPRINTF(buf, count, "\t%s:0x%x\n",
+			GET_FIELD(record.xo_count),
+			record.xo_count);
+
+		record.wakeup_reason = readl_relaxed(prvdata->reg_base +
+					(master_cnt * pdata->master_offset +
+					offsetof(struct msm_rpm_master_stats,
+					wakeup_reason)));
+
+		SNPRINTF(buf, count, "\t%s:0x%x\n",
+			GET_FIELD(record.wakeup_reason),
+			record.wakeup_reason);
+
+		record.numshutdowns = readl_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset +
+			 offsetof(struct msm_rpm_master_stats, numshutdowns)));
+
+		SNPRINTF(buf, count, "\t%s:0x%x\n",
+			GET_FIELD(record.numshutdowns),
+			record.numshutdowns);
+
+		record.active_cores = readl_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset) +
+			offsetof(struct msm_rpm_master_stats, active_cores));
+
+		SNPRINTF(buf, count, "\t%s:0x%x\n",
+			GET_FIELD(record.active_cores),
+			record.active_cores);
+	} else {
+		SNPRINTF(buf, count, "%s\n",
+				GET_MASTER_NAME(master_cnt, prvdata));
+
+		record.numshutdowns = readl_relaxed(prvdata->reg_base +
+				(master_cnt * pdata->master_offset) + 0x0);
+
+		SNPRINTF(buf, count, "\t%s:0x%0x\n",
+			GET_FIELD(record.numshutdowns),
+			record.numshutdowns);
+
+		record.active_cores = readl_relaxed(prvdata->reg_base +
+				(master_cnt * pdata->master_offset) + 0x4);
+
+		SNPRINTF(buf, count, "\t%s:0x%0x\n",
+			GET_FIELD(record.active_cores),
+			record.active_cores);
+	}
+
+	active_cores = record.active_cores;
+	j = find_first_bit(&active_cores, BITS_PER_LONG);
+	while (j < (BITS_PER_LONG - 1)) {
+		SNPRINTF(buf, count, "\t\tcore%d\n", j);
+		j = find_next_bit((const unsigned long *)&active_cores,
+							BITS_PER_LONG, j + 1);
+	}
+
+	if (j == (BITS_PER_LONG - 1))
+		SNPRINTF(buf, count, "\t\tcore%d\n", j);
+
+	master_cnt++;
+	return RPM_MASTERS_BUF_LEN - count;
+}
+
+static ssize_t msm_rpm_master_stats_file_read(struct file *file,
+				char __user *bufu, size_t count, loff_t *ppos)
+{
+	struct msm_rpm_master_stats_private_data *prvdata;
+	struct msm_rpm_master_stats_platform_data *pdata;
+	ssize_t ret;
+
+	mutex_lock(&msm_rpm_master_stats_mutex);
+	prvdata = file->private_data;
+	if (!prvdata) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	pdata = prvdata->platform_data;
+	if (!pdata) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (!bufu || count == 0) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (*ppos <= pdata->phys_size) {
+		prvdata->len = msm_rpm_master_copy_stats(prvdata);
+		*ppos = 0;
+	}
+
+	ret = simple_read_from_buffer(bufu, count, ppos,
+			prvdata->buf, prvdata->len);
+exit:
+	mutex_unlock(&msm_rpm_master_stats_mutex);
+	return ret;
+}
+
+static int msm_rpm_master_stats_file_open(struct inode *inode,
+		struct file *file)
+{
+	struct msm_rpm_master_stats_private_data *prvdata;
+	struct msm_rpm_master_stats_platform_data *pdata;
+	int ret = 0;
+
+	mutex_lock(&msm_rpm_master_stats_mutex);
+	pdata = inode->i_private;
+
+	file->private_data =
+		kzalloc(sizeof(struct msm_rpm_master_stats_private_data),
+			GFP_KERNEL);
+
+	if (!file->private_data) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	prvdata = file->private_data;
+
+	prvdata->reg_base = ioremap(pdata->phys_addr_base,
+						pdata->phys_size);
+	if (!prvdata->reg_base) {
+		kfree(file->private_data);
+		prvdata = NULL;
+		pr_err("%s: ERROR could not ioremap start=%pa, len=%u\n",
+			__func__, &pdata->phys_addr_base,
+			pdata->phys_size);
+		ret = -EBUSY;
+		goto exit;
+	}
+
+	prvdata->len = 0;
+	prvdata->num_masters = pdata->num_masters;
+	prvdata->master_names = pdata->masters;
+	prvdata->platform_data = pdata;
+exit:
+	mutex_unlock(&msm_rpm_master_stats_mutex);
+	return ret;
+}
+
+static const struct file_operations msm_rpm_master_stats_fops = {
+	.owner	  = THIS_MODULE,
+	.open	  = msm_rpm_master_stats_file_open,
+	.read	  = msm_rpm_master_stats_file_read,
+	.release  = msm_rpm_master_stats_file_close,
+	.llseek   = no_llseek,
+};
+
+static struct msm_rpm_master_stats_platform_data
+			*msm_rpm_master_populate_pdata(struct device *dev)
+{
+	struct msm_rpm_master_stats_platform_data *pdata;
+	struct device_node *node = dev->of_node;
+	int rc = 0, i;
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		goto err;
+
+	rc = of_property_read_u32(node, "qcom,master-stats-version",
+							&pdata->version);
+	if (rc) {
+		dev_err(dev, "master-stats-version missing rc=%d\n", rc);
+		goto err;
+	}
+
+	rc = of_property_read_u32(node, "qcom,master-offset",
+							&pdata->master_offset);
+	if (rc) {
+		dev_err(dev, "master-offset missing rc=%d\n", rc);
+		goto err;
+	}
+
+	pdata->num_masters = of_property_count_strings(node, "qcom,masters");
+	if (pdata->num_masters < 0) {
+		dev_err(dev, "Failed to get number of masters =%d\n",
+						pdata->num_masters);
+		goto err;
+	}
+
+	pdata->masters = devm_kzalloc(dev, sizeof(char *) * pdata->num_masters,
+								GFP_KERNEL);
+	if (!pdata->masters)
+		goto err;
+
+	/*
+	 * Read master names from DT
+	 */
+	for (i = 0; i < pdata->num_masters; i++) {
+		const char *master_name;
+
+		of_property_read_string_index(node, "qcom,masters",
+							i, &master_name);
+		pdata->masters[i] = devm_kzalloc(dev, sizeof(char) *
+				strlen(master_name) + 1, GFP_KERNEL);
+		if (!pdata->masters[i])
+			goto err;
+		strlcpy(pdata->masters[i], master_name,
+					strlen(master_name) + 1);
+	}
+	return pdata;
+err:
+	return NULL;
+}
+
+static  int msm_rpm_master_stats_probe(struct platform_device *pdev)
+{
+	struct dentry *dent;
+	struct msm_rpm_master_stats_platform_data *pdata;
+	struct resource *res = NULL;
+
+	if (!pdev)
+		return -EINVAL;
+
+	if (pdev->dev.of_node)
+		pdata = msm_rpm_master_populate_pdata(&pdev->dev);
+	else
+		pdata = pdev->dev.platform_data;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "%s: Unable to get pdata\n", __func__);
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	if (!res) {
+		dev_err(&pdev->dev,
+			"%s: Failed to get IO resource from platform device\n",
+			__func__);
+		return -ENXIO;
+	}
+
+	pdata->phys_addr_base = res->start;
+	pdata->phys_size = resource_size(res);
+
+	dent = debugfs_create_file("rpm_master_stats", 0444, NULL,
+					pdata, &msm_rpm_master_stats_fops);
+
+	if (!dent) {
+		dev_err(&pdev->dev, "%s: ERROR debugfs_create_file failed\n",
+								__func__);
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, dent);
+	return 0;
+}
+
+static int msm_rpm_master_stats_remove(struct platform_device *pdev)
+{
+	struct dentry *dent;
+
+	dent = platform_get_drvdata(pdev);
+	debugfs_remove(dent);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static const struct of_device_id rpm_master_table[] = {
+	{.compatible = "qcom,rpm-master-stats"},
+	{},
+};
+
+static struct platform_driver msm_rpm_master_stats_driver = {
+	.probe	= msm_rpm_master_stats_probe,
+	.remove = msm_rpm_master_stats_remove,
+	.driver = {
+		.name = "msm_rpm_master_stats",
+		.of_match_table = rpm_master_table,
+	},
+};
+
+static int __init msm_rpm_master_stats_init(void)
+{
+	return platform_driver_register(&msm_rpm_master_stats_driver);
+}
+
+static void __exit msm_rpm_master_stats_exit(void)
+{
+	platform_driver_unregister(&msm_rpm_master_stats_driver);
+}
+
+module_init(msm_rpm_master_stats_init);
+module_exit(msm_rpm_master_stats_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM RPM Master Statistics driver");
+MODULE_ALIAS("platform:msm_master_stat_log");
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 4443e277..be34be0 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/atomic.h>
@@ -559,6 +559,10 @@ int rpmh_flush(const struct device *dev)
 		return 0;
 	}
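+	/*
+	 * Drop stale TCS contents before flushing the cached requests;
+	 * rpmh_rsc_invalidate() returns -EAGAIN while a TCS is still in
+	 * use, so retry until it succeeds.
+	 */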
 
+	do {
+		ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
+	} while (ret == -EAGAIN);
+
 	/* First flush the cached batch requests */
 	ret = flush_batch(ctrlr);
 	if (ret)
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index 1ab2680..2662285 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -14,6 +14,9 @@
 #include <soc/qcom/scm.h>
 #include <soc/qcom/secure_buffer.h>
 
+#define CREATE_TRACE_POINTS
+#include "trace_secure_buffer.h"
+
 DEFINE_MUTEX(secure_buffer_mutex);
 
 struct cp2_mem_chunks {
@@ -28,24 +31,12 @@ struct cp2_lock_req {
 	u32 lock;
 } __attribute__ ((__packed__));
 
-struct mem_prot_info {
-	phys_addr_t addr;
-	u64 size;
-};
-
 #define MEM_PROT_ASSIGN_ID		0x16
 #define MEM_PROTECT_LOCK_ID2		0x0A
 #define MEM_PROTECT_LOCK_ID2_FLAT	0x11
 #define V2_CHUNK_SIZE           SZ_1M
 #define FEATURE_ID_CP 12
 
-struct dest_vm_and_perm_info {
-	u32 vm;
-	u32 perm;
-	u64 ctx;
-	u32 ctx_size;
-};
-
 #define BATCH_MAX_SIZE SZ_2M
 #define BATCH_MAX_SECTIONS 32
 
@@ -228,9 +219,13 @@ static int batched_hyp_assign(struct sg_table *table, struct scm_desc *desc)
 	unsigned int entries_size;
 	unsigned int batch_start = 0;
 	unsigned int batches_processed;
+	unsigned int i = 0;
+	u64 total_delta;
 	struct scatterlist *curr_sgl = table->sgl;
 	struct scatterlist *next_sgl;
 	int ret = 0;
+	ktime_t batch_assign_start_ts;
+	ktime_t first_assign_ts;
 	struct mem_prot_info *sg_table_copy = kcalloc(BATCH_MAX_SECTIONS,
 						      sizeof(*sg_table_copy),
 						      GFP_KERNEL);
@@ -238,6 +233,7 @@ static int batched_hyp_assign(struct sg_table *table, struct scm_desc *desc)
 	if (!sg_table_copy)
 		return -ENOMEM;
 
+	first_assign_ts = ktime_get();
 	while (batch_start < table->nents) {
 		batches_processed = get_batches_from_sgl(sg_table_copy,
 							 curr_sgl, &next_sgl);
@@ -248,8 +244,13 @@ static int batched_hyp_assign(struct sg_table *table, struct scm_desc *desc)
 		desc->args[0] = virt_to_phys(sg_table_copy);
 		desc->args[1] = entries_size;
 
+		trace_hyp_assign_batch_start(sg_table_copy, batches_processed);
+		batch_assign_start_ts = ktime_get();
 		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
 				MEM_PROT_ASSIGN_ID), desc);
+		trace_hyp_assign_batch_end(ret, ktime_us_delta(ktime_get(),
+					   batch_assign_start_ts));
+		i++;
 		if (ret) {
 			pr_info("%s: Failed to assign memory protection, ret = %d\n",
 				__func__, ret);
@@ -263,7 +264,8 @@ static int batched_hyp_assign(struct sg_table *table, struct scm_desc *desc)
 
 		batch_start += batches_processed;
 	}
-
+	total_delta = ktime_us_delta(ktime_get(), first_assign_ts);
+	trace_hyp_assign_end(total_delta, div64_u64(total_delta, i));
 	kfree(sg_table_copy);
 	return ret;
 }
@@ -288,7 +290,7 @@ static int __hyp_assign_table(struct sg_table *table,
 	size_t dest_vm_copy_size;
 
 	if (!table || !table->sgl || !source_vm_list || !source_nelems ||
-	    !dest_vmids || !dest_perms || !dest_nelems)
+	    !dest_vmids || !dest_perms || !dest_nelems || !table->nents)
 		return -EINVAL;
 
 	/*
@@ -333,6 +335,8 @@ static int __hyp_assign_table(struct sg_table *table,
 	dmac_flush_range(dest_vm_copy,
 			 (void *)dest_vm_copy + dest_vm_copy_size);
 
+	trace_hyp_assign_info(source_vm_list, source_nelems, dest_vmids,
+			      dest_perms, dest_nelems);
 	ret = batched_hyp_assign(table, &desc);
 
 	mutex_unlock(&secure_buffer_mutex);
diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c
index f735395..b3cb9bc 100644
--- a/drivers/soc/qcom/smcinvoke.c
+++ b/drivers/soc/qcom/smcinvoke.c
@@ -103,6 +103,7 @@
 #define MEM_RGN_SRVR_ID 1
 #define MEM_MAP_SRVR_ID 2
 #define CBOBJ_SERVER_ID_START 0x10
+#define CBOBJ_SERVER_ID_END ((1<<16) - 1)
 /* local obj id is represented by 15 bits */
 #define MAX_LOCAL_OBJ_ID ((1<<15) - 1)
 /* CBOBJs will be served by server id 0x10 onwards */
@@ -258,6 +259,9 @@ static struct smcinvoke_server_info *find_cb_server_locked(uint16_t server_id)
 
 static uint16_t next_cb_server_id_locked(void)
 {
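+	/* Wrap back to the start of the CBOBJ server-ID range on exhaustion */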
+	if (g_last_cb_server_id == CBOBJ_SERVER_ID_END)
+		g_last_cb_server_id = CBOBJ_SERVER_ID_START;
+
 	while (find_cb_server_locked(++g_last_cb_server_id))
 		;
 
@@ -394,10 +398,10 @@ static void free_pending_cbobj_locked(struct kref *kref)
 
 static int get_pending_cbobj_locked(uint16_t srvr_id, int16_t obj_id)
 {
-	struct smcinvoke_server_info *server = find_cb_server_locked(srvr_id);
 	struct list_head *head = NULL;
 	struct smcinvoke_cbobj *cbobj = NULL;
 	struct smcinvoke_cbobj *obj = NULL;
+	struct smcinvoke_server_info *server = find_cb_server_locked(srvr_id);
 
 	if (!server)
 		return OBJECT_ERROR_BADOBJ;
@@ -465,7 +469,11 @@ static void delete_cb_txn(struct kref *kref)
 	struct smcinvoke_cb_txn *cb_txn = container_of(kref,
 					struct smcinvoke_cb_txn, ref_cnt);
 
+	if (OBJECT_OP_METHODID(cb_txn->cb_req->hdr.op) == OBJECT_OP_RELEASE)
+		release_tzhandle_locked(cb_txn->cb_req->hdr.tzhandle);
+
 	kfree(cb_txn->cb_req);
+	hash_del(&cb_txn->hash);
 	kfree(cb_txn);
 }
 
@@ -842,19 +850,13 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
 	/* ret is going to TZ. Provide values from OBJECT_ERROR_<> */
 	int ret = OBJECT_ERROR_DEFUNCT;
 	struct smcinvoke_cb_txn *cb_txn = NULL;
-	struct smcinvoke_tzcb_req *cb_req = NULL;
+	struct smcinvoke_tzcb_req *cb_req = NULL, *tmp_cb_req = NULL;
 	struct smcinvoke_server_info *srvr_info = NULL;
 
 	if (buf_len < sizeof(struct smcinvoke_tzcb_req))
 		return;
 
-	cb_req = kmemdup(buf, buf_len, GFP_KERNEL);
-	if (!cb_req) {
-		/* we need to return error to caller so fill up result */
-		cb_req = buf;
-		cb_req->result = OBJECT_ERROR_KMEM;
-		return;
-	}
+	cb_req = buf;
 
 	/* check whether it is to be served by kernel or userspace */
 	if (TZHANDLE_IS_KERNEL_OBJ(cb_req->hdr.tzhandle)) {
@@ -866,11 +868,26 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
 		return;
 	}
 
+	/*
+	 * We need a copy of req that could be sent to server. Otherwise, if
+	 * someone kills invoke caller, buf would go away and server would be
+	 * working on already freed buffer, causing a device crash.
+	 */
+	tmp_cb_req = kmemdup(buf, buf_len, GFP_KERNEL);
+	if (!tmp_cb_req) {
+		/* we need to return error to caller so fill up result */
+		cb_req->result = OBJECT_ERROR_KMEM;
+		return;
+	}
+
 	cb_txn = kzalloc(sizeof(*cb_txn), GFP_KERNEL);
 	if (!cb_txn) {
-		ret = OBJECT_ERROR_KMEM;
-		goto out;
+		cb_req->result = OBJECT_ERROR_KMEM;
+		kfree(tmp_cb_req);
+		return;
 	}
+	/* no need for memcpy as we did kmemdup() above */
+	cb_req  = tmp_cb_req;
 
 	cb_txn->state = SMCINVOKE_REQ_PLACED;
 	cb_txn->cb_req = cb_req;
@@ -882,6 +899,7 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
 	srvr_info = find_cb_server_locked(
 				TZHANDLE_GET_SERVER(cb_req->hdr.tzhandle));
 	if (!srvr_info || srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT) {
+		/* ret equals OBJECT_ERROR_DEFUNCT at this point; go to out */
 		mutex_unlock(&g_smcinvoke_lock);
 		goto out;
 	}
@@ -889,33 +907,36 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
 	cb_txn->txn_id = ++srvr_info->txn_id;
 	hash_add(srvr_info->reqs_table, &cb_txn->hash, cb_txn->txn_id);
 	mutex_unlock(&g_smcinvoke_lock);
+	/*
+	 * we need not worry that server_info will be deleted because as long
+	 * as this CBObj is served by this server, srvr_info will be valid.
+	 */
 	wake_up_interruptible(&srvr_info->req_wait_q);
 	ret = wait_event_interruptible(srvr_info->rsp_wait_q,
 			(cb_txn->state == SMCINVOKE_REQ_PROCESSED) ||
 			(srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT));
-	if (ret)
-		pr_err("%s wait_event interrupted: ret = %d\n", __func__, ret);
 out:
 	/*
-	 * If we are here, either req is processed or not
-	 * if processed, result would have been set by txn processor
-	 * if not processed, we should set result with ret which should have
-	 * correct value that TZ/TA can understand
+	 * We can get here because either: a. the req was PROCESSED,
+	 * b. the server was killed, or c. the invoke thread was killed;
+	 * sometimes the invoke thread and the server share a process.
 	 */
 	mutex_lock(&g_smcinvoke_lock);
-	if (!cb_txn || (cb_txn->state != SMCINVOKE_REQ_PROCESSED)) {
-		cb_req->result = ret;
-		if (srvr_info &&
-		    srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT &&
-		    OBJECT_OP_METHODID(cb_req->hdr.op) == OBJECT_OP_RELEASE) {
-			release_tzhandle_locked(cb_req->hdr.tzhandle);
-		}
+	hash_del(&cb_txn->hash);
+	if (cb_txn->state == SMCINVOKE_REQ_PROCESSED) {
+		/*
+		 * The server may have been killed right after the CB req
+		 * was processed; the result is already set, so carry on.
+		 */
+	} else if (!srvr_info ||
+		srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT) {
+		cb_req->result = OBJECT_ERROR_DEFUNCT;
+	} else {
+		pr_debug("%s wait_event interrupted ret = %d\n", __func__, ret);
+		cb_req->result = OBJECT_ERROR_ABORT;
 	}
-	if (cb_txn) {
-		hash_del(&cb_txn->hash);
-		memcpy(buf, cb_req, buf_len);
-		kref_put(&cb_txn->ref_cnt, delete_cb_txn);
-	}
+	memcpy(buf, cb_req, buf_len);
+	kref_put(&cb_txn->ref_cnt, delete_cb_txn);
 	mutex_unlock(&g_smcinvoke_lock);
 }
 
@@ -1443,24 +1464,26 @@ static long process_accept_req(struct file *filp, unsigned int cmd,
 		cb_txn = find_cbtxn_locked(server_info, user_args.txn_id,
 					SMCINVOKE_REQ_PROCESSING);
 		mutex_unlock(&g_smcinvoke_lock);
-		/* cb_txn can be null if userspace provides wrong txn id. */
+		/*
+		 * cb_txn can be NULL if userspace provided a wrong txn id,
+		 * or if the invoke thread died while the server was
+		 * processing the cb req: a dying invoke thread removes its
+		 * req from the queue, so no matching cb_txn remains on it.
+		 */
 		if (!cb_txn) {
-			pr_err("%s: Invalid txn received  = %d\n",
+			pr_err("%s txn %d either invalid or removed from Q\n",
 					__func__, user_args.txn_id);
 			goto out;
 		}
 		ret = marshal_out_tzcb_req(&user_args, cb_txn,
 						cb_txn->filp_to_release);
 		/*
-		 * if client did not set error and we get error locally
+		 * if client did not set error and we get error locally,
 		 * we return local error to TA
 		 */
 		if (ret && cb_txn->cb_req->result == 0)
 			cb_txn->cb_req->result = OBJECT_ERROR_UNAVAIL;
 
-		if (OBJECT_OP_METHODID(user_args.op) == OBJECT_OP_RELEASE)
-			release_tzhandles(&cb_txn->cb_req->hdr.tzhandle, 1);
-
 		cb_txn->state = SMCINVOKE_REQ_PROCESSED;
 		kref_put(&cb_txn->ref_cnt, delete_cb_txn);
 		wake_up(&server_info->rsp_wait_q);
@@ -1479,7 +1502,7 @@ static long process_accept_req(struct file *filp, unsigned int cmd,
 		ret = wait_event_interruptible(server_info->req_wait_q,
 				!hash_empty(server_info->reqs_table));
 		if (ret) {
-			pr_err("%s wait_event interrupted: ret = %d\n",
+			pr_debug("%s wait_event interrupted: ret = %d\n",
 							__func__, ret);
 			goto out;
 		}
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 4b378ea..1ba689d 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -53,6 +53,7 @@ enum {
 	HW_PLATFORM_RCM	= 21,
 	HW_PLATFORM_STP = 23,
 	HW_PLATFORM_SBC = 24,
+	HW_PLATFORM_HDK = 31,
 	HW_PLATFORM_INVALID
 };
 
@@ -73,6 +74,7 @@ const char *hw_platform[] = {
 	[HW_PLATFORM_DTV] = "DTV",
 	[HW_PLATFORM_STP] = "STP",
 	[HW_PLATFORM_SBC] = "SBC",
+	[HW_PLATFORM_HDK] = "HDK",
 };
 
 enum {
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index c2b48dd..62e58f9 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -176,7 +176,7 @@ struct spcom_channel {
 	uint8_t num_clients;           /* current number of clients     */
 	struct mutex shared_sync_lock;
 
-	u32 pid; /* debug only to find user space application */
+	u32 pid[SPCOM_MAX_CHANNEL_CLIENTS];
 
 	/* abort flags */
 	bool rpmsg_abort;
@@ -185,6 +185,12 @@ struct spcom_channel {
 	size_t actual_rx_size;	/* actual data size received */
 	void *rpmsg_rx_buf;
 
+	/*
+	 * Tracks whether rx_buf is read in the same session in which
+	 * it was updated.
+	 */
+	uint32_t rx_buf_txn_id;
+
 	/* shared buffer lock/unlock support */
 	int dmabuf_fd_table[SPCOM_MAX_ION_BUF_PER_CH];
 	struct dma_buf *dmabuf_handle_table[SPCOM_MAX_ION_BUF_PER_CH];
@@ -253,6 +259,9 @@ static struct spcom_channel *spcom_find_channel_by_name(const char *name);
 static int spcom_register_rpmsg_drv(struct spcom_channel *ch);
 static int spcom_unregister_rpmsg_drv(struct spcom_channel *ch);
 
+/* PIL's original SSR power-up function */
+int (*desc_powerup)(const struct subsys_desc *) = NULL;
+
 /**
  * spcom_is_channel_open() - channel is open on this side.
  *
@@ -341,7 +350,8 @@ static int spcom_init_channel(struct spcom_channel *ch,
 	ch->actual_rx_size = 0;
 	ch->is_busy = false;
 	ch->txn_id = INITIAL_TXN_ID; /* use non-zero nonce for debug */
-	ch->pid = 0;
+	ch->rx_buf_txn_id = ch->txn_id;
+	memset(ch->pid, 0, sizeof(ch->pid));
 	ch->rpmsg_abort = false;
 	ch->rpmsg_rx_buf = NULL;
 	ch->comm_role_undefined = true;
@@ -394,6 +404,16 @@ static int spcom_rx(struct spcom_channel *ch,
 
 	mutex_lock(&ch->lock);
 
+	if (ch->rx_buf_txn_id != ch->txn_id) {
+		pr_debug("rpmsg_rx_buf was updated in a different session\n");
+		if (ch->rpmsg_rx_buf) {
+			memset(ch->rpmsg_rx_buf, 0, ch->actual_rx_size);
+			kfree(ch->rpmsg_rx_buf);
+			ch->rpmsg_rx_buf = NULL;
+			ch->actual_rx_size = 0;
+		}
+	}
+
 	/* check for already pending data */
 	if (!ch->actual_rx_size) {
 		reinit_completion(&ch->rx_done);
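
For context on the check added above: the channel's txn_id advances per session, and rx_buf_txn_id records the session in which rpmsg_rx_buf was filled (it is stamped in the spcom_signal_rx_done() hunk further down). A condensed sketch of both ends of the scheme, assuming this patch's field names:

	/* producer (rx work): stamp the buffer with the current session */
	ch->rpmsg_rx_buf = rx_item->rpmsg_rx_buf;
	ch->actual_rx_size = rx_item->rx_buf_size;
	ch->rx_buf_txn_id = ch->txn_id;

	/* consumer (spcom_rx): scrub and drop data from an older session */
	if (ch->rx_buf_txn_id != ch->txn_id && ch->rpmsg_rx_buf) {
		memset(ch->rpmsg_rx_buf, 0, ch->actual_rx_size);
		kfree(ch->rpmsg_rx_buf);
		ch->rpmsg_rx_buf = NULL;
		ch->actual_rx_size = 0;
	}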
@@ -588,6 +608,24 @@ static int spcom_local_powerup(const struct subsys_desc *subsys)
 }
 
 /**
+ * spcom_local_powerup_after_fota() - SSR is not allowed after FOTA since it
+ * might cause a cryptographic erase; panic to reset the device instead.
+ *
+ * @subsys: subsystem descriptor (unused).
+ *
+ * Return: never returns normally; the handler panics instead of powering up.
+ */
+static int spcom_local_powerup_after_fota(const struct subsys_desc *subsys)
+{
+	(void)subsys;
+
+	pr_err("SSR after firmware update, before IAR update - panic\n");
+	panic("SSR after SPU firmware update\n");
+
+	return 0;
+}
+
+/**
  * spcom_handle_restart_sp_command() - Handle Restart SP command from
  * user space.
  *
@@ -601,7 +639,6 @@ static int spcom_handle_restart_sp_command(void *cmd_buf, int cmd_size)
 	void *subsystem_get_retval = NULL;
 	struct spcom_user_restart_sp_command *cmd = cmd_buf;
 	struct subsys_desc *desc_p = NULL;
-	int (*desc_powerup)(const struct subsys_desc *) = NULL;
 
 	if (!cmd) {
 		pr_err("NULL cmd_buf\n");
@@ -655,8 +692,14 @@ static int spcom_handle_restart_sp_command(void *cmd_buf, int cmd_size)
 	}
 
 	if (cmd->arg) {
-		/* Reset the PIL subsystem power up function */
-		desc_p->powerup = desc_powerup;
+
+		/* SPU got a firmware update. Don't allow SSR */
+		if (cmd->is_updated) {
+			desc_p->powerup = spcom_local_powerup_after_fota;
+		} else {
+			/* Reset the PIL subsystem power up function */
+			desc_p->powerup = desc_powerup;
+		}
 	}
 	pr_debug("restart - PIL FW loading process is complete\n");
 	return 0;
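
The net effect is a three-state swap of the PIL powerup callback; the original pointer is parked in the now file-scope desc_powerup elsewhere in the restart flow (the save itself is outside this hunk). Schematically, under this patch's names:

	/* normal operation:    desc_p->powerup == desc_powerup (PIL original)
	 * after a FOTA update: desc_p->powerup == spcom_local_powerup_after_fota,
	 *                      so any SSR panics instead of reloading the SPU
	 * SPCOM_CMD_ENABLE_SSR: desc_p->powerup = desc_powerup (restored)
	 */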
@@ -1130,6 +1173,41 @@ static int spcom_handle_unlock_ion_buf_command(struct spcom_channel *ch,
 }
 
 /**
+ * spcom_handle_enable_ssr_command() - Handle user space request to enable SSR
+ *
+ * After FOTA, SSR is disabled until the IAR update occurs;
+ * this command enables SSR again.
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+static int spcom_handle_enable_ssr_command(void)
+{
+	struct subsys_desc *desc_p = NULL;
+	void *subsystem_get_retval = find_subsys_device("spss");
+
+	if (!subsystem_get_retval) {
+		pr_err("restart - no device\n");
+		return -ENODEV;
+	}
+
+	desc_p = *(struct subsys_desc **)subsystem_get_retval;
+	if (!desc_p) {
+		pr_err("restart - no subsys descriptor\n");
+		return -ENODEV;
+	}
+
+	if (!desc_powerup) {
+		pr_err("no original SSR function\n");
+		return -ENODEV;
+	}
+
+	desc_p->powerup = desc_powerup;
+	pr_info("SSR is enabled after FOTA\n");
+
+	return 0;
+}
+
+/**
  * spcom_handle_write() - Handle user space write commands.
  *
  * @buf:	command buffer.
@@ -1157,7 +1235,8 @@ static int spcom_handle_write(struct spcom_channel *ch,
 	pr_debug("cmd_id [0x%x]\n", cmd_id);
 
 	if (!ch && cmd_id != SPCOM_CMD_CREATE_CHANNEL
-			&& cmd_id != SPCOM_CMD_RESTART_SP) {
+			&& cmd_id != SPCOM_CMD_RESTART_SP
+			&& cmd_id != SPCOM_CMD_ENABLE_SSR) {
 		pr_err("channel context is null\n");
 		return -EINVAL;
 	}
@@ -1193,6 +1272,9 @@ static int spcom_handle_write(struct spcom_channel *ch,
 	case SPCOM_CMD_RESTART_SP:
 		ret = spcom_handle_restart_sp_command(buf, buf_size);
 		break;
+	case SPCOM_CMD_ENABLE_SSR:
+		ret = spcom_handle_enable_ssr_command();
+		break;
 	default:
 		pr_err("Invalid Command Id [0x%x]\n", (int) cmd->cmd_id);
 		ret = -EINVAL;
@@ -1388,6 +1470,7 @@ static int spcom_device_open(struct inode *inode, struct file *filp)
 	int ret;
 	const char *name = file_to_filename(filp);
 	u32 pid = current_pid();
+	int i = 0;
 
 	pr_debug("open file [%s]\n", name);
 
@@ -1425,21 +1508,22 @@ static int spcom_device_open(struct inode *inode, struct file *filp)
 	}
 	/* max number of channel clients reached */
 	if (ch->is_busy) {
-		pr_err("channel [%s] is BUSY and has %d of clients, already in use by pid [%d]\n",
-			name, ch->num_clients, ch->pid);
+		pr_err("channel [%s] is BUSY and already has %d clients\n",
+			name, ch->num_clients);
 		mutex_unlock(&ch->lock);
 		return -EBUSY;
 	}
 
 	/*
-	 * if same active client trying to register again, this will fail.
-	 * Note: in the case of shared channel and SPCOM_MAX_CHANNEL_CLIENTS > 2
-	 * It possible to register with same pid if you are not the current
-	 * active client
+	 * if the same client tries to register again, this will fail
 	 */
-	if (ch->pid == pid) {
-		pr_err("client is already registered with channel[%s]\n", name);
-		return -EINVAL;
+	for (i = 0; i < SPCOM_MAX_CHANNEL_CLIENTS; i++) {
+		if (ch->pid[i] == pid) {
+			pr_err("client with pid [%d] is already registered with channel [%s]\n",
+				pid, name);
+			mutex_unlock(&ch->lock);
+			return -EINVAL;
+		}
 	}
 
 	if (ch->is_sharable) {
@@ -1448,13 +1532,25 @@ static int spcom_device_open(struct inode *inode, struct file *filp)
 			ch->is_busy = true;
 		else
 			ch->is_busy = false;
+		/* The pid array holds the pids of all registered clients.
+		 * If we reach here, the is_busy check above guarantees
+		 * at least one free (zero) pid entry.
+		 */
+		for (i = 0; i < SPCOM_MAX_CHANNEL_CLIENTS; i++) {
+			if (ch->pid[i] == 0) {
+				ch->pid[i] = pid;
+				break;
+			}
+		}
 	} else {
 		ch->num_clients = 1;
 		ch->is_busy = true;
+		/* Only the first pid entry is relevant for a
+		 * non-sharable channel
+		 */
+		ch->pid[0] = pid;
 	}
 
-	/* pid has the last registed client's pid */
-	ch->pid = pid;
 	mutex_unlock(&ch->lock);
 
 	filp->private_data = ch;
@@ -1477,6 +1573,7 @@ static int spcom_device_release(struct inode *inode, struct file *filp)
 	struct spcom_channel *ch;
 	const char *name = file_to_filename(filp);
 	int ret = 0;
+	int i = 0;
 
 	if (strcmp(name, "unknown") == 0) {
 		pr_err("name is unknown\n");
@@ -1507,6 +1604,13 @@ static int spcom_device_release(struct inode *inode, struct file *filp)
 		return 0;
 	}
 
+	for (i = 0; i < SPCOM_MAX_CHANNEL_CLIENTS; i++) {
+		if (ch->pid[i] == current_pid()) {
+			ch->pid[i] = 0;
+			break;
+		}
+	}
+
 	if (ch->num_clients > 1) {
 		/*
 		 * Shared client is trying to close channel,
@@ -1515,20 +1619,17 @@ static int spcom_device_release(struct inode *inode, struct file *filp)
 		if (ch->active_pid == current_pid()) {
 			pr_debug("active_pid [%x] is releasing ch [%s] sync lock\n",
 				 ch->active_pid, name);
-			mutex_unlock(&ch->shared_sync_lock);
 			/* No longer the current active user of the channel */
 			ch->active_pid = 0;
+			mutex_unlock(&ch->shared_sync_lock);
 		}
 		ch->num_clients--;
 		ch->is_busy = false;
-		if (ch->num_clients > 0) {
-			mutex_unlock(&ch->lock);
-			return 0;
-		}
+		mutex_unlock(&ch->lock);
+		return 0;
 	}
 
 	ch->is_busy = false;
-	ch->pid = 0;
 	ch->num_clients = 0;
 	ch->active_pid = 0;
 
@@ -1621,8 +1722,8 @@ static ssize_t spcom_device_write(struct file *filp,
 		pr_err("handle command error [%d]\n", ret);
 		kfree(buf);
 		if (ch && ch->active_pid == current_pid()) {
-			mutex_unlock(&ch->shared_sync_lock);
 			ch->active_pid = 0;
+			mutex_unlock(&ch->shared_sync_lock);
 		}
 		return ret;
 	}
@@ -1707,16 +1808,16 @@ static ssize_t spcom_device_read(struct file *filp, char __user *user_buff,
 	pr_debug("ch [%s] ret [%d]\n", name, (int) actual_size);
 
 	if (ch->active_pid == cur_pid) {
-		mutex_unlock(&ch->shared_sync_lock);
 		ch->active_pid = 0;
+		mutex_unlock(&ch->shared_sync_lock);
 	}
 	return actual_size;
 
 exit_err:
 	kfree(buf);
 	if (ch->active_pid == cur_pid) {
-		mutex_unlock(&ch->shared_sync_lock);
 		ch->active_pid = 0;
+		mutex_unlock(&ch->shared_sync_lock);
 	}
 	return ret;
 }
@@ -1928,7 +2029,7 @@ static int spcom_create_channel_chardev(const char *name, bool is_sharable)
 	if (ret != 0)
 		pr_err("can't unregister rpmsg drv %d\n", ret);
 exit_destroy_channel:
-	// empty channel leaves free slot for next time
+	/* empty channel leaves a free slot for next time */
 	mutex_lock(&ch->lock);
 	memset(ch->name, 0, SPCOM_CHANNEL_NAME_SIZE);
 	mutex_unlock(&ch->lock);
@@ -2142,6 +2243,7 @@ static void spcom_signal_rx_done(struct work_struct *ignored)
 		}
 		ch->rpmsg_rx_buf = rx_item->rpmsg_rx_buf;
 		ch->actual_rx_size = rx_item->rx_buf_size;
+		ch->rx_buf_txn_id = ch->txn_id;
 		complete_all(&ch->rx_done);
 		mutex_unlock(&ch->lock);
 
@@ -2256,7 +2358,7 @@ static void spcom_rpdev_remove(struct rpmsg_device *rpdev)
 	}
 
 	mutex_lock(&ch->lock);
-	// unlock all ion buffers of sp_kernel channel
+	/* unlock all ion buffers of the sp_kernel channel */
 	if (strcmp(ch->name, "sp_kernel") == 0) {
 		for (i = 0; i < ARRAY_SIZE(ch->dmabuf_handle_table); i++) {
 			if (ch->dmabuf_handle_table[i] != NULL) {
diff --git a/drivers/soc/qcom/spss_utils.c b/drivers/soc/qcom/spss_utils.c
index 286feb5..5eabb2b 100644
--- a/drivers/soc/qcom/spss_utils.c
+++ b/drivers/soc/qcom/spss_utils.c
@@ -62,16 +62,27 @@ static u32 spss_debug_reg_addr; /* SP_SCSR_MBn_SP2CL_GPm(n,m) */
 static u32 spss_emul_type_reg_addr; /* TCSR_SOC_EMULATION_TYPE */
 static void *iar_notif_handle;
 static struct notifier_block *iar_nb;
+static bool is_iar_active;
 
-#define CMAC_SIZE_IN_BYTES (128/8) /* 128 bit */
+#define CMAC_SIZE_IN_BYTES (128/8) /* 128 bit = 16 bytes */
+#define CMAC_SIZE_IN_DWORDS (CMAC_SIZE_IN_BYTES/sizeof(u32)) /* 4 dwords */
+
+/* Asym, Crypt, Keym */
+#define NUM_UEFI_APPS 3
 
 static u32 pil_addr;
 static u32 pil_size;
-static u32 cmac_buf[CMAC_SIZE_IN_BYTES/sizeof(u32)]; /* saved cmac */
-static u32 pbl_cmac_buf[CMAC_SIZE_IN_BYTES/sizeof(u32)]; /* pbl cmac */
+static u32 cmac_buf[CMAC_SIZE_IN_DWORDS]; /* saved cmac */
+static u32 pbl_cmac_buf[CMAC_SIZE_IN_DWORDS]; /* pbl cmac */
+
+static u32 calc_apps_cmac[NUM_UEFI_APPS][CMAC_SIZE_IN_DWORDS];
+static u32 saved_apps_cmac[NUM_UEFI_APPS][CMAC_SIZE_IN_DWORDS];
+
+#define FW_AND_APPS_CMAC_SIZE \
+	(CMAC_SIZE_IN_DWORDS + NUM_UEFI_APPS*CMAC_SIZE_IN_DWORDS)
+
 static u32 iar_state;
 static bool is_iar_enabled;
-static bool is_pbl_ce; /* Did SPU PBL performed Cryptographic Erase (CE) */
 
 static void __iomem *cmac_mem;
 static size_t cmac_mem_size = SZ_4K; /* XPU align to 4KB */
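
As a quick check on the arithmetic above (an illustrative sketch, not part of the patch): a 128-bit CMAC is 16 bytes or 4 dwords, and one firmware CMAC plus three UEFI app CMACs gives 16 dwords in total:

	static_assert(CMAC_SIZE_IN_BYTES == 16);	/* 128 / 8 */
	static_assert(CMAC_SIZE_IN_DWORDS == 4);	/* 16 / sizeof(u32) */
	static_assert(FW_AND_APPS_CMAC_SIZE == 16);	/* 4 + 3 * 4 = 64 bytes */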
@@ -97,7 +108,10 @@ static struct spss_utils_device *spss_utils_dev;
 
 /* static functions declaration */
 static int spss_set_fw_cmac(u32 *cmac, size_t cmac_size);
-static int spss_get_pbl_calc_cmac(u32 *cmac, size_t cmac_size);
+static int spss_get_pbl_and_apps_calc_cmac(void);
+
+static int spss_get_saved_uefi_apps_cmac(void);
+static int spss_set_saved_uefi_apps_cmac(void);
 
 /*==========================================================================*/
 /*		Device Sysfs */
@@ -245,24 +259,6 @@ static ssize_t iar_enabled_show(struct device *dev,
 
 static DEVICE_ATTR_RO(iar_enabled);
 
-static ssize_t pbl_ce_show(struct device *dev,
-		struct device_attribute *attr,
-		char *buf)
-{
-	int ret = 0;
-
-	if (!dev || !attr || !buf) {
-		pr_err("invalid param.\n");
-		return -EINVAL;
-	}
-
-	ret = snprintf(buf, PAGE_SIZE, "0x%x\n", is_pbl_ce);
-
-	return ret;
-}
-
-static DEVICE_ATTR_RO(pbl_ce);
-
 static ssize_t pbl_cmac_show(struct device *dev,
 		struct device_attribute *attr,
 		char *buf)
@@ -274,6 +270,9 @@ static ssize_t pbl_cmac_show(struct device *dev,
 		return -EINVAL;
 	}
 
+	/* first make sure the pbl cmac is updated */
+	spss_get_pbl_and_apps_calc_cmac();
+
 	ret = snprintf(buf, PAGE_SIZE, "0x%08x,0x%08x,0x%08x,0x%08x\n",
 	    pbl_cmac_buf[0], pbl_cmac_buf[1], pbl_cmac_buf[2], pbl_cmac_buf[3]);
 
@@ -282,6 +281,24 @@ static ssize_t pbl_cmac_show(struct device *dev,
 
 static DEVICE_ATTR_RO(pbl_cmac);
 
+static ssize_t apps_cmac_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	if (!dev || !attr || !buf) {
+		pr_err("invalid param.\n");
+		return -EINVAL;
+	}
+
+	/* first make sure the calculated apps cmac is updated */
+	spss_get_pbl_and_apps_calc_cmac();
+
+	memcpy(buf, calc_apps_cmac, sizeof(calc_apps_cmac));
+
+	return sizeof(calc_apps_cmac);
+}
+
+static DEVICE_ATTR_RO(apps_cmac);
+
 /*--------------------------------------------------------------------------*/
 static int spss_create_sysfs(struct device *dev)
 {
@@ -323,22 +340,23 @@ static int spss_create_sysfs(struct device *dev)
 		goto remove_iar_state;
 	}
 
-	ret = device_create_file(dev, &dev_attr_pbl_ce);
-	if (ret < 0) {
-		pr_err("failed to create sysfs file for pbl_ce.\n");
-		goto remove_iar_enabled;
-	}
-
 	ret = device_create_file(dev, &dev_attr_pbl_cmac);
 	if (ret < 0) {
 		pr_err("failed to create sysfs file for pbl_cmac.\n");
-		goto remove_pbl_ce;
+		goto remove_iar_enabled;
 	}
 
+	ret = device_create_file(dev, &dev_attr_apps_cmac);
+	if (ret < 0) {
+		pr_err("failed to create sysfs file for apps_cmac.\n");
+		goto remove_pbl_cmac;
+	}
+
 	return 0;
 
-remove_pbl_ce:
-		device_remove_file(dev, &dev_attr_pbl_ce);
+remove_pbl_cmac:
+		device_remove_file(dev, &dev_attr_pbl_cmac);
 remove_iar_enabled:
 		device_remove_file(dev, &dev_attr_iar_enabled);
 remove_iar_state:
@@ -364,6 +382,9 @@ static long spss_utils_ioctl(struct file *file,
 	void *buf = (void *) arg;
 	unsigned char data[64] = {0};
 	size_t size = 0;
+	u32 i = 0;
+	/* Saved cmacs of spu firmware and UEFI-loaded spu apps */
+	u32 fw_and_apps_cmacs[FW_AND_APPS_CMAC_SIZE];
 
 	if (buf == NULL) {
 		pr_err("invalid ioctl arg\n");
@@ -387,32 +408,35 @@ static long spss_utils_ioctl(struct file *file,
 
 	switch (cmd) {
 	case SPSS_IOC_SET_FW_CMAC:
-		if (size != sizeof(cmac_buf)) {
+		if (size != sizeof(fw_and_apps_cmacs)) {
 			pr_err("cmd [0x%x] invalid size [0x%x]\n", cmd, size);
 			return -EINVAL;
 		}
 
-		memcpy(cmac_buf, data, sizeof(cmac_buf));
-		pr_info("saved fw cmac: 0x%08x,0x%08x,0x%08x,0x%08x\n",
-			cmac_buf[0], cmac_buf[1], cmac_buf[2], cmac_buf[3]);
+		/* spdaemon uses this ioctl only when IAR is active */
+		is_iar_active = true;
+
+		memcpy(fw_and_apps_cmacs, data, sizeof(fw_and_apps_cmacs));
+		memcpy(cmac_buf, fw_and_apps_cmacs, sizeof(cmac_buf));
+
+		for (i = 0; i < NUM_UEFI_APPS; ++i) {
+			int x = (i+1)*CMAC_SIZE_IN_DWORDS;
+
+			memcpy(saved_apps_cmac[i],
+				fw_and_apps_cmacs + x,
+				CMAC_SIZE_IN_BYTES);
+		}
 
 		/*
 		 * SPSS is loaded now by UEFI,
 		 * so IAR callback is not being called on powerup by PIL.
-		 * therefore read the spu pbl fw cmac from ioctl.
+		 * Therefore, read the spu pbl fw cmac and apps cmac from ioctl.
 		 * The callback shall be called on spss SSR.
 		 */
 		pr_debug("read pbl cmac from shared memory\n");
 		spss_set_fw_cmac(cmac_buf, sizeof(cmac_buf));
-		spss_get_pbl_calc_cmac(pbl_cmac_buf, sizeof(pbl_cmac_buf));
-		if (memcmp(cmac_buf, pbl_cmac_buf, sizeof(cmac_buf)) != 0)
-			is_pbl_ce = true; /* cmacs not the same */
-		else
-			is_pbl_ce = false;
-
-		pr_info("calc fw cmac: 0x%08x,0x%08x,0x%08x,0x%08x\n",
-			pbl_cmac_buf[0], pbl_cmac_buf[1],
-			pbl_cmac_buf[2], pbl_cmac_buf[3]);
+		spss_set_saved_uefi_apps_cmac();
+		spss_get_saved_uefi_apps_cmac();
 		break;
 
 	default:
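
The unpacking loop above, with x = (i + 1) * CMAC_SIZE_IN_DWORDS, implies the following dword layout for the ioctl payload (inferred, not documented in the patch):

	/* fw_and_apps_cmacs[ 0.. 3]  fw cmac     -> cmac_buf
	 * fw_and_apps_cmacs[ 4.. 7]  app 0 cmac  -> saved_apps_cmac[0]
	 * fw_and_apps_cmacs[ 8..11]  app 1 cmac  -> saved_apps_cmac[1]
	 * fw_and_apps_cmacs[12..15]  app 2 cmac  -> saved_apps_cmac[2]
	 */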
@@ -781,23 +805,105 @@ static int spss_set_fw_cmac(u32 *cmac, size_t cmac_size)
 	return 0;
 }
 
-static int spss_get_pbl_calc_cmac(u32 *cmac, size_t cmac_size)
+static int spss_get_pbl_and_apps_calc_cmac(void)
 {
 	u8 __iomem *reg = NULL;
-	int i;
+	int i, j;
 	u32 val;
 
 	if (cmac_mem == NULL)
 		return -EFAULT;
 
-	/* PBL calculated cmac after HLOS expected cmac */
-	reg = cmac_mem + cmac_size;
+	reg = cmac_mem; /* IAR buffer base */
+	reg += CMAC_SIZE_IN_BYTES; /* skip the saved fw cmac */
 	pr_debug("reg [%pK]\n", reg);
 
-	for (i = 0; i < cmac_size/4; i++) {
-		val = readl_relaxed(reg + i*sizeof(u32));
-		cmac[i] = val;
-		pr_debug("cmac[%d] [0x%x]\n", (int) i, (int) val);
+	/* get pbl fw cmac from ddr */
+	for (i = 0; i < CMAC_SIZE_IN_DWORDS; i++) {
+		val = readl_relaxed(reg);
+		pbl_cmac_buf[i] = val;
+		reg += sizeof(u32);
+	}
+	reg += CMAC_SIZE_IN_BYTES; /* skip the saved app cmac */
+
+	pr_debug("pbl_cmac_buf : 0x%08x,0x%08x,0x%08x,0x%08x\n",
+	    pbl_cmac_buf[0], pbl_cmac_buf[1],
+	    pbl_cmac_buf[2], pbl_cmac_buf[3]);
+
+	/* get apps cmac from ddr */
+	for (j = 0; j < NUM_UEFI_APPS; j++) {
+		for (i = 0; i < CMAC_SIZE_IN_DWORDS; i++) {
+			val = readl_relaxed(reg);
+			calc_apps_cmac[j][i] = val;
+			reg += sizeof(u32);
+		}
+		reg += CMAC_SIZE_IN_BYTES; /* skip the next app's saved cmac */
+
+		pr_debug("app [%d] cmac : 0x%08x,0x%08x,0x%08x,0x%08x\n", j,
+			calc_apps_cmac[j][0], calc_apps_cmac[j][1],
+			calc_apps_cmac[j][2], calc_apps_cmac[j][3]);
+	}
+
+	return 0;
+}
+
+static int spss_get_saved_uefi_apps_cmac(void)
+{
+	u8 __iomem *reg = NULL;
+	int i, j;
+	u32 val;
+
+	if (cmac_mem == NULL)
+		return -EFAULT;
+
+	reg = cmac_mem; /* IAR buffer base */
+	reg += (2*CMAC_SIZE_IN_BYTES); /* skip the saved and calc fw cmac */
+	pr_debug("reg [%pK]\n", reg);
+
+	/* get saved apps cmac from ddr - written there by the UEFI spss driver */
+	for (j = 0; j < NUM_UEFI_APPS; j++) {
+		for (i = 0; i < CMAC_SIZE_IN_DWORDS; i++) {
+			val = readl_relaxed(reg);
+			saved_apps_cmac[j][i] = val;
+			reg += sizeof(u32);
+		}
+		reg += CMAC_SIZE_IN_BYTES; /* skip the calc cmac */
+
+		pr_debug("app[%d] saved cmac: 0x%08x,0x%08x,0x%08x,0x%08x\n",
+			j,
+			saved_apps_cmac[j][0], saved_apps_cmac[j][1],
+			saved_apps_cmac[j][2], saved_apps_cmac[j][3]);
+	}
+
+	return 0;
+}
+
+static int spss_set_saved_uefi_apps_cmac(void)
+{
+	u8 __iomem *reg = NULL;
+	int i, j;
+	u32 val;
+
+	if (cmac_mem == NULL)
+		return -EFAULT;
+
+	reg = cmac_mem; /* IAR buffer base */
+	reg += (2*CMAC_SIZE_IN_BYTES); /* skip the saved and calc fw cmac */
+	pr_debug("reg [%pK]\n", reg);
+
+	/* write the saved apps cmac back to ddr */
+	for (j = 0; j < NUM_UEFI_APPS; j++) {
+		for (i = 0; i < CMAC_SIZE_IN_DWORDS; i++) {
+			val = saved_apps_cmac[j][i];
+			writel_relaxed(val, reg);
+			reg += sizeof(u32);
+		}
+		reg += CMAC_SIZE_IN_BYTES; /* skip the calc app cmac */
+
+		pr_debug("app[%d] saved cmac: 0x%08x,0x%08x,0x%08x,0x%08x\n",
+			j,
+			saved_apps_cmac[j][0], saved_apps_cmac[j][1],
+			saved_apps_cmac[j][2], saved_apps_cmac[j][3]);
 	}
 
 	return 0;
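
All three accessors walk cmac_mem with the same fixed strides. The layout they imply (inferred from the reg offsets; each slot is CMAC_SIZE_IN_BYTES = 16 bytes):

	/*
	 * cmac_mem + 0x00  saved fw cmac    (written by spss_set_fw_cmac)
	 * cmac_mem + 0x10  calc  fw cmac    (PBL, read into pbl_cmac_buf)
	 * cmac_mem + 0x20  saved app0 cmac  \
	 * cmac_mem + 0x30  calc  app0 cmac   } one saved/calc pair per app
	 * cmac_mem + 0x40  saved app1 cmac  /
	 * ...
	 */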
@@ -807,6 +913,9 @@ static int spss_utils_iar_callback(struct notifier_block *nb,
 				  unsigned long code,
 				  void *data)
 {
+	/* do nothing if IAR is not active */
+	if (!is_iar_active)
+		return NOTIFY_OK;
 
 	switch (code) {
 	case SUBSYS_BEFORE_SHUTDOWN:
@@ -820,15 +929,12 @@ static int spss_utils_iar_callback(struct notifier_block *nb,
 		break;
 	case SUBSYS_AFTER_POWERUP:
 		pr_debug("[SUBSYS_AFTER_POWERUP] event.\n");
-		spss_get_pbl_calc_cmac(pbl_cmac_buf, sizeof(pbl_cmac_buf));
-		if (memcmp(cmac_buf, pbl_cmac_buf, sizeof(cmac_buf)) != 0)
-			is_pbl_ce = true; /* cmacs not the same */
-		else
-			is_pbl_ce = false;
 		break;
 	case SUBSYS_BEFORE_AUTH_AND_RESET:
 		pr_debug("[SUBSYS_BEFORE_AUTH_AND_RESET] event.\n");
+		/* Called on SSR as spss firmware is loaded by UEFI */
 		spss_set_fw_cmac(cmac_buf, sizeof(cmac_buf));
+		spss_set_saved_uefi_apps_cmac();
 		break;
 	default:
 		pr_err("unknown code [0x%x] .\n", (int) code);
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index 16c1dab..cd3fd27 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -658,7 +658,7 @@ static int pil_mem_setup_trusted(struct pil_desc *pil, phys_addr_t addr,
 
 	desc.args[0] = d->pas_id;
 	desc.args[1] = addr;
-	desc.args[2] = size;
+	desc.args[2] = size + pil->extra_size;
 	desc.arginfo = SCM_ARGS(3);
 	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_MEM_SETUP_CMD),
 			&desc);
@@ -1064,7 +1064,8 @@ static int pil_tz_driver_probe(struct platform_device *pdev)
 {
 	struct pil_tz_data *d;
 	struct resource *res;
-	u32 proxy_timeout;
+	struct device_node *crypto_node;
+	u32 proxy_timeout, crypto_id;
 	int len, rc;
 	char md_node[20];
 
@@ -1121,7 +1122,16 @@ static int pil_tz_driver_probe(struct platform_device *pdev)
 									rc);
 			goto err_deregister_bus;
 		}
-		scm_pas_init(MSM_BUS_MASTER_CRYPTO_CORE_0);
+
+		crypto_id = MSM_BUS_MASTER_CRYPTO_CORE_0;
+		crypto_node = of_parse_phandle(pdev->dev.of_node,
+						"qcom,mas-crypto", 0);
+		if (!IS_ERR_OR_NULL(crypto_node)) {
+			of_property_read_u32(crypto_node, "cell-id",
+				&crypto_id);
+		}
+		of_node_put(crypto_node);
+		scm_pas_init((int)crypto_id);
 	}
 
 	rc = pil_desc_init(&d->desc);
@@ -1197,6 +1207,10 @@ static int pil_tz_driver_probe(struct platform_device *pdev)
 		}
 		mask_scsr_irqs(d);
 
+		rc = of_property_read_u32(pdev->dev.of_node, "qcom,extra-size",
+						&d->desc.extra_size);
+		if (rc)
+			d->desc.extra_size = 0;
 	} else {
 		d->subsys_desc.err_fatal_handler =
 						subsys_err_fatal_intr_handler;
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index 2d0c3b8..f8fb034 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -274,6 +274,8 @@ static ssize_t restart_level_store(struct device *dev,
 
 	for (i = 0; i < ARRAY_SIZE(restart_levels); i++)
 		if (!strncasecmp(buf, restart_levels[i], count)) {
+			pil_ipc("[%s]: change restart level to %d\n",
+				subsys->desc->name, i);
 			subsys->restart_level = i;
 			return orig_count;
 		}
@@ -849,7 +851,7 @@ static int subsys_start(struct subsys_device *subsys)
 		subsys_set_state(subsys, SUBSYS_ONLINE);
 		return 0;
 	}
-
+	pil_ipc("[%s]: before wait_for_err_ready\n", subsys->desc->name);
 	ret = wait_for_err_ready(subsys);
 	if (ret) {
 		/* pil-boot succeeded but we need to shutdown
@@ -865,6 +867,7 @@ static int subsys_start(struct subsys_device *subsys)
 
 	notify_each_subsys_device(&subsys, 1, SUBSYS_AFTER_POWERUP,
 								NULL);
+	pil_ipc("[%s]: exit\n", subsys->desc->name);
 	return ret;
 }
 
@@ -872,6 +875,7 @@ static void subsys_stop(struct subsys_device *subsys)
 {
 	const char *name = subsys->desc->name;
 
+	pil_ipc("[%s]: entry\n", subsys->desc->name);
 	notify_each_subsys_device(&subsys, 1, SUBSYS_BEFORE_SHUTDOWN, NULL);
 	reinit_completion(&subsys->shutdown_ack);
 	if (!of_property_read_bool(subsys->desc->dev->of_node,
@@ -890,6 +894,7 @@ static void subsys_stop(struct subsys_device *subsys)
 	subsys_set_state(subsys, SUBSYS_OFFLINE);
 	disable_all_irqs(subsys);
 	notify_each_subsys_device(&subsys, 1, SUBSYS_AFTER_SHUTDOWN, NULL);
+	pil_ipc("[%s]: exit\n", subsys->desc->name);
 }
 
 int subsystem_set_fwname(const char *name, const char *fw_name)
@@ -1304,14 +1309,24 @@ EXPORT_SYMBOL(subsystem_crashed);
 void subsys_set_crash_status(struct subsys_device *dev,
 				enum crash_status crashed)
 {
+	if (!dev) {
+		pr_err("subsys_set_crash_status() dev is NULL\n");
+		return;
+	}
 	dev->crashed = crashed;
 }
 EXPORT_SYMBOL(subsys_set_crash_status);
 
 enum crash_status subsys_get_crash_status(struct subsys_device *dev)
 {
+	if (!dev) {
+		pr_err("subsys_get_crash_status() dev is NULL\n");
+		return CRASH_STATUS_WDOG_BITE;
+	}
+
 	return dev->crashed;
 }
+EXPORT_SYMBOL(subsys_get_crash_status);
 
 static struct subsys_device *desc_to_subsys(struct device *d)
 {
diff --git a/drivers/soc/qcom/trace_secure_buffer.h b/drivers/soc/qcom/trace_secure_buffer.h
new file mode 100644
index 0000000..f0655c8
--- /dev/null
+++ b/drivers/soc/qcom/trace_secure_buffer.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM secure_buffer
+
+#if !defined(_TRACE_SECURE_BUFFER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SECURE_BUFFER_H
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <soc/qcom/secure_buffer.h>
+
+TRACE_EVENT(hyp_assign_info,
+
+	TP_PROTO(u32 *source_vm_list,
+		 int source_nelems, int *dest_vmids, int *dest_perms,
+		 int dest_nelems),
+
+	TP_ARGS(source_vm_list, source_nelems, dest_vmids,
+		dest_perms, dest_nelems),
+
+	TP_STRUCT__entry(
+		__field(int, source_nelems)
+		__field(int, dest_nelems)
+		__dynamic_array(u32, source_vm_list, source_nelems)
+		__dynamic_array(int, dest_vmids, dest_nelems)
+		__dynamic_array(int, dest_perms, dest_nelems)
+	),
+
+	TP_fast_assign(
+		__entry->source_nelems = source_nelems;
+		__entry->dest_nelems = dest_nelems;
+		memcpy(__get_dynamic_array(source_vm_list), source_vm_list,
+		       source_nelems * sizeof(*source_vm_list));
+		memcpy(__get_dynamic_array(dest_vmids), dest_vmids,
+		       dest_nelems * sizeof(*dest_vmids));
+		memcpy(__get_dynamic_array(dest_perms), dest_perms,
+		       dest_nelems * sizeof(*dest_perms));
+	),
+
+	TP_printk("srcVMIDs: %s dstVMIDs: %s dstPerms: %s",
+		  __print_array(__get_dynamic_array(source_vm_list),
+				__entry->source_nelems, sizeof(u32)),
+		  __print_array(__get_dynamic_array(dest_vmids),
+				__entry->dest_nelems, sizeof(int)),
+		  __print_array(__get_dynamic_array(dest_perms),
+				__entry->dest_nelems, sizeof(int))
+	)
+);
+
+TRACE_EVENT(hyp_assign_batch_start,
+
+	TP_PROTO(struct mem_prot_info *info, int info_nelems),
+
+	TP_ARGS(info, info_nelems),
+
+	TP_STRUCT__entry(
+		__field(int, info_nelems)
+		__field(u64, batch_size)
+		__dynamic_array(phys_addr_t, addrs, info_nelems)
+		__dynamic_array(u64, sizes, info_nelems)
+	),
+
+	TP_fast_assign(
+		unsigned int i;
+		phys_addr_t *addr_arr_ptr = __get_dynamic_array(addrs);
+		u64 *sizes_arr_ptr = __get_dynamic_array(sizes);
+
+		__entry->info_nelems = info_nelems;
+		__entry->batch_size = 0;
+
+		for (i = 0; i < info_nelems; i++) {
+			addr_arr_ptr[i] = info[i].addr;
+			sizes_arr_ptr[i] = info[i].size;
+			__entry->batch_size += info[i].size;
+		}
+	),
+
+	TP_printk("num entries: %d batch size: %llu phys addrs: %s sizes: %s",
+		  __entry->info_nelems, __entry->batch_size,
+		  __print_array(__get_dynamic_array(addrs),
+				__entry->info_nelems, sizeof(phys_addr_t)),
+		  __print_array(__get_dynamic_array(sizes),
+				__entry->info_nelems, sizeof(u64))
+	)
+);
+
+TRACE_EVENT(hyp_assign_batch_end,
+
+	TP_PROTO(int ret, u64 delta),
+
+	TP_ARGS(ret, delta),
+
+	TP_STRUCT__entry(
+		__field(int, ret)
+		__field(u64, delta)
+	),
+
+	TP_fast_assign(
+		__entry->ret = ret;
+		__entry->delta = delta;
+	),
+
+	TP_printk("ret: %d time delta: %llu us",
+		  __entry->ret, __entry->delta
+	)
+);
+
+TRACE_EVENT(hyp_assign_end,
+
+	TP_PROTO(u64 tot_delta, u64 avg_delta),
+
+	TP_ARGS(tot_delta, avg_delta),
+
+	TP_STRUCT__entry(
+		__field(u64, tot_delta)
+		__field(u64, avg_delta)
+	),
+
+	TP_fast_assign(
+		__entry->tot_delta = tot_delta;
+		__entry->avg_delta = avg_delta;
+	),
+
+	TP_printk("total time delta: %llu us avg batch delta: %llu us",
+		  __entry->tot_delta, __entry->avg_delta
+	)
+);
+#endif /* _TRACE_SECURE_BUFFER_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/soc/qcom/
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace_secure_buffer
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
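
For reference, each TRACE_EVENT(name) above generates a trace_name() emitter with the TP_PROTO signature, and exactly one .c file must define CREATE_TRACE_POINTS before including this header to instantiate the tracepoints. An illustrative caller (do_batch_assign() and delta_us are hypothetical, not part of this patch):

	#define CREATE_TRACE_POINTS
	#include "trace_secure_buffer.h"

	trace_hyp_assign_batch_start(info, info_nelems);
	ret = do_batch_assign(info, info_nelems);	/* hypothetical worker */
	trace_hyp_assign_batch_end(ret, delta_us);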
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
index 87eb07f7..8343023 100644
--- a/drivers/soc/qcom/watchdog_v2.c
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -92,7 +92,6 @@ struct msm_watchdog_data {
 	unsigned long long thread_start;
 	unsigned long long ping_start[NR_CPUS];
 	unsigned long long ping_end[NR_CPUS];
-	unsigned int cpu_scandump_sizes[NR_CPUS];
 };
 
 /*
@@ -550,95 +549,6 @@ static irqreturn_t wdog_ppi_bark(int irq, void *dev_id)
 	return wdog_bark_handler(irq, wdog_dd);
 }
 
-static void configure_bark_dump(struct msm_watchdog_data *wdog_dd)
-{
-	int ret;
-	struct msm_dump_entry dump_entry;
-	struct msm_dump_data *cpu_data;
-	int cpu;
-	void *cpu_buf;
-
-	cpu_data = kcalloc(num_present_cpus(), sizeof(struct msm_dump_data),
-								GFP_KERNEL);
-	if (!cpu_data)
-		goto out0;
-
-	cpu_buf = kcalloc(num_present_cpus(), MAX_CPU_CTX_SIZE, GFP_KERNEL);
-	if (!cpu_buf)
-		goto out1;
-
-	for_each_cpu(cpu, cpu_present_mask) {
-		cpu_data[cpu].addr = virt_to_phys(cpu_buf +
-						cpu * MAX_CPU_CTX_SIZE);
-		cpu_data[cpu].len = MAX_CPU_CTX_SIZE;
-		snprintf(cpu_data[cpu].name, sizeof(cpu_data[cpu].name),
-			"KCPU_CTX%d", cpu);
-		dump_entry.id = MSM_DUMP_DATA_CPU_CTX + cpu;
-		dump_entry.addr = virt_to_phys(&cpu_data[cpu]);
-		ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
-					     &dump_entry);
-		/*
-		 * Don't free the buffers in case of error since
-		 * registration may have succeeded for some cpus.
-		 */
-		if (ret)
-			pr_err("cpu %d reg dump setup failed\n", cpu);
-	}
-
-	return;
-out1:
-	kfree(cpu_data);
-out0:
-	return;
-}
-
-static void configure_scandump(struct msm_watchdog_data *wdog_dd)
-{
-	int ret;
-	struct msm_dump_entry dump_entry;
-	struct msm_dump_data *cpu_data;
-	int cpu;
-	static dma_addr_t dump_addr;
-	static void *dump_vaddr;
-	unsigned int scandump_size;
-
-	for_each_cpu(cpu, cpu_present_mask) {
-		scandump_size = wdog_dd->cpu_scandump_sizes[cpu];
-		cpu_data = devm_kzalloc(wdog_dd->dev,
-					sizeof(struct msm_dump_data),
-					GFP_KERNEL);
-		if (!cpu_data)
-			continue;
-
-		dump_vaddr = (void *) dma_alloc_coherent(wdog_dd->dev,
-							 scandump_size,
-							 &dump_addr,
-							 GFP_KERNEL);
-		if (!dump_vaddr) {
-			dev_err(wdog_dd->dev, "Couldn't get memory for dump\n");
-			continue;
-		}
-		memset(dump_vaddr, 0x0, scandump_size);
-
-		cpu_data->addr = dump_addr;
-		cpu_data->len = scandump_size;
-		snprintf(cpu_data->name, sizeof(cpu_data->name),
-			"KSCANDUMP%d", cpu);
-		dump_entry.id = MSM_DUMP_DATA_SCANDUMP_PER_CPU + cpu;
-		dump_entry.addr = virt_to_phys(cpu_data);
-		ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
-					     &dump_entry);
-		if (ret) {
-			dev_err(wdog_dd->dev, "Dump setup failed, id = %d\n",
-				MSM_DUMP_DATA_SCANDUMP_PER_CPU + cpu);
-			dma_free_coherent(wdog_dd->dev, scandump_size,
-					  dump_vaddr,
-					  dump_addr);
-			devm_kfree(wdog_dd->dev, cpu_data);
-		}
-	}
-}
-
 static int init_watchdog_sysfs(struct msm_watchdog_data *wdog_dd)
 {
 	int error = 0;
@@ -699,8 +609,6 @@ static void init_watchdog_data(struct msm_watchdog_data *wdog_dd)
 	delay_time = msecs_to_jiffies(wdog_dd->pet_time);
 	wdog_dd->min_slack_ticks = UINT_MAX;
 	wdog_dd->min_slack_ns = ULLONG_MAX;
-	configure_scandump(wdog_dd);
-	configure_bark_dump(wdog_dd);
 	timeout = (wdog_dd->bark_time * WDT_HZ)/1000;
 	__raw_writel(timeout, wdog_dd->base + WDT0_BARK_TIME);
 	__raw_writel(timeout + 3*WDT_HZ, wdog_dd->base + WDT0_BITE_TIME);
@@ -754,7 +662,7 @@ static int msm_wdog_dt_to_pdata(struct platform_device *pdev,
 {
 	struct device_node *node = pdev->dev.of_node;
 	struct resource *res;
-	int ret, cpu, num_scandump_sizes;
+	int ret;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wdt-base");
 	if (!res)
@@ -815,18 +723,6 @@ static int msm_wdog_dt_to_pdata(struct platform_device *pdev,
 	}
 	pdata->wakeup_irq_enable = of_property_read_bool(node,
 							 "qcom,wakeup-enable");
-
-	num_scandump_sizes = of_property_count_elems_of_size(node,
-							"qcom,scandump-sizes",
-							sizeof(u32));
-	if (num_scandump_sizes < 0 || num_scandump_sizes != num_possible_cpus())
-		dev_info(&pdev->dev, "%s scandump sizes property not correct\n",
-			__func__);
-	else
-		for_each_cpu(cpu, cpu_present_mask)
-			of_property_read_u32_index(node, "qcom,scandump-sizes",
-						   cpu,
-					&pdata->cpu_scandump_sizes[cpu]);
 	pdata->irq_ppi = irq_is_percpu(pdata->bark_irq);
 	dump_pdata(pdata);
 	return 0;
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index 19c8efb..1ba1556 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -3,8 +3,8 @@
 #
 
 menuconfig SOUNDWIRE
-	bool "SoundWire support"
-	---help---
+	tristate "SoundWire support"
+	help
 	  SoundWire is a 2-Pin interface with data and clock line ratified
 	  by the MIPI Alliance. SoundWire is used for transporting data
 	  typically related to audio functions. SoundWire interface is
@@ -16,17 +16,12 @@
 
 comment "SoundWire Devices"
 
-config SOUNDWIRE_BUS
-	tristate
-	select REGMAP_SOUNDWIRE
-
 config SOUNDWIRE_CADENCE
 	tristate
 
 config SOUNDWIRE_INTEL
 	tristate "Intel SoundWire Master driver"
 	select SOUNDWIRE_CADENCE
-	select SOUNDWIRE_BUS
 	depends on X86 && ACPI && SND_SOC
 	---help---
 	  SoundWire Intel Master driver.
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
index 5817bea..1e2c001 100644
--- a/drivers/soundwire/Makefile
+++ b/drivers/soundwire/Makefile
@@ -4,7 +4,7 @@
 
 #Bus Objs
 soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o
-obj-$(CONFIG_SOUNDWIRE_BUS) += soundwire-bus.o
+obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o
 
 #Cadence Objs
 soundwire-cadence-objs := cadence_master.o
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index cb6a331..70f78ed 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -81,8 +81,8 @@
 
 #define CDNS_MCP_INTSET				0x4C
 
-#define CDNS_SDW_SLAVE_STAT			0x50
-#define CDNS_MCP_SLAVE_STAT_MASK		BIT(1, 0)
+#define CDNS_MCP_SLAVE_STAT			0x50
+#define CDNS_MCP_SLAVE_STAT_MASK		GENMASK(1, 0)
 
 #define CDNS_MCP_SLAVE_INTSTAT0			0x54
 #define CDNS_MCP_SLAVE_INTSTAT1			0x58
@@ -96,8 +96,8 @@
 #define CDNS_MCP_SLAVE_INTMASK0			0x5C
 #define CDNS_MCP_SLAVE_INTMASK1			0x60
 
-#define CDNS_MCP_SLAVE_INTMASK0_MASK		GENMASK(30, 0)
-#define CDNS_MCP_SLAVE_INTMASK1_MASK		GENMASK(16, 0)
+#define CDNS_MCP_SLAVE_INTMASK0_MASK		GENMASK(31, 0)
+#define CDNS_MCP_SLAVE_INTMASK1_MASK		GENMASK(15, 0)
 
 #define CDNS_MCP_PORT_INTSTAT			0x64
 #define CDNS_MCP_PDI_STAT			0x6C
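
The register fixes above hinge on the difference between BIT() and GENMASK(): BIT(n) sets a single bit, so BIT(1, 0) was a misuse (it only escaped notice because a macro body is not expanded until the macro is used), while GENMASK(hi, lo) builds a contiguous mask over bits hi..lo inclusive:

	/* GENMASK(1, 0)  == 0x00000003   2-bit MCP slave status field
	 * GENMASK(31, 0) == 0xffffffff   INTMASK0 covers all 32 bits
	 * GENMASK(15, 0) == 0x0000ffff   INTMASK1 covers the low 16 bits
	 */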
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index a6e2581..29bc99c 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -282,6 +282,16 @@ intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)
 
 	if (pcm) {
 		count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
+
+		/*
+		 * WORKAROUND: on all existing Intel controllers, pdi
+		 * number 2 reports channel count as 1 even though it
+		 * supports 8 channels. Performing hardcoding for pdi
+		 * supports 8 channels. Hardcode the count for pdi
+		 * number 2 instead.
+		if (pdi_num == 2)
+			count = 7;
+
 	} else {
 		count = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
 		count = ((count & SDW_SHIM_PDMSCAP_CPSS) >>
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 3094d81..12c1fa5 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -178,24 +178,14 @@ static void bcm2835aux_spi_reset_hw(struct bcm2835aux_spi *bs)
 		      BCM2835_AUX_SPI_CNTL0_CLEARFIFO);
 }
 
-static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
+static void bcm2835aux_spi_transfer_helper(struct bcm2835aux_spi *bs)
 {
-	struct spi_master *master = dev_id;
-	struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
-	irqreturn_t ret = IRQ_NONE;
-
-	/* IRQ may be shared, so return if our interrupts are disabled */
-	if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
-	      (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
-		return ret;
+	u32 stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
 
 	/* check if we have data to read */
-	while (bs->rx_len &&
-	       (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
-		  BCM2835_AUX_SPI_STAT_RX_EMPTY))) {
+	for (; bs->rx_len && (stat & BCM2835_AUX_SPI_STAT_RX_LVL);
+	     stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT))
 		bcm2835aux_rd_fifo(bs);
-		ret = IRQ_HANDLED;
-	}
 
 	/* check if we have data to write */
 	while (bs->tx_len &&
@@ -203,16 +193,21 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
 	       (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
 		  BCM2835_AUX_SPI_STAT_TX_FULL))) {
 		bcm2835aux_wr_fifo(bs);
-		ret = IRQ_HANDLED;
 	}
+}
 
-	/* and check if we have reached "done" */
-	while (bs->rx_len &&
-	       (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
-		  BCM2835_AUX_SPI_STAT_BUSY))) {
-		bcm2835aux_rd_fifo(bs);
-		ret = IRQ_HANDLED;
-	}
+static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
+{
+	struct spi_master *master = dev_id;
+	struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+	/* IRQ may be shared, so return if our interrupts are disabled */
+	if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
+	      (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
+		return IRQ_NONE;
+
+	/* do common fifo handling */
+	bcm2835aux_spi_transfer_helper(bs);
 
 	if (!bs->tx_len) {
 		/* disable tx fifo empty interrupt */
@@ -226,8 +221,7 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
 		complete(&master->xfer_completion);
 	}
 
-	/* and return */
-	return ret;
+	return IRQ_HANDLED;
 }
 
 static int __bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
@@ -273,7 +267,6 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
 {
 	struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
 	unsigned long timeout;
-	u32 stat;
 
 	/* configure spi */
 	bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
@@ -284,24 +277,9 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
 
 	/* loop until finished the transfer */
 	while (bs->rx_len) {
-		/* read status */
-		stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
 
-		/* fill in tx fifo with remaining data */
-		if ((bs->tx_len) && (!(stat & BCM2835_AUX_SPI_STAT_TX_FULL))) {
-			bcm2835aux_wr_fifo(bs);
-			continue;
-		}
-
-		/* read data from fifo for both cases */
-		if (!(stat & BCM2835_AUX_SPI_STAT_RX_EMPTY)) {
-			bcm2835aux_rd_fifo(bs);
-			continue;
-		}
-		if (!(stat & BCM2835_AUX_SPI_STAT_BUSY)) {
-			bcm2835aux_rd_fifo(bs);
-			continue;
-		}
+		/* do common fifo handling */
+		bcm2835aux_spi_transfer_helper(bs);
 
 		/* there is still data pending to read check the timeout */
 		if (bs->rx_len && time_after(jiffies, timeout)) {
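
Both the interrupt path and the polling path now funnel through bcm2835aux_spi_transfer_helper(), and the handler keeps the shared-IRQ contract: it must return IRQ_NONE, before touching any state, when the interrupt was raised by another device on the line. A minimal sketch of that shape (our_irq_pending() is a hypothetical stand-in for the CNTL1 check above):

	static irqreturn_t example_isr(int irq, void *dev_id)
	{
		struct bcm2835aux_spi *bs = spi_master_get_devdata(dev_id);

		if (!our_irq_pending(bs))		/* hypothetical check */
			return IRQ_NONE;		/* not ours: pass it on */

		bcm2835aux_spi_transfer_helper(bs);	/* common FIFO work */
		return IRQ_HANDLED;
	}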
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 088772e..77838d8 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -410,7 +410,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
 		return status;
 
 	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
-	master->mode_bits = SPI_3WIRE | SPI_CPHA | SPI_CPOL;
+	master->mode_bits = SPI_3WIRE | SPI_CPHA | SPI_CPOL | SPI_CS_HIGH;
 	master->flags = master_flags;
 	master->bus_num = pdev->id;
 	/* The master needs to think there is a chipselect even if not connected */
@@ -437,7 +437,6 @@ static int spi_gpio_probe(struct platform_device *pdev)
 		spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3;
 	}
 	spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer;
-	spi_gpio->bitbang.flags = SPI_CS_HIGH;
 
 	status = spi_bitbang_start(&spi_gpio->bitbang);
 	if (status)
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index eacd470..22ad52b 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -689,17 +689,9 @@ static struct task_struct *ion_create_kworker(struct ion_page_pool **pools,
 	struct task_struct *thread;
 	int ret;
 	char *buf;
-	cpumask_t *cpumask;
-	DECLARE_BITMAP(bmap, nr_cpumask_bits);
 
 	attr.sched_nice = ION_KTHREAD_NICE_VAL;
 	buf = cached ? "cached" : "uncached";
-	/*
-	 * Affine the kthreads to min capacity CPUs
-	 * TODO: remove this hack once is_min_capability_cpu is available
-	 */
-	bitmap_fill(bmap, 0x4);
-	cpumask = to_cpumask(bmap);
 
 	thread = kthread_create(ion_sys_heap_worker, pools,
 				"ion-pool-%s-worker", buf);
@@ -715,7 +707,7 @@ static struct task_struct *ion_create_kworker(struct ion_page_pool **pools,
 			__func__, buf, ret);
 		return ERR_PTR(ret);
 	}
-	kthread_bind_mask(thread, cpumask);
+
 	return thread;
 }
 
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 2edf3ee..caf4d4d 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -342,9 +342,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
 static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
 			    unsigned int flags)
 {
-	int divider, base, prescale;
+	unsigned int divider, base, prescale;
 
-	/* This function needs improvment */
+	/* This function needs improvement */
 	/* Don't know if divider==0 works. */
 
 	for (prescale = 0; prescale < 16; prescale++) {
@@ -358,7 +358,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
 			divider = (*nanosec) / base;
 			break;
 		case CMDF_ROUND_UP:
-			divider = (*nanosec) / base;
+			divider = DIV_ROUND_UP(*nanosec, base);
 			break;
 		}
 		if (divider < 65536) {
@@ -368,7 +368,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
 	}
 
 	prescale = 15;
-	base = timer_base * (1 << prescale);
+	base = timer_base * (prescale + 1);
 	divider = 65535;
 	*nanosec = divider * base;
 	return (prescale << 16) | (divider);
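
Two fixes above are easy to miss: the CMDF_ROUND_UP branch previously truncated exactly like the round-down case, and the prescale-15 fallback now computes base with the same (prescale + 1) formula the divider search presumably uses. With DIV_ROUND_UP() the chosen period is never shorter than requested; for *nanosec = 1000 and base = 300:

	1000 / 300              == 3	/* rounds down: 900 ns, too fast  */
	DIV_ROUND_UP(1000, 300) == 4	/* rounds up: 1200 ns >= request  */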
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
index 0a089cf..fe6683e 100644
--- a/drivers/staging/erofs/dir.c
+++ b/drivers/staging/erofs/dir.c
@@ -100,8 +100,15 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
 		unsigned nameoff, maxsize;
 
 		dentry_page = read_mapping_page(mapping, i, NULL);
-		if (IS_ERR(dentry_page))
-			continue;
+		if (dentry_page == ERR_PTR(-ENOMEM)) {
+			err = -ENOMEM;
+			break;
+		} else if (IS_ERR(dentry_page)) {
+			errln("failed to readdir of logical block %u of nid %llu",
+			      i, EROFS_V(dir)->nid);
+			err = PTR_ERR(dentry_page);
+			break;
+		}
 
 		lock_page(dentry_page);
 		de = (struct erofs_dirent *)kmap(dentry_page);
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index ad6fe6d..0f1558c6 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -311,7 +311,11 @@ z_erofs_vle_work_lookup(struct super_block *sb,
 	/* if multiref is disabled, `primary' is always true */
 	primary = true;
 
-	DBG_BUGON(work->pageofs != pageofs);
+	if (work->pageofs != pageofs) {
+		DBG_BUGON(1);
+		erofs_workgroup_put(egrp);
+		return ERR_PTR(-EIO);
+	}
 
 	/*
 	 * lock must be taken first to avoid grp->next == NIL between
@@ -853,6 +857,7 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 	for (i = 0; i < nr_pages; ++i)
 		pages[i] = NULL;
 
+	err = 0;
 	z_erofs_pagevec_ctor_init(&ctor,
 		Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);
 
@@ -874,8 +879,17 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 			pagenr = z_erofs_onlinepage_index(page);
 
 		DBG_BUGON(pagenr >= nr_pages);
-		DBG_BUGON(pages[pagenr]);
 
+		/*
+		 * currently EROFS doesn't support multiref (dedup),
+		 * so error out if a page is mapped more than once.
+		 */
+		if (pages[pagenr]) {
+			DBG_BUGON(1);
+			SetPageError(pages[pagenr]);
+			z_erofs_onlinepage_endio(pages[pagenr]);
+			err = -EIO;
+		}
 		pages[pagenr] = page;
 	}
 	sparsemem_pages = i;
@@ -885,7 +899,6 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 	overlapped = false;
 	compressed_pages = grp->compressed_pages;
 
-	err = 0;
 	for (i = 0; i < clusterpages; ++i) {
 		unsigned pagenr;
 
@@ -911,7 +924,12 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 			pagenr = z_erofs_onlinepage_index(page);
 
 			DBG_BUGON(pagenr >= nr_pages);
-			DBG_BUGON(pages[pagenr]);
+			if (pages[pagenr]) {
+				DBG_BUGON(1);
+				SetPageError(pages[pagenr]);
+				z_erofs_onlinepage_endio(pages[pagenr]);
+				err = -EIO;
+			}
 			++sparsemem_pages;
 			pages[pagenr] = page;
 
@@ -1335,19 +1353,18 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file,
 	err = z_erofs_do_read_page(&f, page, &pagepool);
 	(void)z_erofs_vle_work_iter_end(&f.builder);
 
-	if (err) {
-		errln("%s, failed to read, err [%d]", __func__, err);
-		goto out;
-	}
-
+	/* if some compressed clusters are ready, submit them anyway */
 	z_erofs_submit_and_unzip(&f, &pagepool, true);
-out:
+
+	if (err)
+		errln("%s, failed to read, err [%d]", __func__, err);
+
 	if (f.m_iter.mpage != NULL)
 		put_page(f.m_iter.mpage);
 
 	/* clean up the remaining free pages */
 	put_pages_list(&pagepool);
-	return 0;
+	return err;
 }
 
 static inline int __z_erofs_vle_normalaccess_readpages(
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index a2df02d..16fcf63 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -819,7 +819,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
 	if (par->gamma.curves && gamma) {
 		if (fbtft_gamma_parse_str(par, par->gamma.curves, gamma,
 					  strlen(gamma)))
-			goto alloc_fail;
+			goto release_framebuf;
 	}
 
 	/* Transmit buffer */
@@ -836,7 +836,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
 	if (txbuflen > 0) {
 		txbuf = devm_kzalloc(par->info->device, txbuflen, GFP_KERNEL);
 		if (!txbuf)
-			goto alloc_fail;
+			goto release_framebuf;
 		par->txbuf.buf = txbuf;
 		par->txbuf.len = txbuflen;
 	}
@@ -872,6 +872,9 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
 
 	return info;
 
+release_framebuf:
+	framebuffer_release(info);
+
 alloc_fail:
 	vfree(vmem);
 
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
index ceeeb30..212fa06 100644
--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -247,7 +247,7 @@ static int __maybe_unused csi2_dphy_wait_ulp(struct csi2_dev *csi2)
 }
 
 /* Waits for low-power LP-11 state on data and clock lanes. */
-static int csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
+static void csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
 {
 	u32 mask, reg;
 	int ret;
@@ -258,11 +258,9 @@ static int csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
 	ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
 				 (reg & mask) == mask, 0, 500000);
 	if (ret) {
-		v4l2_err(&csi2->sd, "LP-11 timeout, phy_state = 0x%08x\n", reg);
-		return ret;
+		v4l2_warn(&csi2->sd, "LP-11 wait timeout, likely a sensor driver bug, expect capture failures.\n");
+		v4l2_warn(&csi2->sd, "phy_state = 0x%08x\n", reg);
 	}
-
-	return 0;
 }
 
 /* Wait for active clock on the clock lane. */
@@ -320,9 +318,7 @@ static int csi2_start(struct csi2_dev *csi2)
 	csi2_enable(csi2, true);
 
 	/* Step 5 */
-	ret = csi2_dphy_wait_stopstate(csi2);
-	if (ret)
-		goto err_assert_reset;
+	csi2_dphy_wait_stopstate(csi2);
 
 	/* Step 6 */
 	ret = v4l2_subdev_call(csi2->src_sd, video, s_stream, 1);
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 607804a..76f434c 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1755,8 +1755,10 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
 
 	priv->hw->max_signal = 100;
 
-	if (vnt_init(priv))
+	if (vnt_init(priv)) {
+		device_free_info(priv);
 		return -ENODEV;
+	}
 
 	device_print_info(priv);
 	pci_set_drvdata(pcid, priv);
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 649caae..2579811 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -649,17 +649,17 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
 			goto fail_locks;
 		}
 
-		if (wl->gpio_irq && init_irq(dev)) {
-			ret = -EIO;
-			goto fail_locks;
-		}
-
 		ret = wlan_initialize_threads(dev);
 		if (ret < 0) {
 			ret = -EIO;
 			goto fail_wilc_wlan;
 		}
 
+		if (wl->gpio_irq && init_irq(dev)) {
+			ret = -EIO;
+			goto fail_threads;
+		}
+
 		if (!wl->dev_irq_num &&
 		    wl->hif_func->enable_interrupt &&
 		    wl->hif_func->enable_interrupt(wl)) {
@@ -715,7 +715,7 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
 fail_irq_init:
 		if (wl->dev_irq_num)
 			deinit_irq(dev);
-
+fail_threads:
 		wlan_deinitialize_threads(dev);
 fail_wilc_wlan:
 		wilc_wlan_cleanup(dev);
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index d4cf09b..095df24 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -476,10 +476,8 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
 	/* Set the encryption - we only support wep */
 	if (is_wep) {
 		if (sme->key) {
-			if (sme->key_idx >= NUM_WEPKEYS) {
-				err = -EINVAL;
-				goto exit;
-			}
+			if (sme->key_idx >= NUM_WEPKEYS)
+				return -EINVAL;
 
 			result = prism2_domibset_uint32(wlandev,
 				DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index ce1321a..854b2bc 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -514,8 +514,8 @@ iblock_execute_write_same(struct se_cmd *cmd)
 		}
 
 		/* Always in 512 byte units for Linux/Block */
-		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
-		sectors -= 1;
+		block_lba += sg->length >> SECTOR_SHIFT;
+		sectors -= sg->length >> SECTOR_SHIFT;
 	}
 
 	iblock_submit_bios(&list);
@@ -757,7 +757,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 		}
 
 		/* Always in 512 byte units for Linux/Block */
-		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+		block_lba += sg->length >> SECTOR_SHIFT;
 		sg_num--;
 	}
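
With SECTOR_SHIFT (the kernel-wide constant 9, since 2^9 = 512) the WRITE SAME loop now consumes `sectors` by however many 512-byte sectors each scatterlist entry maps, instead of one per iteration. For an 8 KiB sg entry:

	block_lba += 8192 >> SECTOR_SHIFT;	/* advance the LBA by 16     */
	sectors   -= 8192 >> SECTOR_SHIFT;	/* was "sectors -= 1", which
						 * made the loop overrun     */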
 
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 9cc3843..cefc641 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -9,7 +9,6 @@
 #define IBLOCK_VERSION		"4.0"
 
 #define IBLOCK_MAX_CDBS		16
-#define IBLOCK_LBA_SHIFT	9
 
 struct iblock_req {
 	refcount_t pending;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index c46efa4..7159e83 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1143,14 +1143,16 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
 	struct se_cmd *se_cmd = cmd->se_cmd;
 	struct tcmu_dev *udev = cmd->tcmu_dev;
 	bool read_len_valid = false;
-	uint32_t read_len = se_cmd->data_length;
+	uint32_t read_len;
 
 	/*
 	 * cmd has been completed already from timeout, just reclaim
 	 * data area space and free cmd
 	 */
-	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
+	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+		WARN_ON_ONCE(se_cmd);
 		goto out;
+	}
 
 	list_del_init(&cmd->queue_entry);
 
@@ -1163,6 +1165,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
 		goto done;
 	}
 
+	read_len = se_cmd->data_length;
 	if (se_cmd->data_direction == DMA_FROM_DEVICE &&
 	    (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
 		read_len_valid = true;
@@ -1318,6 +1321,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
 		 */
 		scsi_status = SAM_STAT_CHECK_CONDITION;
 		list_del_init(&cmd->queue_entry);
+		cmd->se_cmd = NULL;
 	} else {
 		list_del_init(&cmd->queue_entry);
 		idr_remove(&udev->commands, id);
@@ -2036,6 +2040,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
 
 		idr_remove(&udev->commands, i);
 		if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+			WARN_ON(!cmd->se_cmd);
 			list_del_init(&cmd->queue_entry);
 			if (err_level == 1) {
 				/*
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 27d178b..02f623b 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -125,7 +125,7 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
 	unsigned long clipped_freq = ULONG_MAX, floor_freq = 0;
 	struct cpufreq_cooling_device *cpufreq_cdev;
 
-	if (event != CPUFREQ_ADJUST)
+	if (event != CPUFREQ_INCOMPATIBLE)
 		return NOTIFY_DONE;
 
 	mutex_lock(&cooling_list_lock);
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 6ac5230..8556510 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -569,6 +569,46 @@ static int of_thermal_aggregate_trip_types(struct thermal_zone_device *tz,
 	return 0;
 }
 
+static bool of_thermal_is_trips_triggered(struct thermal_zone_device *tz,
+		int temp)
+{
+	int tt, th, trip, last_temp;
+	struct __thermal_zone *data = tz->devdata;
+	bool triggered = false;
+
+	mutex_lock(&tz->lock);
+	last_temp = tz->temperature;
+	for (trip = 0; trip < data->ntrips; trip++) {
+		if (!tz->tzp->tracks_low) {
+			tt = data->trips[trip].temperature;
+			if (temp >= tt && last_temp < tt) {
+				triggered = true;
+				break;
+			}
+			th = tt - data->trips[trip].hysteresis;
+			if (temp <= th && last_temp > th) {
+				triggered = true;
+				break;
+			}
+		} else {
+			tt = data->trips[trip].temperature;
+			if (temp <= tt && last_temp > tt) {
+				triggered = true;
+				break;
+			}
+			th = tt + data->trips[trip].hysteresis;
+			if (temp >= th && last_temp < th) {
+				triggered = true;
+				break;
+			}
+		}
+	}
+	mutex_unlock(&tz->lock);
+
+	return triggered;
+}
+
 /*
  * of_thermal_aggregate_trip - aggregate trip temperatures across sibling
  *				thermal zones.
@@ -605,6 +645,8 @@ static void handle_thermal_trip(struct thermal_zone_device *tz,
 			thermal_zone_device_update(zone,
 				THERMAL_EVENT_UNSPECIFIED);
 		} else {
+			if (!of_thermal_is_trips_triggered(zone, trip_temp))
+				continue;
 			thermal_zone_device_update_temp(zone,
 				THERMAL_EVENT_UNSPECIFIED, trip_temp);
 		}
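
of_thermal_is_trips_triggered() reports a trip only when the new sample crosses a boundary relative to the zone's last reading, with the hysteresis defining the release threshold. A worked example for a high-temperature zone (tracks_low == false) with a trip at 95000 m°C and 5000 m°C hysteresis, so th = 90000:

	/* last_temp -> temp      crossing            result
	 * 94000     -> 96000     rises through tt    triggered
	 * 96000     -> 97000     already above tt    not triggered
	 * 92000     -> 89000     falls through th    triggered (clearing)
	 */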
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index 54698a7..2471627 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -120,3 +120,23 @@
 	   These cooling devices will be used by QTI chipset to place a
 	   request to limits hardware for a minimum CPU railway voltage
 	   corner at cold temperature condition.
+
+config QTI_LIMITS_ISENSE_CDSP
+	tristate "QTI Limits Isense Driver"
+	depends on QCOM_SMEM
+	help
+	  This enables a driver to read cdsp isense calibration data from
+	  shared memory and expose it to user space through a sysfs file.
+	  This driver is required on chipsets where the isense hardware is
+	  part of the cdsp subsystem.
+
+config QTI_CX_IPEAK_COOLING_DEVICE
+	bool "CX IPeak cooling device"
+	depends on THERMAL_OF
+	help
+	  This implements a mitigation device that places a thermal client
+	  vote with the CXIP LM hardware. When all pre-defined clients on
+	  the CX rail, including the thermal client, have set their vote,
+	  the CXIP LM hardware throttles the clients on the CX rail.
+
+	  If you want this support, you should say Y here.
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index 9279a22..c5820e5 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -11,3 +11,5 @@
 obj-$(CONFIG_REGULATOR_COOLING_DEVICE) += regulator_cdev.o
 obj-$(CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE) += cpu_isolate.o
 obj-$(CONFIG_QTI_LMH_CPU_VDD_COOLING_DEVICE) += lmh_cpu_vdd_cdev.o
+obj-$(CONFIG_QTI_LIMITS_ISENSE_CDSP) += msm_isense_cdsp.o
+obj-$(CONFIG_QTI_CX_IPEAK_COOLING_DEVICE) += cx_ipeak_cdev.o
diff --git a/drivers/thermal/qcom/adc-tm.c b/drivers/thermal/qcom/adc-tm.c
index f8d82d4..537912c 100644
--- a/drivers/thermal/qcom/adc-tm.c
+++ b/drivers/thermal/qcom/adc-tm.c
@@ -350,6 +350,8 @@ static int adc_tm_probe(struct platform_device *pdev)
 	adc_tm->base = reg;
 	adc_tm->dt_channels = dt_chan_num;
 
+	platform_set_drvdata(pdev, adc_tm);
+
 	revid_dev_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
 	if (revid_dev_node) {
 		adc_tm->pmic_rev_id = get_revid_data(revid_dev_node);
@@ -393,7 +395,6 @@ static int adc_tm_probe(struct platform_device *pdev)
 	}
 
 	list_add_tail(&adc_tm->list, &adc_tm_device_list);
-	platform_set_drvdata(pdev, adc_tm);
 	return 0;
 fail:
 	i = 0;
diff --git a/drivers/thermal/qcom/adc-tm5.c b/drivers/thermal/qcom/adc-tm5.c
index 9570411..286be8c 100644
--- a/drivers/thermal/qcom/adc-tm5.c
+++ b/drivers/thermal/qcom/adc-tm5.c
@@ -209,9 +209,30 @@ static int adc_tm5_configure(struct adc_tm_sensor *sensor,
 	buf[7] |= ADC_TM_Mn_MEAS_EN;
 
 	ret = adc_tm5_write_reg(chip,
-			ADC_TM_Mn_ADC_CH_SEL_CTL(btm_chan_idx), buf, 8);
+			ADC_TM_Mn_ADC_CH_SEL_CTL(btm_chan_idx), buf, 1);
 	if (ret < 0) {
-		pr_err("adc-tm block write failed with %d\n", ret);
+		pr_err("adc-tm channel select failed\n");
+		return ret;
+	}
+
+	ret = adc_tm5_write_reg(chip,
+			ADC_TM_Mn_MEAS_INTERVAL_CTL(btm_chan_idx), &buf[5], 1);
+	if (ret < 0) {
+		pr_err("adc-tm timer select failed\n");
+		return ret;
+	}
+
+	ret = adc_tm5_write_reg(chip,
+			ADC_TM_Mn_CTL(btm_chan_idx), &buf[6], 1);
+	if (ret < 0) {
+		pr_err("adc-tm parameter select failed\n");
+		return ret;
+	}
+
+	ret = adc_tm5_write_reg(chip,
+			ADC_TM_Mn_EN(btm_chan_idx), &buf[7], 1);
+	if (ret < 0) {
+		pr_err("adc-tm monitoring enable failed\n");
 		return ret;
 	}
 
diff --git a/drivers/thermal/qcom/bcl_pmic5.c b/drivers/thermal/qcom/bcl_pmic5.c
index 872388e..bdef0dd 100644
--- a/drivers/thermal/qcom/bcl_pmic5.c
+++ b/drivers/thermal/qcom/bcl_pmic5.c
@@ -74,6 +74,7 @@ struct bcl_device;
 
 struct bcl_peripheral_data {
 	int                     irq_num;
+	int                     status_bit_idx;
 	long			trip_thresh;
 	int                     last_val;
 	struct mutex            state_trans_lock;
@@ -92,6 +93,7 @@ struct bcl_device {
 
 static struct bcl_device *bcl_devices[MAX_PERPH_COUNT];
 static int bcl_device_ct;
+static bool ibat_use_qg_adc;
 
 static int bcl_read_register(struct bcl_device *bcl_perph, int16_t reg_offset,
 				unsigned int *data)
@@ -159,12 +161,20 @@ static void convert_ibat_to_adc_val(int *val)
 	 * Threshold register is bit shifted from ADC MSB.
 	 * So the scaling factor is half.
 	 */
-	*val = (*val * 2000) / BCL_IBAT_SCALING_UA;
+	if (ibat_use_qg_adc)
+		*val = (int)div_s64(*val * 2000 * 2, BCL_IBAT_SCALING_UA);
+	else
+		*val = (int)div_s64(*val * 2000, BCL_IBAT_SCALING_UA);
 }
 
 static void convert_adc_to_ibat_val(int *val)
 {
-	*val = (*val * BCL_IBAT_SCALING_UA) / 1000;
+	/* Scaling factor will be half if ibat_use_qg_adc is true */
+	if (ibat_use_qg_adc)
+		*val = (int)div_s64(*val * BCL_IBAT_SCALING_UA, 2 * 1000);
+	else
+		*val = (int)div_s64(*val * BCL_IBAT_SCALING_UA, 1000);
 }
 
 static int bcl_set_ibat(void *data, int low, int high)
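A note on the div_s64() conversions above: on 32-bit kernels a plain signed 64-bit division would pull in compiler helper routines the kernel does not link, so div_s64() from linux/math64.h is the required idiom. The runnable userspace sketch below mirrors the two branches of convert_ibat_to_adc_val(); the scaling constant is an assumption for illustration only (the real BCL_IBAT_SCALING_UA is defined elsewhere in this file), and the sketch also shows why it is safest to widen the multiplicand to 64 bits before the multiply.

	/*
	 * Userspace sketch of the ibat -> threshold-code conversion above.
	 * SCALING_UA is an assumed stand-in for BCL_IBAT_SCALING_UA.
	 */
	#include <stdio.h>
	#include <stdint.h>

	#define SCALING_UA 78127	/* assumed value, illustration only */

	static int ua_to_thresh(int64_t ua, int use_qg_adc)
	{
		/*
		 * The QG ADC halves the scaling factor, so the threshold code
		 * doubles. Widening to 64 bits before the multiply keeps
		 * ampere-range thresholds from overflowing 32-bit arithmetic.
		 */
		if (use_qg_adc)
			return (int)((ua * 2000 * 2) / SCALING_UA);
		return (int)((ua * 2000) / SCALING_UA);
	}

	int main(void)
	{
		/* 6 A expressed in uA: prints the code and its doubled QG variant */
		printf("%d %d\n", ua_to_thresh(6000000, 0), ua_to_thresh(6000000, 1));
		return 0;
	}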
@@ -419,10 +429,14 @@ static irqreturn_t bcl_handle_irq(int irq, void *data)
 
 	bcl_perph = perph_data->dev;
 	bcl_read_register(bcl_perph, BCL_IRQ_STATUS, &irq_status);
-	pr_debug("Irq:%d triggered for bcl type:%s. status:%u\n",
+
+	if (irq_status & perph_data->status_bit_idx) {
+		pr_debug("Irq:%d triggered for bcl type:%s. status:%u\n",
 			irq, bcl_int_names[perph_data->type],
 			irq_status);
-	of_thermal_handle_trip(perph_data->tz_dev);
+		of_thermal_handle_trip_temp(perph_data->tz_dev,
+				perph_data->status_bit_idx);
+	}
 
 	return IRQ_HANDLED;
 }
@@ -443,6 +457,9 @@ static int bcl_get_devicetree_data(struct platform_device *pdev,
 		return -ENODEV;
 	}
 
+	ibat_use_qg_adc = of_property_read_bool(dev_node,
+				"qcom,ibat-use-qg-adc-5a");
+
 	return ret;
 }
 
@@ -543,13 +560,14 @@ static void bcl_probe_ibat(struct platform_device *pdev,
 }
 
 static void bcl_lvl_init(struct platform_device *pdev,
-			enum bcl_dev_type type, struct bcl_device *bcl_perph)
+	enum bcl_dev_type type, int sts_bit_idx, struct bcl_device *bcl_perph)
 {
 	struct bcl_peripheral_data *lbat = &bcl_perph->param[type];
 
 	mutex_init(&lbat->state_trans_lock);
 	lbat->type = type;
 	lbat->dev = bcl_perph;
+	lbat->status_bit_idx = sts_bit_idx;
 	bcl_fetch_trip(pdev, type, lbat, bcl_handle_irq);
 	if (lbat->irq_num <= 0)
 		return;
@@ -572,9 +590,9 @@ static void bcl_lvl_init(struct platform_device *pdev,
 static void bcl_probe_lvls(struct platform_device *pdev,
 					struct bcl_device *bcl_perph)
 {
-	bcl_lvl_init(pdev, BCL_LVL0, bcl_perph);
-	bcl_lvl_init(pdev, BCL_LVL1, bcl_perph);
-	bcl_lvl_init(pdev, BCL_LVL2, bcl_perph);
+	bcl_lvl_init(pdev, BCL_LVL0, BCL_IRQ_L0, bcl_perph);
+	bcl_lvl_init(pdev, BCL_LVL1, BCL_IRQ_L1, bcl_perph);
+	bcl_lvl_init(pdev, BCL_LVL2, BCL_IRQ_L2, bcl_perph);
 }
 
 static void bcl_configure_bcl_peripheral(struct bcl_device *bcl_perph)
diff --git a/drivers/thermal/qcom/cx_ipeak_cdev.c b/drivers/thermal/qcom/cx_ipeak_cdev.c
new file mode 100644
index 0000000..cfc45aa
--- /dev/null
+++ b/drivers/thermal/qcom/cx_ipeak_cdev.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#define CXIP_LM_CDEV_DRIVER "cx-ipeak-cooling-device"
+#define CXIP_LM_CDEV_MAX_STATE 1
+
+#define CXIP_LM_VOTE_STATUS       0x0
+#define CXIP_LM_BYPASS            0x4
+#define CXIP_LM_VOTE_CLEAR        0x8
+#define CXIP_LM_VOTE_SET          0xc
+#define CXIP_LM_FEATURE_EN        0x10
+#define CXIP_LM_BYPASS_VAL        0xff20
+#define CXIP_LM_THERM_VOTE_VAL    0x80
+#define CXIP_LM_FEATURE_EN_VAL    0x1
+
+struct cxip_lm_cooling_device {
+	struct thermal_cooling_device	*cool_dev;
+	char				cdev_name[THERMAL_NAME_LENGTH];
+	void __iomem			*cx_ip_reg_base;
+	unsigned int			therm_clnt;
+	unsigned int			*bypass_clnts;
+	unsigned int			bypass_clnt_cnt;
+	bool				state;
+};
+
+static void cxip_lm_therm_vote_apply(struct cxip_lm_cooling_device *cxip_dev,
+					bool vote)
+{
+	int vote_offset = 0, val = 0, sts_offset = 0;
+
+	if (!cxip_dev->therm_clnt) {
+		vote_offset = vote ? CXIP_LM_VOTE_SET : CXIP_LM_VOTE_CLEAR;
+		val = CXIP_LM_THERM_VOTE_VAL;
+		sts_offset = CXIP_LM_VOTE_STATUS;
+	} else {
+		vote_offset = cxip_dev->therm_clnt;
+		val = vote ? 0x1 : 0x0;
+		sts_offset = vote_offset;
+	}
+
+	writel_relaxed(val, cxip_dev->cx_ip_reg_base + vote_offset);
+	pr_debug("%s vote for cxip_lm. status:0x%x\n",
+		vote ? "Applied" : "Cleared",
+		readl_relaxed(cxip_dev->cx_ip_reg_base + sts_offset));
+}
+
+static void cxip_lm_initialize_cxip_hw(struct cxip_lm_cooling_device *cxip_dev)
+{
+	int i = 0;
+
+	/* Set CXIP LM proxy vote for clients who are not participating */
+	if (cxip_dev->bypass_clnt_cnt)
+		for (i = 0; i < cxip_dev->bypass_clnt_cnt; i++)
+			writel_relaxed(0x1, cxip_dev->cx_ip_reg_base +
+					cxip_dev->bypass_clnts[i]);
+	else if (!cxip_dev->therm_clnt)
+		writel_relaxed(CXIP_LM_BYPASS_VAL,
+			cxip_dev->cx_ip_reg_base + CXIP_LM_BYPASS);
+
+	/* Enable CXIP LM HW */
+	writel_relaxed(CXIP_LM_FEATURE_EN_VAL, cxip_dev->cx_ip_reg_base +
+			CXIP_LM_FEATURE_EN);
+}
+
+static int cxip_lm_get_max_state(struct thermal_cooling_device *cdev,
+				 unsigned long *state)
+{
+	*state = CXIP_LM_CDEV_MAX_STATE;
+
+	return 0;
+}
+
+static int cxip_lm_set_cur_state(struct thermal_cooling_device *cdev,
+				 unsigned long state)
+{
+	struct cxip_lm_cooling_device *cxip_dev = cdev->devdata;
+	int ret = 0;
+
+	if (state > CXIP_LM_CDEV_MAX_STATE)
+		state = CXIP_LM_CDEV_MAX_STATE;
+
+	if (cxip_dev->state == state)
+		return 0;
+
+	cxip_lm_therm_vote_apply(cxip_dev, state);
+	cxip_dev->state = state;
+
+	return ret;
+}
+
+static int cxip_lm_get_cur_state(struct thermal_cooling_device *cdev,
+				 unsigned long *state)
+{
+	struct cxip_lm_cooling_device *cxip_dev = cdev->devdata;
+
+	*state = cxip_dev->state;
+
+	return 0;
+}
+
+static struct thermal_cooling_device_ops cxip_lm_device_ops = {
+	.get_max_state = cxip_lm_get_max_state,
+	.get_cur_state = cxip_lm_get_cur_state,
+	.set_cur_state = cxip_lm_set_cur_state,
+};
+
+static int cxip_lm_cdev_remove(struct platform_device *pdev)
+{
+	struct cxip_lm_cooling_device *cxip_dev =
+		(struct cxip_lm_cooling_device *)dev_get_drvdata(&pdev->dev);
+
+	if (cxip_dev) {
+		if (cxip_dev->cool_dev)
+			thermal_cooling_device_unregister(cxip_dev->cool_dev);
+
+		if (cxip_dev->cx_ip_reg_base)
+			cxip_lm_therm_vote_apply(cxip_dev, false);
+	}
+
+	return 0;
+}
+
+static int cxip_lm_get_devicetree_data(struct platform_device *pdev,
+					struct cxip_lm_cooling_device *cxip_dev,
+					struct device_node *np)
+{
+	int ret = 0;
+
+	ret = of_property_read_u32(np, "qcom,thermal-client-offset",
+			&cxip_dev->therm_clnt);
+	if (ret) {
+		dev_dbg(&pdev->dev,
+			"error for qcom,thermal-client-offset. ret:%d\n",
+			ret);
+		cxip_dev->therm_clnt = 0;
+		ret = 0;
+		return ret;
+	}
+
+	ret = of_property_count_u32_elems(np, "qcom,bypass-client-list");
+	if (ret <= 0) {
+		dev_dbg(&pdev->dev, "Invalid number of clients err:%d\n", ret);
+		ret = 0;
+		return ret;
+	}
+	cxip_dev->bypass_clnt_cnt = ret;
+
+	cxip_dev->bypass_clnts = devm_kcalloc(&pdev->dev,
+				cxip_dev->bypass_clnt_cnt,
+				sizeof(*cxip_dev->bypass_clnts), GFP_KERNEL);
+	if (!cxip_dev->bypass_clnts)
+		return -ENOMEM;
+
+	ret = of_property_read_u32_array(np, "qcom,bypass-client-list",
+		cxip_dev->bypass_clnts, cxip_dev->bypass_clnt_cnt);
+	if (ret) {
+		dev_dbg(&pdev->dev, "bypass client list err:%d, cnt:%d\n",
+			ret, cxip_dev->bypass_clnt_cnt);
+		cxip_dev->bypass_clnt_cnt = 0;
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static int cxip_lm_cdev_probe(struct platform_device *pdev)
+{
+	struct cxip_lm_cooling_device *cxip_dev = NULL;
+	int ret = 0;
+	struct device_node *np;
+	struct resource *res = NULL;
+
+	np = dev_of_node(&pdev->dev);
+	if (!np) {
+		dev_err(&pdev->dev,
+			"of node not available for cxip_lm cdev\n");
+		return -EINVAL;
+	}
+
+	cxip_dev = devm_kzalloc(&pdev->dev, sizeof(*cxip_dev), GFP_KERNEL);
+	if (!cxip_dev)
+		return -ENOMEM;
+
+	ret = cxip_lm_get_devicetree_data(pdev, cxip_dev, np);
+	if (ret)
+		return ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev,
+			"cxip_lm platform get resource failed\n");
+		return -ENODEV;
+	}
+
+	cxip_dev->cx_ip_reg_base = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+	if (!cxip_dev->cx_ip_reg_base) {
+		dev_err(&pdev->dev, "cxip_lm reg remap failed\n");
+		return -ENOMEM;
+	}
+
+	cxip_lm_initialize_cxip_hw(cxip_dev);
+
+	/* Set thermal vote till we get first vote from TF */
+	cxip_dev->state = true;
+	cxip_lm_therm_vote_apply(cxip_dev, cxip_dev->state);
+
+	strlcpy(cxip_dev->cdev_name, np->name, THERMAL_NAME_LENGTH);
+	cxip_dev->cool_dev = thermal_of_cooling_device_register(
+					np, cxip_dev->cdev_name, cxip_dev,
+					&cxip_lm_device_ops);
+	if (IS_ERR(cxip_dev->cool_dev)) {
+		ret = PTR_ERR(cxip_dev->cool_dev);
+		dev_err(&pdev->dev, "cxip_lm cdev register err:%d\n",
+				ret);
+		cxip_dev->cool_dev = NULL;
+		cxip_lm_therm_vote_apply(cxip_dev->cx_ip_reg_base,
+						false);
+		return ret;
+	}
+
+	dev_set_drvdata(&pdev->dev, cxip_dev);
+
+	return ret;
+}
+
+static const struct of_device_id cxip_lm_cdev_of_match[] = {
+	{.compatible = "qcom,cxip-lm-cooling-device", },
+	{}
+};
+
+static struct platform_driver cxip_lm_cdev_driver = {
+	.driver = {
+		.name = CXIP_LM_CDEV_DRIVER,
+		.of_match_table = cxip_lm_cdev_of_match,
+	},
+	.probe = cxip_lm_cdev_probe,
+	.remove = cxip_lm_cdev_remove,
+};
+builtin_platform_driver(cxip_lm_cdev_driver);
diff --git a/drivers/thermal/qcom/msm_isense_cdsp.c b/drivers/thermal/qcom/msm_isense_cdsp.c
new file mode 100644
index 0000000..3078d5a
--- /dev/null
+++ b/drivers/thermal/qcom/msm_isense_cdsp.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#define LIMITS_SMEM_CONFIG        619
+#define PARTITION_SIZE_BYTES      4096
+
+struct limits_isense_cdsp_sysfs {
+	struct kobj_attribute  attr;
+	struct module_kobject *m_kobj;
+};
+
+struct limits_isense_cdsp_smem_data {
+	uint8_t  subsys_cal_done;
+	uint8_t  store_data_in_partition;
+	uint16_t size_of_partition_data;
+} __packed;
+
+static struct limits_isense_cdsp_smem_data  *limits_isense_cdsp_data;
+static struct limits_isense_cdsp_sysfs      *limits_isense_cdsp_sysfs;
+
+static ssize_t limits_isense_cdsp_data_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	if (!limits_isense_cdsp_data)
+		return -ENODATA;
+
+	if (!limits_isense_cdsp_data->subsys_cal_done)
+		return -EAGAIN;
+
+	if (limits_isense_cdsp_data->store_data_in_partition) {
+		if (limits_isense_cdsp_data->size_of_partition_data >
+		    PARTITION_SIZE_BYTES)
+			return -ENOBUFS;
+
+		memcpy(buf,
+		       ((void *)
+		       (&limits_isense_cdsp_data->size_of_partition_data) + 2),
+		       limits_isense_cdsp_data->size_of_partition_data);
+
+		return limits_isense_cdsp_data->size_of_partition_data;
+	}
+
+	return -ENODATA;
+}
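The `+ 2` pointer arithmetic in the memcpy() above is how the show handler reaches the payload that follows the 4-byte header in SMEM: `size_of_partition_data` sits at offset 2, so stepping two bytes past its address lands on offset 4. A runnable sketch of the assumed layout (the header mirrors the struct in this file; the payload placement is inferred from the code, not from a published interface):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	struct smem_hdr {
		uint8_t  subsys_cal_done;		/* offset 0 */
		uint8_t  store_data_in_partition;	/* offset 1 */
		uint16_t size_of_partition_data;	/* offset 2..3 */
	} __attribute__((packed));			/* payload assumed at offset 4 */

	int main(void)
	{
		/* "+ 2" past &size_of_partition_data == offset 4, the payload start */
		assert(offsetof(struct smem_hdr, size_of_partition_data) + 2 ==
		       sizeof(struct smem_hdr));
		return 0;
	}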
+
+static int limits_create_msm_limits_cdsp_sysfs(struct platform_device *pdev)
+{
+	int err = 0;
+	struct module_kobject *m_kobj;
+
+	limits_isense_cdsp_sysfs = devm_kcalloc(&pdev->dev, 1,
+			sizeof(*limits_isense_cdsp_sysfs), GFP_KERNEL);
+	if (!limits_isense_cdsp_sysfs)
+		return -ENOMEM;
+
+	m_kobj = devm_kcalloc(&pdev->dev, 1,
+			sizeof(*m_kobj), GFP_KERNEL);
+	if (!m_kobj)
+		return -ENOMEM;
+
+	limits_isense_cdsp_sysfs->m_kobj = m_kobj;
+
+	m_kobj->mod = THIS_MODULE;
+	m_kobj->kobj.kset = module_kset;
+
+	err = kobject_init_and_add(&m_kobj->kobj, &module_ktype, NULL,
+				   "%s", KBUILD_MODNAME);
+	if (err) {
+		dev_err(&pdev->dev,
+			"%s: cannot create kobject for %s\n",
+			__func__, KBUILD_MODNAME);
+		goto exit_handler;
+	}
+
+	kobject_get(&m_kobj->kobj);
+
+	sysfs_attr_init(&limits_isense_cdsp_sysfs->attr.attr);
+	limits_isense_cdsp_sysfs->attr.attr.name = "data";
+	limits_isense_cdsp_sysfs->attr.attr.mode = 0444;
+	limits_isense_cdsp_sysfs->attr.show = limits_isense_cdsp_data_show;
+	limits_isense_cdsp_sysfs->attr.store = NULL;
+
+	err = sysfs_create_file(&m_kobj->kobj,
+				&limits_isense_cdsp_sysfs->attr.attr);
+	if (err) {
+		dev_err(&pdev->dev, "cannot create sysfs file\n");
+		goto exit_handler;
+	}
+
+	return err;
+
+exit_handler:
+	kobject_put(&m_kobj->kobj);
+
+	return err;
+}
+
+static int limits_isense_cdsp_probe(struct platform_device *pdev)
+{
+	void *smem_ptr = NULL;
+	size_t size = 0;
+	int err;
+
+	if (limits_isense_cdsp_data)
+		return 0;
+
+	smem_ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY, LIMITS_SMEM_CONFIG, &size);
+	if (IS_ERR(smem_ptr) || !size) {
+		dev_err(&pdev->dev,
+			"Failed to get limits SMEM address\n");
+		return IS_ERR(smem_ptr) ? PTR_ERR(smem_ptr) : -ENODATA;
+	}
+
+	limits_isense_cdsp_data =
+			(struct limits_isense_cdsp_smem_data *) smem_ptr;
+
+	err = limits_create_msm_limits_cdsp_sysfs(pdev);
+
+	return err;
+}
+
+static int limits_isense_cdsp_remove(struct platform_device *pdev)
+{
+	limits_isense_cdsp_data = NULL;
+
+	if (limits_isense_cdsp_sysfs && limits_isense_cdsp_sysfs->m_kobj) {
+		sysfs_remove_file(&limits_isense_cdsp_sysfs->m_kobj->kobj,
+				  &limits_isense_cdsp_sysfs->attr.attr);
+		kobject_del(&limits_isense_cdsp_sysfs->m_kobj->kobj);
+	}
+
+	return 0;
+}
+
+static const struct of_device_id limits_isense_cdsp_match[] = {
+	{ .compatible = "qcom,msm-limits-cdsp", },
+	{},
+};
+
+static struct platform_driver limits_isense_cdsp_driver = {
+	.probe = limits_isense_cdsp_probe,
+	.remove = limits_isense_cdsp_remove,
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = limits_isense_cdsp_match,
+	},
+};
+
+static int __init limits_isense_cdsp_late_init(void)
+{
+	int err;
+
+	err = platform_driver_register(&limits_isense_cdsp_driver);
+	if (err)
+		pr_err("Failed to register limits_isense_cdsp platform driver: %d\n",
+			err);
+
+	return err;
+}
+late_initcall(limits_isense_cdsp_late_init);
diff --git a/drivers/thermal/qcom/qmi_cooling.c b/drivers/thermal/qcom/qmi_cooling.c
index cd67695..ff1705a 100644
--- a/drivers/thermal/qcom/qmi_cooling.c
+++ b/drivers/thermal/qcom/qmi_cooling.c
@@ -62,6 +62,10 @@ static struct qmi_dev_info device_clients[] = {
 		.type = QMI_CDEV_MAX_LIMIT_TYPE,
 	},
 	{
+		.dev_name = "pa_fr1",
+		.type = QMI_CDEV_MAX_LIMIT_TYPE,
+	},
+	{
 		.dev_name = "cx_vdd_limit",
 		.type = QMI_CDEV_MAX_LIMIT_TYPE,
 	},
@@ -86,6 +90,10 @@ static struct qmi_dev_info device_clients[] = {
 		.type = QMI_CDEV_MAX_LIMIT_TYPE,
 	},
 	{
+		.dev_name = "charge_state",
+		.type = QMI_CDEV_MAX_LIMIT_TYPE,
+	},
+	{
 		.dev_name = "mmw0",
 		.type = QMI_CDEV_MAX_LIMIT_TYPE,
 	},
@@ -102,19 +110,19 @@ static struct qmi_dev_info device_clients[] = {
 		.type = QMI_CDEV_MAX_LIMIT_TYPE,
 	},
 	{
-		.dev_name = "modem_skin0",
+		.dev_name = "mmw_skin0",
 		.type = QMI_CDEV_MAX_LIMIT_TYPE,
 	},
 	{
-		.dev_name = "modem_skin1",
+		.dev_name = "mmw_skin1",
 		.type = QMI_CDEV_MAX_LIMIT_TYPE,
 	},
 	{
-		.dev_name = "modem_skin2",
+		.dev_name = "mmw_skin2",
 		.type = QMI_CDEV_MAX_LIMIT_TYPE,
 	},
 	{
-		.dev_name = "modem_skin3",
+		.dev_name = "mmw_skin3",
 		.type = QMI_CDEV_MAX_LIMIT_TYPE,
 	},
 	{
@@ -210,6 +218,7 @@ static int qmi_tmd_send_state_request(struct qmi_cooling_device *qmi_cdev,
 			state, qmi_cdev->cdev_name, ret);
 		goto qmi_send_exit;
 	}
+	ret = 0;
 	pr_debug("Requested qmi state:%d for %s\n", state, qmi_cdev->cdev_name);
 
 qmi_send_exit:
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index b07f2a7..3a8d9ae 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -54,6 +54,12 @@ static unsigned long get_target_state(struct thermal_instance *instance,
 	unsigned long next_target;
 
 	/*
+	 * If the throttle condition is not reached and there is no
+	 * previous mitigation request, then there is nothing to compute.
+	 */
+	if (!throttle && instance->target == THERMAL_NO_TARGET)
+		return THERMAL_NO_TARGET;
+	/*
 	 * We keep this instance the way it is by default.
 	 * Otherwise, we use the current state of the
 	 * cdev in use to determine the next_target.
@@ -90,7 +96,8 @@ static unsigned long get_target_state(struct thermal_instance *instance,
 		break;
 	case THERMAL_TREND_DROPPING:
 	case THERMAL_TREND_STABLE:
-		if (cur_state <= instance->lower) {
+		if (cur_state <= instance->lower ||
+			instance->target <= instance->lower) {
 			if (!throttle)
 				next_target = THERMAL_NO_TARGET;
 		} else {
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 04ff104..cb384fc 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -306,7 +306,7 @@ static void thermal_zone_device_set_polling(struct workqueue_struct *queue,
 		mod_delayed_work(queue, &tz->poll_queue,
 				 msecs_to_jiffies(delay));
 	else
-		cancel_delayed_work(&tz->poll_queue);
+		cancel_delayed_work_sync(&tz->poll_queue);
 }
 
 static void monitor_thermal_zone(struct thermal_zone_device *tz)
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index 40c69a5..dd5d8ee 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -87,13 +87,17 @@ static struct thermal_hwmon_device *
 thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz)
 {
 	struct thermal_hwmon_device *hwmon;
+	char type[THERMAL_NAME_LENGTH];
 
 	mutex_lock(&thermal_hwmon_list_lock);
-	list_for_each_entry(hwmon, &thermal_hwmon_list, node)
-		if (!strcmp(hwmon->type, tz->type)) {
+	list_for_each_entry(hwmon, &thermal_hwmon_list, node) {
+		strcpy(type, tz->type);
+		strreplace(type, '-', '_');
+		if (!strcmp(hwmon->type, type)) {
 			mutex_unlock(&thermal_hwmon_list_lock);
 			return hwmon;
 		}
+	}
 	mutex_unlock(&thermal_hwmon_list_lock);
 
 	return NULL;
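The lookup fix above normalizes the zone type before comparing, mirroring the sanitization applied on the registration side, since hwmon rejects '-' in device names; without it a zone with a hyphenated type would never match its own hwmon entry. A runnable sketch with a userspace stand-in for the kernel's strreplace() (the zone name is hypothetical):

	#include <stdio.h>
	#include <string.h>

	/* Stand-in for the kernel's strreplace(); returns the terminating NUL */
	static char *strreplace(char *s, char old, char new)
	{
		for (; *s; s++)
			if (*s == old)
				*s = new;
		return s;
	}

	int main(void)
	{
		char type[20];

		strcpy(type, "pm8150-tz");	/* hypothetical zone name */
		strreplace(type, '-', '_');
		printf("%s\n", type);		/* pm8150_tz, the hwmon-safe form */
		return 0;
	}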
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index d7037a7..cb0e13f 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -898,6 +898,9 @@ void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
 {
 	struct cooling_dev_stats *stats = cdev->stats;
 
+	if (!stats)
+		return;
+
 	spin_lock(&stats->lock);
 
 	if (stats->state == new_state)
@@ -919,6 +922,9 @@ static ssize_t total_trans_show(struct device *dev,
 	struct cooling_dev_stats *stats = cdev->stats;
 	int ret;
 
+	if (!stats)
+		return -ENODEV;
+
 	spin_lock(&stats->lock);
 	ret = sprintf(buf, "%u\n", stats->total_trans);
 	spin_unlock(&stats->lock);
@@ -935,6 +941,9 @@ time_in_state_ms_show(struct device *dev, struct device_attribute *attr,
 	ssize_t len = 0;
 	int i;
 
+	if (!stats)
+		return -ENODEV;
+
 	spin_lock(&stats->lock);
 	update_time_in_state(stats);
 
@@ -953,8 +962,12 @@ reset_store(struct device *dev, struct device_attribute *attr, const char *buf,
 {
 	struct thermal_cooling_device *cdev = to_cooling_device(dev);
 	struct cooling_dev_stats *stats = cdev->stats;
-	int i, states = stats->max_states;
+	int i, states;
 
+	if (!stats)
+		return -ENODEV;
+
+	states = stats->max_states;
 	spin_lock(&stats->lock);
 
 	stats->total_trans = 0;
@@ -978,6 +991,9 @@ static ssize_t trans_table_show(struct device *dev,
 	ssize_t len = 0;
 	int i, j;
 
+	if (!stats)
+		return -ENODEV;
+
 	len += snprintf(buf + len, PAGE_SIZE - len, " From  :    To\n");
 	len += snprintf(buf + len, PAGE_SIZE - len, "       : ");
 	for (i = 0; i < stats->max_states; i++) {
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 1cb80fe..dd8949e 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1270,7 +1270,6 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
 
 			atmel_port->hd_start_rx = false;
 			atmel_start_rx(port);
-			return;
 		}
 
 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index a413ecb..ce40ff0 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -165,11 +165,14 @@ struct msm_geni_serial_port {
 	void *ipc_log_rx;
 	void *ipc_log_pwr;
 	void *ipc_log_misc;
+	void *console_log;
 	unsigned int cur_baud;
 	int ioctl_count;
 	int edge_count;
 	bool manual_flow;
 	struct msm_geni_serial_ver_info ver_info;
+	u32 cur_tx_remaining;
+	bool startup_in_progress;
 };
 
 static const struct uart_ops msm_geni_serial_pops;
@@ -479,6 +482,7 @@ static struct msm_geni_serial_port *get_port_from_line(int line,
 	struct msm_geni_serial_port *port = NULL;
 
 	if (is_console) {
+		/* Max 1 port supported as of now */
 		if ((line < 0) || (line >= GENI_UART_CONS_PORTS))
 			return ERR_PTR(-ENXIO);
 		port = &msm_geni_console_port;
@@ -771,10 +775,13 @@ static void msm_geni_serial_console_write(struct console *co, const char *s,
 {
 	struct uart_port *uport;
 	struct msm_geni_serial_port *port;
-	int locked = 1;
+	bool locked = true;
 	unsigned long flags;
+	unsigned int geni_status;
+	int irq_en;
 
-	WARN_ON(co->index < 0 || co->index >= GENI_UART_NR_PORTS);
+	/* Max 1 port supported as of now */
+	WARN_ON(co->index < 0 || co->index >= GENI_UART_CONS_PORTS);
 
 	port = get_port_from_line(co->index, true);
 	if (IS_ERR_OR_NULL(port))
@@ -786,10 +793,44 @@ static void msm_geni_serial_console_write(struct console *co, const char *s,
 	else
 		spin_lock_irqsave(&uport->lock, flags);
 
-	if (locked) {
-		__msm_geni_serial_console_write(uport, s, count);
-		spin_unlock_irqrestore(&uport->lock, flags);
+	geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+
+	/* Cancel the current write to log the fault */
+	if (!locked) {
+		geni_cancel_m_cmd(uport->membase);
+		if (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+						M_CMD_CANCEL_EN, true)) {
+			geni_abort_m_cmd(uport->membase);
+			msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+							M_CMD_ABORT_EN, true);
+			geni_write_reg_nolog(M_CMD_ABORT_EN, uport->membase,
+							SE_GENI_M_IRQ_CLEAR);
+		}
+		writel_relaxed(M_CMD_CANCEL_EN, uport->membase +
+							SE_GENI_M_IRQ_CLEAR);
+	} else if ((geni_status & M_GENI_CMD_ACTIVE) &&
+						!port->cur_tx_remaining) {
+		/*
+		 * It seems we can interrupt existing transfers unless all
+		 * data has been sent, in which case we need to look for
+		 * done first.
+		 */
+		msm_geni_serial_poll_cancel_tx(uport);
+
+		/* Enable WM interrupt for every new console write op */
+		if (uart_circ_chars_pending(&uport->state->xmit)) {
+			irq_en = geni_read_reg_nolog(uport->membase,
+						SE_GENI_M_IRQ_EN);
+			geni_write_reg_nolog(irq_en | M_TX_FIFO_WATERMARK_EN,
+					uport->membase, SE_GENI_M_IRQ_EN);
+		}
 	}
+
+	__msm_geni_serial_console_write(uport, s, count);
+
+	if (port->cur_tx_remaining)
+		msm_geni_serial_setup_tx(uport, port->cur_tx_remaining);
+
+	if (locked)
+		spin_unlock_irqrestore(&uport->lock, flags);
 }
 
 static int handle_rx_console(struct uart_port *uport,
@@ -1042,6 +1083,9 @@ static void start_rx_sequencer(struct uart_port *uport)
 	int ret;
 	u32 geni_se_param = UART_PARAM_RFR_OPEN;
 
+	if (port->startup_in_progress)
+		return;
+
 	geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
 	if (geni_status & S_GENI_CMD_ACTIVE) {
 		if (port->xfer_mode == SE_DMA && !port->rx_dma) {
@@ -1129,6 +1173,38 @@ static void msm_geni_serial_rx_fsm_rst(struct uart_port *uport)
 	geni_write_reg_nolog(rx_irq_en, uport->membase, SE_DMA_RX_IRQ_EN_SET);
 }
 
+static void msm_geni_serial_set_manual_flow(bool enable,
+					struct msm_geni_serial_port *port)
+{
+	u32 uart_manual_rfr = 0;
+
+	if (!enable) {
+		uart_manual_rfr |= (UART_MANUAL_RFR_EN);
+		geni_write_reg_nolog(uart_manual_rfr, port->uport.membase,
+						SE_UART_MANUAL_RFR);
+		/* UART FW needs a delay per HW experts' recommendation */
+		udelay(10);
+
+		uart_manual_rfr |= (UART_RFR_NOT_READY);
+		geni_write_reg_nolog(uart_manual_rfr, port->uport.membase,
+						SE_UART_MANUAL_RFR);
+		/*
+		 * Ensure that the manual flow on writes go through before
+		 * doing a stop_rx.
+		 */
+		mb();
+		IPC_LOG_MSG(port->ipc_log_pwr,
+			"%s: Manual Flow Enabled, HW Flow OFF\n", __func__);
+	} else {
+		geni_write_reg_nolog(0, port->uport.membase,
+						SE_UART_MANUAL_RFR);
+		/* Ensure that the manual flow off writes go through */
+		mb();
+		IPC_LOG_MSG(port->ipc_log_pwr,
+			"%s: Manual Flow Disabled, HW Flow ON\n", __func__);
+	}
+}
+
 static void stop_rx_sequencer(struct uart_port *uport)
 {
 	unsigned int geni_s_irq_en;
@@ -1266,41 +1342,61 @@ static int msm_geni_serial_handle_rx(struct uart_port *uport, bool drop_rx)
 	return ret;
 }
 
-static int msm_geni_serial_handle_tx(struct uart_port *uport)
+static int msm_geni_serial_handle_tx(struct uart_port *uport, bool done,
+		bool active)
 {
 	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
 	struct circ_buf *xmit = &uport->state->xmit;
-	unsigned int avail_fifo_bytes = 0;
+	int avail_fifo_bytes = 0;
 	unsigned int bytes_remaining = 0;
+	unsigned int pending;
 	int i = 0;
 	unsigned int tx_fifo_status;
 	unsigned int xmit_size;
 	unsigned int fifo_width_bytes =
 		(uart_console(uport) ? 1 : (msm_port->tx_fifo_width >> 3));
 	int temp_tail = 0;
+	int irq_en;
 
-	xmit_size = uart_circ_chars_pending(xmit);
 	tx_fifo_status = geni_read_reg_nolog(uport->membase,
 					SE_GENI_TX_FIFO_STATUS);
-	/* Both FIFO and framework buffer are drained */
-	if (!xmit_size && !tx_fifo_status) {
+
+	/* Complete the current tx command before taking newly added data */
+	if (active)
+		pending = msm_port->cur_tx_remaining;
+	else
+		pending = uart_circ_chars_pending(xmit);
+
+	/* All data has been transmitted and acknowledged as received */
+	if (!pending && !tx_fifo_status && done) {
 		msm_geni_serial_stop_tx(uport);
 		goto exit_handle_tx;
 	}
 
-	avail_fifo_bytes = (msm_port->tx_fifo_depth - msm_port->tx_wm) *
-							fifo_width_bytes;
-	temp_tail = xmit->tail & (UART_XMIT_SIZE - 1);
+	avail_fifo_bytes = msm_port->tx_fifo_depth - (tx_fifo_status &
+								TX_FIFO_WC);
+	avail_fifo_bytes *= fifo_width_bytes;
+	if (avail_fifo_bytes < 0)
+		avail_fifo_bytes = 0;
 
-	if (xmit_size > (UART_XMIT_SIZE - temp_tail))
-		xmit_size = (UART_XMIT_SIZE - temp_tail);
-	if (xmit_size > avail_fifo_bytes)
-		xmit_size = avail_fifo_bytes;
+	temp_tail = xmit->tail;
+	xmit_size = min_t(unsigned int, avail_fifo_bytes, pending);
 	if (!xmit_size)
 		goto exit_handle_tx;
 
-	msm_geni_serial_setup_tx(uport, xmit_size);
+	if (!msm_port->cur_tx_remaining) {
+		msm_geni_serial_setup_tx(uport, pending);
+		msm_port->cur_tx_remaining = pending;
+
+		/* Re-enable WM interrupt when starting new transfer */
+		irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
+		if (!(irq_en & M_TX_FIFO_WATERMARK_EN))
+			geni_write_reg_nolog(irq_en | M_TX_FIFO_WATERMARK_EN,
+					uport->membase, SE_GENI_M_IRQ_EN);
+	}
+
 	bytes_remaining = xmit_size;
+
 	while (i < xmit_size) {
 		unsigned int tx_bytes;
 		unsigned int buf = 0;
@@ -1309,20 +1405,39 @@ static int msm_geni_serial_handle_tx(struct uart_port *uport)
 		tx_bytes = ((bytes_remaining < fifo_width_bytes) ?
 					bytes_remaining : fifo_width_bytes);
 
-		for (c = 0; c < tx_bytes ; c++)
-			buf |= (xmit->buf[temp_tail + c] << (c * 8));
+		for (c = 0; c < tx_bytes ; c++) {
+			buf |= (xmit->buf[temp_tail++] << (c * 8));
+			temp_tail &= UART_XMIT_SIZE - 1;
+		}
+
 		geni_write_reg_nolog(buf, uport->membase, SE_GENI_TX_FIFOn);
+
 		i += tx_bytes;
 		bytes_remaining -= tx_bytes;
 		uport->icount.tx += tx_bytes;
-		temp_tail += tx_bytes;
+		msm_port->cur_tx_remaining -= tx_bytes;
 		/* Ensure FIFO write goes through */
 		wmb();
 	}
-	xmit->tail = temp_tail & (UART_XMIT_SIZE - 1);
-	if (uart_console(uport))
-		msm_geni_serial_poll_cancel_tx(uport);
+	xmit->tail = temp_tail;
+
+	/*
+	 * The tx fifo watermark is level triggered and latched. Though we had
+	 * cleared it in qcom_geni_serial_isr it will have already reasserted
+	 * so we must clear it again here after our writes.
+	 */
+	geni_write_reg_nolog(M_TX_FIFO_WATERMARK_EN, uport->membase,
+						SE_GENI_M_IRQ_CLEAR);
+
 exit_handle_tx:
+	if (!msm_port->cur_tx_remaining) {
+		irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
+		/* Clear WM interrupt post each transfer completion */
+		if (irq_en & M_TX_FIFO_WATERMARK_EN)
+			geni_write_reg_nolog(irq_en & ~M_TX_FIFO_WATERMARK_EN,
+					uport->membase, SE_GENI_M_IRQ_EN);
+	}
+
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 		uart_write_wakeup(uport);
 	return 0;
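The new tail handling above masks with `UART_XMIT_SIZE - 1` after every byte, which both wraps the circular buffer correctly when a transfer straddles the end of the buffer and stays cheap, because UART_XMIT_SIZE is PAGE_SIZE, a power of two. A small runnable illustration of the masking wrap:

	#include <stdio.h>

	#define XMIT_SIZE 4096	/* stand-in for UART_XMIT_SIZE (PAGE_SIZE) */

	int main(void)
	{
		unsigned int tail = 4094;
		int i;

		/* consuming 4 bytes near the end wraps 4094, 4095 back to 0, 1 */
		for (i = 0; i < 4; i++) {
			printf("%u ", tail);
			tail++;
			tail &= XMIT_SIZE - 1;	/* cheap wrap; needs a power of two */
		}
		printf("\n");
		return 0;
	}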
@@ -1418,13 +1533,17 @@ static irqreturn_t msm_geni_serial_isr(int isr, void *dev)
 	struct uart_port *uport = dev;
 	unsigned long flags;
 	unsigned int m_irq_en;
+	unsigned int geni_status;
 	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
 	struct tty_port *tport = &uport->state->port;
 	bool drop_rx = false;
 
 	spin_lock_irqsave(&uport->lock, flags);
-	if (uart_console(uport) && uport->suspended)
+	if (uart_console(uport) && uport->suspended) {
+		IPC_LOG_MSG(msm_port->console_log,
+			"%s. Console in suspend state\n", __func__);
 		goto exit_geni_serial_isr;
+	}
 	if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev)) {
 		dev_err(uport->dev, "%s.Device is suspended.\n", __func__);
 		IPC_LOG_MSG(msm_port->ipc_log_misc,
@@ -1435,10 +1554,15 @@ static irqreturn_t msm_geni_serial_isr(int isr, void *dev)
 						SE_GENI_M_IRQ_STATUS);
 	s_irq_status = geni_read_reg_nolog(uport->membase,
 						SE_GENI_S_IRQ_STATUS);
+	if (uart_console(uport))
+		IPC_LOG_MSG(msm_port->console_log,
+			"%s. sirq 0x%x mirq:0x%x\n", __func__, s_irq_status,
+								m_irq_status);
 	m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
 	dma = geni_read_reg_nolog(uport->membase, SE_GENI_DMA_MODE_EN);
 	dma_tx_status = geni_read_reg_nolog(uport->membase, SE_DMA_TX_IRQ_STAT);
 	dma_rx_status = geni_read_reg_nolog(uport->membase, SE_DMA_RX_IRQ_STAT);
+	geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS);
 
 	geni_write_reg_nolog(m_irq_status, uport->membase, SE_GENI_M_IRQ_CLEAR);
 	geni_write_reg_nolog(s_irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR);
@@ -1459,7 +1583,9 @@ static irqreturn_t msm_geni_serial_isr(int isr, void *dev)
 	if (!dma) {
 		if ((m_irq_status & m_irq_en) &
 		    (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN))
-			msm_geni_serial_handle_tx(uport);
+			msm_geni_serial_handle_tx(uport,
+					m_irq_status & M_CMD_DONE_EN,
+					geni_status & M_GENI_CMD_ACTIVE);
 
 		if ((s_irq_status & S_GP_IRQ_0_EN) ||
 			(s_irq_status & S_GP_IRQ_1_EN)) {
@@ -1600,6 +1726,7 @@ static void msm_geni_serial_shutdown(struct uart_port *uport)
 {
 	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
 	unsigned long flags;
+	int ret;
 
 	/* Stop the console before stopping the current tx */
 	if (uart_console(uport)) {
@@ -1628,7 +1755,13 @@ static void msm_geni_serial_shutdown(struct uart_port *uport)
 			}
 			msm_port->ioctl_count = 0;
 		}
-		msm_geni_serial_power_off(uport);
+
+		ret = pm_runtime_put_sync_suspend(uport->dev);
+		if (ret) {
+			IPC_LOG_MSG(msm_port->ipc_log_pwr,
+			"%s: Failed to suspend:%d\n", __func__, ret);
+		}
+
 		if (msm_port->wakeup_irq > 0) {
 			irq_set_irq_wake(msm_port->wakeup_irq, 0);
 			disable_irq(msm_port->wakeup_irq);
@@ -1722,6 +1855,8 @@ static int msm_geni_serial_startup(struct uart_port *uport)
 	scnprintf(msm_port->name, sizeof(msm_port->name), "msm_serial_geni%d",
 				uport->line);
 
+	msm_port->startup_in_progress = true;
+
 	if (likely(!uart_console(uport))) {
 		ret = msm_geni_serial_power_on(&msm_port->uport);
 		if (ret) {
@@ -1771,6 +1906,8 @@ static int msm_geni_serial_startup(struct uart_port *uport)
 exit_startup:
 	if (likely(!uart_console(uport)))
 		msm_geni_serial_power_off(&msm_port->uport);
+	msm_port->startup_in_progress = false;
+
 	return ret;
 }
 
@@ -1869,6 +2006,8 @@ static void msm_geni_serial_set_termios(struct uart_port *uport,
 							__func__, ret);
 			return;
 		}
+		disable_irq(uport->irq);
+		msm_geni_serial_set_manual_flow(false, port);
 	}
 	/* Take a spinlock else stop_rx causes a race with an ISR due to Cancel
 	 * and FSM_RESET. This also has a potential race with the dma_map/unmap
@@ -1965,8 +2104,8 @@ static void msm_geni_serial_set_termios(struct uart_port *uport,
 
 	if (termios->c_cflag & CRTSCTS) {
 		geni_write_reg_nolog(0x0, uport->membase, SE_UART_MANUAL_RFR);
-		IPC_LOG_MSG(port->ipc_log_misc, "%s: Manual flow off\n",
-				__func__);
+		IPC_LOG_MSG(port->ipc_log_misc,
+			"%s: Manual flow Disabled, HW Flow ON\n", __func__);
 	}
 
 	IPC_LOG_MSG(port->ipc_log_misc, "%s: baud %d\n", __func__, baud);
@@ -1977,6 +2116,10 @@ static void msm_geni_serial_set_termios(struct uart_port *uport,
 	IPC_LOG_MSG(port->ipc_log_misc, "BitsChar%d stop bit%d\n",
 				bits_per_char, stop_bit_len);
 exit_set_termios:
+	if (!uart_console(uport)) {
+		msm_geni_serial_set_manual_flow(true, port);
+		enable_irq(uport->irq);
+	}
 	msm_geni_serial_start_rx(uport);
 	if (!uart_console(uport))
 		msm_geni_serial_power_off(uport);
@@ -2086,7 +2229,8 @@ static int __init msm_geni_console_setup(struct console *co, char *options)
 	int flow = 'n';
 	int ret = 0;
 
-	if (unlikely(co->index >= GENI_UART_NR_PORTS  || co->index < 0))
+	/* Max 1 port supported as of now */
+	if (unlikely(co->index >= GENI_UART_CONS_PORTS || co->index < 0))
 		return -ENXIO;
 
 	dev_port = get_port_from_line(co->index, true);
@@ -2191,7 +2335,6 @@ msm_geni_serial_earlycon_setup(struct earlycon_device *dev,
 	 * it else we could end up in data loss scenarios.
 	 */
 	msm_geni_serial_poll_cancel_tx(uport);
-	msm_geni_serial_abort_rx(uport);
 
 	se_get_packing_config(8, 1, false, &cfg0, &cfg1);
 	geni_se_init(uport->membase, (DEF_FIFO_DEPTH_WORDS >> 1),
@@ -2203,14 +2346,8 @@ msm_geni_serial_earlycon_setup(struct earlycon_device *dev,
 							SE_UART_TX_TRANS_CFG);
 	geni_write_reg_nolog(tx_parity_cfg, uport->membase,
 							SE_UART_TX_PARITY_CFG);
-	geni_write_reg_nolog(rx_trans_cfg, uport->membase,
-							SE_UART_RX_TRANS_CFG);
-	geni_write_reg_nolog(rx_parity_cfg, uport->membase,
-							SE_UART_RX_PARITY_CFG);
 	geni_write_reg_nolog(bits_per_char, uport->membase,
 							SE_UART_TX_WORD_LEN);
-	geni_write_reg_nolog(bits_per_char, uport->membase,
-							SE_UART_RX_WORD_LEN);
 	geni_write_reg_nolog(stop_bit, uport->membase, SE_UART_TX_STOP_BIT_LEN);
 	geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_M_CLK_CFG);
 	geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_S_CLK_CFG);
@@ -2252,7 +2389,7 @@ static struct uart_driver msm_geni_console_driver = {
 	.owner = THIS_MODULE,
 	.driver_name = "msm_geni_console",
 	.dev_name = "ttyMSM",
-	.nr =  GENI_UART_NR_PORTS,
+	.nr = GENI_UART_CONS_PORTS,
 	.cons = &cons_ops,
 };
 #else
@@ -2269,14 +2406,13 @@ static void console_unregister(struct uart_driver *drv)
 static void msm_geni_serial_debug_init(struct uart_port *uport, bool console)
 {
 	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+	char name[30];
 
 	msm_port->dbg = debugfs_create_dir(dev_name(uport->dev), NULL);
 	if (IS_ERR_OR_NULL(msm_port->dbg))
 		dev_err(uport->dev, "Failed to create dbg dir\n");
 
 	if (!console) {
-		char name[30];
-
 		memset(name, 0, sizeof(name));
 		if (!msm_port->ipc_log_rx) {
 			scnprintf(name, sizeof(name), "%s%s",
@@ -2313,6 +2449,16 @@ static void msm_geni_serial_debug_init(struct uart_port *uport, bool console)
 			if (!msm_port->ipc_log_misc)
 				dev_info(uport->dev, "Err in Misc IPC Log\n");
 		}
+	} else {
+		memset(name, 0, sizeof(name));
+		if (!msm_port->console_log) {
+			scnprintf(name, sizeof(name), "%s%s",
+					dev_name(uport->dev), "_console");
+			msm_port->console_log = ipc_log_context_create(
+					IPC_LOG_MISC_PAGES, name, 0);
+			if (!msm_port->console_log)
+				dev_info(uport->dev, "Err in Console IPC Log\n");
+		}
 	}
 }
 
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 828f114..2774af8 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -232,7 +232,7 @@ static inline void sprd_rx(struct uart_port *port)
 
 		if (lsr & (SPRD_LSR_BI | SPRD_LSR_PE |
 			SPRD_LSR_FE | SPRD_LSR_OE))
-			if (handle_lsr_errors(port, &lsr, &flag))
+			if (handle_lsr_errors(port, &flag, &lsr))
 				continue;
 		if (uart_handle_sysrq_char(port, ch))
 			continue;
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 98d3ead..8df3058 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -837,7 +837,8 @@ static int __init ulite_init(void)
 static void __exit ulite_exit(void)
 {
 	platform_driver_unregister(&ulite_platform_driver);
-	uart_unregister_driver(&ulite_uart_driver);
+	if (ulite_uart_driver.state)
+		uart_unregister_driver(&ulite_uart_driver);
 }
 
 module_init(ulite_init);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index ac8025c..b4838ab 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1460,6 +1460,10 @@ static void release_one_tty(struct work_struct *work)
 
 	put_pid(tty->pgrp);
 	put_pid(tty->session);
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+	if (tty->echo_delayed_work.work.func)
+		cancel_delayed_work_sync(&tty->echo_delayed_work);
+#endif
 	free_tty_struct(tty);
 }
 
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 4d147f89..7c7217a 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -840,8 +840,13 @@ int tty_ldisc_init(struct tty_struct *tty)
  */
 void tty_ldisc_deinit(struct tty_struct *tty)
 {
-	if (tty->ldisc)
+	if (tty->ldisc) {
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+		if (tty->echo_delayed_work.work.func)
+			cancel_delayed_work_sync(&tty->echo_delayed_work);
+#endif
 		tty_ldisc_put(tty->ldisc);
+	}
 	tty->ldisc = NULL;
 }
 
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index cc7c856..169ccfa 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -708,12 +708,6 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
 	struct ci_hdrc    *ci = container_of(gadget, struct ci_hdrc, gadget);
 	unsigned long flags;
 
-	spin_lock_irqsave(&ci->lock, flags);
-	ci->gadget.speed = USB_SPEED_UNKNOWN;
-	ci->remote_wakeup = 0;
-	ci->suspended = 0;
-	spin_unlock_irqrestore(&ci->lock, flags);
-
 	/* flush all endpoints */
 	gadget_for_each_ep(ep, gadget) {
 		usb_ep_fifo_flush(ep);
@@ -731,6 +725,12 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
 		ci->status = NULL;
 	}
 
+	spin_lock_irqsave(&ci->lock, flags);
+	ci->gadget.speed = USB_SPEED_UNKNOWN;
+	ci->remote_wakeup = 0;
+	ci->suspended = 0;
+	spin_unlock_irqrestore(&ci->lock, flags);
+
 	return 0;
 }
 
@@ -1302,6 +1302,10 @@ static int ep_disable(struct usb_ep *ep)
 		return -EBUSY;
 
 	spin_lock_irqsave(hwep->lock, flags);
+	if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
+		spin_unlock_irqrestore(hwep->lock, flags);
+		return 0;
+	}
 
 	/* only internal SW should disable ctrl endpts */
 
@@ -1391,6 +1395,10 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
 		return -EINVAL;
 
 	spin_lock_irqsave(hwep->lock, flags);
+	if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
+		spin_unlock_irqrestore(hwep->lock, flags);
+		return 0;
+	}
 	retval = _ep_queue(ep, req, gfp_flags);
 	spin_unlock_irqrestore(hwep->lock, flags);
 	return retval;
@@ -1414,8 +1422,8 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
 		return -EINVAL;
 
 	spin_lock_irqsave(hwep->lock, flags);
-
-	hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
+	if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
+		hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
 
 	list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
 		dma_pool_free(hwep->td_pool, node->ptr, node->dma);
@@ -1486,6 +1494,10 @@ static void ep_fifo_flush(struct usb_ep *ep)
 	}
 
 	spin_lock_irqsave(hwep->lock, flags);
+	if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
+		spin_unlock_irqrestore(hwep->lock, flags);
+		return;
+	}
 
 	hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
 
@@ -1558,6 +1570,10 @@ static int ci_udc_wakeup(struct usb_gadget *_gadget)
 	int ret = 0;
 
 	spin_lock_irqsave(&ci->lock, flags);
+	if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
+		spin_unlock_irqrestore(&ci->lock, flags);
+		return 0;
+	}
 	if (!ci->remote_wakeup) {
 		ret = -EOPNOTSUPP;
 		goto out;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 5b442bc..59675cc 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1333,10 +1333,6 @@ static int acm_probe(struct usb_interface *intf,
 	tty_port_init(&acm->port);
 	acm->port.ops = &acm_port_ops;
 
-	minor = acm_alloc_minor(acm);
-	if (minor < 0)
-		goto alloc_fail1;
-
 	ctrlsize = usb_endpoint_maxp(epctrl);
 	readsize = usb_endpoint_maxp(epread) *
 				(quirks == SINGLE_RX_URB ? 1 : 2);
@@ -1344,6 +1340,13 @@ static int acm_probe(struct usb_interface *intf,
 	acm->writesize = usb_endpoint_maxp(epwrite) * 20;
 	acm->control = control_interface;
 	acm->data = data_interface;
+
+	usb_get_intf(acm->control); /* undone in destruct() */
+
+	minor = acm_alloc_minor(acm);
+	if (minor < 0)
+		goto alloc_fail1;
+
 	acm->minor = minor;
 	acm->dev = usb_dev;
 	if (h.usb_cdc_acm_descriptor)
@@ -1490,7 +1493,6 @@ static int acm_probe(struct usb_interface *intf,
 	usb_driver_claim_interface(&acm_driver, data_interface, acm);
 	usb_set_intfdata(data_interface, acm);
 
-	usb_get_intf(control_interface);
 	tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
 			&control_interface->dev);
 	if (IS_ERR(tty_dev)) {
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index bec581f..b8a1fde 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -587,10 +587,20 @@ static int wdm_flush(struct file *file, fl_owner_t id)
 {
 	struct wdm_device *desc = file->private_data;
 
-	wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags));
+	wait_event(desc->wait,
+			/*
+			 * needs both flags. We cannot do with one
+			 * because resetting it would cause a race
+			 * with write() yet we need to signal
+			 * a disconnect
+			 */
+			!test_bit(WDM_IN_USE, &desc->flags) ||
+			test_bit(WDM_DISCONNECTING, &desc->flags));
 
 	/* cannot dereference desc->intf if WDM_DISCONNECTING */
-	if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags))
+	if (test_bit(WDM_DISCONNECTING, &desc->flags))
+		return -ENODEV;
+	if (desc->werr < 0)
 		dev_err(&desc->intf->dev, "Error in flush path: %d\n",
 			desc->werr);
 
@@ -974,8 +984,6 @@ static void wdm_disconnect(struct usb_interface *intf)
 	spin_lock_irqsave(&desc->iuspin, flags);
 	set_bit(WDM_DISCONNECTING, &desc->flags);
 	set_bit(WDM_READ, &desc->flags);
-	/* to terminate pending flushes */
-	clear_bit(WDM_IN_USE, &desc->flags);
 	spin_unlock_irqrestore(&desc->iuspin, flags);
 	wake_up_all(&desc->wait);
 	mutex_lock(&desc->rlock);
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 407a7a6..4a80103 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -445,6 +445,7 @@ static void usblp_cleanup(struct usblp *usblp)
 	kfree(usblp->readbuf);
 	kfree(usblp->device_id_string);
 	kfree(usblp->statusbuf);
+	usb_put_intf(usblp->intf);
 	kfree(usblp);
 }
 
@@ -461,10 +462,12 @@ static int usblp_release(struct inode *inode, struct file *file)
 
 	mutex_lock(&usblp_mutex);
 	usblp->used = 0;
-	if (usblp->present) {
+	if (usblp->present)
 		usblp_unlink_urbs(usblp);
-		usb_autopm_put_interface(usblp->intf);
-	} else		/* finish cleanup from disconnect */
+
+	usb_autopm_put_interface(usblp->intf);
+
+	if (!usblp->present)		/* finish cleanup from disconnect */
 		usblp_cleanup(usblp);
 	mutex_unlock(&usblp_mutex);
 	return 0;
@@ -1105,7 +1108,7 @@ static int usblp_probe(struct usb_interface *intf,
 	init_waitqueue_head(&usblp->wwait);
 	init_usb_anchor(&usblp->urbs);
 	usblp->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
-	usblp->intf = intf;
+	usblp->intf = usb_get_intf(intf);
 
 	/* Malloc device ID string buffer to the largest expected length,
 	 * since we can re-query it on an ioctl and a dynamic string
@@ -1194,6 +1197,7 @@ static int usblp_probe(struct usb_interface *intf,
 	kfree(usblp->readbuf);
 	kfree(usblp->statusbuf);
 	kfree(usblp->device_id_string);
+	usb_put_intf(usblp->intf);
 	kfree(usblp);
 abort_ret:
 	return retval;
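The usb_get_intf()/usb_put_intf() additions above pin the interface for the whole lifetime of the usblp structure, so an open file descriptor that survives disconnect() can still dereference usblp->intf safely. A minimal sketch of the pairing (the example_* names are placeholders, not functions from this file):

	/* Sketch only: take the reference where the pointer is stored ... */
	static int example_bind(struct usblp *usblp, struct usb_interface *intf)
	{
		usblp->intf = usb_get_intf(intf);	/* +1, undone in cleanup */
		return 0;
	}

	/* ... and drop it in the one place the structure is finally freed */
	static void example_cleanup(struct usblp *usblp)
	{
		usb_put_intf(usblp->intf);		/* -1, balances example_bind */
		kfree(usblp);
	}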
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 868bc4c..08e6fea 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -928,7 +928,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
 	struct usb_bos_descriptor *bos;
 	struct usb_dev_cap_header *cap;
 	struct usb_ssp_cap_descriptor *ssp_cap;
-	unsigned char *buffer;
+	unsigned char *buffer, *buffer0;
 	int length, total_len, num, i, ssac;
 	__u8 cap_type;
 	int ret;
@@ -973,10 +973,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
 			ret = -ENOMSG;
 		goto err;
 	}
+
+	buffer0 = buffer;
 	total_len -= length;
+	buffer += length;
 
 	for (i = 0; i < num; i++) {
-		buffer += length;
 		cap = (struct usb_dev_cap_header *)buffer;
 
 		if (total_len < sizeof(*cap) || total_len < cap->bLength) {
@@ -990,8 +992,6 @@ int usb_get_bos_descriptor(struct usb_device *dev)
 			break;
 		}
 
-		total_len -= length;
-
 		if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
 			dev_warn(ddev, "descriptor type invalid, skip\n");
 			continue;
@@ -1035,7 +1035,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
 		default:
 			break;
 		}
+
+		total_len -= length;
+		buffer += length;
 	}
+	dev->bos->desc->wTotalLength = cpu_to_le16(buffer - buffer0);
 
 	return 0;
 
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 65de6f7..558890a 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -193,9 +193,10 @@ int usb_register_dev(struct usb_interface *intf,
 		intf->minor = minor;
 		break;
 	}
-	up_write(&minor_rwsem);
-	if (intf->minor < 0)
+	if (intf->minor < 0) {
+		up_write(&minor_rwsem);
 		return -EXFULL;
+	}
 
 	/* create a usb class device for this usb interface */
 	snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -203,12 +204,11 @@ int usb_register_dev(struct usb_interface *intf,
 				      MKDEV(USB_MAJOR, minor), class_driver,
 				      "%s", kbasename(name));
 	if (IS_ERR(intf->usb_dev)) {
-		down_write(&minor_rwsem);
 		usb_minors[minor] = NULL;
 		intf->minor = -1;
-		up_write(&minor_rwsem);
 		retval = PTR_ERR(intf->usb_dev);
 	}
+	up_write(&minor_rwsem);
 	return retval;
 }
 EXPORT_SYMBOL_GPL(usb_register_dev);
@@ -234,12 +234,12 @@ void usb_deregister_dev(struct usb_interface *intf,
 		return;
 
 	dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
+	device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
 
 	down_write(&minor_rwsem);
 	usb_minors[intf->minor] = NULL;
 	up_write(&minor_rwsem);
 
-	device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
 	intf->usb_dev = NULL;
 	intf->minor = -1;
 	destroy_usb_class();
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 0343246..7537681 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -216,17 +216,18 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 		/* EHCI, OHCI */
 		hcd->rsrc_start = pci_resource_start(dev, 0);
 		hcd->rsrc_len = pci_resource_len(dev, 0);
-		if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
-				driver->description)) {
+		if (!devm_request_mem_region(&dev->dev, hcd->rsrc_start,
+				hcd->rsrc_len, driver->description)) {
 			dev_dbg(&dev->dev, "controller already in use\n");
 			retval = -EBUSY;
 			goto put_hcd;
 		}
-		hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+		hcd->regs = devm_ioremap_nocache(&dev->dev, hcd->rsrc_start,
+				hcd->rsrc_len);
 		if (hcd->regs == NULL) {
 			dev_dbg(&dev->dev, "error mapping memory\n");
 			retval = -EFAULT;
-			goto release_mem_region;
+			goto put_hcd;
 		}
 
 	} else {
@@ -240,8 +241,8 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 
 			hcd->rsrc_start = pci_resource_start(dev, region);
 			hcd->rsrc_len = pci_resource_len(dev, region);
-			if (request_region(hcd->rsrc_start, hcd->rsrc_len,
-					driver->description))
+			if (devm_request_region(&dev->dev, hcd->rsrc_start,
+					hcd->rsrc_len, driver->description))
 				break;
 		}
 		if (region == PCI_ROM_RESOURCE) {
@@ -275,20 +276,13 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	}
 
 	if (retval != 0)
-		goto unmap_registers;
+		goto put_hcd;
 	device_wakeup_enable(hcd->self.controller);
 
 	if (pci_dev_run_wake(dev))
 		pm_runtime_put_noidle(&dev->dev);
 	return retval;
 
-unmap_registers:
-	if (driver->flags & HCD_MEMORY) {
-		iounmap(hcd->regs);
-release_mem_region:
-		release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-	} else
-		release_region(hcd->rsrc_start, hcd->rsrc_len);
 put_hcd:
 	usb_put_hcd(hcd);
 disable_pci:
@@ -347,14 +341,6 @@ void usb_hcd_pci_remove(struct pci_dev *dev)
 		dev_set_drvdata(&dev->dev, NULL);
 		up_read(&companions_rwsem);
 	}
-
-	if (hcd->driver->flags & HCD_MEMORY) {
-		iounmap(hcd->regs);
-		release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-	} else {
-		release_region(hcd->rsrc_start, hcd->rsrc_len);
-	}
-
 	usb_put_hcd(hcd);
 	pci_disable_device(dev);
 }
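The switch to devm_request_mem_region()/devm_ioremap_nocache() above is what lets the unmap_registers/release_mem_region unwind labels disappear: device-managed resources are released automatically when probe fails or the device goes away. A condensed sketch of the shape this gives probe (function name trimmed; error codes as in the hunk):

	/* Sketch of the devres pattern adopted above; not the full probe */
	static int example_map_bar0(struct pci_dev *dev, struct usb_hcd *hcd)
	{
		hcd->rsrc_start = pci_resource_start(dev, 0);
		hcd->rsrc_len = pci_resource_len(dev, 0);

		if (!devm_request_mem_region(&dev->dev, hcd->rsrc_start,
				hcd->rsrc_len, "example"))
			return -EBUSY;		/* released automatically on detach */

		hcd->regs = devm_ioremap_nocache(&dev->dev, hcd->rsrc_start,
				hcd->rsrc_len);
		if (!hcd->regs)
			return -EFAULT;		/* region freed by devres, no label */

		return 0;
	}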
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 4020ce8d..6a76e99 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1494,6 +1494,175 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
 EXPORT_SYMBOL_GPL(usb_set_interface);
 
 /**
+ * usb_set_interface_timeout - make an alternate setting current, with timeout
+ * @dev: the device whose interface is being updated
+ * @interface: the interface being updated
+ * @alternate: the setting being chosen.
+ * @timeout_ms: timeout in ms (non-zero) within which the SET_INTERFACE
+ * control transfer must complete.
+ * Context: !in_interrupt ()
+ *
+ * This is used to enable data transfers on interfaces that may not
+ * be enabled by default.  Not all devices support such configurability.
+ * Only the driver bound to an interface may change its setting.
+ *
+ * Within any given configuration, each interface may have several
+ * alternative settings.  These are often used to control levels of
+ * bandwidth consumption.  For example, the default setting for a high
+ * speed interrupt endpoint may not send more than 64 bytes per microframe,
+ * while interrupt transfers of up to 3KBytes per microframe are legal.
+ * Also, isochronous endpoints may never be part of an
+ * interface's default setting.  To access such bandwidth, alternate
+ * interface settings must be made current.
+ *
+ * Note that in the Linux USB subsystem, bandwidth associated with
+ * an endpoint in a given alternate setting is not reserved until an URB
+ * is submitted that needs that bandwidth.  Some other operating systems
+ * allocate bandwidth early, when a configuration is chosen.
+ *
+ * This call is synchronous, and may not be used in an interrupt context.
+ * Also, drivers must not change altsettings while urbs are scheduled for
+ * endpoints in that interface; all such urbs must first be completed
+ * (perhaps forced by unlinking).
+ *
+ * Return: Zero on success, or else the status code returned by the
+ * underlying usb_control_msg() call.
+ */
+int usb_set_interface_timeout(struct usb_device *dev, int interface,
+	int alternate, unsigned long timeout_ms)
+{
+	struct usb_interface *iface;
+	struct usb_host_interface *alt;
+	struct usb_hcd *hcd = bus_to_hcd(dev->bus);
+	int i, ret, manual = 0;
+	unsigned int epaddr;
+	unsigned int pipe;
+
+	if (dev->state == USB_STATE_SUSPENDED)
+		return -EHOSTUNREACH;
+
+	iface = usb_ifnum_to_if(dev, interface);
+	if (!iface) {
+		dev_dbg(&dev->dev, "selecting invalid interface %d\n",
+			interface);
+		return -EINVAL;
+	}
+	if (iface->unregistering)
+		return -ENODEV;
+
+	alt = usb_altnum_to_altsetting(iface, alternate);
+	if (!alt) {
+		dev_warn(&dev->dev, "selecting invalid altsetting %d\n",
+			 alternate);
+		return -EINVAL;
+	}
+
+	/* Make sure we have enough bandwidth for this alternate interface.
+	 * Remove the current alt setting and add the new alt setting.
+	 */
+	mutex_lock(hcd->bandwidth_mutex);
+	/* Disable LPM, and re-enable it once the new alt setting is installed,
+	 * so that the xHCI driver can recalculate the U1/U2 timeouts.
+	 */
+	if (usb_disable_lpm(dev)) {
+		dev_err(&iface->dev, "%s Failed to disable LPM\n", __func__);
+		mutex_unlock(hcd->bandwidth_mutex);
+		return -ENOMEM;
+	}
+	/* Changing alt-setting also frees any allocated streams */
+	for (i = 0; i < iface->cur_altsetting->desc.bNumEndpoints; i++)
+		iface->cur_altsetting->endpoint[i].streams = 0;
+
+	ret = usb_hcd_alloc_bandwidth(dev, NULL, iface->cur_altsetting, alt);
+	if (ret < 0) {
+		dev_info(&dev->dev, "Not enough bandwidth for altsetting %d\n",
+				alternate);
+		usb_enable_lpm(dev);
+		mutex_unlock(hcd->bandwidth_mutex);
+		return ret;
+	}
+
+	if (dev->quirks & USB_QUIRK_NO_SET_INTF)
+		ret = -EPIPE;
+	else
+		ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+				   USB_REQ_SET_INTERFACE, USB_RECIP_INTERFACE,
+				   alternate, interface, NULL, 0, timeout_ms);
+
+	/* 9.4.10 says devices don't need this and are free to STALL the
+	 * request if the interface only has one alternate setting.
+	 */
+	if (ret == -EPIPE && iface->num_altsetting == 1) {
+		dev_dbg(&dev->dev,
+			"manual set_interface for iface %d, alt %d\n",
+			interface, alternate);
+		manual = 1;
+	} else if (ret < 0) {
+		/* Re-instate the old alt setting */
+		usb_hcd_alloc_bandwidth(dev, NULL, alt, iface->cur_altsetting);
+		usb_enable_lpm(dev);
+		mutex_unlock(hcd->bandwidth_mutex);
+		return ret;
+	}
+	mutex_unlock(hcd->bandwidth_mutex);
+
+	/* FIXME drivers shouldn't need to replicate/bugfix the logic here
+	 * when they implement async or easily-killable versions of this or
+	 * other "should-be-internal" functions (like clear_halt).
+	 * should hcd+usbcore postprocess control requests?
+	 */
+
+	/* prevent submissions using previous endpoint settings */
+	if (iface->cur_altsetting != alt) {
+		remove_intf_ep_devs(iface);
+		usb_remove_sysfs_intf_files(iface);
+	}
+	usb_disable_interface(dev, iface, true);
+
+	iface->cur_altsetting = alt;
+
+	/* Now that the interface is installed, re-enable LPM. */
+	usb_unlocked_enable_lpm(dev);
+
+	/* If the interface only has one altsetting and the device didn't
+	 * accept the request, we attempt to carry out the equivalent action
+	 * by manually clearing the HALT feature for each endpoint in the
+	 * new altsetting.
+	 */
+	if (manual) {
+		for (i = 0; i < alt->desc.bNumEndpoints; i++) {
+			epaddr = alt->endpoint[i].desc.bEndpointAddress;
+			pipe = __create_pipe(dev,
+					USB_ENDPOINT_NUMBER_MASK & epaddr) |
+					(usb_endpoint_out(epaddr) ?
+					USB_DIR_OUT : USB_DIR_IN);
+
+			usb_clear_halt(dev, pipe);
+		}
+	}
+
+	/* 9.1.1.5: reset toggles for all endpoints in the new altsetting
+	 *
+	 * Note:
+	 * Although EP0 is always present in all interfaces/AS, the list of
+	 * endpoints from the descriptor does not contain EP0. Due to its
+	 * omnipresence one might expect EP0 being considered "affected" by
+	 * any SetInterface request and hence assume toggles need to be reset.
+	 * However, EP0 toggles are re-synced for every individual transfer
+	 * during the SETUP stage - hence EP0 toggles are "don't care" here.
+	 * (Likewise, EP0 never "halts" on well designed devices.)
+	 */
+	usb_enable_interface(dev, iface, true);
+	if (device_is_registered(&iface->dev)) {
+		usb_create_sysfs_intf_files(iface);
+		create_intf_ep_devs(iface);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(usb_set_interface_timeout);
+
+/**
  * usb_reset_configuration - lightweight device reset
  * @dev: the device whose configuration is being reset
  *
@@ -2211,14 +2380,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
 				(struct usb_cdc_dmm_desc *)buffer;
 			break;
 		case USB_CDC_MDLM_TYPE:
-			if (elength < sizeof(struct usb_cdc_mdlm_desc *))
+			if (elength < sizeof(struct usb_cdc_mdlm_desc))
 				goto next_desc;
 			if (desc)
 				return -EINVAL;
 			desc = (struct usb_cdc_mdlm_desc *)buffer;
 			break;
 		case USB_CDC_MDLM_DETAIL_TYPE:
-			if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
+			if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
 				goto next_desc;
 			if (detail)
 				return -EINVAL;
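
The two MDLM hunks above fix a classic C pitfall: sizeof(struct usb_cdc_mdlm_desc *) is the size of a pointer, not of the descriptor, so truncated descriptors slipped past the length check. A minimal userspace sketch of the bug and the fix (the struct here is an illustrative stand-in, not the real usb_cdc_mdlm_desc):

#include <stdio.h>

struct mdlm_desc {		/* stand-in for usb_cdc_mdlm_desc */
	unsigned char  bLength;
	unsigned char  bDescriptorType;
	unsigned short bcdVersion;
	unsigned char  bGUID[16];
};

int main(void)
{
	size_t elength = 8;	/* a truncated descriptor on the wire */

	/* Buggy check: sizeof a *pointer* is 8 on 64-bit, so an
	 * 8-byte descriptor wrongly passes validation. */
	printf("buggy: %s\n",
	       elength < sizeof(struct mdlm_desc *) ? "reject" : "accept");

	/* Fixed check: compare against the struct size (20 bytes). */
	printf("fixed: %s\n",
	       elength < sizeof(struct mdlm_desc) ? "reject" : "accept");
	return 0;
}
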
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 1b0f981..8a0f116 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -355,8 +355,10 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc)
 	struct dwc3_event_buffer	*evt;
 
 	evt = dwc->ev_buf;
-	if (evt)
+	if (evt) {
 		dwc3_free_one_event_buffer(dwc, evt);
+		dwc->ev_buf = NULL;
+	}
 
 	/* free GSI related event buffers */
 	dwc3_notify_event(dwc, DWC3_GSI_EVT_BUF_FREE, 0);
@@ -1404,6 +1406,7 @@ static int dwc3_probe(struct platform_device *pdev)
 
 	void __iomem		*regs;
 	int			irq;
+	char			dma_ipc_log_ctx_name[40];
 
 	if (count >= DWC_CTRL_COUNT) {
 		dev_err(dev, "Err dwc instance %d >= %d available\n",
@@ -1543,6 +1546,13 @@ static int dwc3_probe(struct platform_device *pdev)
 	if (!dwc->dwc_ipc_log_ctxt)
 		dev_err(dwc->dev, "Error getting ipc_log_ctxt\n");
 
+	snprintf(dma_ipc_log_ctx_name, sizeof(dma_ipc_log_ctx_name),
+					"%s.ep_events", dev_name(dwc->dev));
+	dwc->dwc_dma_ipc_log_ctxt = ipc_log_context_create(NUM_LOG_PAGES,
+						dma_ipc_log_ctx_name, 0);
+	if (!dwc->dwc_dma_ipc_log_ctxt)
+		dev_err(dwc->dev, "Error getting ipc_log_ctxt for ep_events\n");
+
 	dwc3_instance[count] = dwc;
 	dwc->index = count;
 	count++;
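
The dwc3_free_event_buffers() hunk above pairs the free with clearing dwc->ev_buf, so a repeated call cannot double-free or dereference a dangling pointer. A small hedged sketch of that free-and-NULL idiom:

#include <stdlib.h>

struct ctx { void *ev_buf; };

/* Freeing through the owning struct and clearing the field makes the
 * operation idempotent: a second call is a harmless no-op. */
static void free_event_buffer(struct ctx *c)
{
	if (c->ev_buf) {
		free(c->ev_buf);
		c->ev_buf = NULL;	/* mirrors dwc->ev_buf = NULL above */
	}
}

int main(void)
{
	struct ctx c = { .ev_buf = malloc(64) };

	free_event_buffer(&c);
	free_event_buffer(&c);	/* safe: no double free */
	return 0;
}
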
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index d2d3e16..9dcbfc5 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -37,6 +37,7 @@
 #define DWC3_EP0_SETUP_SIZE	512
 #define DWC3_ENDPOINTS_NUM	32
 #define DWC3_XHCI_RESOURCES_NUM	2
+#define MAX_ERROR_RECOVERY_TRIES	3
 
 #define DWC3_SCRATCHBUF_SIZE	4096	/* each buffer is assumed to be 4KiB */
 #define DWC3_EVENT_BUFFERS_SIZE	4096
@@ -1317,6 +1318,7 @@ struct dwc3 {
 
 	unsigned int		index;
 	void			*dwc_ipc_log_ctxt;
+	void			*dwc_dma_ipc_log_ctxt;
 	struct dwc3_gadget_events	dbg_gadget_events;
 	int			tx_fifo_size;
 	int			last_fifo_depth;
@@ -1342,6 +1344,9 @@ struct dwc3 {
 	 * connected devices on PM resume.
 	 */
 	bool			host_poweroff_in_pm_suspend;
+	int			retries_on_error;
+	/* If true, GDSC collapse will happen in HOST mode bus suspend */
+	bool			gdsc_collapse_in_host_suspend;
 };
 
 #define INCRX_BURST_MODE 0
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index a03602d..f1659b7 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -36,6 +36,18 @@
 #define dbg_setup(ep_num, req) \
 	dwc3_dbg_setup(dwc, ep_num, req)
 
+#define dbg_ep_queue(ep_num, req) \
+	dwc3_dbg_dma_queue(dwc, ep_num, req)
+
+#define dbg_ep_dequeue(ep_num, req) \
+	dwc3_dbg_dma_dequeue(dwc, ep_num, req)
+
+#define dbg_ep_unmap(ep_num, req) \
+	dwc3_dbg_dma_unmap(dwc, ep_num, req)
+
+#define dbg_ep_map(ep_num, req) \
+	dwc3_dbg_dma_map(dwc, ep_num, req)
+
 #define dbg_log_string(fmt, ...) \
 	ipc_log_string(dwc->dwc_ipc_log_ctxt,\
 			"%s: " fmt, __func__, ##__VA_ARGS__)
@@ -660,6 +672,14 @@ void dwc3_dbg_setup(struct dwc3 *dwc, u8 ep_num,
 		const struct usb_ctrlrequest *req);
 void dwc3_dbg_print_reg(struct dwc3 *dwc,
 		const char *name, int reg);
+void dwc3_dbg_dma_queue(struct dwc3 *dwc, u8 ep_num,
+			struct dwc3_request *req);
+void dwc3_dbg_dma_dequeue(struct dwc3 *dwc, u8 ep_num,
+			struct dwc3_request *req);
+void dwc3_dbg_dma_map(struct dwc3 *dwc, u8 ep_num,
+			struct dwc3_request *req);
+void dwc3_dbg_dma_unmap(struct dwc3 *dwc, u8 ep_num,
+			struct dwc3_request *req);
 
 #ifdef CONFIG_DEBUG_FS
 extern void dwc3_debugfs_init(struct dwc3 *);
diff --git a/drivers/usb/dwc3/debug_ipc.c b/drivers/usb/dwc3/debug_ipc.c
index b694246..98f27a5 100644
--- a/drivers/usb/dwc3/debug_ipc.c
+++ b/drivers/usb/dwc3/debug_ipc.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
  */
 
 #include "debug.h"
@@ -136,3 +136,47 @@ void dwc3_dbg_print_reg(struct dwc3 *dwc, const char *name, int reg)
 
 	ipc_log_string(dwc->dwc_ipc_log_ctxt, "%s = 0x%08x", name, reg);
 }
+
+void dwc3_dbg_dma_unmap(struct dwc3 *dwc, u8 ep_num, struct dwc3_request *req)
+{
+	if (ep_num < 2)
+		return;
+
+	ipc_log_string(dwc->dwc_dma_ipc_log_ctxt,
+		"%02X-%-3.3s %-25.25s 0x%pK 0x%lx %u 0x%lx %d", ep_num >> 1,
+		ep_num & 1 ? "IN":"OUT", "UNMAP", &req->request,
+		req->request.dma, req->request.length, req->trb_dma,
+		req->trb->ctrl & DWC3_TRB_CTRL_HWO);
+}
+
+void dwc3_dbg_dma_map(struct dwc3 *dwc, u8 ep_num, struct dwc3_request *req)
+{
+	if (ep_num < 2)
+		return;
+
+	ipc_log_string(dwc->dwc_dma_ipc_log_ctxt,
+		"%02X-%-3.3s %-25.25s 0x%pK 0x%lx %u 0x%lx", ep_num >> 1,
+		ep_num & 1 ? "IN":"OUT", "MAP", &req->request, req->request.dma,
+		req->request.length, req->trb_dma);
+}
+
+void dwc3_dbg_dma_dequeue(struct dwc3 *dwc, u8 ep_num, struct dwc3_request *req)
+{
+	if (ep_num < 2)
+		return;
+
+	ipc_log_string(dwc->dwc_dma_ipc_log_ctxt,
+		"%02X-%-3.3s %-25.25s 0x%pK 0x%lx 0x%lx", ep_num >> 1,
+		ep_num & 1 ? "IN":"OUT", "DEQUEUE", &req->request,
+		req->request.dma, req->trb_dma);
+}
+
+void dwc3_dbg_dma_queue(struct dwc3 *dwc, u8 ep_num, struct dwc3_request *req)
+{
+	if (ep_num < 2)
+		return;
+
+	ipc_log_string(dwc->dwc_dma_ipc_log_ctxt,
+		"%02X-%-3.3s %-25.25s 0x%pK", ep_num >> 1,
+		ep_num & 1 ? "IN":"OUT", "QUEUE", &req->request);
+}
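
In these helpers dwc3 encodes the direction in bit 0 of the physical endpoint number, so ep_num >> 1 is the logical endpoint and ep_num & 1 selects IN; endpoints 0 and 1 (the control endpoint pair) are skipped. A hedged userspace sketch of the decoding used by the format strings above:

#include <stdio.h>

/* dwc3 physical endpoint numbering: bit 0 = direction, rest = ep index */
static void log_ep(unsigned int ep_num)
{
	if (ep_num < 2)		/* ep0 OUT/IN is handled elsewhere */
		return;
	printf("%02X-%-3.3s\n", ep_num >> 1, (ep_num & 1) ? "IN" : "OUT");
}

int main(void)
{
	log_ep(2);	/* prints "01-OUT" */
	log_ep(5);	/* prints "02-IN"  */
	return 0;
}
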
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index b678848..9e47b00 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -602,6 +602,11 @@ static int dwc3_tx_request_queue_show(struct seq_file *s, void *unused)
 	unsigned long		flags;
 	u32			val;
 
+	if (atomic_read(&dwc->in_lpm)) {
+		seq_puts(s, "USB device is powered off\n");
+		return 0;
+	}
+
 	spin_lock_irqsave(&dwc->lock, flags);
 	val = dwc3_core_fifo_space(dep, DWC3_TXREQQ);
 	seq_printf(s, "%u\n", val);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 4259c5d..d0644d3 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -345,6 +345,7 @@ struct dwc3_msm {
 
 	u64			dummy_gsi_db;
 	dma_addr_t		dummy_gsi_db_dma;
+	int			orientation_override;
 };
 
 #define USB_HSPHY_3P3_VOL_MIN		3050000 /* uV */
@@ -1947,8 +1948,13 @@ static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event,
 		reg |= DWC3_GCTL_CORESOFTRESET;
 		dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);
 
-		/* restart USB which performs full reset and reconnect */
-		schedule_work(&mdwc->restart_usb_work);
+		/*
+		 * If the core could not recover after MAX_ERROR_RECOVERY_TRIES,
+		 * skip the restart-USB work and keep the core in soft-reset
+		 * state.
+		 */
+		if (dwc->retries_on_error < MAX_ERROR_RECOVERY_TRIES)
+			schedule_work(&mdwc->restart_usb_work);
 		break;
 	case DWC3_CONTROLLER_POST_RESET_EVENT:
 		dev_dbg(mdwc->dev,
@@ -2541,7 +2547,8 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool force_power_collapse)
 
 	/* Perform controller power collapse */
 	if (!(mdwc->in_host_mode || mdwc->in_device_mode) ||
-	      mdwc->in_restart || force_power_collapse) {
+	      mdwc->in_restart || force_power_collapse ||
+	      (dwc->gdsc_collapse_in_host_suspend && mdwc->in_host_mode)) {
 		mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
 		dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
 		dwc3_msm_config_gdsc(mdwc, 0);
@@ -2603,6 +2610,13 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
 
 	dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
 
+	/*
+	 * If h/w exited LPM without any events, ensure
+	 * h/w is reset before processing any new events.
+	 */
+	if (!mdwc->vbus_active && mdwc->id_state)
+		set_bit(WAIT_FOR_LPM, &mdwc->inputs);
+
 	mutex_lock(&mdwc->suspend_resume_mutex);
 	if (!atomic_read(&dwc->in_lpm)) {
 		dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
@@ -2680,9 +2694,11 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
 	if (dwc->maximum_speed >= USB_SPEED_SUPER &&
 			mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
 		mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
-		if (mdwc->typec_orientation == ORIENTATION_CC1)
+		if (mdwc->orientation_override)
+			mdwc->ss_phy->flags |= mdwc->orientation_override;
+		else if (mdwc->typec_orientation == ORIENTATION_CC1)
 			mdwc->ss_phy->flags |= PHY_LANE_A;
-		if (mdwc->typec_orientation == ORIENTATION_CC2)
+		else if (mdwc->typec_orientation == ORIENTATION_CC2)
 			mdwc->ss_phy->flags |= PHY_LANE_B;
 		usb_phy_set_suspend(mdwc->ss_phy, 0);
 		mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
@@ -2917,12 +2933,6 @@ static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
 		irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
 	}
 
-	/* Clear L2 exit */
-	if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
-		irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
-		irq_stat |= PWR_EVNT_LPM_OUT_L2_MASK;
-	}
-
 	/* Handle exit from L1 events */
 	if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
 		dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
@@ -2983,6 +2993,7 @@ static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
 }
 
 static void dwc3_otg_sm_work(struct work_struct *w);
+static int get_psy_type(struct dwc3_msm *mdwc);
 
 static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
 {
@@ -3149,6 +3160,8 @@ static void check_for_sdp_connection(struct work_struct *w)
 	}
 }
 
+#define DP_PULSE_WIDTH_MSEC 200
+
 static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
 	unsigned long event, void *ptr)
 {
@@ -3172,6 +3185,13 @@ static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
 	dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
 
 	mdwc->vbus_active = event;
+
+	if (get_psy_type(mdwc) == POWER_SUPPLY_TYPE_USB_CDP &&
+			mdwc->vbus_active) {
+		dev_dbg(mdwc->dev, "Connected to CDP, pull DP up\n");
+		usb_phy_drive_dp_pulse(mdwc->hs_phy, DP_PULSE_WIDTH_MSEC);
+	}
+
 	if (dwc3_is_otg_or_drd(dwc) && !mdwc->in_restart)
 		queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
 
@@ -3247,6 +3267,36 @@ static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
 	return 0;
 }
 
+static ssize_t orientation_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	if (mdwc->orientation_override == PHY_LANE_A)
+		return scnprintf(buf, PAGE_SIZE, "A\n");
+	if (mdwc->orientation_override == PHY_LANE_B)
+		return scnprintf(buf, PAGE_SIZE, "B\n");
+
+	return scnprintf(buf, PAGE_SIZE, "none\n");
+}
+
+static ssize_t orientation_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	if (sysfs_streq(buf, "A"))
+		mdwc->orientation_override = PHY_LANE_A;
+	else if (sysfs_streq(buf, "B"))
+		mdwc->orientation_override = PHY_LANE_B;
+	else
+		mdwc->orientation_override = ORIENTATION_NONE;
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(orientation);
+
 static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {
@@ -3475,12 +3525,13 @@ static int dwc3_msm_probe(struct platform_device *pdev)
 	}
 
 	/*
-	 * Create freezable workqueue for sm_work so that it gets scheduled only
-	 * after pm_resume has happened completely. This helps in avoiding race
-	 * conditions between xhci_plat_resume and xhci_runtime_resume; and also
-	 * between hcd disconnect and xhci_resume.
+	 * Create an ordered freezable workqueue for sm_work so that it gets
+	 * scheduled only after pm_resume has happened completely. This helps
+	 * in avoiding race conditions between xhci_plat_resume and
+	 * xhci_runtime_resume and also between hcd disconnect and xhci_resume.
 	 */
-	mdwc->sm_usb_wq = create_freezable_workqueue("k_sm_usb");
+	mdwc->sm_usb_wq = alloc_ordered_workqueue("k_sm_usb",
+						WQ_FREEZABLE | WQ_MEM_RECLAIM);
 	if (!mdwc->sm_usb_wq) {
 		destroy_workqueue(mdwc->dwc3_wq);
 		return -ENOMEM;
@@ -3727,6 +3778,10 @@ static int dwc3_msm_probe(struct platform_device *pdev)
 	if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
 		pm_runtime_get_noresume(mdwc->dev);
 
+	if (of_property_read_bool(node,
+				"qcom,gdsc-collapse-in-host-suspend"))
+		dwc->gdsc_collapse_in_host_suspend = true;
+
 	ret = of_property_read_u32(node, "qcom,pm-qos-latency",
 				&mdwc->pm_qos_latency);
 	if (ret) {
@@ -3795,12 +3850,14 @@ static int dwc3_msm_probe(struct platform_device *pdev)
 			/* fall through */
 		default:
 			mdwc->vbus_active = true;
+			dwc->vbus_active = true;
 			break;
 		}
 
 		dwc3_ext_event_notify(mdwc);
 	}
 
+	device_create_file(&pdev->dev, &dev_attr_orientation);
 	device_create_file(&pdev->dev, &dev_attr_mode);
 	device_create_file(&pdev->dev, &dev_attr_speed);
 	device_create_file(&pdev->dev, &dev_attr_usb_compliance_mode);
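
The new orientation attribute is a standard DEVICE_ATTR_RW: sysfs_streq() tolerates the trailing newline that echo appends, and anything unrecognized clears the override. A hedged userspace sketch of the matching logic, with a simplified stand-in for the kernel's sysfs_streq():

#include <stdio.h>
#include <string.h>

/* simplified stand-in for the kernel helper: equal up to an optional
 * trailing newline, as produced by `echo A > orientation` */
static int sysfs_streq(const char *s1, const char *s2)
{
	size_t n = strlen(s2);

	return strncmp(s1, s2, n) == 0 &&
	       (s1[n] == '\0' || (s1[n] == '\n' && s1[n + 1] == '\0'));
}

int main(void)
{
	const char *buf = "A\n";	/* what sysfs hands the store() */

	if (sysfs_streq(buf, "A"))
		puts("override: PHY_LANE_A");
	else if (sysfs_streq(buf, "B"))
		puts("override: PHY_LANE_B");
	else
		puts("override: none");
	return 0;
}
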
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 64764a0..ac89af3 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -206,7 +206,8 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
 	u32				reg;
 
 	spin_lock_irqsave(&dwc->lock, flags);
-	if (!dep->endpoint.desc || !dwc->pullups_connected) {
+	if (!dep->endpoint.desc || !dwc->softconnect ||
+		!dwc->vbus_active) {
 		dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
 				dep->name);
 		ret = -ESHUTDOWN;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 413a44e..b843d11 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -225,8 +225,8 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc, struct dwc3_ep *dep)
 			|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
 		mult = 3;
 
-	if ((dep->endpoint.maxburst > 2) &&
-			dep->endpoint.ep_type == EP_TYPE_GSI
+	if ((dep->endpoint.maxburst > 6) &&
+			usb_endpoint_xfer_bulk(dep->endpoint.desc)
 			&& dwc3_is_usb31(dwc))
 		mult = 6;
 
@@ -289,9 +289,11 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
 	if (req->request.status == -EINPROGRESS)
 		req->request.status = status;
 
-	if (req->trb)
+	if (req->trb) {
+		dbg_ep_unmap(dep->number, req);
 		usb_gadget_unmap_request_by_dev(dwc->sysdev,
 				&req->request, req->direction);
+	}
 
 	req->trb = NULL;
 	trace_dwc3_gadget_giveback(req);
@@ -812,19 +814,6 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
 	dbg_log_string("START for %s(%d)", dep->name, dep->number);
 	dwc3_stop_active_transfer(dwc, dep->number, true);
 
-	/* - giveback all requests to gadget driver */
-	while (!list_empty(&dep->started_list)) {
-		req = next_request(&dep->started_list);
-
-		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
-	}
-
-	while (!list_empty(&dep->pending_list)) {
-		req = next_request(&dep->pending_list);
-
-		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
-	}
-
 	if (dep->number == 1 && dwc->ep0state != EP0_SETUP_PHASE) {
 		unsigned int dir;
 
@@ -839,6 +828,19 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
 		dwc->eps[1]->trb_enqueue = 0;
 	}
 
+	/* - giveback all requests to gadget driver */
+	while (!list_empty(&dep->started_list)) {
+		req = next_request(&dep->started_list);
+		if (req)
+			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+	}
+
+	while (!list_empty(&dep->pending_list)) {
+		req = next_request(&dep->pending_list);
+		if (req)
+			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+	}
+
 	dbg_log_string("DONE for %s(%d)", dep->name, dep->number);
 }
 
@@ -897,8 +899,6 @@ static void dwc3_stop_active_transfers_to_halt(struct dwc3 *dwc)
 		}
 	}
 
-	dwc3_notify_event(dwc, DWC3_GSI_EVT_BUF_CLEAR, 0);
-
 	dbg_log_string("DONE");
 }
 
@@ -1401,6 +1401,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
 		else
 			dwc3_prepare_one_trb_linear(dep, req);
 
+		dbg_ep_map(dep->number, req);
 		if (!dwc3_calc_trbs_left(dep))
 			return;
 	}
@@ -1545,6 +1546,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
 
 	list_add_tail(&req->list, &dep->pending_list);
 
+	dbg_ep_queue(dep->number, req);
 	/*
 	 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
 	 * wait for a XferNotReady event so we will know what's the current
@@ -1704,7 +1706,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
 	}
 
 out1:
-	dbg_event(dep->number, "DEQUEUE", 0);
+	dbg_ep_dequeue(dep->number, req);
 	/* giveback the request */
 
 	dwc3_gadget_giveback(dep, req, -ECONNRESET);
@@ -2065,6 +2067,38 @@ static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
 	return 0;
 }
 
+/**
+ * dwc3_device_core_soft_reset - Issues device core soft reset
+ * @dwc: pointer to our context structure
+ */
+static int dwc3_device_core_soft_reset(struct dwc3 *dwc)
+{
+	u32             reg;
+	int             retries = 10;
+
+	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+	reg |= DWC3_DCTL_CSFTRST;
+	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+	do {
+		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+		if (!(reg & DWC3_DCTL_CSFTRST))
+			goto done;
+
+		usleep_range(1000, 1100);
+	} while (--retries);
+
+	dev_err(dwc->dev, "%s timed out\n", __func__);
+
+	return -ETIMEDOUT;
+
+done:
+	/* phy sync delay as per data book */
+	msleep(50);
+
+	return 0;
+}
+
 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
 {
 	u32			reg, reg1;
@@ -2103,6 +2137,8 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
 		dwc->pullups_connected = true;
 	} else {
 		dwc3_gadget_disable_irq(dwc);
+
+		dwc->err_evt_seen = false;
 		dwc->pullups_connected = false;
 		__dwc3_gadget_ep_disable(dwc->eps[0]);
 		__dwc3_gadget_ep_disable(dwc->eps[1]);
@@ -2123,14 +2159,25 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
 
 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
 
+	/* Controller is not halted until the events are acknowledged */
+	if (!is_on) {
+		/*
+		 * Clear out any pending events (i.e. End Transfer Command
+		 * Complete).
+		 */
+		reg1 = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
+		reg1 &= DWC3_GEVNTCOUNT_MASK;
+		dbg_log_string("remaining EVNTCOUNT(0)=%d", reg1);
+		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg1);
+		dwc3_notify_event(dwc, DWC3_GSI_EVT_BUF_CLEAR, 0);
+		dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_CLEAR_DB, 0);
+	}
+
 	do {
 		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
 		reg &= DWC3_DSTS_DEVCTRLHLT;
 	} while (--timeout && !(!is_on ^ !reg));
 
-	if (!is_on)
-		dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_CLEAR_DB, 0);
-
 	if (!timeout) {
 		dev_err(dwc->dev, "failed to %s controller\n",
 				is_on ? "start" : "stop");
@@ -2214,6 +2261,10 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 
 	ret = dwc3_gadget_run_stop(dwc, is_on, false);
 	spin_unlock_irqrestore(&dwc->lock, flags);
+	if (!is_on && ret == -ETIMEDOUT) {
+		dev_err(dwc->dev, "%s: Core soft reset...\n", __func__);
+		dwc3_device_core_soft_reset(dwc);
+	}
 	enable_irq(dwc->irq);
 
 	pm_runtime_mark_last_busy(dwc->dev);
@@ -2298,6 +2349,7 @@ static int dwc3_gadget_vbus_session(struct usb_gadget *_gadget, int is_active)
 {
 	struct dwc3 *dwc = gadget_to_dwc(_gadget);
 	unsigned long flags;
+	int ret = 0;
 
 	if (dwc->dr_mode != USB_DR_MODE_OTG && dwc->dr_mode != USB_DR_MODE_DRD)
 		return -EPERM;
@@ -2305,6 +2357,11 @@ static int dwc3_gadget_vbus_session(struct usb_gadget *_gadget, int is_active)
 	is_active = !!is_active;
 
 	dbg_event(0xFF, "VbusSess", is_active);
+
+	disable_irq(dwc->irq);
+
+	flush_work(&dwc->bh_work);
+
 	spin_lock_irqsave(&dwc->lock, flags);
 
 	/* Mark that the vbus was powered */
@@ -2320,9 +2377,9 @@ static int dwc3_gadget_vbus_session(struct usb_gadget *_gadget, int is_active)
 			 * Both vbus was activated by otg and pullup was
 			 * signaled by the gadget driver.
 			 */
-			dwc3_gadget_run_stop(dwc, 1, false);
+			ret = dwc3_gadget_run_stop(dwc, 1, false);
 		} else {
-			dwc3_gadget_run_stop(dwc, 0, false);
+			ret = dwc3_gadget_run_stop(dwc, 0, false);
 		}
 	}
 
@@ -2336,6 +2393,13 @@ static int dwc3_gadget_vbus_session(struct usb_gadget *_gadget, int is_active)
 	}
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
+	if (!is_active && ret == -ETIMEDOUT) {
+		dev_err(dwc->dev, "%s: Core soft reset...\n", __func__);
+		dwc3_device_core_soft_reset(dwc);
+	}
+
+	enable_irq(dwc->irq);
+
 	return 0;
 }
 
@@ -3178,8 +3242,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
 {
 	u32			reg;
 
-	usb_phy_start_link_training(dwc->usb3_phy);
-
 	dwc->connected = true;
 
 	/*
@@ -3265,7 +3327,6 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
 	u8			speed;
 
 	dbg_event(0xFF, "CONNECT DONE", 0);
-	usb_phy_stop_link_training(dwc->usb3_phy);
 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
 	speed = reg & DWC3_DSTS_CONNECTSPD;
 	dwc->speed = speed;
@@ -3277,6 +3338,9 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
 		dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
 	}
 
+	/* Reset the retry count for erratic error events */
+	dwc->retries_on_error = 0;
+
 	/*
 	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
 	 * each time on Connect Done.
@@ -3640,7 +3704,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
 		dwc->dbg_gadget_events.sof++;
 		break;
 	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
-		dbg_event(0xFF, "ERROR", 0);
+		dbg_event(0xFF, "ERROR", dwc->retries_on_error);
 		dwc->dbg_gadget_events.erratic_error++;
 		dwc->err_evt_seen = true;
 		break;
@@ -3698,6 +3762,7 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
 			if (dwc3_notify_event(dwc,
 						DWC3_CONTROLLER_ERROR_EVENT, 0))
 				dwc->err_evt_seen = 0;
+			dwc->retries_on_error++;
 			break;
 		}
 
@@ -3765,12 +3830,16 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
 
 static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
 {
-	struct dwc3 *dwc = evt->dwc;
+	struct dwc3 *dwc;
 	u32 amount;
 	u32 count;
 	u32 reg;
 	ktime_t start_time;
 
+	if (!evt)
+		return IRQ_NONE;
+
+	dwc = evt->dwc;
 	start_time = ktime_get();
 	dwc->irq_cnt++;
 
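
Two patterns recur in the gadget.c changes above: leftover GEVNTCOUNT entries are acknowledged before the controller is considered halted, and dwc3_device_core_soft_reset() sets DCTL.CSFTRST then polls until the hardware clears it. A hedged userspace sketch of that bounded poll loop, with the register access simulated:

#include <stdio.h>
#include <unistd.h>

#define DCTL_CSFTRST	(1u << 30)	/* matches DWC3_DCTL_CSFTRST */

static unsigned int fake_dctl;		/* simulated register */

static unsigned int read_dctl(void)
{
	/* a real device clears CSFTRST when the soft reset completes;
	 * here we clear it immediately to keep the sketch runnable */
	fake_dctl &= ~DCTL_CSFTRST;
	return fake_dctl;
}

static int core_soft_reset(void)
{
	int retries = 10;

	fake_dctl |= DCTL_CSFTRST;		/* write: start the reset */
	do {
		if (!(read_dctl() & DCTL_CSFTRST))
			return 0;		/* hardware acknowledged */
		usleep(1000);			/* ~usleep_range(1000, 1100) */
	} while (--retries);

	fprintf(stderr, "core soft reset timed out\n");
	return -1;				/* the kernel returns -ETIMEDOUT */
}

int main(void)
{
	return core_soft_reset() ? 1 : 0;
}
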
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index dcbb336..dc73594 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2113,6 +2113,7 @@ void composite_disconnect(struct usb_gadget *gadget)
 	 * disconnect callbacks?
 	 */
 	spin_lock_irqsave(&cdev->lock, flags);
+	cdev->suspended = 0;
 	if (cdev->config)
 		reset_config(cdev);
 	if (cdev->driver->disconnect)
diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c
index 6060761..3bbf33b 100644
--- a/drivers/usb/gadget/function/f_cdev.c
+++ b/drivers/usb/gadget/function/f_cdev.c
@@ -866,6 +866,9 @@ static void usb_cser_unbind(struct usb_configuration *c, struct usb_function *f)
 {
 	struct f_cdev *port = func_to_port(f);
 
+	/* Reset string id */
+	cser_string_defs[0].id = 0;
+
 	usb_free_all_descriptors(f);
 	usb_cser_free_req(port->port_usb.notify, port->port_usb.notify_req);
 }
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index cdb6d62..3211960 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -855,6 +855,11 @@ static void ipa_work_handler(struct work_struct *w)
 		break;
 	case STATE_INITIALIZED:
 		if (event == EVT_SET_ALT) {
+			if (!atomic_read(&gsi->connected)) {
+				log_event_err("USB cable not connected\n");
+				break;
+			}
+
 			usb_gadget_autopm_get(d_port->gadget);
 			log_event_dbg("%s: get = %d", __func__,
 				atomic_read(&gad_dev->power.usage_count));
@@ -1339,12 +1344,14 @@ static ssize_t gsi_ctrl_dev_write(struct file *fp, const char __user *buf,
 	unsigned long flags;
 	struct gsi_ctrl_pkt *cpkt;
 	struct gsi_ctrl_port *c_port;
-	struct usb_request *req;
 	enum ipa_usb_teth_prot prot_id =
 		*(enum ipa_usb_teth_prot *)(fp->private_data);
 	struct gsi_inst_status *inst_cur = &inst_status[prot_id];
 	struct f_gsi *gsi;
 
+	if (prot_id == IPA_USB_DIAG)
+		return -EINVAL;
+
 	pr_debug("Enter %zu", count);
 
 	mutex_lock(&inst_cur->gsi_lock);
@@ -1358,13 +1365,6 @@ static ssize_t gsi_ctrl_dev_write(struct file *fp, const char __user *buf,
 
 	gsi = inst_cur->opts->gsi;
 	c_port = &gsi->c_port;
-	req = c_port->notify_req;
-
-	if (!c_port || !req || !req->buf) {
-		log_event_err("%s: c_port %pK req %p req->buf %p",
-			__func__, c_port, req, req ? req->buf : req);
-		return -ENODEV;
-	}
 
 	if (!count || count > GSI_MAX_CTRL_PKT_SIZE) {
 		log_event_err("error: ctrl pkt length %zu", count);
@@ -3085,6 +3085,13 @@ static void gsi_unbind(struct usb_configuration *c, struct usb_function *f)
 
 	ipa_usb_deinit_teth_prot(gsi->prot_id);
 
+	/* Reset string ids */
+	rndis_gsi_string_defs[0].id = 0;
+	ecm_gsi_string_defs[0].id   = 0;
+	rmnet_gsi_string_defs[0].id = 0;
+	mbim_gsi_string_defs[0].id  = 0;
+	qdss_gsi_string_defs[0].id  = 0;
+
 	if (gsi->prot_id == IPA_USB_RNDIS) {
 		gsi->d_port.sm_state = STATE_UNINITIALIZED;
 		rndis_deregister(gsi->params);
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index 311e7ce..ae698bf 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -450,7 +450,7 @@ static struct usb_ss_ep_comp_descriptor rmnet_gsi_ss_in_comp_desc = {
 	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
 
 	/* the following 2 values can be tweaked if necessary */
-	.bMaxBurst =		2,
+	.bMaxBurst =		6,
 	/* .bmAttributes =	0, */
 };
 
@@ -705,7 +705,7 @@ static struct usb_ss_ep_comp_descriptor rndis_gsi_ss_bulk_comp_desc = {
 	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
 
 	/* the following 2 values can be tweaked if necessary */
-	.bMaxBurst =		2,
+	.bMaxBurst =		6,
 	/* .bmAttributes =	0, */
 };
 
@@ -990,7 +990,7 @@ static struct usb_ss_ep_comp_descriptor mbim_gsi_ss_in_comp_desc = {
 	.bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,
 
 	/* the following 2 values can be tweaked if necessary */
-	.bMaxBurst =         2,
+	.bMaxBurst =         6,
 	/* .bmAttributes =      0, */
 };
 
@@ -1290,7 +1290,7 @@ static struct usb_ss_ep_comp_descriptor ecm_gsi_ss_in_comp_desc = {
 	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
 
 	/* the following 2 values can be tweaked if necessary */
-	.bMaxBurst =         2,
+	.bMaxBurst =         6,
 	/* .bmAttributes =      0, */
 };
 
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 6aada1c..8bce7ce 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -261,7 +261,7 @@ struct fsg_common;
 struct fsg_common {
 	struct usb_gadget	*gadget;
 	struct usb_composite_dev *cdev;
-	struct fsg_dev		*fsg, *new_fsg;
+	struct fsg_dev		*fsg;
 	wait_queue_head_t	io_wait;
 	wait_queue_head_t	fsg_wait;
 
@@ -290,6 +290,7 @@ struct fsg_common {
 	unsigned int		bulk_out_maxpacket;
 	enum fsg_state		state;		/* For exception handling */
 	unsigned int		exception_req_tag;
+	void			*exception_arg;
 
 	enum data_direction	data_dir;
 	u32			data_size;
@@ -391,7 +392,8 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
 
 /* These routines may be called in process context or in_irq */
 
-static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
+static void __raise_exception(struct fsg_common *common, enum fsg_state new_state,
+			      void *arg)
 {
 	unsigned long		flags;
 
@@ -404,6 +406,7 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
 	if (common->state <= new_state) {
 		common->exception_req_tag = common->ep0_req_tag;
 		common->state = new_state;
+		common->exception_arg = arg;
 		if (common->thread_task)
 			send_sig_info(SIGUSR1, SEND_SIG_FORCED,
 				      common->thread_task);
@@ -411,6 +414,10 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
 	spin_unlock_irqrestore(&common->lock, flags);
 }
 
+static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
+{
+	__raise_exception(common, new_state, NULL);
+}
 
 /*-------------------------------------------------------------------------*/
 
@@ -2287,20 +2294,19 @@ static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
 static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 {
 	struct fsg_dev *fsg = fsg_from_func(f);
-	fsg->common->new_fsg = fsg;
 
 	/* prevents usb LPM until thread runs to completion */
 	usb_gadget_autopm_get_async(fsg->common->gadget);
 
-	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+	__raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, fsg);
 	return USB_GADGET_DELAYED_STATUS;
 }
 
 static void fsg_disable(struct usb_function *f)
 {
 	struct fsg_dev *fsg = fsg_from_func(f);
-	fsg->common->new_fsg = NULL;
-	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+
+	__raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
 }
 
 
@@ -2313,6 +2319,7 @@ static void handle_exception(struct fsg_common *common)
 	enum fsg_state		old_state;
 	struct fsg_lun		*curlun;
 	unsigned int		exception_req_tag;
+	struct fsg_dev		*new_fsg;
 
 	/*
 	 * Clear the existing signals.  Anything but SIGUSR1 is converted
@@ -2366,6 +2373,7 @@ static void handle_exception(struct fsg_common *common)
 	common->next_buffhd_to_fill = &common->buffhds[0];
 	common->next_buffhd_to_drain = &common->buffhds[0];
 	exception_req_tag = common->exception_req_tag;
+	new_fsg = common->exception_arg;
 	old_state = common->state;
 	common->state = FSG_STATE_NORMAL;
 
@@ -2419,8 +2427,8 @@ static void handle_exception(struct fsg_common *common)
 		break;
 
 	case FSG_STATE_CONFIG_CHANGE:
-		do_set_interface(common, common->new_fsg);
-		if (common->new_fsg)
+		do_set_interface(common, new_fsg);
+		if (new_fsg)
 			usb_composite_setup_continue(common->cdev);
 		break;
 
@@ -2995,8 +3003,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
 
 	DBG(fsg, "unbind\n");
 	if (fsg->common->fsg == fsg) {
-		fsg->common->new_fsg = NULL;
-		raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+		__raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
 		/* FIXME: make interruptible or killable somehow? */
 		wait_event(common->fsg_wait, common->fsg != fsg);
 	}
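
The f_mass_storage rework closes a race: new_fsg used to live in fsg_common, so a second exception could overwrite it before the worker thread consumed the first one. Passing the argument together with the exception, under the same lock that publishes the state, keeps the pair consistent. A hedged pthread sketch of the idea:

#include <pthread.h>
#include <stdio.h>

enum state { NORMAL, CONFIG_CHANGE };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static enum state state = NORMAL;
static void *exception_arg;	/* mirrors common->exception_arg */

/* state and its argument are published together, under one lock, so
 * the handler can never observe a state/arg pair from two raisers */
static void raise_exception(enum state new_state, void *arg)
{
	pthread_mutex_lock(&lock);
	if (state <= new_state) {
		state = new_state;
		exception_arg = arg;
	}
	pthread_mutex_unlock(&lock);
}

static void handle_exception(void)
{
	void *arg;

	pthread_mutex_lock(&lock);
	arg = exception_arg;	/* snapshot both under the lock */
	state = NORMAL;
	pthread_mutex_unlock(&lock);

	printf("config change, new_fsg=%p\n", arg);
}

int main(void)
{
	int dummy_fsg;

	raise_exception(CONFIG_CHANGE, &dummy_fsg);
	handle_exception();
	return 0;
}
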
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index 1212e7e..c137669 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -511,6 +511,10 @@ static void qdss_unbind(struct usb_configuration *c, struct usb_function *f)
 
 	flush_workqueue(qdss->wq);
 
+	/* Reset string ids */
+	qdss_string_defs[QDSS_DATA_IDX].id = 0;
+	qdss_string_defs[QDSS_CTRL_IDX].id = 0;
+
 	qdss->debug_inface_enabled = 0;
 
 	clear_eps(f);
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 213b525..1505e55 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -48,6 +48,7 @@
 #define DRIVER_VERSION	"02 May 2005"
 
 #define POWER_BUDGET	500	/* in mA; use 8 for low-power port testing */
+#define POWER_BUDGET_3	900	/* in mA */
 
 static const char	driver_name[] = "dummy_hcd";
 static const char	driver_desc[] = "USB Host+Gadget Emulator";
@@ -2446,7 +2447,7 @@ static int dummy_start_ss(struct dummy_hcd *dum_hcd)
 	dum_hcd->rh_state = DUMMY_RH_RUNNING;
 	dum_hcd->stream_en_ep = 0;
 	INIT_LIST_HEAD(&dum_hcd->urbp_list);
-	dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET;
+	dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET_3;
 	dummy_hcd_to_hcd(dum_hcd)->state = HC_STATE_RUNNING;
 	dummy_hcd_to_hcd(dum_hcd)->uses_new_polling = 1;
 #ifdef CONFIG_USB_OTG
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index eafc2a0..21921db 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -1165,11 +1165,11 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
 			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
 
 			bl = bytes - n;
-			if (bl > 3)
-				bl = 3;
+			if (bl > 4)
+				bl = 4;
 
 			for (i = 0; i < bl; i++)
-				data[n + i] = (u8) ((tmp >> (n * 8)) & 0xFF);
+				data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
 		}
 		break;
 
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index fea02c7..a5254e8 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -19,6 +19,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <linux/sys_soc.h>
 #include <linux/uaccess.h>
 #include <linux/usb/ch9.h>
@@ -2378,9 +2379,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
 	if (usb3->forced_b_device)
 		return -EBUSY;
 
-	if (!strncmp(buf, "host", strlen("host")))
+	if (sysfs_streq(buf, "host"))
 		new_mode_is_host = true;
-	else if (!strncmp(buf, "peripheral", strlen("peripheral")))
+	else if (sysfs_streq(buf, "peripheral"))
 		new_mode_is_host = false;
 	else
 		return -EINVAL;
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index e64eb47..2d5a72c 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -1627,6 +1627,10 @@ static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 			/* see what we found out */
 			temp = check_reset_complete(fotg210, wIndex, status_reg,
 					fotg210_readl(fotg210, status_reg));
+
+			/* restart schedule */
+			fotg210->command |= CMD_RUN;
+			fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
 		}
 
 		if (!(temp & (PORT_RESUME|PORT_RESET))) {
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 210181f..af11887 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -418,8 +418,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci)
  * other cases where the next software may expect clean state from the
  * "firmware".  this is bus-neutral, unlike shutdown() methods.
  */
-static void
-ohci_shutdown (struct usb_hcd *hcd)
+static void _ohci_shutdown(struct usb_hcd *hcd)
 {
 	struct ohci_hcd *ohci;
 
@@ -435,6 +434,16 @@ ohci_shutdown (struct usb_hcd *hcd)
 	ohci->rh_state = OHCI_RH_HALTED;
 }
 
+static void ohci_shutdown(struct usb_hcd *hcd)
+{
+	struct ohci_hcd	*ohci = hcd_to_ohci(hcd);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ohci->lock, flags);
+	_ohci_shutdown(hcd);
+	spin_unlock_irqrestore(&ohci->lock, flags);
+}
+
 /*-------------------------------------------------------------------------*
  * HC functions
  *-------------------------------------------------------------------------*/
@@ -752,7 +761,7 @@ static void io_watchdog_func(struct timer_list *t)
  died:
 			usb_hc_died(ohci_to_hcd(ohci));
 			ohci_dump(ohci);
-			ohci_shutdown(ohci_to_hcd(ohci));
+			_ohci_shutdown(ohci_to_hcd(ohci));
 			goto done;
 		} else {
 			/* No write back because the done queue was empty */
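
The ohci-hcd change above is the usual locked/unlocked split: _ohci_shutdown() expects ohci->lock to be held (the io_watchdog path already holds it), while ohci_shutdown() is the public wrapper that takes the lock itself. A hedged sketch of the convention (names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int rh_state = 1;

/* caller must hold `lock` -- the leading underscore flags that */
static void _shutdown(void)
{
	rh_state = 0;
	printf("halted, rh_state=%d\n", rh_state);
}

/* public entry point: acquires the lock around the raw helper */
static void shutdown_wrapper(void)
{
	pthread_mutex_lock(&lock);
	_shutdown();
	pthread_mutex_unlock(&lock);
}

static void watchdog(void)
{
	pthread_mutex_lock(&lock);
	/* ... detects a dead controller ... */
	_shutdown();		/* lock already held: call the raw form */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	watchdog();
	shutdown_wrapper();	/* safe from any context */
	return 0;
}
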
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index f522d7e..52bceda 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -407,10 +407,10 @@ static int xhci_plat_remove(struct platform_device *dev)
 
 	device_remove_file(&dev->dev, &dev_attr_config_imod);
 	usb_remove_hcd(shared_hcd);
-	xhci->shared_hcd = NULL;
 	usb_phy_shutdown(hcd->usb_phy);
 
 	usb_remove_hcd(hcd);
+	xhci->shared_hcd = NULL;
 	usb_put_hcd(shared_hcd);
 
 	clk_disable_unprepare(clk);
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index 8616c52..2b0ccd1 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -104,7 +104,7 @@ static int xhci_rcar_is_gen2(struct device *dev)
 	return of_device_is_compatible(node, "renesas,xhci-r8a7790") ||
 		of_device_is_compatible(node, "renesas,xhci-r8a7791") ||
 		of_device_is_compatible(node, "renesas,xhci-r8a7793") ||
-		of_device_is_compatible(node, "renensas,rcar-gen2-xhci");
+		of_device_is_compatible(node, "renesas,rcar-gen2-xhci");
 }
 
 static int xhci_rcar_is_gen3(struct device *dev)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 81479df..edef3e2 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3154,10 +3154,10 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
 	if (usb_urb_dir_out(urb)) {
 		len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
 				   seg->bounce_buf, new_buff_len, enqd_len);
-		if (len != seg->bounce_len)
+		if (len != new_buff_len)
 			xhci_warn(xhci,
 				"WARN Wrong bounce buffer write length: %zu != %d\n",
-				len, seg->bounce_len);
+				len, new_buff_len);
 		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
 						 max_pkt, DMA_TO_DEVICE);
 	} else {
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index b1cce98..fe37dac 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1148,6 +1148,16 @@ static int tegra_xusb_probe(struct platform_device *pdev)
 
 	tegra_xusb_ipfs_config(tegra, regs);
 
+	/*
+	 * The XUSB Falcon microcontroller can only address 40 bits, so set
+	 * the DMA mask accordingly.
+	 */
+	err = dma_set_mask_and_coherent(tegra->dev, DMA_BIT_MASK(40));
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+		goto put_rpm;
+	}
+
 	err = tegra_xusb_load_firmware(tegra);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index f336c38..9f7e540 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -226,7 +226,7 @@ int xhci_reset(struct xhci_hcd *xhci)
 		udelay(1000);
 
 	ret = xhci_handshake_check_state(xhci, &xhci->op_regs->command,
-			CMD_RESET, 0, 10 * 1000 * 1000);
+			CMD_RESET, 0, 1000 * 1000);
 	if (ret)
 		return ret;
 
@@ -240,7 +240,7 @@ int xhci_reset(struct xhci_hcd *xhci)
 	 * than status until the "Controller Not Ready" flag is cleared.
 	 */
 	ret = xhci_handshake(&xhci->op_regs->status,
-			STS_CNR, 0, 10 * 1000 * 1000);
+			STS_CNR, 0, 1000 * 1000);
 
 	for (i = 0; i < 2; i++) {
 		xhci->bus_state[i].port_c_suspend = 0;
@@ -1051,7 +1051,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
 	writel(command, &xhci->op_regs->command);
 	xhci->broken_suspend = 0;
 	if (xhci_handshake(&xhci->op_regs->status,
-				STS_SAVE, 0, 10 * 1000)) {
+				STS_SAVE, 0, 20 * 1000)) {
 	/*
 	 * AMD SNPS xHC 3.0 occasionally does not clear the
 	 * SSS bit of USBSTS and when driver tries to poll
@@ -1127,6 +1127,18 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 		hibernated = true;
 
 	if (!hibernated) {
+		/*
+		 * Some controllers might lose power during suspend, so wait
+		 * for controller not ready bit to clear, just as in xHC init.
+		 */
+		retval = xhci_handshake(&xhci->op_regs->status,
+					STS_CNR, 0, 10 * 1000 * 1000);
+		if (retval) {
+			xhci_warn(xhci, "Controller not ready at resume %d\n",
+				  retval);
+			spin_unlock_irq(&xhci->lock);
+			return retval;
+		}
 		/* step 1: restore register */
 		xhci_restore_registers(xhci);
 		/* step 2: initialize command ring buffer */
@@ -3082,6 +3094,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 	unsigned int ep_index;
 	unsigned long flags;
 	u32 ep_flag;
+	int err;
 
 	xhci = hcd_to_xhci(hcd);
 	if (!host_ep->hcpriv)
@@ -3131,7 +3144,17 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 		xhci_free_command(xhci, cfg_cmd);
 		goto cleanup;
 	}
-	xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
+
+	err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
+					ep_index, 0);
+	if (err < 0) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_free_command(xhci, cfg_cmd);
+		xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
+				__func__, err);
+		goto cleanup;
+	}
+
 	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
@@ -3145,8 +3168,16 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 					   ctrl_ctx, ep_flag, ep_flag);
 	xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
 
-	xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
+	err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
 				      udev->slot_id, false);
+	if (err < 0) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_free_command(xhci, cfg_cmd);
+		xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ",
+				__func__, err);
+		goto cleanup;
+	}
+
 	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
@@ -4660,12 +4691,12 @@ static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
 	alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
 		desc, state, timeout);
 
-	/* If we found we can't enable hub-initiated LPM, or
+	/* If we found we can't enable hub-initiated LPM, and
 	 * the U1 or U2 exit latency was too high to allow
-	 * device-initiated LPM as well, just stop searching.
+	 * device-initiated LPM as well, then we will disable LPM
+	 * for this device, so stop searching any further.
 	 */
-	if (alt_timeout == USB3_LPM_DISABLED ||
-			alt_timeout == USB3_LPM_DEVICE_INITIATED) {
+	if (alt_timeout == USB3_LPM_DISABLED) {
 		*timeout = alt_timeout;
 		return -E2BIG;
 	}
@@ -4776,10 +4807,12 @@ static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
 		if (intf->dev.driver) {
 			driver = to_usb_driver(intf->dev.driver);
 			if (driver && driver->disable_hub_initiated_lpm) {
-				dev_dbg(&udev->dev, "Hub-initiated %s disabled "
-						"at request of driver %s\n",
-						state_name, driver->name);
-				return xhci_get_timeout_no_hub_lpm(udev, state);
+				dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
+					state_name, driver->name);
+				timeout = xhci_get_timeout_no_hub_lpm(udev,
+								      state);
+				if (timeout == USB3_LPM_DISABLED)
+					return timeout;
 			}
 		}
 
@@ -5063,11 +5096,18 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
 		hcd->has_tt = 1;
 	} else {
 		/*
-		 * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol
-		 * minor revision instead of sbrn. Minor revision is a two digit
-		 * BCD containing minor and sub-minor numbers, only show minor.
+		 * The early xHCI 1.1 spec did not mention that USB 3.1 capable
+		 * hosts should return 0x31 for sbrn, or that the minor revision
+		 * is a two-digit BCD containing minor and sub-minor numbers.
+		 * This was later clarified in xHCI 1.2.
+		 *
+		 * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
+		 * minor revision set to 0x1 instead of 0x10.
 		 */
-		minor_rev = xhci->usb3_rhub.min_rev / 0x10;
+		if (xhci->usb3_rhub.min_rev == 0x1)
+			minor_rev = 1;
+		else
+			minor_rev = xhci->usb3_rhub.min_rev / 0x10;
 
 		switch (minor_rev) {
 		case 2:
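
The sbrn hunk above handles hosts that report the xHCI minor revision as 0x1 rather than the BCD value 0x10. A hedged userspace sketch of the decode:

#include <stdio.h>

/* Minor revision is a two-digit BCD (0x10 == "1.0"), but some USB 3.1
 * hosts report 0x1; treat that as minor revision 1, per the patch. */
static int decode_minor_rev(unsigned int min_rev)
{
	if (min_rev == 0x1)
		return 1;
	return min_rev / 0x10;
}

int main(void)
{
	printf("0x10 -> %d\n", decode_minor_rev(0x10));	/* 1 (USB 3.1) */
	printf("0x01 -> %d\n", decode_minor_rev(0x01));	/* 1 (quirky host) */
	printf("0x20 -> %d\n", decode_minor_rev(0x20));	/* 2 (USB 3.2) */
	return 0;
}
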
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 9f2f563..addbb47 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -721,6 +721,10 @@ static int mts_usb_probe(struct usb_interface *intf,
 
 	}
 
+	if (ep_in_current != &ep_in_set[2]) {
+		MTS_WARNING("couldn't find two input bulk endpoints. Bailing out.\n");
+		return -ENODEV;
+	}
 
 	if ( ep_out == -1 ) {
 		MTS_WARNING( "couldn't find an output bulk endpoint. Bailing out.\n" );
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index bbd0bee..5ebb23e 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -46,16 +46,6 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called usbsevseg.
 
-config USB_RIO500
-	tristate "USB Diamond Rio500 support"
-	help
-	  Say Y here if you want to connect a USB Rio500 mp3 player to your
-	  computer's USB port. Please read <file:Documentation/usb/rio.txt>
-	  for more information.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called rio500.
-
 config USB_LEGOTOWER
 	tristate "USB Lego Infrared Tower support"
 	help
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 4f7d202..6eb379b 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -17,7 +17,6 @@
 obj-$(CONFIG_USB_LCD)			+= usblcd.o
 obj-$(CONFIG_USB_LD)			+= ldusb.o
 obj-$(CONFIG_USB_LEGOTOWER)		+= legousbtower.o
-obj-$(CONFIG_USB_RIO500)		+= rio500.o
 obj-$(CONFIG_USB_TEST)			+= usbtest.o
 obj-$(CONFIG_USB_EHSET_TEST_FIXTURE)    += ehset.o
 obj-$(CONFIG_USB_TRANCEVIBRATOR)	+= trancevibrator.o
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 9465fb9..9a51760 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -75,6 +75,7 @@ struct adu_device {
 	char			serial_number[8];
 
 	int			open_count; /* number of times this port has been opened */
+	unsigned long		disconnected:1;
 
 	char		*read_buffer_primary;
 	int			read_buffer_length;
@@ -116,7 +117,7 @@ static void adu_abort_transfers(struct adu_device *dev)
 {
 	unsigned long flags;
 
-	if (dev->udev == NULL)
+	if (dev->disconnected)
 		return;
 
 	/* shutdown transfer */
@@ -148,6 +149,7 @@ static void adu_delete(struct adu_device *dev)
 	kfree(dev->read_buffer_secondary);
 	kfree(dev->interrupt_in_buffer);
 	kfree(dev->interrupt_out_buffer);
+	usb_put_dev(dev->udev);
 	kfree(dev);
 }
 
@@ -243,7 +245,7 @@ static int adu_open(struct inode *inode, struct file *file)
 	}
 
 	dev = usb_get_intfdata(interface);
-	if (!dev || !dev->udev) {
+	if (!dev) {
 		retval = -ENODEV;
 		goto exit_no_device;
 	}
@@ -326,7 +328,7 @@ static int adu_release(struct inode *inode, struct file *file)
 	}
 
 	adu_release_internal(dev);
-	if (dev->udev == NULL) {
+	if (dev->disconnected) {
 		/* the device was unplugged before the file was released */
 		if (!dev->open_count)	/* ... and we're the last user */
 			adu_delete(dev);
@@ -355,7 +357,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
 		return -ERESTARTSYS;
 
 	/* verify that the device wasn't unplugged */
-	if (dev->udev == NULL) {
+	if (dev->disconnected) {
 		retval = -ENODEV;
 		pr_err("No device or device unplugged %d\n", retval);
 		goto exit;
@@ -520,7 +522,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
 		goto exit_nolock;
 
 	/* verify that the device wasn't unplugged */
-	if (dev->udev == NULL) {
+	if (dev->disconnected) {
 		retval = -ENODEV;
 		pr_err("No device or device unplugged %d\n", retval);
 		goto exit;
@@ -665,7 +667,7 @@ static int adu_probe(struct usb_interface *interface,
 
 	mutex_init(&dev->mtx);
 	spin_lock_init(&dev->buflock);
-	dev->udev = udev;
+	dev->udev = usb_get_dev(udev);
 	init_waitqueue_head(&dev->read_wait);
 	init_waitqueue_head(&dev->write_wait);
 
@@ -764,14 +766,18 @@ static void adu_disconnect(struct usb_interface *interface)
 
 	dev = usb_get_intfdata(interface);
 
-	mutex_lock(&dev->mtx);	/* not interruptible */
-	dev->udev = NULL;	/* poison */
 	usb_deregister_dev(interface, &adu_class);
-	mutex_unlock(&dev->mtx);
+
+	usb_poison_urb(dev->interrupt_in_urb);
+	usb_poison_urb(dev->interrupt_out_urb);
 
 	mutex_lock(&adutux_mutex);
 	usb_set_intfdata(interface, NULL);
 
+	mutex_lock(&dev->mtx);	/* not interruptible */
+	dev->disconnected = 1;
+	mutex_unlock(&dev->mtx);
+
 	/* if the device is not opened, then we clean up right now */
 	if (!dev->open_count)
 		adu_delete(dev);
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index cf5828c..34e6cd6 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -98,6 +98,7 @@ static void chaoskey_free(struct chaoskey *dev)
 		usb_free_urb(dev->urb);
 		kfree(dev->name);
 		kfree(dev->buf);
+		usb_put_intf(dev->interface);
 		kfree(dev);
 	}
 }
@@ -145,6 +146,8 @@ static int chaoskey_probe(struct usb_interface *interface,
 	if (dev == NULL)
 		goto out;
 
+	dev->interface = usb_get_intf(interface);
+
 	dev->buf = kmalloc(size, GFP_KERNEL);
 
 	if (dev->buf == NULL)
@@ -174,8 +177,6 @@ static int chaoskey_probe(struct usb_interface *interface,
 			goto out;
 	}
 
-	dev->interface = interface;
-
 	dev->in_ep = in_ep;
 
 	if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID)
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 55db0fc..2d9d949 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -87,6 +87,7 @@ struct iowarrior {
 	char chip_serial[9];		/* the serial number string of the chip connected */
 	int report_size;		/* number of bytes in a report */
 	u16 product_id;
+	struct usb_anchor submitted;
 };
 
 /*--------------*/
@@ -243,6 +244,7 @@ static inline void iowarrior_delete(struct iowarrior *dev)
 	kfree(dev->int_in_buffer);
 	usb_free_urb(dev->int_in_urb);
 	kfree(dev->read_queue);
+	usb_put_intf(dev->interface);
 	kfree(dev);
 }
 
@@ -424,11 +426,13 @@ static ssize_t iowarrior_write(struct file *file,
 			retval = -EFAULT;
 			goto error;
 		}
+		usb_anchor_urb(int_out_urb, &dev->submitted);
 		retval = usb_submit_urb(int_out_urb, GFP_KERNEL);
 		if (retval) {
 			dev_dbg(&dev->interface->dev,
 				"submit error %d for urb nr.%d\n",
 				retval, atomic_read(&dev->write_busy));
+			usb_unanchor_urb(int_out_urb);
 			goto error;
 		}
 		/* submit was ok */
@@ -764,11 +768,13 @@ static int iowarrior_probe(struct usb_interface *interface,
 	init_waitqueue_head(&dev->write_wait);
 
 	dev->udev = udev;
-	dev->interface = interface;
+	dev->interface = usb_get_intf(interface);
 
 	iface_desc = interface->cur_altsetting;
 	dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
 
+	init_usb_anchor(&dev->submitted);
+
 	res = usb_find_last_int_in_endpoint(iface_desc, &dev->int_in_endpoint);
 	if (res) {
 		dev_err(&interface->dev, "no interrupt-in endpoint found\n");
@@ -866,8 +872,6 @@ static void iowarrior_disconnect(struct usb_interface *interface)
 	dev = usb_get_intfdata(interface);
 	mutex_lock(&iowarrior_open_disc_lock);
 	usb_set_intfdata(interface, NULL);
-	/* prevent device read, write and ioctl */
-	dev->present = 0;
 
 	minor = dev->minor;
 	mutex_unlock(&iowarrior_open_disc_lock);
@@ -878,8 +882,7 @@ static void iowarrior_disconnect(struct usb_interface *interface)
 	mutex_lock(&dev->mutex);
 
 	/* prevent device read, write and ioctl */
-
-	mutex_unlock(&dev->mutex);
+	dev->present = 0;
 
 	if (dev->opened) {
 		/* There is a process that holds a filedescriptor to the device ,
@@ -887,10 +890,13 @@ static void iowarrior_disconnect(struct usb_interface *interface)
 		   Deleting the device is postponed until close() was called.
 		 */
 		usb_kill_urb(dev->int_in_urb);
+		usb_kill_anchored_urbs(&dev->submitted);
 		wake_up_interruptible(&dev->read_wait);
 		wake_up_interruptible(&dev->write_wait);
+		mutex_unlock(&dev->mutex);
 	} else {
 		/* no process is using the device, cleanup now */
+		mutex_unlock(&dev->mutex);
 		iowarrior_delete(dev);
 	}
 
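
The iowarrior changes anchor each write URB so disconnect can cancel all in-flight writes with a single usb_kill_anchored_urbs() call. A hedged kernel-style fragment of the anchor idiom; it builds only in-tree, and my_dev/submit_write are hypothetical names, not the driver's:

#include <linux/usb.h>

/* hypothetical per-device state; the real driver keeps more fields */
struct my_dev {
	struct usb_anchor submitted;	/* init_usb_anchor() in probe() */
};

/* anchor *before* submit, unanchor on failure: disconnect() then sees
 * exactly the URBs that are genuinely in flight */
static int submit_write(struct my_dev *dev, struct urb *urb)
{
	int retval;

	usb_anchor_urb(urb, &dev->submitted);
	retval = usb_submit_urb(urb, GFP_KERNEL);
	if (retval)
		usb_unanchor_urb(urb);
	return retval;
}

static void my_disconnect(struct usb_interface *intf)
{
	struct my_dev *dev = usb_get_intfdata(intf);

	/* cancels every anchored URB and waits for the completions */
	usb_kill_anchored_urbs(&dev->submitted);
}
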
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 006762b..6b3a6fd 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -153,6 +153,7 @@ MODULE_PARM_DESC(min_interrupt_out_interval, "Minimum interrupt out interval in
 struct ld_usb {
 	struct mutex		mutex;		/* locks this structure */
 	struct usb_interface	*intf;		/* save off the usb interface pointer */
+	unsigned long		disconnected:1;
 
 	int			open_count;	/* number of times this port has been opened */
 
@@ -192,12 +193,10 @@ static void ld_usb_abort_transfers(struct ld_usb *dev)
 	/* shutdown transfer */
 	if (dev->interrupt_in_running) {
 		dev->interrupt_in_running = 0;
-		if (dev->intf)
-			usb_kill_urb(dev->interrupt_in_urb);
+		usb_kill_urb(dev->interrupt_in_urb);
 	}
 	if (dev->interrupt_out_busy)
-		if (dev->intf)
-			usb_kill_urb(dev->interrupt_out_urb);
+		usb_kill_urb(dev->interrupt_out_urb);
 }
 
 /**
@@ -205,8 +204,6 @@ static void ld_usb_abort_transfers(struct ld_usb *dev)
  */
 static void ld_usb_delete(struct ld_usb *dev)
 {
-	ld_usb_abort_transfers(dev);
-
 	/* free data structures */
 	usb_free_urb(dev->interrupt_in_urb);
 	usb_free_urb(dev->interrupt_out_urb);
@@ -263,7 +260,7 @@ static void ld_usb_interrupt_in_callback(struct urb *urb)
 
 resubmit:
 	/* resubmit if we're still running */
-	if (dev->interrupt_in_running && !dev->buffer_overflow && dev->intf) {
+	if (dev->interrupt_in_running && !dev->buffer_overflow) {
 		retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC);
 		if (retval) {
 			dev_err(&dev->intf->dev,
@@ -383,16 +380,13 @@ static int ld_usb_release(struct inode *inode, struct file *file)
 		goto exit;
 	}
 
-	if (mutex_lock_interruptible(&dev->mutex)) {
-		retval = -ERESTARTSYS;
-		goto exit;
-	}
+	mutex_lock(&dev->mutex);
 
 	if (dev->open_count != 1) {
 		retval = -ENODEV;
 		goto unlock_exit;
 	}
-	if (dev->intf == NULL) {
+	if (dev->disconnected) {
 		/* the device was unplugged before the file was released */
 		mutex_unlock(&dev->mutex);
 		/* unlock here as ld_usb_delete frees dev */
@@ -423,7 +417,7 @@ static __poll_t ld_usb_poll(struct file *file, poll_table *wait)
 
 	dev = file->private_data;
 
-	if (!dev->intf)
+	if (dev->disconnected)
 		return EPOLLERR | EPOLLHUP;
 
 	poll_wait(file, &dev->read_wait, wait);
@@ -462,7 +456,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
 	}
 
 	/* verify that the device wasn't unplugged */
-	if (dev->intf == NULL) {
+	if (dev->disconnected) {
 		retval = -ENODEV;
 		printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
 		goto unlock_exit;
@@ -470,7 +464,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
 
 	/* wait for data */
 	spin_lock_irq(&dev->rbsl);
-	if (dev->ring_head == dev->ring_tail) {
+	while (dev->ring_head == dev->ring_tail) {
 		dev->interrupt_in_done = 0;
 		spin_unlock_irq(&dev->rbsl);
 		if (file->f_flags & O_NONBLOCK) {
@@ -480,12 +474,17 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
 		retval = wait_event_interruptible(dev->read_wait, dev->interrupt_in_done);
 		if (retval < 0)
 			goto unlock_exit;
-	} else {
-		spin_unlock_irq(&dev->rbsl);
+
+		spin_lock_irq(&dev->rbsl);
 	}
+	spin_unlock_irq(&dev->rbsl);
 
 	/* actual_buffer contains actual_length + interrupt_in_buffer */
 	actual_buffer = (size_t *)(dev->ring_buffer + dev->ring_tail * (sizeof(size_t)+dev->interrupt_in_endpoint_size));
+	if (*actual_buffer > dev->interrupt_in_endpoint_size) {
+		retval = -EIO;
+		goto unlock_exit;
+	}
 	bytes_to_read = min(count, *actual_buffer);
 	if (bytes_to_read < *actual_buffer)
 		dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n",
@@ -542,7 +541,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
 	}
 
 	/* verify that the device wasn't unplugged */
-	if (dev->intf == NULL) {
+	if (dev->disconnected) {
 		retval = -ENODEV;
 		printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
 		goto unlock_exit;
@@ -696,10 +695,9 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
 		dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n");
 
 	dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
-	dev->ring_buffer =
-		kmalloc_array(ring_buffer_size,
-			      sizeof(size_t) + dev->interrupt_in_endpoint_size,
-			      GFP_KERNEL);
+	dev->ring_buffer = kcalloc(ring_buffer_size,
+			sizeof(size_t) + dev->interrupt_in_endpoint_size,
+			GFP_KERNEL);
 	if (!dev->ring_buffer)
 		goto error;
 	dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL);
@@ -764,6 +762,9 @@ static void ld_usb_disconnect(struct usb_interface *intf)
 	/* give back our minor */
 	usb_deregister_dev(intf, &ld_usb_class);
 
+	usb_poison_urb(dev->interrupt_in_urb);
+	usb_poison_urb(dev->interrupt_out_urb);
+
 	mutex_lock(&dev->mutex);
 
 	/* if the device is not opened, then we clean up right now */
@@ -771,7 +772,7 @@ static void ld_usb_disconnect(struct usb_interface *intf)
 		mutex_unlock(&dev->mutex);
 		ld_usb_delete(dev);
 	} else {
-		dev->intf = NULL;
+		dev->disconnected = 1;
 		/* wake up pollers */
 		wake_up_interruptible_all(&dev->read_wait);
 		wake_up_interruptible_all(&dev->write_wait);
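
usb_poison_urb() is chosen over usb_kill_urb() in disconnect because poisoning is sticky: it cancels any in-flight transfer and makes every later usb_submit_urb() fail with -EPERM, which closes the race with a completion handler that resubmits. A generic sketch (the handler below is hypothetical, not ldusb's):

    #include <linux/usb.h>

    static void my_int_callback(struct urb *urb)
    {
    	if (urb->status)	/* includes -EPERM once the URB is poisoned */
    		return;

    	/* ... consume urb->transfer_buffer ... */

    	/*
    	 * After usb_kill_urb() a resubmit racing with disconnect could
    	 * still slip through; after usb_poison_urb() it reliably fails.
    	 */
    	usb_submit_urb(urb, GFP_ATOMIC);
    }
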
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 006cf13..62dab24 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -179,7 +179,6 @@ static const struct usb_device_id tower_table[] = {
 };
 
 MODULE_DEVICE_TABLE (usb, tower_table);
-static DEFINE_MUTEX(open_disc_mutex);
 
 #define LEGO_USB_TOWER_MINOR_BASE	160
 
@@ -191,6 +190,7 @@ struct lego_usb_tower {
 	unsigned char		minor;		/* the starting minor number for this device */
 
 	int			open_count;	/* number of times this port has been opened */
+	unsigned long		disconnected:1;
 
 	char*			read_buffer;
 	size_t			read_buffer_length; /* this much came in */
@@ -290,14 +290,13 @@ static inline void lego_usb_tower_debug_data(struct device *dev,
  */
 static inline void tower_delete (struct lego_usb_tower *dev)
 {
-	tower_abort_transfers (dev);
-
 	/* free data structures */
 	usb_free_urb(dev->interrupt_in_urb);
 	usb_free_urb(dev->interrupt_out_urb);
 	kfree (dev->read_buffer);
 	kfree (dev->interrupt_in_buffer);
 	kfree (dev->interrupt_out_buffer);
+	usb_put_dev(dev->udev);
 	kfree (dev);
 }
 
@@ -332,18 +331,14 @@ static int tower_open (struct inode *inode, struct file *file)
 		goto exit;
 	}
 
-	mutex_lock(&open_disc_mutex);
 	dev = usb_get_intfdata(interface);
-
 	if (!dev) {
-		mutex_unlock(&open_disc_mutex);
 		retval = -ENODEV;
 		goto exit;
 	}
 
 	/* lock this device */
 	if (mutex_lock_interruptible(&dev->lock)) {
-		mutex_unlock(&open_disc_mutex);
 	        retval = -ERESTARTSYS;
 		goto exit;
 	}
@@ -351,12 +346,9 @@ static int tower_open (struct inode *inode, struct file *file)
 
 	/* allow opening only once */
 	if (dev->open_count) {
-		mutex_unlock(&open_disc_mutex);
 		retval = -EBUSY;
 		goto unlock_exit;
 	}
-	dev->open_count = 1;
-	mutex_unlock(&open_disc_mutex);
 
 	/* reset the tower */
 	result = usb_control_msg (dev->udev,
@@ -396,13 +388,14 @@ static int tower_open (struct inode *inode, struct file *file)
 		dev_err(&dev->udev->dev,
 			"Couldn't submit interrupt_in_urb %d\n", retval);
 		dev->interrupt_in_running = 0;
-		dev->open_count = 0;
 		goto unlock_exit;
 	}
 
 	/* save device in the file's private structure */
 	file->private_data = dev;
 
+	dev->open_count = 1;
+
 unlock_exit:
 	mutex_unlock(&dev->lock);
 
@@ -423,22 +416,19 @@ static int tower_release (struct inode *inode, struct file *file)
 
 	if (dev == NULL) {
 		retval = -ENODEV;
-		goto exit_nolock;
-	}
-
-	mutex_lock(&open_disc_mutex);
-	if (mutex_lock_interruptible(&dev->lock)) {
-	        retval = -ERESTARTSYS;
 		goto exit;
 	}
 
+	mutex_lock(&dev->lock);
+
 	if (dev->open_count != 1) {
 		dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n",
 			__func__);
 		retval = -ENODEV;
 		goto unlock_exit;
 	}
-	if (dev->udev == NULL) {
+
+	if (dev->disconnected) {
 		/* the device was unplugged before the file was released */
 
 		/* unlock here as tower_delete frees dev */
@@ -456,10 +446,7 @@ static int tower_release (struct inode *inode, struct file *file)
 
 unlock_exit:
 	mutex_unlock(&dev->lock);
-
 exit:
-	mutex_unlock(&open_disc_mutex);
-exit_nolock:
 	return retval;
 }
 
@@ -477,10 +464,9 @@ static void tower_abort_transfers (struct lego_usb_tower *dev)
 	if (dev->interrupt_in_running) {
 		dev->interrupt_in_running = 0;
 		mb();
-		if (dev->udev)
-			usb_kill_urb (dev->interrupt_in_urb);
+		usb_kill_urb(dev->interrupt_in_urb);
 	}
-	if (dev->interrupt_out_busy && dev->udev)
+	if (dev->interrupt_out_busy)
 		usb_kill_urb(dev->interrupt_out_urb);
 }
 
@@ -516,7 +502,7 @@ static __poll_t tower_poll (struct file *file, poll_table *wait)
 
 	dev = file->private_data;
 
-	if (!dev->udev)
+	if (dev->disconnected)
 		return EPOLLERR | EPOLLHUP;
 
 	poll_wait(file, &dev->read_wait, wait);
@@ -563,7 +549,7 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
 	}
 
 	/* verify that the device wasn't unplugged */
-	if (dev->udev == NULL) {
+	if (dev->disconnected) {
 		retval = -ENODEV;
 		pr_err("No device or device unplugged %d\n", retval);
 		goto unlock_exit;
@@ -649,7 +635,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
 	}
 
 	/* verify that the device wasn't unplugged */
-	if (dev->udev == NULL) {
+	if (dev->disconnected) {
 		retval = -ENODEV;
 		pr_err("No device or device unplugged %d\n", retval);
 		goto unlock_exit;
@@ -759,7 +745,7 @@ static void tower_interrupt_in_callback (struct urb *urb)
 
 resubmit:
 	/* resubmit if we're still running */
-	if (dev->interrupt_in_running && dev->udev) {
+	if (dev->interrupt_in_running) {
 		retval = usb_submit_urb (dev->interrupt_in_urb, GFP_ATOMIC);
 		if (retval)
 			dev_err(&dev->udev->dev,
@@ -822,8 +808,9 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
 
 	mutex_init(&dev->lock);
 
-	dev->udev = udev;
+	dev->udev = usb_get_dev(udev);
 	dev->open_count = 0;
+	dev->disconnected = 0;
 
 	dev->read_buffer = NULL;
 	dev->read_buffer_length = 0;
@@ -891,8 +878,10 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
 				  get_version_reply,
 				  sizeof(*get_version_reply),
 				  1000);
-	if (result < 0) {
-		dev_err(idev, "LEGO USB Tower get version control request failed\n");
+	if (result < sizeof(*get_version_reply)) {
+		if (result >= 0)
+			result = -EIO;
+		dev_err(idev, "get version request failed: %d\n", result);
 		retval = result;
 		goto error;
 	}
@@ -910,7 +899,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
 	if (retval) {
 		/* something prevented us from registering this driver */
 		dev_err(idev, "Not able to get a minor for this device.\n");
-		usb_set_intfdata (interface, NULL);
 		goto error;
 	}
 	dev->minor = interface->minor;
@@ -942,23 +930,24 @@ static void tower_disconnect (struct usb_interface *interface)
 	int minor;
 
 	dev = usb_get_intfdata (interface);
-	mutex_lock(&open_disc_mutex);
-	usb_set_intfdata (interface, NULL);
 
 	minor = dev->minor;
 
-	/* give back our minor */
+	/* give back our minor and prevent further open() */
 	usb_deregister_dev (interface, &tower_class);
 
+	/* stop I/O */
+	usb_poison_urb(dev->interrupt_in_urb);
+	usb_poison_urb(dev->interrupt_out_urb);
+
 	mutex_lock(&dev->lock);
-	mutex_unlock(&open_disc_mutex);
 
 	/* if the device is not opened, then we clean up right now */
 	if (!dev->open_count) {
 		mutex_unlock(&dev->lock);
 		tower_delete (dev);
 	} else {
-		dev->udev = NULL;
+		dev->disconnected = 1;
 		/* wake up pollers */
 		wake_up_interruptible_all(&dev->read_wait);
 		wake_up_interruptible_all(&dev->write_wait);
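
The tower driver now pins the usb_device with a reference taken in probe and dropped in tower_delete(), so dev->udev stays valid for logging and cleanup even after physical disconnect; the new one-bit disconnected flag replaces NULL-ing the pointer. A sketch of that lifetime pairing, under assumed names:

    #include <linux/slab.h>
    #include <linux/usb.h>

    struct my_dev {
    	struct usb_device *udev;
    	unsigned long disconnected:1;
    };

    static int my_probe(struct usb_interface *intf,
    		    const struct usb_device_id *id)
    {
    	struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

    	if (!dev)
    		return -ENOMEM;
    	/* take a reference: udev must outlive the last open file */
    	dev->udev = usb_get_dev(interface_to_usbdev(intf));
    	usb_set_intfdata(intf, dev);
    	return 0;
    }

    static void my_delete(struct my_dev *dev)
    {
    	usb_put_dev(dev->udev);		/* pairs with usb_get_dev() above */
    	kfree(dev);
    }
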
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index e5c03c6..10c2a71 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -181,10 +181,13 @@ static ssize_t warm_reset_store(struct device *dev,
 	struct usb_interface *intf = to_usb_interface(dev);
 	struct usb_device *hdev = interface_to_usbdev(intf);
 	struct lvs_rh *lvs = usb_get_intfdata(intf);
+	int port;
 	int ret;
 
-	ret = lvs_rh_set_port_feature(hdev, lvs->portnum,
-			USB_PORT_FEAT_BH_PORT_RESET);
+	if (kstrtoint(buf, 0, &port) || port < 1 || port > 255)
+		port = lvs->portnum;
+
+	ret = lvs_rh_set_port_feature(hdev, port, USB_PORT_FEAT_BH_PORT_RESET);
 	if (ret < 0) {
 		dev_err(dev, "can't issue warm reset %d\n", ret);
 		return ret;
@@ -296,10 +299,14 @@ static ssize_t enable_compliance_store(struct device *dev,
 	struct usb_interface *intf = to_usb_interface(dev);
 	struct usb_device *hdev = interface_to_usbdev(intf);
 	struct lvs_rh *lvs = usb_get_intfdata(intf);
+	int port;
 	int ret;
 
+	if (kstrtoint(buf, 0, &port) || port < 1 || port > 255)
+		port = lvs->portnum;
+
 	ret = lvs_rh_set_port_feature(hdev,
-			lvs->portnum | USB_SS_PORT_LS_COMP_MOD << 3,
+			port | (USB_SS_PORT_LS_COMP_MOD << 3),
 			USB_PORT_FEAT_LINK_STATE);
 	if (ret < 0) {
 		dev_err(dev, "can't enable compliance mode %d\n", ret);
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
deleted file mode 100644
index a32d61a..0000000
--- a/drivers/usb/misc/rio500.c
+++ /dev/null
@@ -1,561 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/* -*- linux-c -*- */
-
-/* 
- * Driver for USB Rio 500
- *
- * Cesar Miquel (miquel@df.uba.ar)
- * 
- * based on hp_scanner.c by David E. Nelson (dnelson@jump.net)
- *
- * Based upon mouse.c (Brad Keryan) and printer.c (Michael Gee).
- *
- * Changelog:
- * 30/05/2003  replaced lock/unlock kernel with up/down
- *             Daniele Bellucci  bellucda@tiscali.it
- * */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched/signal.h>
-#include <linux/mutex.h>
-#include <linux/errno.h>
-#include <linux/random.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/usb.h>
-#include <linux/wait.h>
-
-#include "rio500_usb.h"
-
-#define DRIVER_AUTHOR "Cesar Miquel <miquel@df.uba.ar>"
-#define DRIVER_DESC "USB Rio 500 driver"
-
-#define RIO_MINOR	64
-
-/* stall/wait timeout for rio */
-#define NAK_TIMEOUT (HZ)
-
-#define IBUF_SIZE 0x1000
-
-/* Size of the rio buffer */
-#define OBUF_SIZE 0x10000
-
-struct rio_usb_data {
-        struct usb_device *rio_dev;     /* init: probe_rio */
-        unsigned int ifnum;             /* Interface number of the USB device */
-        int isopen;                     /* nz if open */
-        int present;                    /* Device is present on the bus */
-        char *obuf, *ibuf;              /* transfer buffers */
-        char bulk_in_ep, bulk_out_ep;   /* Endpoint assignments */
-        wait_queue_head_t wait_q;       /* for timeouts */
-	struct mutex lock;          /* general race avoidance */
-};
-
-static DEFINE_MUTEX(rio500_mutex);
-static struct rio_usb_data rio_instance;
-
-static int open_rio(struct inode *inode, struct file *file)
-{
-	struct rio_usb_data *rio = &rio_instance;
-
-	/* against disconnect() */
-	mutex_lock(&rio500_mutex);
-	mutex_lock(&(rio->lock));
-
-	if (rio->isopen || !rio->present) {
-		mutex_unlock(&(rio->lock));
-		mutex_unlock(&rio500_mutex);
-		return -EBUSY;
-	}
-	rio->isopen = 1;
-
-	init_waitqueue_head(&rio->wait_q);
-
-	mutex_unlock(&(rio->lock));
-
-	dev_info(&rio->rio_dev->dev, "Rio opened.\n");
-	mutex_unlock(&rio500_mutex);
-
-	return 0;
-}
-
-static int close_rio(struct inode *inode, struct file *file)
-{
-	struct rio_usb_data *rio = &rio_instance;
-
-	/* against disconnect() */
-	mutex_lock(&rio500_mutex);
-	mutex_lock(&(rio->lock));
-
-	rio->isopen = 0;
-	if (!rio->present) {
-		/* cleanup has been delayed */
-		kfree(rio->ibuf);
-		kfree(rio->obuf);
-		rio->ibuf = NULL;
-		rio->obuf = NULL;
-	} else {
-		dev_info(&rio->rio_dev->dev, "Rio closed.\n");
-	}
-	mutex_unlock(&(rio->lock));
-	mutex_unlock(&rio500_mutex);
-	return 0;
-}
-
-static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
-{
-	struct RioCommand rio_cmd;
-	struct rio_usb_data *rio = &rio_instance;
-	void __user *data;
-	unsigned char *buffer;
-	int result, requesttype;
-	int retries;
-	int retval=0;
-
-	mutex_lock(&(rio->lock));
-        /* Sanity check to make sure rio is connected, powered, etc */
-        if (rio->present == 0 || rio->rio_dev == NULL) {
-		retval = -ENODEV;
-		goto err_out;
-	}
-
-	switch (cmd) {
-	case RIO_RECV_COMMAND:
-		data = (void __user *) arg;
-		if (data == NULL)
-			break;
-		if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
-			retval = -EFAULT;
-			goto err_out;
-		}
-		if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
-			retval = -EINVAL;
-			goto err_out;
-		}
-		buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
-		if (buffer == NULL) {
-			retval = -ENOMEM;
-			goto err_out;
-		}
-		if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
-			retval = -EFAULT;
-			free_page((unsigned long) buffer);
-			goto err_out;
-		}
-
-		requesttype = rio_cmd.requesttype | USB_DIR_IN |
-		    USB_TYPE_VENDOR | USB_RECIP_DEVICE;
-		dev_dbg(&rio->rio_dev->dev,
-			"sending command:reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n",
-			requesttype, rio_cmd.request, rio_cmd.value,
-			rio_cmd.index, rio_cmd.length);
-		/* Send rio control message */
-		retries = 3;
-		while (retries) {
-			result = usb_control_msg(rio->rio_dev,
-						 usb_rcvctrlpipe(rio-> rio_dev, 0),
-						 rio_cmd.request,
-						 requesttype,
-						 rio_cmd.value,
-						 rio_cmd.index, buffer,
-						 rio_cmd.length,
-						 jiffies_to_msecs(rio_cmd.timeout));
-			if (result == -ETIMEDOUT)
-				retries--;
-			else if (result < 0) {
-				dev_err(&rio->rio_dev->dev,
-					"Error executing ioctrl. code = %d\n",
-					result);
-				retries = 0;
-			} else {
-				dev_dbg(&rio->rio_dev->dev,
-					"Executed ioctl. Result = %d (data=%02x)\n",
-					result, buffer[0]);
-				if (copy_to_user(rio_cmd.buffer, buffer,
-						 rio_cmd.length)) {
-					free_page((unsigned long) buffer);
-					retval = -EFAULT;
-					goto err_out;
-				}
-				retries = 0;
-			}
-
-			/* rio_cmd.buffer contains a raw stream of single byte
-			   data which has been returned from rio.  Data is
-			   interpreted at application level.  For data that
-			   will be cast to data types longer than 1 byte, data
-			   will be little_endian and will potentially need to
-			   be swapped at the app level */
-
-		}
-		free_page((unsigned long) buffer);
-		break;
-
-	case RIO_SEND_COMMAND:
-		data = (void __user *) arg;
-		if (data == NULL)
-			break;
-		if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
-			retval = -EFAULT;
-			goto err_out;
-		}
-		if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
-			retval = -EINVAL;
-			goto err_out;
-		}
-		buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
-		if (buffer == NULL) {
-			retval = -ENOMEM;
-			goto err_out;
-		}
-		if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
-			free_page((unsigned long)buffer);
-			retval = -EFAULT;
-			goto err_out;
-		}
-
-		requesttype = rio_cmd.requesttype | USB_DIR_OUT |
-		    USB_TYPE_VENDOR | USB_RECIP_DEVICE;
-		dev_dbg(&rio->rio_dev->dev,
-			"sending command: reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n",
-			requesttype, rio_cmd.request, rio_cmd.value,
-			rio_cmd.index, rio_cmd.length);
-		/* Send rio control message */
-		retries = 3;
-		while (retries) {
-			result = usb_control_msg(rio->rio_dev,
-						 usb_sndctrlpipe(rio-> rio_dev, 0),
-						 rio_cmd.request,
-						 requesttype,
-						 rio_cmd.value,
-						 rio_cmd.index, buffer,
-						 rio_cmd.length,
-						 jiffies_to_msecs(rio_cmd.timeout));
-			if (result == -ETIMEDOUT)
-				retries--;
-			else if (result < 0) {
-				dev_err(&rio->rio_dev->dev,
-					"Error executing ioctrl. code = %d\n",
-					result);
-				retries = 0;
-			} else {
-				dev_dbg(&rio->rio_dev->dev,
-					"Executed ioctl. Result = %d\n", result);
-				retries = 0;
-
-			}
-
-		}
-		free_page((unsigned long) buffer);
-		break;
-
-	default:
-		retval = -ENOTTY;
-		break;
-	}
-
-
-err_out:
-	mutex_unlock(&(rio->lock));
-	return retval;
-}
-
-static ssize_t
-write_rio(struct file *file, const char __user *buffer,
-	  size_t count, loff_t * ppos)
-{
-	DEFINE_WAIT(wait);
-	struct rio_usb_data *rio = &rio_instance;
-
-	unsigned long copy_size;
-	unsigned long bytes_written = 0;
-	unsigned int partial;
-
-	int result = 0;
-	int maxretry;
-	int errn = 0;
-	int intr;
-
-	intr = mutex_lock_interruptible(&(rio->lock));
-	if (intr)
-		return -EINTR;
-        /* Sanity check to make sure rio is connected, powered, etc */
-        if (rio->present == 0 || rio->rio_dev == NULL) {
-		mutex_unlock(&(rio->lock));
-		return -ENODEV;
-	}
-
-
-
-	do {
-		unsigned long thistime;
-		char *obuf = rio->obuf;
-
-		thistime = copy_size =
-		    (count >= OBUF_SIZE) ? OBUF_SIZE : count;
-		if (copy_from_user(rio->obuf, buffer, copy_size)) {
-			errn = -EFAULT;
-			goto error;
-		}
-		maxretry = 5;
-		while (thistime) {
-			if (!rio->rio_dev) {
-				errn = -ENODEV;
-				goto error;
-			}
-			if (signal_pending(current)) {
-				mutex_unlock(&(rio->lock));
-				return bytes_written ? bytes_written : -EINTR;
-			}
-
-			result = usb_bulk_msg(rio->rio_dev,
-					 usb_sndbulkpipe(rio->rio_dev, 2),
-					 obuf, thistime, &partial, 5000);
-
-			dev_dbg(&rio->rio_dev->dev,
-				"write stats: result:%d thistime:%lu partial:%u\n",
-				result, thistime, partial);
-
-			if (result == -ETIMEDOUT) {	/* NAK - so hold for a while */
-				if (!maxretry--) {
-					errn = -ETIME;
-					goto error;
-				}
-				prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE);
-				schedule_timeout(NAK_TIMEOUT);
-				finish_wait(&rio->wait_q, &wait);
-				continue;
-			} else if (!result && partial) {
-				obuf += partial;
-				thistime -= partial;
-			} else
-				break;
-		}
-		if (result) {
-			dev_err(&rio->rio_dev->dev, "Write Whoops - %x\n",
-				result);
-			errn = -EIO;
-			goto error;
-		}
-		bytes_written += copy_size;
-		count -= copy_size;
-		buffer += copy_size;
-	} while (count > 0);
-
-	mutex_unlock(&(rio->lock));
-
-	return bytes_written ? bytes_written : -EIO;
-
-error:
-	mutex_unlock(&(rio->lock));
-	return errn;
-}
-
-static ssize_t
-read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
-{
-	DEFINE_WAIT(wait);
-	struct rio_usb_data *rio = &rio_instance;
-	ssize_t read_count;
-	unsigned int partial;
-	int this_read;
-	int result;
-	int maxretry = 10;
-	char *ibuf;
-	int intr;
-
-	intr = mutex_lock_interruptible(&(rio->lock));
-	if (intr)
-		return -EINTR;
-	/* Sanity check to make sure rio is connected, powered, etc */
-        if (rio->present == 0 || rio->rio_dev == NULL) {
-		mutex_unlock(&(rio->lock));
-		return -ENODEV;
-	}
-
-	ibuf = rio->ibuf;
-
-	read_count = 0;
-
-
-	while (count > 0) {
-		if (signal_pending(current)) {
-			mutex_unlock(&(rio->lock));
-			return read_count ? read_count : -EINTR;
-		}
-		if (!rio->rio_dev) {
-			mutex_unlock(&(rio->lock));
-			return -ENODEV;
-		}
-		this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count;
-
-		result = usb_bulk_msg(rio->rio_dev,
-				      usb_rcvbulkpipe(rio->rio_dev, 1),
-				      ibuf, this_read, &partial,
-				      8000);
-
-		dev_dbg(&rio->rio_dev->dev,
-			"read stats: result:%d this_read:%u partial:%u\n",
-			result, this_read, partial);
-
-		if (partial) {
-			count = this_read = partial;
-		} else if (result == -ETIMEDOUT || result == 15) {	/* FIXME: 15 ??? */
-			if (!maxretry--) {
-				mutex_unlock(&(rio->lock));
-				dev_err(&rio->rio_dev->dev,
-					"read_rio: maxretry timeout\n");
-				return -ETIME;
-			}
-			prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE);
-			schedule_timeout(NAK_TIMEOUT);
-			finish_wait(&rio->wait_q, &wait);
-			continue;
-		} else if (result != -EREMOTEIO) {
-			mutex_unlock(&(rio->lock));
-			dev_err(&rio->rio_dev->dev,
-				"Read Whoops - result:%d partial:%u this_read:%u\n",
-				result, partial, this_read);
-			return -EIO;
-		} else {
-			mutex_unlock(&(rio->lock));
-			return (0);
-		}
-
-		if (this_read) {
-			if (copy_to_user(buffer, ibuf, this_read)) {
-				mutex_unlock(&(rio->lock));
-				return -EFAULT;
-			}
-			count -= this_read;
-			read_count += this_read;
-			buffer += this_read;
-		}
-	}
-	mutex_unlock(&(rio->lock));
-	return read_count;
-}
-
-static const struct file_operations usb_rio_fops = {
-	.owner =	THIS_MODULE,
-	.read =		read_rio,
-	.write =	write_rio,
-	.unlocked_ioctl = ioctl_rio,
-	.open =		open_rio,
-	.release =	close_rio,
-	.llseek =	noop_llseek,
-};
-
-static struct usb_class_driver usb_rio_class = {
-	.name =		"rio500%d",
-	.fops =		&usb_rio_fops,
-	.minor_base =	RIO_MINOR,
-};
-
-static int probe_rio(struct usb_interface *intf,
-		     const struct usb_device_id *id)
-{
-	struct usb_device *dev = interface_to_usbdev(intf);
-	struct rio_usb_data *rio = &rio_instance;
-	int retval = 0;
-
-	mutex_lock(&rio500_mutex);
-	if (rio->present) {
-		dev_info(&intf->dev, "Second USB Rio at address %d refused\n", dev->devnum);
-		retval = -EBUSY;
-		goto bail_out;
-	} else {
-		dev_info(&intf->dev, "USB Rio found at address %d\n", dev->devnum);
-	}
-
-	retval = usb_register_dev(intf, &usb_rio_class);
-	if (retval) {
-		dev_err(&dev->dev,
-			"Not able to get a minor for this device.\n");
-		retval = -ENOMEM;
-		goto bail_out;
-	}
-
-	rio->rio_dev = dev;
-
-	if (!(rio->obuf = kmalloc(OBUF_SIZE, GFP_KERNEL))) {
-		dev_err(&dev->dev,
-			"probe_rio: Not enough memory for the output buffer\n");
-		usb_deregister_dev(intf, &usb_rio_class);
-		retval = -ENOMEM;
-		goto bail_out;
-	}
-	dev_dbg(&intf->dev, "obuf address:%p\n", rio->obuf);
-
-	if (!(rio->ibuf = kmalloc(IBUF_SIZE, GFP_KERNEL))) {
-		dev_err(&dev->dev,
-			"probe_rio: Not enough memory for the input buffer\n");
-		usb_deregister_dev(intf, &usb_rio_class);
-		kfree(rio->obuf);
-		retval = -ENOMEM;
-		goto bail_out;
-	}
-	dev_dbg(&intf->dev, "ibuf address:%p\n", rio->ibuf);
-
-	mutex_init(&(rio->lock));
-
-	usb_set_intfdata (intf, rio);
-	rio->present = 1;
-bail_out:
-	mutex_unlock(&rio500_mutex);
-
-	return retval;
-}
-
-static void disconnect_rio(struct usb_interface *intf)
-{
-	struct rio_usb_data *rio = usb_get_intfdata (intf);
-
-	usb_set_intfdata (intf, NULL);
-	mutex_lock(&rio500_mutex);
-	if (rio) {
-		usb_deregister_dev(intf, &usb_rio_class);
-
-		mutex_lock(&(rio->lock));
-		if (rio->isopen) {
-			rio->isopen = 0;
-			/* better let it finish - the release will do whats needed */
-			rio->rio_dev = NULL;
-			mutex_unlock(&(rio->lock));
-			mutex_unlock(&rio500_mutex);
-			return;
-		}
-		kfree(rio->ibuf);
-		kfree(rio->obuf);
-
-		dev_info(&intf->dev, "USB Rio disconnected.\n");
-
-		rio->present = 0;
-		mutex_unlock(&(rio->lock));
-	}
-	mutex_unlock(&rio500_mutex);
-}
-
-static const struct usb_device_id rio_table[] = {
-	{ USB_DEVICE(0x0841, 1) }, 		/* Rio 500 */
-	{ }					/* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE (usb, rio_table);
-
-static struct usb_driver rio_driver = {
-	.name =		"rio500",
-	.probe =	probe_rio,
-	.disconnect =	disconnect_rio,
-	.id_table =	rio_table,
-};
-
-module_usb_driver(rio_driver);
-
-MODULE_AUTHOR( DRIVER_AUTHOR );
-MODULE_DESCRIPTION( DRIVER_DESC );
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/usb/misc/rio500_usb.h b/drivers/usb/misc/rio500_usb.h
deleted file mode 100644
index 6db7a58..0000000
--- a/drivers/usb/misc/rio500_usb.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*  ----------------------------------------------------------------------
-    Copyright (C) 2000  Cesar Miquel  (miquel@df.uba.ar)
-    ---------------------------------------------------------------------- */
-
-#define RIO_SEND_COMMAND			0x1
-#define RIO_RECV_COMMAND			0x2
-
-#define RIO_DIR_OUT               	        0x0
-#define RIO_DIR_IN				0x1
-
-struct RioCommand {
-	short length;
-	int request;
-	int requesttype;
-	int value;
-	int index;
-	void __user *buffer;
-	int timeout;
-};
diff --git a/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c b/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c
index 3c97e40..3d75aa1 100644
--- a/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c
+++ b/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c
@@ -445,8 +445,7 @@ static int ssusb_redriver_vbus_notifier(struct notifier_block *nb,
 
 	redriver->vbus_active = event;
 
-	if (redriver->vbus_active)
-		queue_work(redriver->redriver_wq, &redriver->config_work);
+	queue_work(redriver->redriver_wq, &redriver->config_work);
 
 	return NOTIFY_DONE;
 }
@@ -466,8 +465,7 @@ static int ssusb_redriver_id_notifier(struct notifier_block *nb,
 
 	redriver->host_active = host_active;
 
-	if (redriver->host_active)
-		queue_work(redriver->redriver_wq, &redriver->config_work);
+	queue_work(redriver->redriver_wq, &redriver->config_work);
 
 	return NOTIFY_DONE;
 }
@@ -1144,8 +1142,8 @@ static int __maybe_unused redriver_i2c_suspend(struct device *dev)
 			__func__);
 
 	/* Disable redriver chip when USB cable disconnected */
-	if ((!redriver->vbus_active)
-			&& (!redriver->host_active))
+	if (!redriver->vbus_active && !redriver->host_active &&
+	    redriver->op_mode != OP_MODE_DP)
 		ssusb_redriver_gen_dev_set(redriver, false);
 
 	flush_workqueue(redriver->redriver_wq);
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 9ba4a4e..aa982d3 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/mutex.h>
+#include <linux/rwsem.h>
 #include <linux/uaccess.h>
 #include <linux/usb.h>
 
@@ -57,6 +58,8 @@ struct usb_lcd {
 							   using up all RAM */
 	struct usb_anchor	submitted;		/* URBs to wait for
 							   before suspend */
+	struct rw_semaphore	io_rwsem;
+	unsigned long		disconnected:1;
 };
 #define to_lcd_dev(d) container_of(d, struct usb_lcd, kref)
 
@@ -142,6 +145,13 @@ static ssize_t lcd_read(struct file *file, char __user * buffer,
 
 	dev = file->private_data;
 
+	down_read(&dev->io_rwsem);
+
+	if (dev->disconnected) {
+		retval = -ENODEV;
+		goto out_up_io;
+	}
+
 	/* do a blocking bulk read to get data from the device */
 	retval = usb_bulk_msg(dev->udev,
 			      usb_rcvbulkpipe(dev->udev,
@@ -158,6 +168,9 @@ static ssize_t lcd_read(struct file *file, char __user * buffer,
 			retval = bytes_read;
 	}
 
+out_up_io:
+	up_read(&dev->io_rwsem);
+
 	return retval;
 }
 
@@ -237,11 +250,18 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
 	if (r < 0)
 		return -EINTR;
 
+	down_read(&dev->io_rwsem);
+
+	if (dev->disconnected) {
+		retval = -ENODEV;
+		goto err_up_io;
+	}
+
 	/* create a urb, and a buffer for it, and copy the data to the urb */
 	urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!urb) {
 		retval = -ENOMEM;
-		goto err_no_buf;
+		goto err_up_io;
 	}
 
 	buf = usb_alloc_coherent(dev->udev, count, GFP_KERNEL,
@@ -278,6 +298,7 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
 	   the USB core will eventually free it entirely */
 	usb_free_urb(urb);
 
+	up_read(&dev->io_rwsem);
 exit:
 	return count;
 error_unanchor:
@@ -285,7 +306,8 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
 error:
 	usb_free_coherent(dev->udev, count, buf, urb->transfer_dma);
 	usb_free_urb(urb);
-err_no_buf:
+err_up_io:
+	up_read(&dev->io_rwsem);
 	up(&dev->limit_sem);
 	return retval;
 }
@@ -325,6 +347,7 @@ static int lcd_probe(struct usb_interface *interface,
 
 	kref_init(&dev->kref);
 	sema_init(&dev->limit_sem, USB_LCD_CONCURRENT_WRITES);
+	init_rwsem(&dev->io_rwsem);
 	init_usb_anchor(&dev->submitted);
 
 	dev->udev = usb_get_dev(interface_to_usbdev(interface));
@@ -422,6 +445,12 @@ static void lcd_disconnect(struct usb_interface *interface)
 	/* give back our minor */
 	usb_deregister_dev(interface, &lcd_class);
 
+	down_write(&dev->io_rwsem);
+	dev->disconnected = 1;
+	up_write(&dev->io_rwsem);
+
+	usb_kill_anchored_urbs(&dev->submitted);
+
 	/* decrement our usage count */
 	kref_put(&dev->kref, lcd_delete);
 
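The usblcd fix uses a classic reader/writer split: every I/O path takes io_rwsem for reading, so transfers still run concurrently with one another, while disconnect takes it for writing, which drains all in-flight I/O before the flag flips and the anchored URBs are killed. A compact sketch of the scheme, with assumed names:

    #include <linux/rwsem.h>

    struct my_lcd {
    	struct rw_semaphore io_rwsem;
    	unsigned long disconnected:1;
    };

    static int my_io(struct my_lcd *dev)
    {
    	int ret = 0;

    	down_read(&dev->io_rwsem);	/* shared: I/O paths run in parallel */
    	if (dev->disconnected) {
    		ret = -ENODEV;
    		goto out;
    	}
    	/* ... perform the transfer; the device cannot vanish here ... */
    out:
    	up_read(&dev->io_rwsem);
    	return ret;
    }

    static void my_disconnect_mark(struct my_lcd *dev)
    {
    	down_write(&dev->io_rwsem);	/* exclusive: waits out all readers */
    	dev->disconnected = 1;
    	up_write(&dev->io_rwsem);
    }
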
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 6715a12..be0505b 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -60,6 +60,7 @@ struct usb_yurex {
 
 	struct kref		kref;
 	struct mutex		io_mutex;
+	unsigned long		disconnected:1;
 	struct fasync_struct	*async_queue;
 	wait_queue_head_t	waitq;
 
@@ -107,6 +108,7 @@ static void yurex_delete(struct kref *kref)
 				dev->int_buffer, dev->urb->transfer_dma);
 		usb_free_urb(dev->urb);
 	}
+	usb_put_intf(dev->interface);
 	usb_put_dev(dev->udev);
 	kfree(dev);
 }
@@ -132,6 +134,7 @@ static void yurex_interrupt(struct urb *urb)
 	switch (status) {
 	case 0: /*success*/
 		break;
+	/* The device is terminated or messed up, give up */
 	case -EOVERFLOW:
 		dev_err(&dev->interface->dev,
 			"%s - overflow with length %d, actual length is %d\n",
@@ -140,12 +143,13 @@ static void yurex_interrupt(struct urb *urb)
 	case -ENOENT:
 	case -ESHUTDOWN:
 	case -EILSEQ:
-		/* The device is terminated, clean up */
+	case -EPROTO:
+	case -ETIME:
 		return;
 	default:
 		dev_err(&dev->interface->dev,
 			"%s - unknown status received: %d\n", __func__, status);
-		goto exit;
+		return;
 	}
 
 	/* handle received message */
@@ -177,7 +181,6 @@ static void yurex_interrupt(struct urb *urb)
 		break;
 	}
 
-exit:
 	retval = usb_submit_urb(dev->urb, GFP_ATOMIC);
 	if (retval) {
 		dev_err(&dev->interface->dev, "%s - usb_submit_urb failed: %d\n",
@@ -204,7 +207,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
 	init_waitqueue_head(&dev->waitq);
 
 	dev->udev = usb_get_dev(interface_to_usbdev(interface));
-	dev->interface = interface;
+	dev->interface = usb_get_intf(interface);
 
 	/* set up the endpoint information */
 	iface_desc = interface->cur_altsetting;
@@ -315,8 +318,9 @@ static void yurex_disconnect(struct usb_interface *interface)
 
 	/* prevent more I/O from starting */
 	usb_poison_urb(dev->urb);
+	usb_poison_urb(dev->cntl_urb);
 	mutex_lock(&dev->io_mutex);
-	dev->interface = NULL;
+	dev->disconnected = 1;
 	mutex_unlock(&dev->io_mutex);
 
 	/* wakeup waiters */
@@ -404,7 +408,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
 	dev = file->private_data;
 
 	mutex_lock(&dev->io_mutex);
-	if (!dev->interface) {		/* already disconnected */
+	if (dev->disconnected) {		/* already disconnected */
 		mutex_unlock(&dev->io_mutex);
 		return -ENODEV;
 	}
@@ -439,7 +443,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
 		goto error;
 
 	mutex_lock(&dev->io_mutex);
-	if (!dev->interface) {		/* already disconnected */
+	if (dev->disconnected) {		/* already disconnected */
 		mutex_unlock(&dev->io_mutex);
 		retval = -ENODEV;
 		goto error;
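
yurex's interrupt handler now also gives up, without resubmitting, on -EPROTO and -ETIME, since those typically mean the hardware is gone or misbehaving and a resubmit would only spin. A generic triage sketch (the case list below is illustrative, not yurex's verbatim set):

    #include <linux/usb.h>

    static void my_interrupt(struct urb *urb)
    {
    	switch (urb->status) {
    	case 0:
    		break;		/* success: handle the data, then resubmit */
    	case -ENOENT:		/* URB unlinked */
    	case -ESHUTDOWN:	/* device or host going away */
    	case -EILSEQ:		/* hardware gone or babbling: */
    	case -EPROTO:		/* resubmitting would just loop, */
    	case -ETIME:		/* so stop here */
    	default:		/* unknown errors: also give up */
    		return;
    	}

    	/* ... process urb->transfer_buffer ... */
    	usb_submit_urb(urb, GFP_ATOMIC);
    }
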
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index f3ff59d..df24403 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -442,7 +442,6 @@ struct usbpd {
 	struct regulator	*vconn;
 	bool			vbus_enabled;
 	bool			vconn_enabled;
-	bool			vconn_is_external;
 
 	u8			tx_msgid[SOPII_MSG + 1];
 	u8			rx_msgid[SOPII_MSG + 1];
@@ -516,6 +515,21 @@ enum plug_orientation usbpd_get_plug_orientation(struct usbpd *pd)
 }
 EXPORT_SYMBOL(usbpd_get_plug_orientation);
 
+static int get_connector_type(struct usbpd *pd)
+{
+	int ret;
+	union power_supply_propval val;
+
+	ret = power_supply_get_property(pd->usb_psy,
+		POWER_SUPPLY_PROP_CONNECTOR_TYPE, &val);
+	if (ret) {
+		dev_err(&pd->dev, "Unable to read CONNECTOR TYPE: %d\n", ret);
+		return ret;
+	}
+
+	return val.intval;
+}
+
 static inline void stop_usb_host(struct usbpd *pd)
 {
 	extcon_set_state_sync(pd->extcon, EXTCON_USB_HOST, 0);
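
Because get_connector_type() folds an errno and a power-supply enum into one return value, callers must test the sign before comparing against connector types. A sketch of the intended call pattern (register_port_if_typec() is a hypothetical wrapper around the probe-time logic further below):

    static int register_port_if_typec(struct usbpd *pd)
    {
    	int type = get_connector_type(pd);

    	if (type < 0)
    		return type;	/* property read failed: propagate errno */

    	/* micro-USB connectors have no Type-C port to register */
    	if (type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
    		return 0;

    	/* ... typec_register_port(parent, &pd->typec_caps) ... */
    	return 0;
    }
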
@@ -831,11 +845,6 @@ static int pd_select_pdo(struct usbpd *pd, int pdo_pos, int uv, int ua)
 		return -ENOTSUPP;
 	}
 
-	/* Can't sink more than 5V if VCONN is sourced from the VBUS input */
-	if (pd->vconn_enabled && !pd->vconn_is_external &&
-			pd->requested_voltage > 5000000)
-		return -ENOTSUPP;
-
 	pd->requested_current = curr;
 	pd->requested_pdo = pdo_pos;
 
@@ -899,6 +908,7 @@ static void pd_send_hard_reset(struct usbpd *pd)
 	pd->hard_reset_count++;
 	pd_phy_signal(HARD_RESET_SIG);
 	pd->in_pr_swap = false;
+	pd->pd_connected = false;
 	power_supply_set_property(pd->usb_psy, POWER_SUPPLY_PROP_PR_SWAP, &val);
 }
 
@@ -1295,7 +1305,9 @@ int usbpd_register_svid(struct usbpd *pd, struct usbpd_svid_handler *hdlr)
 
 		for (i = 0; i < pd->num_svids; i++) {
 			if (pd->discovered_svids[i] == hdlr->svid) {
-				hdlr->connect(hdlr);
+				usbpd_dbg(&pd->dev, "Notify SVID: 0x%04x connect\n",
+						hdlr->svid);
+				hdlr->connect(hdlr, pd->peer_usb_comm);
 				hdlr->discovered = true;
 				break;
 			}
@@ -1491,7 +1503,10 @@ static void handle_vdm_resp_ack(struct usbpd *pd, u32 *vdos, u8 num_vdos,
 			if (svid) {
 				handler = find_svid_handler(pd, svid);
 				if (handler) {
-					handler->connect(handler);
+					usbpd_dbg(&pd->dev, "Notify SVID: 0x%04x connect\n",
+							handler->svid);
+					handler->connect(handler,
+							pd->peer_usb_comm);
 					handler->discovered = true;
 				}
 			}
@@ -1634,8 +1649,11 @@ static void handle_vdm_tx(struct usbpd *pd, enum pd_sop_type sop_type)
 	u32 vdm_hdr;
 	int ret;
 
-	if (!pd->vdm_tx)
+	mutex_lock(&pd->svid_handler_lock);
+	if (!pd->vdm_tx) {
+		mutex_unlock(&pd->svid_handler_lock);
 		return;
+	}
 
 	/* only send one VDM at a time */
 	vdm_hdr = pd->vdm_tx->data[0];
@@ -1649,6 +1667,7 @@ static void handle_vdm_tx(struct usbpd *pd, enum pd_sop_type sop_type)
 		pd->current_pr == PR_SRC && !in_src_ams(pd)) {
 		/* Set SinkTxNG and reschedule sm_work to send again */
 		start_src_ams(pd, true);
+		mutex_unlock(&pd->svid_handler_lock);
 		return;
 	}
 
@@ -1658,6 +1677,7 @@ static void handle_vdm_tx(struct usbpd *pd, enum pd_sop_type sop_type)
 		usbpd_err(&pd->dev, "Error (%d) sending VDM command %d\n",
 				ret, SVDM_HDR_CMD(pd->vdm_tx->data[0]));
 
+		mutex_unlock(&pd->svid_handler_lock);
 		/* retry when hitting PE_SRC/SNK_Ready again */
 		if (ret != -EBUSY && sop_type == SOP_MSG)
 			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
@@ -1690,6 +1710,7 @@ static void handle_vdm_tx(struct usbpd *pd, enum pd_sop_type sop_type)
 	}
 
 	pd->vdm_tx = NULL;
+	mutex_unlock(&pd->svid_handler_lock);
 }
 
 static void reset_vdm_state(struct usbpd *pd)
@@ -1699,12 +1720,13 @@ static void reset_vdm_state(struct usbpd *pd)
 	mutex_lock(&pd->svid_handler_lock);
 	list_for_each_entry(handler, &pd->svid_handlers, entry) {
 		if (handler->discovered) {
+			usbpd_dbg(&pd->dev, "Notify SVID: 0x%04x disconnect\n",
+					handler->svid);
 			handler->disconnect(handler);
 			handler->discovered = false;
 		}
 	}
 
-	mutex_unlock(&pd->svid_handler_lock);
 	pd->vdm_state = VDM_NONE;
 	kfree(pd->vdm_tx_retry);
 	pd->vdm_tx_retry = NULL;
@@ -1715,6 +1737,7 @@ static void reset_vdm_state(struct usbpd *pd)
 	pd->vdm_tx = NULL;
 	pd->ss_lane_svid = 0x0;
 	pd->vdm_in_suspend = false;
+	mutex_unlock(&pd->svid_handler_lock);
 }
 
 static void handle_get_src_cap_extended(struct usbpd *pd)
@@ -2213,6 +2236,10 @@ static void handle_state_src_send_capabilities(struct usbpd *pd,
 	ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES, default_src_caps,
 			ARRAY_SIZE(default_src_caps), SOP_MSG);
 	if (ret) {
+		if (pd->pd_connected) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
 		/*
 		 * Technically this is PE_SRC_Discovery, but we can
 		 * handle it by setting a timer to come back to the
@@ -2850,21 +2877,6 @@ static bool handle_ctrl_snk_ready(struct usbpd *pd, struct rx_msg *rx_msg)
 		usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
 		break;
 	case MSG_VCONN_SWAP:
-		/*
-		 * if VCONN is connected to VBUS, make sure we are
-		 * not in high voltage contract, otherwise reject.
-		 */
-		if (!pd->vconn_is_external &&
-				(pd->requested_voltage > 5000000)) {
-			ret = pd_send_msg(pd, MSG_REJECT, NULL, 0,
-					SOP_MSG);
-			if (ret)
-				usbpd_set_state(pd,
-						PE_SEND_SOFT_RESET);
-
-			break;
-		}
-
 		ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
 		if (ret) {
 			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
@@ -4673,9 +4685,6 @@ struct usbpd *usbpd_create(struct device *parent)
 	extcon_set_property_capability(pd->extcon, EXTCON_USB_HOST,
 			EXTCON_PROP_USB_SS);
 
-	pd->vconn_is_external = device_property_present(parent,
-					"qcom,vconn-uses-external-source");
-
 	pd->num_sink_caps = device_property_read_u32_array(parent,
 			"qcom,default-sink-caps", NULL, 0);
 	if (pd->num_sink_caps > 0) {
@@ -4727,10 +4736,17 @@ struct usbpd *usbpd_create(struct device *parent)
 	pd->typec_caps.port_type_set = usbpd_typec_port_type_set;
 	pd->partner_desc.identity = &pd->partner_identity;
 
-	pd->typec_port = typec_register_port(parent, &pd->typec_caps);
-	if (IS_ERR(pd->typec_port)) {
-		usbpd_err(&pd->dev, "could not register typec port\n");
+	ret = get_connector_type(pd);
+	if (ret < 0)
 		goto put_psy;
+
+	/* For non-Type-C connectors, the port is handled elsewhere */
+	if (ret != POWER_SUPPLY_CONNECTOR_MICRO_USB) {
+		pd->typec_port = typec_register_port(parent, &pd->typec_caps);
+		if (IS_ERR(pd->typec_port)) {
+			usbpd_err(&pd->dev, "could not register typec port\n");
+			goto put_psy;
+		}
 	}
 
 	pd->current_pr = PR_NONE;
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index 44cd6ec..ce1afb0 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -11,6 +11,7 @@
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/of.h>
+#include <linux/debugfs.h>
 #include <linux/platform_device.h>
 #include <linux/power_supply.h>
 #include <linux/regulator/consumer.h>
@@ -91,30 +92,6 @@
 
 #define HSTX_TRIMSIZE			4
 
-static unsigned int tune1;
-module_param(tune1, uint, 0644);
-MODULE_PARM_DESC(tune1, "QUSB PHY TUNE1");
-
-static unsigned int tune2;
-module_param(tune2, uint, 0644);
-MODULE_PARM_DESC(tune2, "QUSB PHY TUNE2");
-
-static unsigned int tune3;
-module_param(tune3, uint, 0644);
-MODULE_PARM_DESC(tune3, "QUSB PHY TUNE3");
-
-static unsigned int tune4;
-module_param(tune4, uint, 0644);
-MODULE_PARM_DESC(tune4, "QUSB PHY TUNE4");
-
-static unsigned int tune5;
-module_param(tune5, uint, 0644);
-MODULE_PARM_DESC(tune5, "QUSB PHY TUNE5");
-
-static bool eud_connected;
-module_param(eud_connected, bool, 0644);
-MODULE_PARM_DESC(eud_connected, "EUD_CONNECTED");
-
 struct qusb_phy {
 	struct usb_phy		phy;
 	void __iomem		*base;
@@ -122,6 +99,7 @@ struct qusb_phy {
 	void __iomem		*ref_clk_base;
 	void __iomem		*tcsr_clamp_dig_n;
 	void __iomem		*tcsr_conn_box_spare;
+	void __iomem		*eud_enable_reg;
 
 	struct clk		*ref_clk_src;
 	struct clk		*ref_clk;
@@ -155,17 +133,16 @@ struct qusb_phy {
 	struct regulator_desc	dpdm_rdesc;
 	struct regulator_dev	*dpdm_rdev;
 
-	/* emulation targets specific */
-	void __iomem		*emu_phy_base;
-	bool			emulation;
-	int			*emu_init_seq;
-	int			emu_init_seq_len;
-	int			*phy_pll_reset_seq;
-	int			phy_pll_reset_seq_len;
-	int			*emu_dcm_reset_seq;
-	int			emu_dcm_reset_seq_len;
 	bool			put_into_high_z_state;
 	struct mutex		phy_lock;
+
+	/* debugfs entries */
+	struct dentry		*root;
+	u8			tune1;
+	u8			tune2;
+	u8			tune3;
+	u8			tune4;
+	u8			tune5;
 };
 
 static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
@@ -426,21 +403,13 @@ static int qusb_phy_init(struct usb_phy *phy)
 	u8 reg;
 	bool pll_lock_fail = false;
 
-	/*
-	 * If eud is enabled eud_connected parameter will be set to true.
-	 * Phy init will be called when spoof attach is done but it will
-	 * break the eud functionality by powering down the phy and
-	 * re-initializing it. Hence, bail out early from phy init when
-	 * eud_connected param is set to true.
-	 */
-	if (eud_connected) {
-		pr_debug("eud_connected is true so bailing out early from %s\n",
-				__func__);
+	dev_dbg(phy->dev, "%s\n", __func__);
+
+	if (qphy->eud_enable_reg && readl_relaxed(qphy->eud_enable_reg)) {
+		dev_err(qphy->phy.dev, "eud is enabled\n");
 		return 0;
 	}
 
-	dev_dbg(phy->dev, "%s\n", __func__);
-
 	/*
 	 * ref clock is enabled by default after power on reset. Linux clock
 	 * driver will disable this clock as part of late init if peripheral
@@ -467,30 +436,6 @@ static int qusb_phy_init(struct usb_phy *phy)
 	if (ret)
 		dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__);
 
-	if (qphy->emulation) {
-		if (qphy->emu_init_seq)
-			qusb_phy_write_seq(qphy->emu_phy_base,
-				qphy->emu_init_seq, qphy->emu_init_seq_len, 0);
-
-		if (qphy->qusb_phy_init_seq)
-			qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
-					qphy->init_seq_len, 0);
-
-		/* Wait for 5ms as per QUSB2 RUMI sequence */
-		usleep_range(5000, 7000);
-
-		if (qphy->phy_pll_reset_seq)
-			qusb_phy_write_seq(qphy->base, qphy->phy_pll_reset_seq,
-					qphy->phy_pll_reset_seq_len, 10000);
-
-		if (qphy->emu_dcm_reset_seq)
-			qusb_phy_write_seq(qphy->emu_phy_base,
-					qphy->emu_dcm_reset_seq,
-					qphy->emu_dcm_reset_seq_len, 10000);
-
-		return 0;
-	}
-
 	/* Disable the PHY */
 	if (qphy->major_rev < 2)
 		writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN,
@@ -517,7 +462,7 @@ static int qusb_phy_init(struct usb_phy *phy)
 	 * and try to read EFUSE value only once i.e. not every USB
 	 * cable connect case.
 	 */
-	if (qphy->tune2_efuse_reg && !tune2) {
+	if (qphy->tune2_efuse_reg && !qphy->tune2) {
 		if (!qphy->tune2_val)
 			qusb_phy_get_tune2_param(qphy);
 
@@ -528,28 +473,30 @@ static int qusb_phy_init(struct usb_phy *phy)
 	}
 
 	/* If a debugfs tune value is set, override the tune value */
-
-	pr_debug("%s():userspecified modparams TUNEX val:0x%x %x %x %x %x\n",
-				__func__, tune1, tune2, tune3, tune4, tune5);
-	if (tune1)
-		writel_relaxed(tune1,
+	if (qphy->tune1) {
+		writel_relaxed(qphy->tune1,
 				qphy->base + QUSB2PHY_PORT_TUNE1);
+	}
 
-	if (tune2)
-		writel_relaxed(tune2,
+	if (qphy->tune2) {
+		writel_relaxed(qphy->tune2,
 				qphy->base + QUSB2PHY_PORT_TUNE2);
+	}
 
-	if (tune3)
-		writel_relaxed(tune3,
+	if (qphy->tune3) {
+		writel_relaxed(qphy->tune3,
 				qphy->base + QUSB2PHY_PORT_TUNE3);
+	}
 
-	if (tune4)
-		writel_relaxed(tune4,
+	if (qphy->tune4) {
+		writel_relaxed(qphy->tune4,
 				qphy->base + QUSB2PHY_PORT_TUNE4);
+	}
 
-	if (tune5)
-		writel_relaxed(tune5,
+	if (qphy->tune5) {
+		writel_relaxed(qphy->tune5,
 				qphy->base + QUSB2PHY_PORT_TUNE5);
+	}
 
 	/* ensure above writes are completed before re-enabling PHY */
 	wmb();
@@ -700,19 +647,15 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
 			writel_relaxed(0x00,
 				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
 
-			if (!eud_connected) {
-				/* Disable PHY */
-				writel_relaxed(POWER_DOWN |
-					readl_relaxed(qphy->base +
-						QUSB2PHY_PORT_POWERDOWN),
+			/* Disable PHY */
+			writel_relaxed(POWER_DOWN | readl_relaxed(qphy->base +
+					QUSB2PHY_PORT_POWERDOWN),
 					qphy->base + QUSB2PHY_PORT_POWERDOWN);
-				/* Make sure that above write is completed */
-				wmb();
+			/* Make sure that above write is completed */
+			wmb();
 
-				if (qphy->tcsr_clamp_dig_n)
-					writel_relaxed(0x0,
-						qphy->tcsr_clamp_dig_n);
-			}
+			if (qphy->tcsr_clamp_dig_n)
+				writel_relaxed(0x0, qphy->tcsr_clamp_dig_n);
 
 			qusb_phy_enable_clocks(qphy, false);
 			qusb_phy_enable_power(qphy, false);
@@ -780,6 +723,11 @@ static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev)
 	dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
 				__func__, qphy->dpdm_enable);
 
+	if (qphy->eud_enable_reg && readl_relaxed(qphy->eud_enable_reg)) {
+		dev_err(qphy->phy.dev, "eud is enabled\n");
+		return 0;
+	}
+
 	mutex_lock(&qphy->phy_lock);
 	if (!qphy->dpdm_enable) {
 		ret = qusb_phy_enable_power(qphy, true);
@@ -910,6 +858,16 @@ static int qusb_phy_regulator_init(struct qusb_phy *qphy)
 
 }
 
+static void qusb_phy_create_debugfs(struct qusb_phy *qphy)
+{
+	qphy->root = debugfs_create_dir(dev_name(qphy->phy.dev), NULL);
+	debugfs_create_x8("tune1", 0644, qphy->root, &qphy->tune1);
+	debugfs_create_x8("tune2", 0644, qphy->root, &qphy->tune2);
+	debugfs_create_x8("tune3", 0644, qphy->root, &qphy->tune3);
+	debugfs_create_x8("tune4", 0644, qphy->root, &qphy->tune4);
+	debugfs_create_x8("tune5", 0644, qphy->root, &qphy->tune5);
+}
+
 static int qusb_phy_probe(struct platform_device *pdev)
 {
 	struct qusb_phy *qphy;
@@ -932,16 +890,6 @@ static int qusb_phy_probe(struct platform_device *pdev)
 		return PTR_ERR(qphy->base);
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-							"emu_phy_base");
-	if (res) {
-		qphy->emu_phy_base = devm_ioremap_resource(dev, res);
-		if (IS_ERR(qphy->emu_phy_base)) {
-			dev_dbg(dev, "couldn't ioremap emu_phy_base\n");
-			qphy->emu_phy_base = NULL;
-		}
-	}
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 							"tune2_efuse_addr");
 	if (res) {
 		qphy->tune2_efuse_reg = devm_ioremap_nocache(dev, res->start,
@@ -967,6 +915,16 @@ static int qusb_phy_probe(struct platform_device *pdev)
 	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"eud_enable_reg");
+	if (res) {
+		qphy->eud_enable_reg = devm_ioremap_resource(dev, res);
+		if (IS_ERR(qphy->eud_enable_reg)) {
+			dev_err(dev, "err getting eud_enable_reg address\n");
+			return PTR_ERR(qphy->eud_enable_reg);
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 							"ref_clk_addr");
 	if (res) {
 		qphy->ref_clk_base = devm_ioremap_nocache(dev,
@@ -1085,74 +1043,6 @@ static int qusb_phy_probe(struct platform_device *pdev)
 	if (IS_ERR(qphy->gdsc))
 		qphy->gdsc = NULL;
 
-	qphy->emulation = of_property_read_bool(dev->of_node,
-					"qcom,emulation");
-
-	of_get_property(dev->of_node, "qcom,emu-init-seq", &size);
-	if (size) {
-		qphy->emu_init_seq = devm_kzalloc(dev,
-						size, GFP_KERNEL);
-		if (qphy->emu_init_seq) {
-			qphy->emu_init_seq_len =
-				(size / sizeof(*qphy->emu_init_seq));
-			if (qphy->emu_init_seq_len % 2) {
-				dev_err(dev, "invalid emu_init_seq_len\n");
-				return -EINVAL;
-			}
-
-			of_property_read_u32_array(dev->of_node,
-				"qcom,emu-init-seq",
-				qphy->emu_init_seq,
-				qphy->emu_init_seq_len);
-		} else {
-			dev_dbg(dev, "error allocating memory for emu_init_seq\n");
-		}
-	}
-
-	size = 0;
-	of_get_property(dev->of_node, "qcom,phy-pll-reset-seq", &size);
-	if (size) {
-		qphy->phy_pll_reset_seq = devm_kzalloc(dev,
-						size, GFP_KERNEL);
-		if (qphy->phy_pll_reset_seq) {
-			qphy->phy_pll_reset_seq_len =
-				(size / sizeof(*qphy->phy_pll_reset_seq));
-			if (qphy->phy_pll_reset_seq_len % 2) {
-				dev_err(dev, "invalid phy_pll_reset_seq_len\n");
-				return -EINVAL;
-			}
-
-			of_property_read_u32_array(dev->of_node,
-				"qcom,phy-pll-reset-seq",
-				qphy->phy_pll_reset_seq,
-				qphy->phy_pll_reset_seq_len);
-		} else {
-			dev_dbg(dev, "error allocating memory for phy_pll_reset_seq\n");
-		}
-	}
-
-	size = 0;
-	of_get_property(dev->of_node, "qcom,emu-dcm-reset-seq", &size);
-	if (size) {
-		qphy->emu_dcm_reset_seq = devm_kzalloc(dev,
-						size, GFP_KERNEL);
-		if (qphy->emu_dcm_reset_seq) {
-			qphy->emu_dcm_reset_seq_len =
-				(size / sizeof(*qphy->emu_dcm_reset_seq));
-			if (qphy->emu_dcm_reset_seq_len % 2) {
-				dev_err(dev, "invalid emu_dcm_reset_seq_len\n");
-				return -EINVAL;
-			}
-
-			of_property_read_u32_array(dev->of_node,
-				"qcom,emu-dcm-reset-seq",
-				qphy->emu_dcm_reset_seq,
-				qphy->emu_dcm_reset_seq_len);
-		} else {
-			dev_dbg(dev, "error allocating memory for emu_dcm_reset_seq\n");
-		}
-	}
-
 	size = 0;
 	of_get_property(dev->of_node, "qcom,qusb-phy-init-seq", &size);
 	if (size) {
@@ -1266,6 +1156,8 @@ static int qusb_phy_probe(struct platform_device *pdev)
 
 	qphy->suspended = true;
 
+	qusb_phy_create_debugfs(qphy);
+
 	return ret;
 }
 
@@ -1273,6 +1165,7 @@ static int qusb_phy_remove(struct platform_device *pdev)
 {
 	struct qusb_phy *qphy = platform_get_drvdata(pdev);
 
+	debugfs_remove_recursive(qphy->root);
 	usb_remove_phy(&qphy->phy);
 	qphy->cable_connected = false;
 	qusb_phy_set_suspend(&qphy->phy, true);
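
The removed module parameters were global, so one tuneN value applied to every PHY instance; the debugfs files are per-device (one directory per dev_name()) and writable at runtime, read back on each PHY init. A sketch of the debugfs pattern, with a hypothetical struct:

    #include <linux/debugfs.h>
    #include <linux/types.h>

    struct my_phy {
    	struct dentry *root;
    	u8 tune1;		/* consulted each time the PHY initializes */
    };

    static void my_phy_debugfs_init(struct my_phy *p, const char *name)
    {
    	p->root = debugfs_create_dir(name, NULL);
    	/* hex u8 file, e.g. /sys/kernel/debug/<name>/tune1 */
    	debugfs_create_x8("tune1", 0644, p->root, &p->tune1);
    }

    static void my_phy_debugfs_exit(struct my_phy *p)
    {
    	/* remove before the backing struct is freed */
    	debugfs_remove_recursive(p->root);
    }
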
diff --git a/drivers/usb/phy/phy-msm-snps-hs.c b/drivers/usb/phy/phy-msm-snps-hs.c
index 594c8ef..974231c 100644
--- a/drivers/usb/phy/phy-msm-snps-hs.c
+++ b/drivers/usb/phy/phy-msm-snps-hs.c
@@ -25,6 +25,11 @@
 #define OPMODE_MASK				(0x3 << 3)
 #define OPMODE_NONDRIVING			(0x1 << 3)
 #define SLEEPM					BIT(0)
+#define OPMODE_NORMAL				(0x00)
+#define TERMSEL					BIT(5)
+
+#define USB2_PHY_USB_PHY_UTMI_CTRL1		(0x40)
+#define XCVRSEL					BIT(0)
 
 #define USB2_PHY_USB_PHY_UTMI_CTRL5		(0x50)
 #define POR					BIT(1)
@@ -516,6 +521,62 @@ static int msm_hsphy_notify_disconnect(struct usb_phy *uphy,
 	return 0;
 }
 
+static int msm_hsphy_drive_dp_pulse(struct usb_phy *uphy,
+					unsigned int interval_ms)
+{
+	struct msm_hsphy *phy = container_of(uphy, struct msm_hsphy, phy);
+	int ret;
+
+	ret = msm_hsphy_enable_power(phy, true);
+	if (ret < 0) {
+		dev_dbg(uphy->dev,
+			"dpdm regulator enable failed:%d\n", ret);
+		return ret;
+	}
+	msm_hsphy_enable_clocks(phy, true);
+	/* set utmi_phy_cmn_cntrl_override_en &
+	 * utmi_phy_datapath_ctrl_override_en
+	 */
+	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_CFG0,
+				UTMI_PHY_CMN_CTRL_OVERRIDE_EN,
+				UTMI_PHY_CMN_CTRL_OVERRIDE_EN);
+	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_CFG0,
+				UTMI_PHY_DATAPATH_CTRL_OVERRIDE_EN,
+				UTMI_PHY_DATAPATH_CTRL_OVERRIDE_EN);
+	/* set opmode to normal i.e. 0x0 & termsel to fs */
+	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL0,
+				OPMODE_MASK, OPMODE_NORMAL);
+	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL0,
+				TERMSEL, TERMSEL);
+	/* set xcvrsel to fs */
+	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL1,
+					XCVRSEL, XCVRSEL);
+	msleep(interval_ms);
+	/* clear termsel to fs */
+	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL0,
+				TERMSEL, 0x00);
+	/* clear xcvrsel */
+	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL1,
+					XCVRSEL, 0x00);
+	/* clear utmi_phy_cmn_cntrl_override_en &
+	 * utmi_phy_datapath_ctrl_override_en
+	 */
+	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_CFG0,
+				UTMI_PHY_CMN_CTRL_OVERRIDE_EN, 0x00);
+	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_CFG0,
+				UTMI_PHY_DATAPATH_CTRL_OVERRIDE_EN, 0x00);
+
+	msleep(20);
+
+	msm_hsphy_enable_clocks(phy, false);
+	ret = msm_hsphy_enable_power(phy, false);
+	if (ret < 0) {
+		dev_dbg(uphy->dev,
+			"dpdm regulator disable failed:%d\n", ret);
+	}
+	return 0;
+}
+
 static int msm_hsphy_dpdm_regulator_enable(struct regulator_dev *rdev)
 {
 	int ret = 0;
@@ -788,6 +849,7 @@ static int msm_hsphy_probe(struct platform_device *pdev)
 	phy->phy.notify_connect		= msm_hsphy_notify_connect;
 	phy->phy.notify_disconnect	= msm_hsphy_notify_disconnect;
 	phy->phy.type			= USB_PHY_TYPE_USB2;
+	phy->phy.drive_dp_pulse		= msm_hsphy_drive_dp_pulse;
 
 	ret = usb_add_phy_dev(&phy->phy);
 	if (ret)
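
msm_hsphy_drive_dp_pulse() forces full-speed termination (TERMSEL/XCVRSEL) with the UTMI override bits set, holds it for interval_ms, then restores everything. The register accesses go through msm_usb_write_readback(), which is defined elsewhere in this driver; the body below is therefore an assumption, sketching the usual read-modify-write-readback idiom:

    #include <linux/io.h>
    #include <linux/printk.h>

    static void write_readback(void __iomem *base, u32 offset,
    			   u32 mask, u32 val)
    {
    	u32 tmp = readl_relaxed(base + offset);

    	tmp &= ~mask;			/* clear the field */
    	tmp |= val & mask;		/* set the requested bits */
    	writel_relaxed(tmp, base + offset);

    	/* read back so the write is posted before continuing */
    	tmp = readl_relaxed(base + offset);
    	if ((tmp & mask) != (val & mask))
    		pr_err("%#x: readback mismatch\n", offset);
    }
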
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index 1deae76..4618752 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -16,7 +16,6 @@
 #include <linux/clk.h>
 #include <linux/extcon.h>
 #include <linux/reset.h>
-#include <linux/hrtimer.h>
 
 enum core_ldo_levels {
 	CORE_LEVEL_NONE = 0,
@@ -73,9 +72,6 @@ enum core_ldo_levels {
 #define DP_MODE			BIT(1) /* enables DP mode */
 #define USB3_DP_COMBO_MODE	(USB3_MODE | DP_MODE) /*enables combo mode */
 
-/* USB3 Gen2 link training indicator */
-#define RX_EQUALIZATION_IN_PROGRESS	BIT(3)
-
 enum qmp_phy_rev_reg {
 	USB3_PHY_PCS_STATUS,
 	USB3_PHY_AUTONOMOUS_MODE_CTRL,
@@ -143,7 +139,6 @@ struct msm_ssphy_qmp {
 	int			reg_offset_cnt;
 	u32			*qmp_phy_init_seq;
 	int			init_seq_len;
-	struct hrtimer		timer;
 };
 
 static const struct of_device_id msm_usb_id_table[] = {
@@ -684,7 +679,9 @@ static int msm_ssphy_qmp_set_suspend(struct usb_phy *uphy, int suspend)
 		if (phy->cable_connected) {
 			msm_ssusb_qmp_enable_autonomous(phy, 1);
 		} else {
-			if (uphy->type  == USB_PHY_TYPE_USB3_AND_DP)
+			/* Reset the PHY mode to USB-only when DP is not connected */
+			if (uphy->type  == USB_PHY_TYPE_USB3_AND_DP &&
+				!(phy->phy.flags & PHY_USB_DP_CONCURRENT_MODE))
 				msm_ssphy_qmp_setmode(phy, USB3_MODE);
 			writel_relaxed(0x00,
 			phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
@@ -693,7 +690,6 @@ static int msm_ssphy_qmp_set_suspend(struct usb_phy *uphy, int suspend)
 		/* Make sure above write completed with PHY */
 		wmb();
 
-		hrtimer_cancel(&phy->timer);
 		msm_ssphy_qmp_enable_clks(phy, false);
 		phy->in_suspend = true;
 		msm_ssphy_power_enable(phy, 0);
@@ -718,74 +714,6 @@ static int msm_ssphy_qmp_set_suspend(struct usb_phy *uphy, int suspend)
 	return 0;
 }
 
-static enum hrtimer_restart timer_fn(struct hrtimer *timer)
-{
-	struct msm_ssphy_qmp *phy =
-		container_of(timer, struct msm_ssphy_qmp, timer);
-	u8 status2, status2_1, sw1, mx1, sw2, mx2;
-	int timeout = 15000;
-
-	status2_1 = sw1 = sw2 = mx1 = mx2 = 0;
-
-	status2 = readl_relaxed(phy->base +
-			phy->phy_reg[USB3_DP_PCS_PCS_STATUS2]);
-	if (status2 & RX_EQUALIZATION_IN_PROGRESS) {
-		while (timeout > 0) {
-			status2_1 = readl_relaxed(phy->base +
-					phy->phy_reg[USB3_DP_PCS_PCS_STATUS2]);
-			if (status2_1 & RX_EQUALIZATION_IN_PROGRESS) {
-				timeout -= 500;
-				udelay(500);
-				continue;
-			}
-
-			writel_relaxed(0x08, phy->base +
-				phy->phy_reg[USB3_DP_PCS_INSIG_SW_CTRL3]);
-			writel_relaxed(0x08, phy->base +
-				phy->phy_reg[USB3_DP_PCS_INSIG_MX_CTRL3]);
-			sw1 = readl_relaxed(phy->base +
-				phy->phy_reg[USB3_DP_PCS_INSIG_SW_CTRL3]);
-			mx1 = readl_relaxed(phy->base +
-				phy->phy_reg[USB3_DP_PCS_INSIG_MX_CTRL3]);
-			udelay(1);
-			writel_relaxed(0x0, phy->base +
-				phy->phy_reg[USB3_DP_PCS_INSIG_SW_CTRL3]);
-			writel_relaxed(0x0, phy->base +
-				phy->phy_reg[USB3_DP_PCS_INSIG_MX_CTRL3]);
-			sw2 = readl_relaxed(phy->base +
-				phy->phy_reg[USB3_DP_PCS_INSIG_SW_CTRL3]);
-			mx2 = readl_relaxed(phy->base +
-				phy->phy_reg[USB3_DP_PCS_INSIG_MX_CTRL3]);
-
-			break;
-		}
-	}
-
-	dev_dbg(phy->phy.dev,
-		"st=%x st2=%x sw1=%x sw2=%x mx1=%x mx2=%x timeout=%d\n",
-		status2, status2_1, sw1, sw2, mx1, mx2, timeout);
-
-	hrtimer_forward_now(timer, ms_to_ktime(1));
-
-	return HRTIMER_RESTART;
-}
-
-static int msm_ssphy_qmp_link_training(struct usb_phy *uphy, bool start)
-{
-	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
-					phy);
-
-	if (start) {
-		hrtimer_start(&phy->timer, 0, HRTIMER_MODE_REL);
-		dev_dbg(uphy->dev, "link training start\n");
-	} else {
-		hrtimer_cancel(&phy->timer);
-		dev_dbg(uphy->dev, "link training stop\n");
-	}
-
-	return 0;
-}
-
 static int msm_ssphy_qmp_notify_connect(struct usb_phy *uphy,
 				       enum usb_device_speed speed)
 {
@@ -808,7 +736,6 @@ static int msm_ssphy_qmp_notify_disconnect(struct usb_phy *uphy,
 		phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
 	readl_relaxed(phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
 
-	hrtimer_cancel(&phy->timer);
 	dev_dbg(uphy->dev, "QMP phy disconnect notification\n");
 	dev_dbg(uphy->dev, " cable_connected=%d\n", phy->cable_connected);
 	phy->cable_connected = false;
@@ -1179,19 +1106,12 @@ static int msm_ssphy_qmp_probe(struct platform_device *pdev)
 	if (of_property_read_bool(dev->of_node, "qcom,vbus-valid-override"))
 		phy->phy.flags |= PHY_VBUS_VALID_OVERRIDE;
 
-	hrtimer_init(&phy->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	phy->timer.function = timer_fn;
-
 	phy->phy.dev			= dev;
 	phy->phy.init			= msm_ssphy_qmp_init;
 	phy->phy.set_suspend		= msm_ssphy_qmp_set_suspend;
 	phy->phy.notify_connect		= msm_ssphy_qmp_notify_connect;
 	phy->phy.notify_disconnect	= msm_ssphy_qmp_notify_disconnect;
 
-	if (of_property_read_bool(dev->of_node, "qcom,link-training-reset"))
-		phy->phy.link_training	= msm_ssphy_qmp_link_training;
-
-
 	if (phy->phy.type == USB_PHY_TYPE_USB3_AND_DP)
 		phy->phy.reset		= msm_ssphy_qmp_dp_combo_reset;
 	else
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index 6137f79..c47b721 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -207,6 +207,7 @@ struct usbhs_priv;
 /* DCPCTR */
 #define BSTS		(1 << 15)	/* Buffer Status */
 #define SUREQ		(1 << 14)	/* Sending SETUP Token */
+#define INBUFM		(1 << 14)	/* (PIPEnCTR) Transfer Buffer Monitor */
 #define CSSTS		(1 << 12)	/* CSSTS Status */
 #define	ACLRM		(1 << 9)	/* Buffer Auto-Clear Mode */
 #define SQCLR		(1 << 8)	/* Toggle Bit Clear */
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 6036cba..aeb53ec 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -89,7 +89,7 @@ static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
 	list_del_init(&pkt->node);
 }
 
-static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
+struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
 {
 	return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node);
 }
diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
index 88d1816..c3d3cc3 100644
--- a/drivers/usb/renesas_usbhs/fifo.h
+++ b/drivers/usb/renesas_usbhs/fifo.h
@@ -97,5 +97,6 @@ void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
 		    void *buf, int len, int zero, int sequence);
 struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt);
 void usbhs_pkt_start(struct usbhs_pipe *pipe);
+struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe);
 
 #endif /* RENESAS_USB_FIFO_H */
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 59cac40..7feac41 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -721,8 +721,7 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
 	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
 	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
 	unsigned long flags;
-
-	usbhsg_pipe_disable(uep);
+	int ret = 0;
 
 	dev_dbg(dev, "set halt %d (pipe %d)\n",
 		halt, usbhs_pipe_number(pipe));
@@ -730,6 +729,18 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
 	/********************  spin lock ********************/
 	usbhs_lock(priv, flags);
 
+	/*
+	 * According to usb_ep_set_halt()'s description, this function should
+	 * return -EAGAIN if the IN endpoint has any queued transfers or
+	 * buffered data. Note that usbhs_pipe_is_dir_in() returns false if
+	 * the pipe is an IN endpoint in gadget mode.
+	 */
+	if (!usbhs_pipe_is_dir_in(pipe) && (__usbhsf_pkt_get(pipe) ||
+	    usbhs_pipe_contains_transmittable_data(pipe))) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
 	if (halt)
 		usbhs_pipe_stall(pipe);
 	else
@@ -740,10 +751,11 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
 	else
 		usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE);
 
+out:
 	usbhs_unlock(priv, flags);
 	/********************  spin unlock ******************/
 
-	return 0;
+	return ret;
 }
 
 static int usbhsg_ep_set_halt(struct usb_ep *ep, int value)
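
The -EAGAIN return added above matches the contract documented for usb_ep_set_halt(): an IN endpoint must not be halted while transfers are still queued. A minimal sketch of a caller honoring that contract (the struct and completion handler below are hypothetical, not part of this driver):

#include <linux/usb/gadget.h>

struct my_func {
	bool stall_pending;	/* halt was requested while data was queued */
};

static void my_ep_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct my_func *func = req->context;

	/* Retry the deferred stall once the queued transfer has drained. */
	if (func->stall_pending && usb_ep_set_halt(ep) != -EAGAIN)
		func->stall_pending = false;
}
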
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index c4922b9..9e5afdd 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -277,6 +277,21 @@ int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe)
 	return -EBUSY;
 }
 
+bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe)
+{
+	u16 val;
+
+	/* Not supported for the DCP pipe */
+	if (usbhs_pipe_is_dcp(pipe))
+		return false;
+
+	val = usbhsp_pipectrl_get(pipe);
+	if (val & INBUFM)
+		return true;
+
+	return false;
+}
+
 /*
  *		PID ctrl
  */
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index 3080423..3b13052 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -83,6 +83,7 @@ void usbhs_pipe_clear(struct usbhs_pipe *pipe);
 void usbhs_pipe_clear_without_sequence(struct usbhs_pipe *pipe,
 				       int needs_bfre, int bfre_enable);
 int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe);
+bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe);
 void usbhs_pipe_enable(struct usbhs_pipe *pipe);
 void usbhs_pipe_disable(struct usbhs_pipe *pipe);
 void usbhs_pipe_stall(struct usbhs_pipe *pipe);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index e18735e..f06706e 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1020,6 +1020,9 @@ static const struct usb_device_id id_table_combined[] = {
 	/* EZPrototypes devices */
 	{ USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
 	{ USB_DEVICE_INTERFACE_NUMBER(UNJO_VID, UNJO_ISODEBUG_V1_PID, 1) },
+	/* Sienna devices */
+	{ USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
+	{ USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
 	{ }					/* Terminating entry */
 };
 
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index f12d806..22d6621 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -39,6 +39,9 @@
 
 #define FTDI_LUMEL_PD12_PID	0x6002
 
+/* Sienna Serial Interface by Secyourit GmbH */
+#define FTDI_SIENNA_PID		0x8348
+
 /* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */
 #define CYBER_CORTEX_AV_PID	0x8698
 
@@ -689,6 +692,12 @@
 #define BANDB_ZZ_PROG1_USB_PID	0xBA02
 
 /*
+ * Echelon USB Serial Interface
+ */
+#define ECHELON_VID		0x0920
+#define ECHELON_U20_PID		0x7500
+
+/*
  * Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI
  */
 #define INTREPID_VID		0x093C
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index d34779f..e66a59e 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1741,8 +1741,8 @@ static struct urb *keyspan_setup_urb(struct usb_serial *serial, int endpoint,
 
 	ep_desc = find_ep(serial, endpoint);
 	if (!ep_desc) {
-		/* leak the urb, something's wrong and the callers don't care */
-		return urb;
+		usb_free_urb(urb);
+		return NULL;
 	}
 	if (usb_endpoint_xfer_int(ep_desc)) {
 		ep_type_name = "INT";
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e0a4749..3cc659a 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -419,6 +419,7 @@ static void option_instat_callback(struct urb *urb);
 #define CINTERION_PRODUCT_PH8_AUDIO		0x0083
 #define CINTERION_PRODUCT_AHXX_2RMNET		0x0084
 #define CINTERION_PRODUCT_AHXX_AUDIO		0x0085
+#define CINTERION_PRODUCT_CLS8			0x00b0
 
 /* Olivetti products */
 #define OLIVETTI_VENDOR_ID			0x0b3c
@@ -968,6 +969,11 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
 
+	/* Motorola devices */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) },	/* mdm6600 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) },	/* mdm9600 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) },	/* mdm ram dl */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) },	/* mdm qc dl */
 
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
@@ -1149,6 +1155,14 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
 	  .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1050, 0xff),	/* Telit FN980 (rmnet) */
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1051, 0xff),	/* Telit FN980 (MBIM) */
+	  .driver_info = NCTRL(0) | RSVD(1) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1052, 0xff),	/* Telit FN980 (RNDIS) */
+	  .driver_info = NCTRL(2) | RSVD(3) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff),	/* Telit FN980 (ECM) */
+	  .driver_info = NCTRL(0) | RSVD(1) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1549,6 +1563,7 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
 	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) },	/* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1841,6 +1856,8 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
+	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff),
+	  .driver_info = RSVD(0) | RSVD(4) },
 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
@@ -1952,11 +1969,15 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),			/* D-Link DWM-222 */
 	  .driver_info = RSVD(4) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff),			/* D-Link DWM-222 A2 */
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) },	/* D-Link DWM-152/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) },	/* D-Link DWM-156/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) },	/* D-Link DWM-156/A3 */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff),			/* Olicard 600 */
 	  .driver_info = RSVD(4) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff),			/* BroadMobi BM818 */
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },			/* OLICARD300 - MT6225 */
 	{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
 	{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index e3c5832..c9201e0 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -776,7 +776,6 @@ static void ti_close(struct usb_serial_port *port)
 	struct ti_port *tport;
 	int port_number;
 	int status;
-	int do_unlock;
 	unsigned long flags;
 
 	tdev = usb_get_serial_data(port->serial);
@@ -800,16 +799,13 @@ static void ti_close(struct usb_serial_port *port)
 			"%s - cannot send close port command, %d\n"
 							, __func__, status);
 
-	/* if mutex_lock is interrupted, continue anyway */
-	do_unlock = !mutex_lock_interruptible(&tdev->td_open_close_lock);
+	mutex_lock(&tdev->td_open_close_lock);
 	--tport->tp_tdev->td_open_port_count;
-	if (tport->tp_tdev->td_open_port_count <= 0) {
+	if (tport->tp_tdev->td_open_port_count == 0) {
 		/* last port is closed, shut down interrupt urb */
 		usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
-		tport->tp_tdev->td_open_port_count = 0;
 	}
-	if (do_unlock)
-		mutex_unlock(&tdev->td_open_close_lock);
+	mutex_unlock(&tdev->td_open_close_lock);
 }
 
 
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index f7aaa7f..4341537 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -311,10 +311,7 @@ static void serial_cleanup(struct tty_struct *tty)
 	serial = port->serial;
 	owner = serial->type->driver.owner;
 
-	mutex_lock(&serial->disc_mutex);
-	if (!serial->disconnected)
-		usb_autopm_put_interface(serial->interface);
-	mutex_unlock(&serial->disc_mutex);
+	usb_autopm_put_interface(serial->interface);
 
 	usb_serial_put(serial);
 	module_put(owner);
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index cc794e2..1d9ce9cb 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -38,7 +38,7 @@ MODULE_LICENSE("GPL");
 
 static int auto_delink_en = 1;
 module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(auto_delink_en, "enable auto delink");
+MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])");
 
 #ifdef CONFIG_REALTEK_AUTOPM
 static int ss_en = 1;
@@ -996,12 +996,15 @@ static int init_realtek_cr(struct us_data *us)
 			goto INIT_FAIL;
 	}
 
-	if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
-	    CHECK_FW_VER(chip, 0x5901))
-		SET_AUTO_DELINK(chip);
-	if (STATUS_LEN(chip) == 16) {
-		if (SUPPORT_AUTO_DELINK(chip))
+	if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) ||
+	    CHECK_PID(chip, 0x0159)) {
+		if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
+				CHECK_FW_VER(chip, 0x5901))
 			SET_AUTO_DELINK(chip);
+		if (STATUS_LEN(chip) == 16) {
+			if (SUPPORT_AUTO_DELINK(chip))
+				SET_AUTO_DELINK(chip);
+		}
 	}
 #ifdef CONFIG_REALTEK_AUTOPM
 	if (ss_en)
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ea0d27a..1cd9b630 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2100,7 +2100,7 @@ UNUSUAL_DEV(  0x14cd, 0x6600, 0x0201, 0x0201,
 		US_FL_IGNORE_RESIDUE ),
 
 /* Reported by Michael Büsch <m@bues.ch> */
-UNUSUAL_DEV(  0x152d, 0x0567, 0x0114, 0x0116,
+UNUSUAL_DEV(  0x152d, 0x0567, 0x0114, 0x0117,
 		"JMicron",
 		"USB to ATA/ATAPI Bridge",
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index 5f29ce8..819ae3b 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -37,6 +37,7 @@
 	S(SRC_ATTACHED),			\
 	S(SRC_STARTUP),				\
 	S(SRC_SEND_CAPABILITIES),		\
+	S(SRC_SEND_CAPABILITIES_TIMEOUT),	\
 	S(SRC_NEGOTIATE_CAPABILITIES),		\
 	S(SRC_TRANSITION_SUPPLY),		\
 	S(SRC_READY),				\
@@ -1445,7 +1446,7 @@ static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
 				else if ((pdo_min_voltage(pdo[i]) ==
 					  pdo_min_voltage(pdo[i - 1])) &&
 					 (pdo_max_voltage(pdo[i]) ==
-					  pdo_min_voltage(pdo[i - 1])))
+					  pdo_max_voltage(pdo[i - 1])))
 					return PDO_ERR_DUPE_PDO;
 				break;
 			/*
@@ -2987,10 +2988,34 @@ static void run_state_machine(struct tcpm_port *port)
 			/* port->hard_reset_count = 0; */
 			port->caps_count = 0;
 			port->pd_capable = true;
-			tcpm_set_state_cond(port, hard_reset_state(port),
+			tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
 					    PD_T_SEND_SOURCE_CAP);
 		}
 		break;
+	case SRC_SEND_CAPABILITIES_TIMEOUT:
+		/*
+		 * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
+		 *
+		 * PD 2.0 sinks are supposed to accept src-capabilities with a
+		 * 3.0 header and simply ignore any src PDOs which the sink does
+		 * not understand, such as PPS, but some 2.0 sinks instead ignore
+		 * the entire PD_DATA_SOURCE_CAP message, causing contract
+		 * negotiation to fail.
+		 *
+		 * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
+		 * sending src-capabilities with a lower PD revision to
+		 * make these broken sinks work.
+		 */
+		if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
+			tcpm_set_state(port, HARD_RESET_SEND, 0);
+		} else if (port->negotiated_rev > PD_REV20) {
+			port->negotiated_rev--;
+			port->hard_reset_count = 0;
+			tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+		} else {
+			tcpm_set_state(port, hard_reset_state(port), 0);
+		}
+		break;
 	case SRC_NEGOTIATE_CAPABILITIES:
 		ret = tcpm_pd_check_request(port);
 		if (ret < 0) {
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index f101347..e0cf11f 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -59,6 +59,7 @@ struct usb_skel {
 	spinlock_t		err_lock;		/* lock for errors */
 	struct kref		kref;
 	struct mutex		io_mutex;		/* synchronize I/O with disconnect */
+	unsigned long		disconnected:1;
 	wait_queue_head_t	bulk_in_wait;		/* to wait for an ongoing read */
 };
 #define to_skel_dev(d) container_of(d, struct usb_skel, kref)
@@ -71,6 +72,7 @@ static void skel_delete(struct kref *kref)
 	struct usb_skel *dev = to_skel_dev(kref);
 
 	usb_free_urb(dev->bulk_in_urb);
+	usb_put_intf(dev->interface);
 	usb_put_dev(dev->udev);
 	kfree(dev->bulk_in_buffer);
 	kfree(dev);
@@ -122,10 +124,7 @@ static int skel_release(struct inode *inode, struct file *file)
 		return -ENODEV;
 
 	/* allow the device to be autosuspended */
-	mutex_lock(&dev->io_mutex);
-	if (dev->interface)
-		usb_autopm_put_interface(dev->interface);
-	mutex_unlock(&dev->io_mutex);
+	usb_autopm_put_interface(dev->interface);
 
 	/* decrement the count on our device */
 	kref_put(&dev->kref, skel_delete);
@@ -238,7 +237,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count,
 	if (rv < 0)
 		return rv;
 
-	if (!dev->interface) {		/* disconnect() was called */
+	if (dev->disconnected) {		/* disconnect() was called */
 		rv = -ENODEV;
 		goto exit;
 	}
@@ -420,7 +419,7 @@ static ssize_t skel_write(struct file *file, const char *user_buffer,
 
 	/* this lock makes sure we don't submit URBs to gone devices */
 	mutex_lock(&dev->io_mutex);
-	if (!dev->interface) {		/* disconnect() was called */
+	if (dev->disconnected) {		/* disconnect() was called */
 		mutex_unlock(&dev->io_mutex);
 		retval = -ENODEV;
 		goto error;
@@ -505,7 +504,7 @@ static int skel_probe(struct usb_interface *interface,
 	init_waitqueue_head(&dev->bulk_in_wait);
 
 	dev->udev = usb_get_dev(interface_to_usbdev(interface));
-	dev->interface = interface;
+	dev->interface = usb_get_intf(interface);
 
 	/* set up the endpoint information */
 	/* use only the first bulk-in and bulk-out endpoints */
@@ -571,7 +570,7 @@ static void skel_disconnect(struct usb_interface *interface)
 
 	/* prevent more I/O from starting */
 	mutex_lock(&dev->io_mutex);
-	dev->interface = NULL;
+	dev->disconnected = 1;
 	mutex_unlock(&dev->io_mutex);
 
 	usb_kill_anchored_urbs(&dev->submitted);
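
The skeleton changes above swap "NULL the interface pointer on disconnect" for a dedicated flag plus a proper reference on the interface, so the pointer stays valid for usb_autopm_put_interface() until the last kref is dropped. The shape of the pattern, reduced to its essentials on a hypothetical device struct:

struct my_dev {
	struct usb_interface	*intf;		/* pinned with usb_get_intf() at probe */
	unsigned long		disconnected:1;
	struct kref		kref;
};

static void my_dev_delete(struct kref *kref)
{
	struct my_dev *dev = container_of(kref, struct my_dev, kref);

	usb_put_intf(dev->intf);	/* release the probe-time reference */
	kfree(dev);
}

static void my_disconnect(struct usb_interface *intf)
{
	struct my_dev *dev = usb_get_intfdata(intf);

	dev->disconnected = 1;		/* I/O paths check this flag, not intf */
	kref_put(&dev->kref, my_dev_delete);
}
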
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 6cf00d9..a92c286 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -373,11 +373,20 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
 	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
 
 	/*
-	 * Try to reset the device.  The success of this is dependent on
-	 * being able to lock the device, which is not always possible.
+	 * Try to get the locks ourselves to prevent a deadlock. The
+	 * success of this is dependent on being able to lock the device,
+	 * which is not always possible.
+	 * We cannot use the "try" reset interface here, which will
+	 * overwrite the previously restored configuration information.
 	 */
-	if (vdev->reset_works && !pci_try_reset_function(pdev))
-		vdev->needs_reset = false;
+	if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
+		if (device_trylock(&pdev->dev)) {
+			if (!__pci_reset_function_locked(pdev))
+				vdev->needs_reset = false;
+			device_unlock(&pdev->dev);
+		}
+		pci_cfg_access_unlock(pdev);
+	}
 
 	pci_restore_state(pdev);
 out:
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 4058985..a9be2d8 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -23,6 +23,12 @@
  * Using this limit prevents one virtqueue from starving others. */
 #define VHOST_TEST_WEIGHT 0x80000
 
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * small packets.
+ */
+#define VHOST_TEST_PKT_WEIGHT 256
+
 enum {
 	VHOST_TEST_VQ = 0,
 	VHOST_TEST_VQ_MAX = 1,
@@ -81,10 +87,8 @@ static void handle_vq(struct vhost_test *n)
 		}
 		vhost_add_used_and_signal(&n->dev, vq, head, 0);
 		total_len += len;
-		if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
-			vhost_poll_queue(&vq->poll);
+		if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
 			break;
-		}
 	}
 
 	mutex_unlock(&vq->mutex);
@@ -116,7 +120,8 @@ static int vhost_test_open(struct inode *inode, struct file *f)
 	dev = &n->dev;
 	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
 	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
-	vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
+	vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
+		       VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);
 
 	f->private_data = n;
 
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 0752f8d..98b6eb9 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2073,7 +2073,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
 		/* If this is an input descriptor, increment that count. */
 		if (access == VHOST_ACCESS_WO) {
 			*in_num += ret;
-			if (unlikely(log)) {
+			if (unlikely(log && ret)) {
 				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
 				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
 				++*log_num;
@@ -2216,7 +2216,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
 			/* If this is an input descriptor,
 			 * increment that count. */
 			*in_num += ret;
-			if (unlikely(log)) {
+			if (unlikely(log && ret)) {
 				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
 				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
 				++*log_num;
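
Both vhost hunks above move flow control into vhost_exceeds_weight(), which compares the running byte and packet totals against the limits passed to vhost_dev_init() and requeues the poll work itself when a limit is hit. A sketch of the resulting handler loop (process_one_buffer() is a hypothetical stand-in for the per-backend receive/transmit step):

static int process_one_buffer(struct vhost_virtqueue *vq);	/* hypothetical */

static void handle_vq_sketch(struct vhost_virtqueue *vq)
{
	int total_len = 0, pkts = 0;

	for (;;) {
		int len = process_one_buffer(vq);

		if (len <= 0)
			break;
		total_len += len;
		/* The helper requeues the poll work before returning true. */
		if (unlikely(vhost_exceeds_weight(vq, ++pkts, total_len)))
			break;
	}
}
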
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 9f39f0c..cc10063 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -122,28 +122,13 @@ static void efifb_copy_bmp(u8 *src, u32 *dst, int width, struct screen_info *si)
  */
 static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
 {
-	static const int default_resolutions[][2] = {
-		{  800,  600 },
-		{ 1024,  768 },
-		{ 1280, 1024 },
-	};
-	u32 i, right_margin;
+	/*
+	 * All x86 firmware implementations horizontally center the image
+	 * (the yoffset calculations differ between boards, but the xoffset
+	 * is predictable).
+	 */
+	u32 expected_xoffset = (si->lfb_width - bmp_width) / 2;
 
-	for (i = 0; i < ARRAY_SIZE(default_resolutions); i++) {
-		if (default_resolutions[i][0] == si->lfb_width &&
-		    default_resolutions[i][1] == si->lfb_height)
-			break;
-	}
-	/* If not a default resolution used for textmode, this should be fine */
-	if (i >= ARRAY_SIZE(default_resolutions))
-		return true;
-
-	/* If the right margin is 5 times smaller then the left one, reject */
-	right_margin = si->lfb_width - (bgrt_tab.image_offset_x + bmp_width);
-	if (right_margin < (bgrt_tab.image_offset_x / 5))
-		return false;
-
-	return true;
+	return bgrt_tab.image_offset_x == expected_xoffset;
 }
 #else
 static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
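
The rewritten check relies on a single arithmetic invariant instead of a resolution whitelist: x86 firmware centers the BGRT image horizontally, so the stored xoffset must equal (lfb_width - bmp_width) / 2. A worked instance of the same test:

static bool bgrt_is_centered(u32 lfb_width, u32 bmp_width, u32 xoffset)
{
	/*
	 * E.g. an 800px-wide image on a 1920px framebuffer must sit at
	 * (1920 - 800) / 2 = 560; any other offset fails the check.
	 */
	return xoffset == (lfb_width - bmp_width) / 2;
}
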
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 6439231..da565f3 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -433,7 +433,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
 	if (ret < 0)
 		return ret;
 
-	ret = ssd1307fb_write_cmd(par->client, 0x0);
+	ret = ssd1307fb_write_cmd(par->client, par->page_offset);
 	if (ret < 0)
 		return ret;
 
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index 1abe4d02..ffde179 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -38,6 +38,7 @@ static const struct aspeed_wdt_config ast2500_config = {
 static const struct of_device_id aspeed_wdt_of_table[] = {
 	{ .compatible = "aspeed,ast2400-wdt", .data = &ast2400_config },
 	{ .compatible = "aspeed,ast2500-wdt", .data = &ast2500_config },
+	{ .compatible = "aspeed,ast2600-wdt", .data = &ast2500_config },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
@@ -264,7 +265,8 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
 		set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
 	}
 
-	if (of_device_is_compatible(np, "aspeed,ast2500-wdt")) {
+	if ((of_device_is_compatible(np, "aspeed,ast2500-wdt")) ||
+		(of_device_is_compatible(np, "aspeed,ast2600-wdt"))) {
 		u32 reg = readl(wdt->base + WDT_RESET_WIDTH);
 
 		reg &= config->ext_pulse_width_mask;
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index ed05514..e6c27b7 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -249,6 +249,7 @@ module_param(nowayout, bool, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
 				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
+MODULE_ALIAS("platform:bcm2835-wdt");
 MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
 MODULE_DESCRIPTION("Driver for Broadcom BCM2835 watchdog timer");
 MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 7e7bdcb..9f3123b 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -55,7 +55,7 @@
 
 #define IMX2_WDT_WMCR		0x08		/* Misc Register */
 
-#define IMX2_WDT_MAX_TIME	128
+#define IMX2_WDT_MAX_TIME	128U
 #define IMX2_WDT_DEFAULT_TIME	60		/* in seconds */
 
 #define WDOG_SEC_TO_COUNT(s)	((s * 2 - 1) << 8)
@@ -180,7 +180,7 @@ static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
 {
 	unsigned int actual;
 
-	actual = min(new_timeout, wdog->max_hw_heartbeat_ms * 1000);
+	actual = min(new_timeout, IMX2_WDT_MAX_TIME);
 	__imx2_wdt_set_timeout(wdog, actual);
 	wdog->timeout = new_timeout;
 	return 0;
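
Two things are going on in the imx2_wdt hunk: the clamp now uses IMX2_WDT_MAX_TIME (seconds, matching new_timeout) instead of max_hw_heartbeat_ms * 1000, and the constant gains a U suffix because the kernel's min() warns at build time when its operands have distinct types; new_timeout is unsigned int, so a plain signed 128 would not match. A userspace-flavored illustration of that type check (MIN_CHECKED is a stand-in, not the kernel macro):

#include <stdio.h>

#define MIN_CHECKED(a, b) ({				\
	typeof(a) _a = (a);				\
	typeof(b) _b = (b);				\
	(void)(&_a == &_b);	/* warns if a and b have distinct types */ \
	_a < _b ? _a : _b; })

int main(void)
{
	unsigned int new_timeout = 300;

	/* 128U keeps both operands unsigned int; plain 128 would warn. */
	printf("%u\n", MIN_CHECKED(new_timeout, 128U));	/* prints 128 */
	return 0;
}
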
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 7494dbe..db58aaa 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -29,6 +29,8 @@
 #include "../pci/pci.h"
 #ifdef CONFIG_PCI_MMCONFIG
 #include <asm/pci_x86.h>
+
+static int xen_mcfg_late(void);
 #endif
 
 static bool __read_mostly pci_seg_supported = true;
@@ -40,7 +42,18 @@ static int xen_add_device(struct device *dev)
 #ifdef CONFIG_PCI_IOV
 	struct pci_dev *physfn = pci_dev->physfn;
 #endif
-
+#ifdef CONFIG_PCI_MMCONFIG
+	static bool pci_mcfg_reserved = false;
+	/*
+	 * Reserve MCFG areas in Xen on the first invocation, because this
+	 * may be called from inside acpi_init(), immediately after the
+	 * MCFG table has been parsed.
+	 */
+	if (!pci_mcfg_reserved) {
+		xen_mcfg_late();
+		pci_mcfg_reserved = true;
+	}
+#endif
 	if (pci_seg_supported) {
 		struct {
 			struct physdev_pci_device_add add;
@@ -213,7 +226,7 @@ static int __init register_xen_pci_notifier(void)
 arch_initcall(register_xen_pci_notifier);
 
 #ifdef CONFIG_PCI_MMCONFIG
-static int __init xen_mcfg_late(void)
+static int xen_mcfg_late(void)
 {
 	struct pci_mmcfg_region *cfg;
 	int rc;
@@ -252,8 +265,4 @@ static int __init xen_mcfg_late(void)
 	}
 	return 0;
 }
-/*
- * Needs to be done after acpi_init which are subsys_initcall.
- */
-subsys_initcall_sync(xen_mcfg_late);
 #endif
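
The net effect of the xen/pci.c changes is a classic conversion from fragile initcall ordering to lazy, on-first-use setup guarded by a static flag. The bare shape of that pattern (names hypothetical; concurrent callers would need a lock or the DO_ONCE() helper, whereas PCI device addition is serialized here):

static int expensive_setup(void);	/* formerly run from an initcall */

static int use_feature(void)
{
	static bool setup_done;

	/* The first caller pays the setup cost; later callers skip it. */
	if (!setup_done) {
		expensive_setup();
		setup_done = true;
	}
	return 0;
}
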
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
index 73427d8e..e569413 100644
--- a/drivers/xen/xen-pciback/conf_space_capability.c
+++ b/drivers/xen/xen-pciback/conf_space_capability.c
@@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
 {
 	int err;
 	u16 old_value;
-	pci_power_t new_state, old_state;
+	pci_power_t new_state;
 
 	err = pci_read_config_word(dev, offset, &old_value);
 	if (err)
 		goto out;
 
-	old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
 	new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
 
 	new_value &= PM_OK_BITS;
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 39c6315..454c682 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -55,6 +55,7 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/miscdevice.h>
+#include <linux/workqueue.h>
 
 #include <xen/xenbus.h>
 #include <xen/xen.h>
@@ -116,6 +117,8 @@ struct xenbus_file_priv {
 	wait_queue_head_t read_waitq;
 
 	struct kref kref;
+
+	struct work_struct wq;
 };
 
 /* Read out any raw xenbus messages queued up. */
@@ -300,14 +303,14 @@ static void watch_fired(struct xenbus_watch *watch,
 	mutex_unlock(&adap->dev_data->reply_mutex);
 }
 
-static void xenbus_file_free(struct kref *kref)
+static void xenbus_worker(struct work_struct *wq)
 {
 	struct xenbus_file_priv *u;
 	struct xenbus_transaction_holder *trans, *tmp;
 	struct watch_adapter *watch, *tmp_watch;
 	struct read_buffer *rb, *tmp_rb;
 
-	u = container_of(kref, struct xenbus_file_priv, kref);
+	u = container_of(wq, struct xenbus_file_priv, wq);
 
 	/*
 	 * No need for locking here because there are no other users,
@@ -333,6 +336,18 @@ static void xenbus_file_free(struct kref *kref)
 	kfree(u);
 }
 
+static void xenbus_file_free(struct kref *kref)
+{
+	struct xenbus_file_priv *u;
+
+	/*
+	 * We might be called in xenbus_thread().
+	 * Use a workqueue to avoid a deadlock.
+	 */
+	u = container_of(kref, struct xenbus_file_priv, kref);
+	schedule_work(&u->wq);
+}
+
 static struct xenbus_transaction_holder *xenbus_get_transaction(
 	struct xenbus_file_priv *u, uint32_t tx_id)
 {
@@ -652,6 +667,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
 	INIT_LIST_HEAD(&u->watches);
 	INIT_LIST_HEAD(&u->read_buffers);
 	init_waitqueue_head(&u->read_waitq);
+	INIT_WORK(&u->wq, xenbus_worker);
 
 	mutex_init(&u->reply_mutex);
 	mutex_init(&u->msgbuffer_mutex);
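
The xenbus fix is an instance of a common deadlock-avoidance pattern: when the final kref_put() can run in a context that must not perform the teardown itself (here, xenbus_thread()), the release callback only schedules work, and the real freeing happens later in process context. Its skeleton, on a hypothetical object:

struct my_obj {
	struct kref		kref;
	struct work_struct	work;
};

static void my_obj_worker(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, work);

	/* ... tear down transactions, watches, buffers ... */
	kfree(obj);
}

static void my_obj_release(struct kref *kref)
{
	struct my_obj *obj = container_of(kref, struct my_obj, kref);

	/* Defer: we may be running on a thread that must not block here. */
	schedule_work(&obj->work);
}

/* At creation: kref_init(&obj->kref); INIT_WORK(&obj->work, my_obj_worker); */
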
diff --git a/fs/9p/cache.c b/fs/9p/cache.c
index 9eb3470..a43a8d2 100644
--- a/fs/9p/cache.c
+++ b/fs/9p/cache.c
@@ -66,6 +66,8 @@ void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
 	if (!v9ses->cachetag) {
 		if (v9fs_random_cachetag(v9ses) < 0) {
 			v9ses->fscache = NULL;
+			kfree(v9ses->cachetag);
+			v9ses->cachetag = NULL;
 			return;
 		}
 	}
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 05454a7..550d0b1 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -528,6 +528,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
 	v9inode = V9FS_I(inode);
 	mutex_lock(&v9inode->v_mutex);
 	if (!v9inode->writeback_fid &&
+	    (vma->vm_flags & VM_SHARED) &&
 	    (vma->vm_flags & VM_WRITE)) {
 		/*
 		 * clone a fid and add it to writeback_fid
@@ -629,6 +630,8 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
 			(vma->vm_end - vma->vm_start - 1),
 	};
 
+	if (!(vma->vm_flags & VM_SHARED))
+		return;
 
 	p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
 
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 6127f0f..ee07162d 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -76,6 +76,7 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
 			cell = rcu_dereference_raw(net->ws_cell);
 			if (cell) {
 				afs_get_cell(cell);
+				ret = 0;
 				break;
 			}
 			ret = -EDESTADDRREQ;
@@ -110,6 +111,9 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
 
 	done_seqretry(&net->cells_lock, seq);
 
+	if (ret != 0 && cell)
+		afs_put_cell(net, cell);
+
 	return ret == 0 ? cell : ERR_PTR(ret);
 }
 
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 9e51d6f..40c6860 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -423,18 +423,14 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
 	struct afs_call *call = container_of(work, struct afs_call, work);
 	struct afs_uuid *r = call->request;
 
-	struct {
-		__be32	match;
-	} reply;
-
 	_enter("");
 
 	if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
-		reply.match = htonl(0);
+		afs_send_empty_reply(call);
 	else
-		reply.match = htonl(1);
+		rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+					1, 1, "K-1");
 
-	afs_send_simple_reply(call, &reply, sizeof(reply));
 	afs_put_call(call);
 	_leave("");
 }
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 855bf2b..54e7f6f 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -937,7 +937,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
 	dir_version = (long)dir->status.data_version;
 	de_version = (long)dentry->d_fsdata;
 	if (de_version == dir_version)
-		goto out_valid;
+		goto out_valid_noupdate;
 
 	dir_version = (long)dir->invalid_before;
 	if (de_version - dir_version >= 0)
@@ -1001,6 +1001,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
 
 out_valid:
 	dentry->d_fsdata = (void *)dir_version;
+out_valid_noupdate:
 	dput(parent);
 	key_put(key);
 	_leave(" = 1 [valid]");
diff --git a/fs/afs/file.c b/fs/afs/file.c
index b95ff42..3b628b0 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -193,11 +193,13 @@ void afs_put_read(struct afs_read *req)
 	int i;
 
 	if (refcount_dec_and_test(&req->usage)) {
-		for (i = 0; i < req->nr_pages; i++)
-			if (req->pages[i])
-				put_page(req->pages[i]);
-		if (req->pages != req->array)
-			kfree(req->pages);
+		if (req->pages) {
+			for (i = 0; i < req->nr_pages; i++)
+				if (req->pages[i])
+					put_page(req->pages[i]);
+			if (req->pages != req->array)
+				kfree(req->pages);
+		}
 		kfree(req);
 	}
 }
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index c3b7408..c7dd47e 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -60,23 +60,24 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
 		struct afs_uuid__xdr *xdr;
 		struct afs_uuid *uuid;
 		int j;
+		int n = entry->nr_servers;
 
 		tmp = ntohl(uvldb->serverFlags[i]);
 		if (tmp & AFS_VLSF_DONTUSE ||
 		    (new_only && !(tmp & AFS_VLSF_NEWREPSITE)))
 			continue;
 		if (tmp & AFS_VLSF_RWVOL) {
-			entry->fs_mask[i] |= AFS_VOL_VTM_RW;
+			entry->fs_mask[n] |= AFS_VOL_VTM_RW;
 			if (vlflags & AFS_VLF_BACKEXISTS)
-				entry->fs_mask[i] |= AFS_VOL_VTM_BAK;
+				entry->fs_mask[n] |= AFS_VOL_VTM_BAK;
 		}
 		if (tmp & AFS_VLSF_ROVOL)
-			entry->fs_mask[i] |= AFS_VOL_VTM_RO;
-		if (!entry->fs_mask[i])
+			entry->fs_mask[n] |= AFS_VOL_VTM_RO;
+		if (!entry->fs_mask[n])
 			continue;
 
 		xdr = &uvldb->serverNumber[i];
-		uuid = (struct afs_uuid *)&entry->fs_server[i];
+		uuid = (struct afs_uuid *)&entry->fs_server[n];
 		uuid->time_low			= xdr->time_low;
 		uuid->time_mid			= htons(ntohl(xdr->time_mid));
 		uuid->time_hi_and_version	= htons(ntohl(xdr->time_hi_and_version));
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index efae2fb..e7fd0b5 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1137,6 +1137,18 @@ static int load_elf_binary(struct linux_binprm *bprm)
 	current->mm->start_stack = bprm->p;
 
 	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
+		/*
+		 * For architectures with ELF randomization, when executing
+		 * a loader directly (i.e. no interpreter listed in ELF
+		 * headers), move the brk area out of the mmap region
+		 * (since it grows up, and may collide early with the stack
+		 * growing down), and into the unused ELF_ET_DYN_BASE region.
+		 */
+		if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
+		    loc->elf_ex.e_type == ET_DYN && !interpreter)
+			current->mm->brk = current->mm->start_brk =
+				ELF_ET_DYN_BASE;
+
 		current->mm->brk = current->mm->start_brk =
 			arch_randomize_brk(current->mm);
 #ifdef compat_brk_randomized
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index ac6c383..1985565 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1485,7 +1485,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
 		goto out;
 	}
 
-	trans = btrfs_attach_transaction(root);
+	trans = btrfs_join_transaction_nostart(root);
 	if (IS_ERR(trans)) {
 		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
 			ret = PTR_ERR(trans);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 9bfa665..c71e534 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -42,6 +42,22 @@ const char* btrfs_compress_type2str(enum btrfs_compression_type type)
 	return NULL;
 }
 
+bool btrfs_compress_is_valid_type(const char *str, size_t len)
+{
+	int i;
+
+	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
+		size_t comp_len = strlen(btrfs_compress_types[i]);
+
+		if (len < comp_len)
+			continue;
+
+		if (!strncmp(btrfs_compress_types[i], str, comp_len))
+			return true;
+	}
+	return false;
+}
+
 static int btrfs_decompress_bio(struct compressed_bio *cb);
 
 static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
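
btrfs_compress_is_valid_type() turns validation into a walk over the driver's existing name table, so prop_compression_validate() (further below) no longer hard-codes one strncmp() per algorithm. A freestanding sketch of the same table-driven prefix match:

#include <stdbool.h>
#include <string.h>

static const char *const compress_types[] = { "zlib", "lzo", "zstd" };

static bool compress_is_valid_type(const char *str, size_t len)
{
	size_t i;

	for (i = 0; i < sizeof(compress_types) / sizeof(compress_types[0]); i++) {
		size_t comp_len = strlen(compress_types[i]);

		/* Accept values whose prefix names a known algorithm. */
		if (len >= comp_len &&
		    !strncmp(compress_types[i], str, comp_len))
			return true;
	}
	return false;
}
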
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index ddda9b8..f97d90a 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -127,6 +127,7 @@ extern const struct btrfs_compress_op btrfs_lzo_compress;
 extern const struct btrfs_compress_op btrfs_zstd_compress;
 
 const char* btrfs_compress_type2str(enum btrfs_compression_type type);
+bool btrfs_compress_is_valid_type(const char *str, size_t len);
 
 int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);
 
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 79ac1eb..9fd3832 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1374,6 +1374,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 	struct tree_mod_elem *tm;
 	struct extent_buffer *eb = NULL;
 	struct extent_buffer *eb_root;
+	u64 eb_root_owner = 0;
 	struct extent_buffer *old;
 	struct tree_mod_root *old_root = NULL;
 	u64 old_generation = 0;
@@ -1411,6 +1412,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 			free_extent_buffer(old);
 		}
 	} else if (old_root) {
+		eb_root_owner = btrfs_header_owner(eb_root);
 		btrfs_tree_read_unlock(eb_root);
 		free_extent_buffer(eb_root);
 		eb = alloc_dummy_extent_buffer(fs_info, logical);
@@ -1428,7 +1430,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 	if (old_root) {
 		btrfs_set_header_bytenr(eb, eb->start);
 		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
-		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
+		btrfs_set_header_owner(eb, eb_root_owner);
 		btrfs_set_header_level(eb, old_root->level);
 		btrfs_set_header_generation(eb, old_generation);
 	}
@@ -5514,6 +5516,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 	advance_left = advance_right = 0;
 
 	while (1) {
+		cond_resched();
 		if (advance_left && !left_end_reached) {
 			ret = tree_advance(fs_info, left_path, &left_level,
 					left_root_level,
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 82682da..faca485 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -39,6 +39,7 @@ extern struct kmem_cache *btrfs_trans_handle_cachep;
 extern struct kmem_cache *btrfs_bit_radix_cachep;
 extern struct kmem_cache *btrfs_path_cachep;
 extern struct kmem_cache *btrfs_free_space_cachep;
+extern struct kmem_cache *btrfs_free_space_bitmap_cachep;
 struct btrfs_ordered_sum;
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
@@ -3200,6 +3201,9 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
 				    struct btrfs_trans_handle *trans, int mode,
 				    u64 start, u64 num_bytes, u64 min_size,
 				    loff_t actual_len, u64 *alloc_hint);
+int btrfs_run_delalloc_range(void *private_data, struct page *locked_page,
+		u64 start, u64 end, int *page_started, unsigned long *nr_written,
+		struct writeback_control *wbc);
 extern const struct dentry_operations btrfs_dentry_operations;
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 void btrfs_test_inode_set_ops(struct inode *inode);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 0cc800d..72c7456 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -7367,6 +7367,14 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
 			 */
 			if ((flags & extra) && !(block_group->flags & extra))
 				goto loop;
+
+			/*
+			 * This block group has different flags than we want.
+			 * It's possible that we have the MIXED_GROUP flag but
+			 * no block group is mixed.  Just skip such a block
+			 * group.
+			 */
+			btrfs_release_block_group(block_group, delalloc);
+			continue;
 		}
 
 have_block_group:
@@ -9992,6 +10000,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 			btrfs_err(info,
 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
 				  cache->key.objectid);
+			btrfs_put_block_group(cache);
 			ret = -EINVAL;
 			goto error;
 		}
@@ -10478,22 +10487,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	}
 	spin_unlock(&block_group->lock);
 
-	if (remove_em) {
-		struct extent_map_tree *em_tree;
-
-		em_tree = &fs_info->mapping_tree.map_tree;
-		write_lock(&em_tree->lock);
-		/*
-		 * The em might be in the pending_chunks list, so make sure the
-		 * chunk mutex is locked, since remove_extent_mapping() will
-		 * delete us from that list.
-		 */
-		remove_extent_mapping(em_tree, em);
-		write_unlock(&em_tree->lock);
-		/* once for the tree */
-		free_extent_map(em);
-	}
-
 	mutex_unlock(&fs_info->chunk_mutex);
 
 	ret = remove_block_group_free_space(trans, block_group);
@@ -10510,6 +10503,24 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 		goto out;
 
 	ret = btrfs_del_item(trans, root, path);
+	if (ret)
+		goto out;
+
+	if (remove_em) {
+		struct extent_map_tree *em_tree;
+
+		em_tree = &fs_info->mapping_tree.map_tree;
+		write_lock(&em_tree->lock);
+		/*
+		 * The em might be in the pending_chunks list, so make sure the
+		 * chunk mutex is locked, since remove_extent_mapping() will
+		 * delete us from that list.
+		 */
+		remove_extent_mapping(em_tree, em);
+		write_unlock(&em_tree->lock);
+		/* once for the tree */
+		free_extent_map(em);
+	}
 out:
 	btrfs_free_path(path);
 	return ret;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 90b0a6e..cb598eb 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3199,7 +3199,7 @@ static void update_nr_written(struct writeback_control *wbc,
 /*
  * helper for __extent_writepage, doing all of the delayed allocation setup.
  *
- * This returns 1 if our fill_delalloc function did all the work required
+ * This returns 1 if the btrfs_run_delalloc_range function did all the work required
  * to write the page (copy into inline extent).  In this case the IO has
  * been started and the page is already unlocked.
  *
@@ -3220,7 +3220,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
 	int ret;
 	int page_started = 0;
 
-	if (epd->extent_locked || !tree->ops || !tree->ops->fill_delalloc)
+	if (epd->extent_locked)
 		return 0;
 
 	while (delalloc_end < page_end) {
@@ -3233,18 +3233,16 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
 			delalloc_start = delalloc_end + 1;
 			continue;
 		}
-		ret = tree->ops->fill_delalloc(inode, page,
-					       delalloc_start,
-					       delalloc_end,
-					       &page_started,
-					       nr_written, wbc);
+		ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
+				delalloc_end, &page_started, nr_written, wbc);
 		/* File system has been set read-only */
 		if (ret) {
 			SetPageError(page);
-			/* fill_delalloc should be return < 0 for error
-			 * but just in case, we use > 0 here meaning the
-			 * IO is started, so we don't want to return > 0
-			 * unless things are going well.
+			/*
+			 * btrfs_run_delalloc_range should return < 0 on error,
+			 * but just in case, we use > 0 here to mean the IO has
+			 * been started, so we don't want to return > 0 unless
+			 * things are going well.
 			 */
 			ret = ret < 0 ? ret : -EIO;
 			goto done;
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index b4d03e6..ed27bec 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -106,11 +106,6 @@ struct extent_io_ops {
 	/*
 	 * Optional hooks, called if the pointer is not NULL
 	 */
-	int (*fill_delalloc)(void *private_data, struct page *locked_page,
-			     u64 start, u64 end, int *page_started,
-			     unsigned long *nr_written,
-			     struct writeback_control *wbc);
-
 	int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
 	void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
 				      struct extent_state *state, int uptodate);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c841865..4870440d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2056,25 +2056,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	struct btrfs_trans_handle *trans;
 	struct btrfs_log_ctx ctx;
 	int ret = 0, err;
-	u64 len;
 
-	/*
-	 * If the inode needs a full sync, make sure we use a full range to
-	 * avoid log tree corruption, due to hole detection racing with ordered
-	 * extent completion for adjacent ranges, and assertion failures during
-	 * hole detection.
-	 */
-	if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
-		     &BTRFS_I(inode)->runtime_flags)) {
-		start = 0;
-		end = LLONG_MAX;
-	}
-
-	/*
-	 * The range length can be represented by u64, we have to do the typecasts
-	 * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
-	 */
-	len = (u64)end - (u64)start + 1;
 	trace_btrfs_sync_file(file, datasync);
 
 	btrfs_init_log_ctx(&ctx, inode);
@@ -2101,6 +2083,19 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	atomic_inc(&root->log_batch);
 
 	/*
+	 * If the inode needs a full sync, make sure we use a full range to
+	 * avoid log tree corruption, due to hole detection racing with ordered
+	 * extent completion for adjacent ranges, and assertion failures during
+	 * hole detection. Do this while holding the inode lock, to avoid races
+	 * with other tasks.
+	 */
+	if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+		     &BTRFS_I(inode)->runtime_flags)) {
+		start = 0;
+		end = LLONG_MAX;
+	}
+
+	/*
 	 * Before we acquired the inode's lock, someone may have dirtied more
 	 * pages in the target range. We need to make sure that writeback for
 	 * any such pages does not start while we are logging the inode, because
@@ -2127,8 +2122,11 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	/*
 	 * We have to do this here to avoid the priority inversion of waiting on
	 * IO of a lower priority task while holding a transaction open.
+	 *
+	 * Also, the range length can be represented by u64, so we have to do the
+	 * typecasts to avoid signed overflow if it's [0, LLONG_MAX].
 	 */
-	ret = btrfs_wait_ordered_range(inode, start, len);
+	ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1);
 	if (ret) {
 		up_write(&BTRFS_I(inode)->dio_sem);
 		inode_unlock(inode);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 8ecf8c0..4381e0a 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -763,7 +763,8 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 		} else {
 			ASSERT(num_bitmaps);
 			num_bitmaps--;
-			e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
+			e->bitmap = kmem_cache_zalloc(
+					btrfs_free_space_bitmap_cachep, GFP_NOFS);
 			if (!e->bitmap) {
 				kmem_cache_free(
 					btrfs_free_space_cachep, e);
@@ -1864,7 +1865,7 @@ static void free_bitmap(struct btrfs_free_space_ctl *ctl,
 			struct btrfs_free_space *bitmap_info)
 {
 	unlink_free_space(ctl, bitmap_info);
-	kfree(bitmap_info->bitmap);
+	kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap);
 	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
 	ctl->total_bitmaps--;
 	ctl->op->recalc_thresholds(ctl);
@@ -2118,7 +2119,8 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
 		}
 
 		/* allocate the bitmap */
-		info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
+		info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep,
+						 GFP_NOFS);
 		spin_lock(&ctl->tree_lock);
 		if (!info->bitmap) {
 			ret = -ENOMEM;
@@ -2130,7 +2132,8 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
 out:
 	if (info) {
 		if (info->bitmap)
-			kfree(info->bitmap);
+			kmem_cache_free(btrfs_free_space_bitmap_cachep,
+					info->bitmap);
 		kmem_cache_free(btrfs_free_space_cachep, info);
 	}
 
@@ -2786,7 +2789,8 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 	if (entry->bytes == 0) {
 		ctl->free_extents--;
 		if (entry->bitmap) {
-			kfree(entry->bitmap);
+			kmem_cache_free(btrfs_free_space_bitmap_cachep,
+					entry->bitmap);
 			ctl->total_bitmaps--;
 			ctl->op->recalc_thresholds(ctl);
 		}
@@ -3594,7 +3598,7 @@ int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
 	}
 
 	if (!map) {
-		map = kzalloc(PAGE_SIZE, GFP_NOFS);
+		map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
 		if (!map) {
 			kmem_cache_free(btrfs_free_space_cachep, info);
 			return -ENOMEM;
@@ -3624,7 +3628,7 @@ int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
 	if (info)
 		kmem_cache_free(btrfs_free_space_cachep, info);
 	if (map)
-		kfree(map);
+		kmem_cache_free(btrfs_free_space_bitmap_cachep, map);
 	return 0;
 }
 
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 355ff08..37332f8 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -72,6 +72,7 @@ static struct kmem_cache *btrfs_inode_cachep;
 struct kmem_cache *btrfs_trans_handle_cachep;
 struct kmem_cache *btrfs_path_cachep;
 struct kmem_cache *btrfs_free_space_cachep;
+struct kmem_cache *btrfs_free_space_bitmap_cachep;
 
 #define S_SHIFT 12
 static const unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
@@ -110,17 +111,17 @@ static void __endio_write_update_ordered(struct inode *inode,
  * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
  * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
  * to be released, which we want to happen only when finishing the ordered
- * extent (btrfs_finish_ordered_io()). Also note that the caller of the
- * fill_delalloc() callback already does proper cleanup for the first page of
- * the range, that is, it invokes the callback writepage_end_io_hook() for the
- * range of the first page.
+ * extent (btrfs_finish_ordered_io()).
  */
 static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
-						 const u64 offset,
-						 const u64 bytes)
+						 struct page *locked_page,
+						 u64 offset, u64 bytes)
 {
 	unsigned long index = offset >> PAGE_SHIFT;
 	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
+	u64 page_start = page_offset(locked_page);
+	u64 page_end = page_start + PAGE_SIZE - 1;
+
 	struct page *page;
 
 	while (index <= end_index) {
@@ -131,8 +132,18 @@ static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
 		ClearPagePrivate2(page);
 		put_page(page);
 	}
-	return __endio_write_update_ordered(inode, offset + PAGE_SIZE,
-					    bytes - PAGE_SIZE, false);
+
+	/*
+	 * If this page belongs to the delalloc range being instantiated,
+	 * skip it, since the first page of a range is going to be properly
+	 * cleaned up by the caller of run_delalloc_range.
+	 */
+	if (page_start >= offset && page_end <= (offset + bytes - 1)) {
+		offset += PAGE_SIZE;
+		bytes -= PAGE_SIZE;
+	}
+
+	return __endio_write_update_ordered(inode, offset, bytes, false);
 }
 
 static int btrfs_dirty_inode(struct inode *inode);
@@ -1599,12 +1610,12 @@ static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
 }
 
 /*
- * extent_io.c call back to do delayed allocation processing
+ * Function to process delayed allocation (create CoW) for ranges which are
+ * being touched for the first time.
  */
-static int run_delalloc_range(void *private_data, struct page *locked_page,
-			      u64 start, u64 end, int *page_started,
-			      unsigned long *nr_written,
-			      struct writeback_control *wbc)
+int btrfs_run_delalloc_range(void *private_data, struct page *locked_page,
+		u64 start, u64 end, int *page_started, unsigned long *nr_written,
+		struct writeback_control *wbc)
 {
 	struct inode *inode = private_data;
 	int ret;
@@ -1629,7 +1640,8 @@ static int run_delalloc_range(void *private_data, struct page *locked_page,
 					   write_flags);
 	}
 	if (ret)
-		btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
+		btrfs_cleanup_ordered_extents(inode, locked_page, start,
+					      end - start + 1);
 	return ret;
 }
 
@@ -9350,6 +9362,7 @@ void __cold btrfs_destroy_cachep(void)
 	kmem_cache_destroy(btrfs_trans_handle_cachep);
 	kmem_cache_destroy(btrfs_path_cachep);
 	kmem_cache_destroy(btrfs_free_space_cachep);
+	kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
 }
 
 int __init btrfs_init_cachep(void)
@@ -9379,6 +9392,12 @@ int __init btrfs_init_cachep(void)
 	if (!btrfs_free_space_cachep)
 		goto fail;
 
+	btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
+							PAGE_SIZE, PAGE_SIZE,
+							SLAB_RED_ZONE, NULL);
+	if (!btrfs_free_space_bitmap_cachep)
+		goto fail;
+
 	return 0;
 fail:
 	btrfs_destroy_cachep();
@@ -10598,7 +10617,6 @@ static const struct extent_io_ops btrfs_extent_io_ops = {
 	.readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
 
 	/* optional callbacks */
-	.fill_delalloc = run_delalloc_range,
 	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
 	.writepage_start_hook = btrfs_writepage_start_hook,
 	.set_bit_hook = btrfs_set_bit_hook,
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index 61d22a5..6980a0e1 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -366,11 +366,7 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
 
 static int prop_compression_validate(const char *value, size_t len)
 {
-	if (!strncmp("lzo", value, 3))
-		return 0;
-	else if (!strncmp("zlib", value, 4))
-		return 0;
-	else if (!strncmp("zstd", value, 4))
+	if (btrfs_compress_is_valid_type(value, len))
 		return 0;
 
 	return -EINVAL;
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 734866a..3ea2008 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2796,9 +2796,6 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 	btrfs_free_path(path);
 
 	mutex_lock(&fs_info->qgroup_rescan_lock);
-	if (!btrfs_fs_closing(fs_info))
-		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
-
 	if (err > 0 &&
 	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
@@ -2814,16 +2811,30 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 	trans = btrfs_start_transaction(fs_info->quota_root, 1);
 	if (IS_ERR(trans)) {
 		err = PTR_ERR(trans);
+		trans = NULL;
 		btrfs_err(fs_info,
 			  "fail to start transaction for status update: %d",
 			  err);
-		goto done;
 	}
-	ret = update_qgroup_status_item(trans);
-	if (ret < 0) {
-		err = ret;
-		btrfs_err(fs_info, "fail to update qgroup status: %d", err);
+
+	mutex_lock(&fs_info->qgroup_rescan_lock);
+	if (!btrfs_fs_closing(fs_info))
+		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+	if (trans) {
+		ret = update_qgroup_status_item(trans);
+		if (ret < 0) {
+			err = ret;
+			btrfs_err(fs_info, "fail to update qgroup status: %d",
+				  err);
+		}
 	}
+	fs_info->qgroup_rescan_running = false;
+	complete_all(&fs_info->qgroup_rescan_completion);
+	mutex_unlock(&fs_info->qgroup_rescan_lock);
+
+	if (!trans)
+		return;
+
 	btrfs_end_transaction(trans);
 
 	if (btrfs_fs_closing(fs_info)) {
@@ -2834,12 +2845,6 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 	} else {
 		btrfs_err(fs_info, "qgroup scan failed with %d", err);
 	}
-
-done:
-	mutex_lock(&fs_info->qgroup_rescan_lock);
-	fs_info->qgroup_rescan_running = false;
-	mutex_unlock(&fs_info->qgroup_rescan_lock);
-	complete_all(&fs_info->qgroup_rescan_completion);
 }
 
 /*
@@ -3067,6 +3072,9 @@ int btrfs_qgroup_reserve_data(struct inode *inode,
 	while ((unode = ulist_next(&reserved->range_changed, &uiter)))
 		clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
 				 unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL);
+	/* Also free the data bytes that were already reserved */
+	btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid,
+				  orig_reserved, BTRFS_QGROUP_RSV_DATA);
 	extent_changeset_release(reserved);
 	return ret;
 }
@@ -3111,7 +3119,7 @@ static int qgroup_free_reserved_data(struct inode *inode,
 		 * EXTENT_QGROUP_RESERVED, we won't double free.
		 * So no need to rush.
 		 */
-		ret = clear_record_extent_bits(&BTRFS_I(inode)->io_failure_tree,
+		ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree,
 				free_start, free_start + free_len - 1,
 				EXTENT_QGROUP_RESERVED, &changeset);
 		if (ret < 0)
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index e5b9e59..cd2a586 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -511,7 +511,7 @@ static int process_leaf(struct btrfs_root *root,
 	struct btrfs_extent_data_ref *dref;
 	struct btrfs_shared_data_ref *sref;
 	u32 count;
-	int i = 0, tree_block_level = 0, ret;
+	int i = 0, tree_block_level = 0, ret = 0;
 	struct btrfs_key key;
 	int nritems = btrfs_header_nritems(leaf);
 
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 5d57ed6..bccd9de 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3187,6 +3187,8 @@ static int relocate_file_extent_cluster(struct inode *inode,
 			if (!page) {
 				btrfs_delalloc_release_metadata(BTRFS_I(inode),
 							PAGE_SIZE, true);
+				btrfs_delalloc_release_extents(BTRFS_I(inode),
+							PAGE_SIZE, true);
 				ret = -ENOMEM;
 				goto out;
 			}
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 3be1456..916c397 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -322,6 +322,7 @@ static struct full_stripe_lock *insert_full_stripe_lock(
 	struct rb_node *parent = NULL;
 	struct full_stripe_lock *entry;
 	struct full_stripe_lock *ret;
+	unsigned int nofs_flag;
 
 	lockdep_assert_held(&locks_root->lock);
 
@@ -339,8 +340,17 @@ static struct full_stripe_lock *insert_full_stripe_lock(
 		}
 	}
 
-	/* Insert new lock */
+	/*
+	 * Insert new lock.
+	 *
+	 * We must use GFP_NOFS because the scrub task might be waiting for a
+	 * worker task executing this function and in turn a transaction commit
+	 * might be waiting for the scrub task to pause (which needs to wait
+	 * for all the worker tasks to complete before pausing).
+	 */
+	nofs_flag = memalloc_nofs_save();
 	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
+	memalloc_nofs_restore(nofs_flag);
 	if (!ret)
 		return ERR_PTR(-ENOMEM);
 	ret->logical = fstripe_logical;
@@ -568,12 +578,11 @@ static void scrub_put_ctx(struct scrub_ctx *sctx)
 		scrub_free_ctx(sctx);
 }
 
-static noinline_for_stack
-struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
+static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
+		struct btrfs_fs_info *fs_info, int is_dev_replace)
 {
 	struct scrub_ctx *sctx;
 	int		i;
-	struct btrfs_fs_info *fs_info = dev->fs_info;
 
 	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
 	if (!sctx)
@@ -582,7 +591,8 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 	sctx->is_dev_replace = is_dev_replace;
 	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
 	sctx->curr = -1;
-	sctx->fs_info = dev->fs_info;
+	sctx->fs_info = fs_info;
+	INIT_LIST_HEAD(&sctx->csum_list);
 	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
 		struct scrub_bio *sbio;
 
@@ -607,7 +617,6 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 	atomic_set(&sctx->workers_pending, 0);
 	atomic_set(&sctx->cancel_req, 0);
 	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
-	INIT_LIST_HEAD(&sctx->csum_list);
 
 	spin_lock_init(&sctx->list_lock);
 	spin_lock_init(&sctx->stat_lock);
@@ -1622,8 +1631,19 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 	mutex_lock(&sctx->wr_lock);
 again:
 	if (!sctx->wr_curr_bio) {
+		unsigned int nofs_flag;
+
+		/*
+		 * We must use GFP_NOFS because the scrub task might be waiting
+		 * for a worker task executing this function and in turn a
+		 * transaction commit might be waiting for the scrub task to pause
+		 * (which needs to wait for all the worker tasks to complete
+		 * before pausing).
+		 */
+		nofs_flag = memalloc_nofs_save();
 		sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
 					      GFP_KERNEL);
+		memalloc_nofs_restore(nofs_flag);
 		if (!sctx->wr_curr_bio) {
 			mutex_unlock(&sctx->wr_lock);
 			return -ENOMEM;
@@ -3022,8 +3042,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 					   struct map_lookup *map,
 					   struct btrfs_device *scrub_dev,
-					   int num, u64 base, u64 length,
-					   int is_dev_replace)
+					   int num, u64 base, u64 length)
 {
 	struct btrfs_path *path, *ppath;
 	struct btrfs_fs_info *fs_info = sctx->fs_info;
@@ -3299,7 +3318,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 			extent_physical = extent_logical - logical + physical;
 			extent_dev = scrub_dev;
 			extent_mirror_num = mirror_num;
-			if (is_dev_replace)
+			if (sctx->is_dev_replace)
 				scrub_remap_extent(fs_info, extent_logical,
 						   extent_len, &extent_physical,
 						   &extent_dev,
@@ -3397,8 +3416,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
 					  struct btrfs_device *scrub_dev,
 					  u64 chunk_offset, u64 length,
 					  u64 dev_offset,
-					  struct btrfs_block_group_cache *cache,
-					  int is_dev_replace)
+					  struct btrfs_block_group_cache *cache)
 {
 	struct btrfs_fs_info *fs_info = sctx->fs_info;
 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
@@ -3435,8 +3453,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
 		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
 		    map->stripes[i].physical == dev_offset) {
 			ret = scrub_stripe(sctx, map, scrub_dev, i,
-					   chunk_offset, length,
-					   is_dev_replace);
+					   chunk_offset, length);
 			if (ret)
 				goto out;
 		}
@@ -3449,8 +3466,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
 
 static noinline_for_stack
 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
-			   struct btrfs_device *scrub_dev, u64 start, u64 end,
-			   int is_dev_replace)
+			   struct btrfs_device *scrub_dev, u64 start, u64 end)
 {
 	struct btrfs_dev_extent *dev_extent = NULL;
 	struct btrfs_path *path;
@@ -3544,7 +3560,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		 */
 		scrub_pause_on(fs_info);
 		ret = btrfs_inc_block_group_ro(cache);
-		if (!ret && is_dev_replace) {
+		if (!ret && sctx->is_dev_replace) {
 			/*
 			 * If we are doing a device replace wait for any tasks
 			 * that started dellaloc right before we set the block
@@ -3609,7 +3625,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		dev_replace->item_needs_writeback = 1;
 		btrfs_dev_replace_write_unlock(&fs_info->dev_replace);
 		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
-				  found_key.offset, cache, is_dev_replace);
+				  found_key.offset, cache);
 
 		/*
 		 * flush, submit all pending read and write bios, afterwards
@@ -3670,7 +3686,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		btrfs_put_block_group(cache);
 		if (ret)
 			break;
-		if (is_dev_replace &&
+		if (sctx->is_dev_replace &&
 		    atomic64_read(&dev_replace->num_write_errors) > 0) {
 			ret = -EIO;
 			break;
@@ -3762,16 +3778,6 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
 	return -ENOMEM;
 }
 
-static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
-{
-	if (--fs_info->scrub_workers_refcnt == 0) {
-		btrfs_destroy_workqueue(fs_info->scrub_workers);
-		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
-		btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
-	}
-	WARN_ON(fs_info->scrub_workers_refcnt < 0);
-}
-
 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 		    u64 end, struct btrfs_scrub_progress *progress,
 		    int readonly, int is_dev_replace)
@@ -3779,6 +3785,10 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 	struct scrub_ctx *sctx;
 	int ret;
 	struct btrfs_device *dev;
+	unsigned int nofs_flag;
+	struct btrfs_workqueue *scrub_workers = NULL;
+	struct btrfs_workqueue *scrub_wr_comp = NULL;
+	struct btrfs_workqueue *scrub_parity = NULL;
 
 	if (btrfs_fs_closing(fs_info))
 		return -EINVAL;
@@ -3820,13 +3830,18 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 		return -EINVAL;
 	}
 
+	/* Allocate outside of device_list_mutex */
+	sctx = scrub_setup_ctx(fs_info, is_dev_replace);
+	if (IS_ERR(sctx))
+		return PTR_ERR(sctx);
 
 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
 	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
 	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
 		     !is_dev_replace)) {
 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto out_free_ctx;
 	}
 
 	if (!is_dev_replace && !readonly &&
@@ -3834,7 +3849,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 		btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
 				rcu_str_deref(dev->name));
-		return -EROFS;
+		ret = -EROFS;
+		goto out_free_ctx;
 	}
 
 	mutex_lock(&fs_info->scrub_lock);
@@ -3842,7 +3858,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
 		mutex_unlock(&fs_info->scrub_lock);
 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-		return -EIO;
+		ret = -EIO;
+		goto out_free_ctx;
 	}
 
 	btrfs_dev_replace_read_lock(&fs_info->dev_replace);
@@ -3852,7 +3869,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 		btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
 		mutex_unlock(&fs_info->scrub_lock);
 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-		return -EINPROGRESS;
+		ret = -EINPROGRESS;
+		goto out_free_ctx;
 	}
 	btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
 
@@ -3860,16 +3878,9 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 	if (ret) {
 		mutex_unlock(&fs_info->scrub_lock);
 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-		return ret;
+		goto out_free_ctx;
 	}
 
-	sctx = scrub_setup_ctx(dev, is_dev_replace);
-	if (IS_ERR(sctx)) {
-		mutex_unlock(&fs_info->scrub_lock);
-		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-		scrub_workers_put(fs_info);
-		return PTR_ERR(sctx);
-	}
 	sctx->readonly = readonly;
 	dev->scrub_ctx = sctx;
 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
@@ -3882,6 +3893,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 	atomic_inc(&fs_info->scrubs_running);
 	mutex_unlock(&fs_info->scrub_lock);
 
+	/*
+	 * In order to avoid deadlock with reclaim when there is a transaction
+	 * trying to pause scrub, make sure we use GFP_NOFS for all the
+	 * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity()
+	 * invoked by our callees. The pausing request is done when the
+	 * transaction commit starts, and it blocks the transaction until scrub
+	 * is paused (done at specific points in scrub_stripe() or right
+	 * above, before incrementing fs_info->scrubs_running).
+	 */
+	nofs_flag = memalloc_nofs_save();
 	if (!is_dev_replace) {
 		/*
 		 * by holding device list mutex, we can
@@ -3893,8 +3914,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 	}
 
 	if (!ret)
-		ret = scrub_enumerate_chunks(sctx, dev, start, end,
-					     is_dev_replace);
+		ret = scrub_enumerate_chunks(sctx, dev, start, end);
+	memalloc_nofs_restore(nofs_flag);
 
 	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
 	atomic_dec(&fs_info->scrubs_running);
@@ -3907,12 +3928,24 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 
 	mutex_lock(&fs_info->scrub_lock);
 	dev->scrub_ctx = NULL;
-	scrub_workers_put(fs_info);
+	if (--fs_info->scrub_workers_refcnt == 0) {
+		scrub_workers = fs_info->scrub_workers;
+		scrub_wr_comp = fs_info->scrub_wr_completion_workers;
+		scrub_parity = fs_info->scrub_parity_workers;
+	}
 	mutex_unlock(&fs_info->scrub_lock);
 
+	btrfs_destroy_workqueue(scrub_workers);
+	btrfs_destroy_workqueue(scrub_wr_comp);
+	btrfs_destroy_workqueue(scrub_parity);
 	scrub_put_ctx(sctx);
 
 	return ret;
+
+out_free_ctx:
+	scrub_free_ctx(sctx);
+
+	return ret;
 }
 
 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
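
The scrub hunks above all wrap allocations in memalloc_nofs_save()/memalloc_nofs_restore() rather than hardcoding GFP_NOFS. A minimal sketch of that scoped-NOFS pattern, assuming only the <linux/sched/mm.h> API (the helper below is illustrative, not btrfs code):

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *alloc_without_fs_reclaim(size_t size)
{
	unsigned int nofs_flag;
	void *p;

	/*
	 * Inside the scope, every allocation made by this task behaves
	 * as GFP_NOFS even if a callee asks for GFP_KERNEL, so direct
	 * reclaim cannot re-enter the filesystem and deadlock against a
	 * transaction commit waiting for scrub to pause.
	 */
	nofs_flag = memalloc_nofs_save();
	p = kmalloc(size, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	return p;
}

The scoped form is preferred because the restriction is applied once at the boundary of the problematic section and automatically covers every allocation done by callees.
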
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index f1ca53a..26317bc 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -28,15 +28,18 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
 	[TRANS_STATE_COMMIT_START]	= (__TRANS_START | __TRANS_ATTACH),
 	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_START |
 					   __TRANS_ATTACH |
-					   __TRANS_JOIN),
+					   __TRANS_JOIN |
+					   __TRANS_JOIN_NOSTART),
 	[TRANS_STATE_UNBLOCKED]		= (__TRANS_START |
 					   __TRANS_ATTACH |
 					   __TRANS_JOIN |
-					   __TRANS_JOIN_NOLOCK),
+					   __TRANS_JOIN_NOLOCK |
+					   __TRANS_JOIN_NOSTART),
 	[TRANS_STATE_COMPLETED]		= (__TRANS_START |
 					   __TRANS_ATTACH |
 					   __TRANS_JOIN |
-					   __TRANS_JOIN_NOLOCK),
+					   __TRANS_JOIN_NOLOCK |
+					   __TRANS_JOIN_NOSTART),
 };
 
 void btrfs_put_transaction(struct btrfs_transaction *transaction)
@@ -531,7 +534,8 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
 		ret = join_transaction(fs_info, type);
 		if (ret == -EBUSY) {
 			wait_current_trans(fs_info);
-			if (unlikely(type == TRANS_ATTACH))
+			if (unlikely(type == TRANS_ATTACH ||
+				     type == TRANS_JOIN_NOSTART))
 				ret = -ENOENT;
 		}
 	} while (ret == -EBUSY);
@@ -648,6 +652,16 @@ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root
 }
 
 /*
+ * Similar to regular join but it never starts a transaction when none is
+ * running or after waiting for the current one to finish.
+ */
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
+{
+	return start_transaction(root, 0, TRANS_JOIN_NOSTART,
+				 BTRFS_RESERVE_NO_FLUSH, true);
+}
+
+/*
  * btrfs_attach_transaction() - catch the running transaction
  *
  * It is used when we want to commit the current the transaction, but
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 4cbb1b5..c1d34cc 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -97,11 +97,13 @@ struct btrfs_transaction {
 #define __TRANS_JOIN		(1U << 11)
 #define __TRANS_JOIN_NOLOCK	(1U << 12)
 #define __TRANS_DUMMY		(1U << 13)
+#define __TRANS_JOIN_NOSTART	(1U << 14)
 
 #define TRANS_START		(__TRANS_START | __TRANS_FREEZABLE)
 #define TRANS_ATTACH		(__TRANS_ATTACH)
 #define TRANS_JOIN		(__TRANS_JOIN | __TRANS_FREEZABLE)
 #define TRANS_JOIN_NOLOCK	(__TRANS_JOIN_NOLOCK)
+#define TRANS_JOIN_NOSTART	(__TRANS_JOIN_NOSTART)
 
 #define TRANS_EXTWRITERS	(__TRANS_START | __TRANS_ATTACH)
 
@@ -187,6 +189,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
 					int min_factor);
 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
 					struct btrfs_root *root);
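
A hedged usage sketch for the new TRANS_JOIN_NOSTART flavor: per the start_transaction() change above, a join-nostart caller receives -ENOENT when no transaction is running (instead of having one started for it), which it can treat as nothing to do. The caller shape below is illustrative, not taken from this series:

static int example_join_running_transaction(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		/* -ENOENT: no transaction is running, nothing to join */
		if (PTR_ERR(trans) == -ENOENT)
			return 0;
		return PTR_ERR(trans);
	}
	/* ... work that must ride along with the running transaction ... */
	return btrfs_end_transaction(trans);
}
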
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 08c5afa..4d4f57f 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2860,7 +2860,8 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
  * in the tree of log roots
  */
 static int update_log_root(struct btrfs_trans_handle *trans,
-			   struct btrfs_root *log)
+			   struct btrfs_root *log,
+			   struct btrfs_root_item *root_item)
 {
 	struct btrfs_fs_info *fs_info = log->fs_info;
 	int ret;
@@ -2868,10 +2869,10 @@ static int update_log_root(struct btrfs_trans_handle *trans,
 	if (log->log_transid == 1) {
 		/* insert root item on the first sync */
 		ret = btrfs_insert_root(trans, fs_info->log_root_tree,
-				&log->root_key, &log->root_item);
+				&log->root_key, root_item);
 	} else {
 		ret = btrfs_update_root(trans, fs_info->log_root_tree,
-				&log->root_key, &log->root_item);
+				&log->root_key, root_item);
 	}
 	return ret;
 }
@@ -2969,6 +2970,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_root *log = root->log_root;
 	struct btrfs_root *log_root_tree = fs_info->log_root_tree;
+	struct btrfs_root_item new_root_item;
 	int log_transid = 0;
 	struct btrfs_log_ctx root_log_ctx;
 	struct blk_plug plug;
@@ -3032,18 +3034,26 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 
+	/*
+	 * We _must_ update under the root->log_mutex in order to make sure we
+	 * have a consistent view of the log root we are trying to commit at
+	 * this moment.
+	 *
+	 * We _must_ make a local copy of this, because we are not holding the
+	 * log_root_tree->log_mutex yet.  This is important because when we
+	 * commit the log_root_tree we must have a consistent view of the
+	 * log_root_tree when we update the super block to point at the
+	 * log_root_tree bytenr.  If we update the log_root_tree here we'll race
+	 * with the commit and possibly point at the new block which we may not
+	 * have written out.
+	 */
 	btrfs_set_root_node(&log->root_item, log->node);
+	memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
 
 	root->log_transid++;
 	log->log_transid = root->log_transid;
 	root->log_start_pid = 0;
 	/*
-	 * Update or create log root item under the root's log_mutex to prevent
-	 * races with concurrent log syncs that can lead to failure to update
-	 * log root item because it was not created yet.
-	 */
-	ret = update_log_root(trans, log);
-	/*
 	 * IO has been started, blocks of the log tree have WRITTEN flag set
 	 * in their headers. new modifications of the log will be written to
 	 * new positions. so it's safe to allow log writers to go in.
@@ -3063,6 +3073,14 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	mutex_unlock(&log_root_tree->log_mutex);
 
 	mutex_lock(&log_root_tree->log_mutex);
+
+	/*
+	 * Now we are safe to update the log_root_tree because we're under the
+	 * log_mutex, and we're a current writer so we're holding the commit
+	 * open until we drop the log_mutex.
+	 */
+	ret = update_log_root(trans, log, &new_root_item);
+
 	if (atomic_dec_and_test(&log_root_tree->log_writers)) {
 		/* atomic_dec_and_test implies a barrier */
 		cond_wake_up_nomb(&log_root_tree->log_writer_wait);
@@ -5107,7 +5125,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 						BTRFS_I(other_inode),
 						LOG_OTHER_INODE, 0, LLONG_MAX,
 						ctx);
-				iput(other_inode);
+				btrfs_add_delayed_iput(other_inode);
 				if (err)
 					goto out_unlock;
 				else
@@ -5519,7 +5537,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
 			}
 
 			if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
-				iput(di_inode);
+				btrfs_add_delayed_iput(di_inode);
 				break;
 			}
 
@@ -5531,7 +5549,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
 			if (!ret &&
 			    btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
 				ret = 1;
-			iput(di_inode);
+			btrfs_add_delayed_iput(di_inode);
 			if (ret)
 				goto next_dir_inode;
 			if (ctx->log_new_dentries) {
@@ -5678,7 +5696,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
 			if (!ret && ctx && ctx->log_new_dentries)
 				ret = log_new_dir_dentries(trans, root,
 						   BTRFS_I(dir_inode), ctx);
-			iput(dir_inode);
+			btrfs_add_delayed_iput(dir_inode);
 			if (ret)
 				goto out;
 		}
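
The tree-log fix above is an instance of snapshot-then-publish: take a consistent copy of shared state under the lock that serializes the writer (root->log_mutex), then publish it later under the lock that serializes the consumers (log_root_tree->log_mutex), so a concurrent log commit can never observe a half-updated root item. A generic sketch of the shape, with illustrative names:

#include <linux/mutex.h>
#include <linux/types.h>

struct item { u64 bytenr; u64 gen; };

static struct item shared;	/* updated under lock_a */
static struct item published;	/* read by consumers under lock_b */

static void snapshot_then_publish(struct mutex *lock_a, struct mutex *lock_b)
{
	struct item local;

	mutex_lock(lock_a);
	local = shared;			/* consistent snapshot */
	mutex_unlock(lock_a);

	mutex_lock(lock_b);
	published = local;		/* consumers never see a torn update */
	mutex_unlock(lock_b);
}
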
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 6e008bd..a8297e7 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -7411,6 +7411,7 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
 	struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
 	struct extent_map *em;
 	struct map_lookup *map;
+	struct btrfs_device *dev;
 	u64 stripe_len;
 	bool found = false;
 	int ret = 0;
@@ -7460,6 +7461,34 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
 			physical_offset, devid);
 		ret = -EUCLEAN;
 	}
+
+	/* Make sure no dev extent is beyond the device boundary */
+	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
+	if (!dev) {
+		btrfs_err(fs_info, "failed to find devid %llu", devid);
+		ret = -EUCLEAN;
+		goto out;
+	}
+
+	/* It's possible this device is a dummy for a seed device */
+	if (dev->disk_total_bytes == 0) {
+		dev = find_device(fs_info->fs_devices->seed, devid, NULL);
+		if (!dev) {
+			btrfs_err(fs_info, "failed to find seed devid %llu",
+				  devid);
+			ret = -EUCLEAN;
+			goto out;
+		}
+	}
+
+	if (physical_offset + physical_len > dev->disk_total_bytes) {
+		btrfs_err(fs_info,
+"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
+			  devid, physical_offset, physical_len,
+			  dev->disk_total_bytes);
+		ret = -EUCLEAN;
+		goto out;
+	}
 out:
 	free_extent_map(em);
 	return ret;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 9c332a6..476728b 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -913,8 +913,9 @@ static int ceph_writepages_start(struct address_space *mapping,
 			if (page_offset(page) >= ceph_wbc.i_size) {
 				dout("%p page eof %llu\n",
 				     page, ceph_wbc.i_size);
-				if (ceph_wbc.size_stable ||
-				    page_offset(page) >= i_size_read(inode))
+				if ((ceph_wbc.size_stable ||
+				    page_offset(page) >= i_size_read(inode)) &&
+				    clear_page_dirty_for_io(page))
 					mapping->a_ops->invalidatepage(page,
 								0, PAGE_SIZE);
 				unlock_page(page);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index a11fa0b..db547af0 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1280,6 +1280,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 {
 	struct ceph_inode_info *ci = cap->ci;
 	struct inode *inode = &ci->vfs_inode;
+	struct ceph_buffer *old_blob = NULL;
 	struct cap_msg_args arg;
 	int held, revoking;
 	int wake = 0;
@@ -1344,7 +1345,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 	ci->i_requested_max_size = arg.max_size;
 
 	if (flushing & CEPH_CAP_XATTR_EXCL) {
-		__ceph_build_xattrs_blob(ci);
+		old_blob = __ceph_build_xattrs_blob(ci);
 		arg.xattr_version = ci->i_xattrs.version;
 		arg.xattr_buf = ci->i_xattrs.blob;
 	} else {
@@ -1379,6 +1380,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 
 	spin_unlock(&ci->i_ceph_lock);
 
+	ceph_buffer_put(old_blob);
+
 	ret = send_cap_msg(&arg);
 	if (ret < 0) {
 		dout("error sending cap msg, must requeue %p\n", inode);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 3e518c2..8196c21 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -528,13 +528,16 @@ static void ceph_i_callback(struct rcu_head *head)
 	kmem_cache_free(ceph_inode_cachep, ci);
 }
 
-void ceph_destroy_inode(struct inode *inode)
+void ceph_evict_inode(struct inode *inode)
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	struct ceph_inode_frag *frag;
 	struct rb_node *n;
 
-	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
+	dout("evict_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
+
+	truncate_inode_pages_final(&inode->i_data);
+	clear_inode(inode);
 
 	ceph_fscache_unregister_inode_cookie(ci);
 
@@ -576,7 +579,10 @@ void ceph_destroy_inode(struct inode *inode)
 		ceph_buffer_put(ci->i_xattrs.prealloc_blob);
 
 	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
+}
 
+void ceph_destroy_inode(struct inode *inode)
+{
 	call_rcu(&inode->i_rcu, ceph_i_callback);
 }
 
@@ -742,6 +748,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 	int issued, new_issued, info_caps;
 	struct timespec64 mtime, atime, ctime;
 	struct ceph_buffer *xattr_blob = NULL;
+	struct ceph_buffer *old_blob = NULL;
 	struct ceph_string *pool_ns = NULL;
 	struct ceph_cap *new_cap = NULL;
 	int err = 0;
@@ -800,7 +807,12 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 
 	/* update inode */
 	inode->i_rdev = le32_to_cpu(info->rdev);
-	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+	/* directories have fl_stripe_unit set to zero */
+	if (le32_to_cpu(info->layout.fl_stripe_unit))
+		inode->i_blkbits =
+			fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+	else
+		inode->i_blkbits = CEPH_BLOCK_SHIFT;
 
 	__ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
 
@@ -878,7 +890,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))  &&
 	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
 		if (ci->i_xattrs.blob)
-			ceph_buffer_put(ci->i_xattrs.blob);
+			old_blob = ci->i_xattrs.blob;
 		ci->i_xattrs.blob = xattr_blob;
 		if (xattr_blob)
 			memcpy(ci->i_xattrs.blob->vec.iov_base,
@@ -1017,8 +1029,8 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 out:
 	if (new_cap)
 		ceph_put_cap(mdsc, new_cap);
-	if (xattr_blob)
-		ceph_buffer_put(xattr_blob);
+	ceph_buffer_put(old_blob);
+	ceph_buffer_put(xattr_blob);
 	ceph_put_string(pool_ns);
 	return err;
 }
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 9dae2ec..6a8f4a9 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -111,8 +111,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
 		req->r_wait_for_completion = ceph_lock_wait_for_completion;
 
 	err = ceph_mdsc_do_request(mdsc, inode, req);
-
-	if (operation == CEPH_MDS_OP_GETFILELOCK) {
+	if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
 		fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
 		if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
 			fl->fl_type = F_RDLCK;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index bfcf11c..09db6d0 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -3640,7 +3640,9 @@ static void delayed_work(struct work_struct *work)
 				pr_info("mds%d hung\n", s->s_mds);
 			}
 		}
-		if (s->s_state < CEPH_MDS_SESSION_OPEN) {
+		if (s->s_state == CEPH_MDS_SESSION_NEW ||
+		    s->s_state == CEPH_MDS_SESSION_RESTARTING ||
+		    s->s_state == CEPH_MDS_SESSION_REJECTED) {
 			/* this mds is failed or recovering, just wait */
 			ceph_put_mds_session(s);
 			continue;
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 1f46b02..5cf7b5f 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -460,6 +460,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 	struct inode *inode = &ci->vfs_inode;
 	struct ceph_cap_snap *capsnap;
 	struct ceph_snap_context *old_snapc, *new_snapc;
+	struct ceph_buffer *old_blob = NULL;
 	int used, dirty;
 
 	capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
@@ -536,7 +537,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 	capsnap->gid = inode->i_gid;
 
 	if (dirty & CEPH_CAP_XATTR_EXCL) {
-		__ceph_build_xattrs_blob(ci);
+		old_blob = __ceph_build_xattrs_blob(ci);
 		capsnap->xattr_blob =
 			ceph_buffer_get(ci->i_xattrs.blob);
 		capsnap->xattr_version = ci->i_xattrs.version;
@@ -579,6 +580,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 	}
 	spin_unlock(&ci->i_ceph_lock);
 
+	ceph_buffer_put(old_blob);
 	kfree(capsnap);
 	ceph_put_snap_context(old_snapc);
 }
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index c5cf46e..ccab249 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -830,6 +830,7 @@ static const struct super_operations ceph_super_ops = {
 	.destroy_inode	= ceph_destroy_inode,
 	.write_inode    = ceph_write_inode,
 	.drop_inode	= ceph_drop_inode,
+	.evict_inode	= ceph_evict_inode,
 	.sync_fs        = ceph_sync_fs,
 	.put_super	= ceph_put_super,
 	.remount_fs	= ceph_remount,
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index d8579a5..8d3eabf 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -854,6 +854,7 @@ static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci)
 extern const struct inode_operations ceph_file_iops;
 
 extern struct inode *ceph_alloc_inode(struct super_block *sb);
+extern void ceph_evict_inode(struct inode *inode);
 extern void ceph_destroy_inode(struct inode *inode);
 extern int ceph_drop_inode(struct inode *inode);
 
@@ -896,7 +897,7 @@ extern int ceph_getattr(const struct path *path, struct kstat *stat,
 int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
 ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
 extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
-extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci);
+extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
 extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
 extern void __init ceph_xattr_init(void);
 extern void ceph_xattr_exit(void);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 0a2d489..5e4f3f8 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -734,12 +734,15 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
 
 /*
  * If there are dirty xattrs, reencode xattrs into the prealloc_blob
- * and swap into place.
+ * and swap into place.  It returns the old i_xattrs.blob (or NULL) so
+ * that it can be freed by the caller as the i_ceph_lock is likely to be
+ * held.
  */
-void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
+struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
 {
 	struct rb_node *p;
 	struct ceph_inode_xattr *xattr = NULL;
+	struct ceph_buffer *old_blob = NULL;
 	void *dest;
 
 	dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
@@ -770,12 +773,14 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
 			dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
 
 		if (ci->i_xattrs.blob)
-			ceph_buffer_put(ci->i_xattrs.blob);
+			old_blob = ci->i_xattrs.blob;
 		ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
 		ci->i_xattrs.prealloc_blob = NULL;
 		ci->i_xattrs.dirty = false;
 		ci->i_xattrs.version++;
 	}
+
+	return old_blob;
 }
 
 static inline int __get_request_mask(struct inode *in) {
@@ -1011,6 +1016,7 @@ int __ceph_setxattr(struct inode *inode, const char *name,
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
 	struct ceph_cap_flush *prealloc_cf = NULL;
+	struct ceph_buffer *old_blob = NULL;
 	int issued;
 	int err;
 	int dirty = 0;
@@ -1084,13 +1090,15 @@ int __ceph_setxattr(struct inode *inode, const char *name,
 		struct ceph_buffer *blob;
 
 		spin_unlock(&ci->i_ceph_lock);
-		dout(" preaallocating new blob size=%d\n", required_blob_size);
+		ceph_buffer_put(old_blob); /* Shouldn't be required */
+		dout(" pre-allocating new blob size=%d\n", required_blob_size);
 		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
 		if (!blob)
 			goto do_sync_unlocked;
 		spin_lock(&ci->i_ceph_lock);
+		/* prealloc_blob can't be released while holding i_ceph_lock */
 		if (ci->i_xattrs.prealloc_blob)
-			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
+			old_blob = ci->i_xattrs.prealloc_blob;
 		ci->i_xattrs.prealloc_blob = blob;
 		goto retry;
 	}
@@ -1106,6 +1114,7 @@ int __ceph_setxattr(struct inode *inode, const char *name,
 	}
 
 	spin_unlock(&ci->i_ceph_lock);
+	ceph_buffer_put(old_blob);
 	if (lock_snap_rwsem)
 		up_read(&mdsc->snap_rwsem);
 	if (dirty)
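
All of the ceph hunks above share one shape: the old xattr blob is detached while holding the i_ceph_lock spinlock, and the reference is dropped only after unlocking, because the final ceph_buffer_put() frees the buffer (freeing a vmalloc-backed buffer may sleep) and so must not run under a spinlock. A condensed sketch of the pattern, with names as in the hunks and the surrounding logic elided:

	struct ceph_buffer *old_blob = NULL;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_xattrs.blob)
		old_blob = ci->i_xattrs.blob;	/* detach, don't put here */
	ci->i_xattrs.blob = new_blob;
	spin_unlock(&ci->i_ceph_lock);

	ceph_buffer_put(old_blob);	/* NULL-safe put, outside the lock */

Note the unconditional put: these hunks rely on ceph_buffer_put() accepting NULL, which keeps every unlock path uniform.
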
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 9731d0d..aba2b48 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -72,5 +72,10 @@ struct cifs_sb_info {
 	struct delayed_work prune_tlinks;
 	struct rcu_head rcu;
 	char *prepath;
+	/*
+	 * Indicate whether the serverino option was turned off later
+	 * (via cifs_autodisable_serverino) so it can be taken into
+	 * account when matching new mounts.
+	 */
+	bool mnt_cifs_serverino_autodisabled;
 };
 #endif				/* _CIFS_FS_SB_H */
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index fb32f3d..d545701 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -292,6 +292,7 @@ cifs_alloc_inode(struct super_block *sb)
 	cifs_inode->uniqueid = 0;
 	cifs_inode->createtime = 0;
 	cifs_inode->epoch = 0;
+	spin_lock_init(&cifs_inode->open_file_lock);
 	generate_random_uuid(cifs_inode->lease_key);
 
 	/*
@@ -427,6 +428,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
 	cifs_show_security(s, tcon->ses);
 	cifs_show_cache_flavor(s, cifs_sb);
 
+	if (tcon->no_lease)
+		seq_puts(s, ",nolease");
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
 		seq_puts(s, ",multiuser");
 	else if (tcon->ses->user_name)
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 6f227cc..4dbae6e 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -543,6 +543,7 @@ struct smb_vol {
 	bool noblocksnd:1;
 	bool noautotune:1;
 	bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
+	bool no_lease:1;     /* disable requesting leases */
 	bool fsc:1;	/* enable fscache */
 	bool mfsymlinks:1; /* use Minshall+French Symlinks */
 	bool multiuser:1;
@@ -1004,6 +1005,7 @@ struct cifs_tcon {
 	bool need_reopen_files:1; /* need to reopen tcon file handles */
 	bool use_resilient:1; /* use resilient instead of durable handles */
 	bool use_persistent:1; /* use persistent instead of durable handles */
+	bool no_lease:1;    /* Do not request leases on files or directories */
 	__le32 capabilities;
 	__u32 share_flags;
 	__u32 maximal_access;
@@ -1287,6 +1289,7 @@ struct cifsInodeInfo {
 	struct rw_semaphore lock_sem;	/* protect the fields above */
 	/* BB add in lists for dirty pages i.e. write caching info for oplock */
 	struct list_head openFileList;
+	spinlock_t	open_file_lock;	/* protects openFileList */
 	__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
 	unsigned int oplock;		/* oplock/lease level we have */
 	unsigned int epoch;		/* used to track lease state changes */
@@ -1563,6 +1566,25 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
 	kfree(param);
 }
 
+static inline bool is_interrupt_error(int error)
+{
+	switch (error) {
+	case -EINTR:
+	case -ERESTARTSYS:
+	case -ERESTARTNOHAND:
+	case -ERESTARTNOINTR:
+		return true;
+	}
+	return false;
+}
+
+static inline bool is_retryable_error(int error)
+{
+	if (is_interrupt_error(error) || error == -EAGAIN)
+		return true;
+	return false;
+}
+
 #define   MID_FREE 0
 #define   MID_REQUEST_ALLOCATED 1
 #define   MID_REQUEST_SUBMITTED 2
@@ -1668,10 +1690,14 @@ require use of the stronger protocol */
  *  tcp_ses_lock protects:
  *	list operations on tcp and SMB session lists
  *  tcon->open_file_lock protects the list of open files hanging off the tcon
+ *  inode->open_file_lock protects the openFileList hanging off the inode
  *  cfile->file_info_lock protects counters and fields in cifs file struct
  *  f_owner.lock protects certain per file struct operations
  *  mapping->page_lock protects certain per page operations
  *
+ *  Note that the cifs_tcon.open_file_lock should be taken before
+ *  not after the cifsInodeInfo.open_file_lock
+ *
  *  Semaphores
  *  ----------
  *  sesSem     operations on smb session
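
The two helpers added to cifsglob.h give the writeback paths one place to classify errors: interrupt-style errors (-EINTR and friends) mean the operation was cut short and must not be recorded against the mapping, while retryable errors (interrupts plus -EAGAIN) mean pages should be redirtied and retried. A hedged sketch of the intended call pattern, mirroring the fs/cifs/file.c hunks later in this series (the completion helper itself is illustrative):

static void example_complete_page(struct address_space *mapping,
				  struct writeback_control *wbc,
				  struct page *page, int rc)
{
	if (is_retryable_error(rc))
		redirty_page_for_writepage(wbc, page);	/* retry later */
	else if (rc != 0)
		SetPageError(page);			/* hard failure */
	end_page_writeback(page);

	if (rc != 0 && !is_retryable_error(rc))
		mapping_set_error(mapping, rc);	/* surfaced by fsync() */
}
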
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 269471c8f..86a54b8 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -2033,16 +2033,17 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
 
 		wdata2->cfile = find_writable_file(CIFS_I(inode), false);
 		if (!wdata2->cfile) {
-			cifs_dbg(VFS, "No writable handles for inode\n");
+			cifs_dbg(VFS, "No writable handle to retry writepages\n");
 			rc = -EBADF;
-			break;
+		} else {
+			wdata2->pid = wdata2->cfile->pid;
+			rc = server->ops->async_writev(wdata2,
+						       cifs_writedata_release);
 		}
-		wdata2->pid = wdata2->cfile->pid;
-		rc = server->ops->async_writev(wdata2, cifs_writedata_release);
 
 		for (j = 0; j < nr_pages; j++) {
 			unlock_page(wdata2->pages[j]);
-			if (rc != 0 && rc != -EAGAIN) {
+			if (rc != 0 && !is_retryable_error(rc)) {
 				SetPageError(wdata2->pages[j]);
 				end_page_writeback(wdata2->pages[j]);
 				put_page(wdata2->pages[j]);
@@ -2051,8 +2052,9 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
 
 		if (rc) {
 			kref_put(&wdata2->refcount, cifs_writedata_release);
-			if (rc == -EAGAIN)
+			if (is_retryable_error(rc))
 				continue;
+			i += nr_pages;
 			break;
 		}
 
@@ -2060,7 +2062,15 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
 		i += nr_pages;
 	} while (i < wdata->nr_pages);
 
-	mapping_set_error(inode->i_mapping, rc);
+	/* clean up the remaining pages from the original wdata */
+	for (; i < wdata->nr_pages; i++) {
+		SetPageError(wdata->pages[i]);
+		end_page_writeback(wdata->pages[i]);
+		put_page(wdata->pages[i]);
+	}
+
+	if (rc != 0 && !is_retryable_error(rc))
+		mapping_set_error(inode->i_mapping, rc);
 	kref_put(&wdata->refcount, cifs_writedata_release);
 }
 
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index c53a2e8..966e493 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -70,7 +70,7 @@ enum {
 	Opt_user_xattr, Opt_nouser_xattr,
 	Opt_forceuid, Opt_noforceuid,
 	Opt_forcegid, Opt_noforcegid,
-	Opt_noblocksend, Opt_noautotune,
+	Opt_noblocksend, Opt_noautotune, Opt_nolease,
 	Opt_hard, Opt_soft, Opt_perm, Opt_noperm,
 	Opt_mapposix, Opt_nomapposix,
 	Opt_mapchars, Opt_nomapchars, Opt_sfu,
@@ -129,6 +129,7 @@ static const match_table_t cifs_mount_option_tokens = {
 	{ Opt_noforcegid, "noforcegid" },
 	{ Opt_noblocksend, "noblocksend" },
 	{ Opt_noautotune, "noautotune" },
+	{ Opt_nolease, "nolease" },
 	{ Opt_hard, "hard" },
 	{ Opt_soft, "soft" },
 	{ Opt_perm, "perm" },
@@ -1542,6 +1543,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
 		case Opt_noautotune:
 			vol->noautotune = 1;
 			break;
+		case Opt_nolease:
+			vol->no_lease = 1;
+			break;
 		case Opt_hard:
 			vol->retry = 1;
 			break;
@@ -2756,6 +2760,7 @@ static int
 cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
 {
 	int rc = 0;
+	int is_domain = 0;
 	const char *delim, *payload;
 	char *desc;
 	ssize_t len;
@@ -2803,6 +2808,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
 			rc = PTR_ERR(key);
 			goto out_err;
 		}
+		is_domain = 1;
 	}
 
 	down_read(&key->sem);
@@ -2860,6 +2866,26 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
 		goto out_key_put;
 	}
 
+	/*
+	 * If we have a domain key then we must set the domainName for
+	 * the request.
+	 */
+	if (is_domain && ses->domainName) {
+		vol->domainname = kstrndup(ses->domainName,
+					   strlen(ses->domainName),
+					   GFP_KERNEL);
+		if (!vol->domainname) {
+			cifs_dbg(FYI, "Unable to allocate %zd bytes for "
+				 "domain\n", len);
+			rc = -ENOMEM;
+			kfree(vol->username);
+			vol->username = NULL;
+			kzfree(vol->password);
+			vol->password = NULL;
+			goto out_key_put;
+		}
+	}
+
 out_key_put:
 	up_read(&key->sem);
 	key_put(key);
@@ -3001,6 +3027,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info)
 		return 0;
 	if (tcon->snapshot_time != volume_info->snapshot_time)
 		return 0;
+	if (tcon->no_lease != volume_info->no_lease)
+		return 0;
 	return 1;
 }
 
@@ -3209,6 +3237,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
 	tcon->nocase = volume_info->nocase;
 	tcon->nohandlecache = volume_info->nohandlecache;
 	tcon->local_lease = volume_info->local_lease;
+	tcon->no_lease = volume_info->no_lease;
 	INIT_LIST_HEAD(&tcon->pending_opens);
 
 	spin_lock(&cifs_tcp_ses_lock);
@@ -3247,12 +3276,16 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
 {
 	struct cifs_sb_info *old = CIFS_SB(sb);
 	struct cifs_sb_info *new = mnt_data->cifs_sb;
+	unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK;
+	unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK;
 
 	if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
 		return 0;
 
-	if ((old->mnt_cifs_flags & CIFS_MOUNT_MASK) !=
-	    (new->mnt_cifs_flags & CIFS_MOUNT_MASK))
+	if (old->mnt_cifs_serverino_autodisabled)
+		newflags &= ~CIFS_MOUNT_SERVER_INUM;
+
+	if (oldflags != newflags)
 		return 0;
 
 	/*
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 907e85d..2fb6fa5 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -840,10 +840,16 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
 static int
 cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
 {
+	struct inode *inode;
+
 	if (flags & LOOKUP_RCU)
 		return -ECHILD;
 
 	if (d_really_is_positive(direntry)) {
+		inode = d_inode(direntry);
+		if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
+			CIFS_I(inode)->time = 0; /* force reval */
+
 		if (cifs_revalidate_dentry(direntry))
 			return 0;
 		else {
@@ -854,7 +860,7 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
 			 * attributes will have been updated by
 			 * cifs_revalidate_dentry().
 			 */
-			if (IS_AUTOMOUNT(d_inode(direntry)) &&
+			if (IS_AUTOMOUNT(inode) &&
 			   !(direntry->d_flags & DCACHE_NEED_AUTOMOUNT)) {
 				spin_lock(&direntry->d_lock);
 				direntry->d_flags |= DCACHE_NEED_AUTOMOUNT;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 23cee91..b4e33ef 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -252,6 +252,12 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
 					 xid, fid);
 
+	if (rc) {
+		server->ops->close(xid, tcon, fid);
+		if (rc == -ESTALE)
+			rc = -EOPENSTALE;
+	}
+
 out:
 	kfree(buf);
 	return rc;
@@ -336,10 +342,12 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
 	list_add(&cfile->tlist, &tcon->openFileList);
 
 	/* if readable file instance put first in list*/
+	spin_lock(&cinode->open_file_lock);
 	if (file->f_mode & FMODE_READ)
 		list_add(&cfile->flist, &cinode->openFileList);
 	else
 		list_add_tail(&cfile->flist, &cinode->openFileList);
+	spin_unlock(&cinode->open_file_lock);
 	spin_unlock(&tcon->open_file_lock);
 
 	if (fid->purge_cache)
@@ -395,10 +403,11 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
 	bool oplock_break_cancelled;
 
 	spin_lock(&tcon->open_file_lock);
-
+	spin_lock(&cifsi->open_file_lock);
 	spin_lock(&cifs_file->file_info_lock);
 	if (--cifs_file->count > 0) {
 		spin_unlock(&cifs_file->file_info_lock);
+		spin_unlock(&cifsi->open_file_lock);
 		spin_unlock(&tcon->open_file_lock);
 		return;
 	}
@@ -427,6 +436,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
 		cifs_set_oplock_level(cifsi, 0);
 	}
 
+	spin_unlock(&cifsi->open_file_lock);
 	spin_unlock(&tcon->open_file_lock);
 
 	oplock_break_cancelled = wait_oplock_handler ?
@@ -749,7 +759,8 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
 
 	if (can_flush) {
 		rc = filemap_write_and_wait(inode->i_mapping);
-		mapping_set_error(inode->i_mapping, rc);
+		if (!is_interrupt_error(rc))
+			mapping_set_error(inode->i_mapping, rc);
 
 		if (tcon->unix_ext)
 			rc = cifs_get_inode_info_unix(&inode, full_path,
@@ -1830,13 +1841,12 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
 {
 	struct cifsFileInfo *open_file = NULL;
 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
-	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 
 	/* only filter by fsuid on multiuser mounts */
 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
 		fsuid_only = false;
 
-	spin_lock(&tcon->open_file_lock);
+	spin_lock(&cifs_inode->open_file_lock);
 	/* we could simply get the first_list_entry since write-only entries
 	   are always at the end of the list but since the first entry might
 	   have a close pending, we go through the whole list */
@@ -1848,7 +1858,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
 				/* found a good file */
 				/* lock it so it will not be closed on us */
 				cifsFileInfo_get(open_file);
-				spin_unlock(&tcon->open_file_lock);
+				spin_unlock(&cifs_inode->open_file_lock);
 				return open_file;
 			} /* else might as well continue, and look for
 			     another, or simply have the caller reopen it
@@ -1856,7 +1866,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
 		} else /* write only file */
 			break; /* write only files are last so must be done */
 	}
-	spin_unlock(&tcon->open_file_lock);
+	spin_unlock(&cifs_inode->open_file_lock);
 	return NULL;
 }
 
@@ -1865,7 +1875,6 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
 {
 	struct cifsFileInfo *open_file, *inv_file = NULL;
 	struct cifs_sb_info *cifs_sb;
-	struct cifs_tcon *tcon;
 	bool any_available = false;
 	int rc;
 	unsigned int refind = 0;
@@ -1881,16 +1890,15 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
 	}
 
 	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
-	tcon = cifs_sb_master_tcon(cifs_sb);
 
 	/* only filter by fsuid on multiuser mounts */
 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
 		fsuid_only = false;
 
-	spin_lock(&tcon->open_file_lock);
+	spin_lock(&cifs_inode->open_file_lock);
 refind_writable:
 	if (refind > MAX_REOPEN_ATT) {
-		spin_unlock(&tcon->open_file_lock);
+		spin_unlock(&cifs_inode->open_file_lock);
 		return NULL;
 	}
 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
@@ -1902,7 +1910,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
 			if (!open_file->invalidHandle) {
 				/* found a good writable file */
 				cifsFileInfo_get(open_file);
-				spin_unlock(&tcon->open_file_lock);
+				spin_unlock(&cifs_inode->open_file_lock);
 				return open_file;
 			} else {
 				if (!inv_file)
@@ -1921,21 +1929,21 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
 		cifsFileInfo_get(inv_file);
 	}
 
-	spin_unlock(&tcon->open_file_lock);
+	spin_unlock(&cifs_inode->open_file_lock);
 
 	if (inv_file) {
 		rc = cifs_reopen_file(inv_file, false);
 		if (!rc)
 			return inv_file;
 		else {
-			spin_lock(&tcon->open_file_lock);
+			spin_lock(&cifs_inode->open_file_lock);
 			list_move_tail(&inv_file->flist,
 					&cifs_inode->openFileList);
-			spin_unlock(&tcon->open_file_lock);
+			spin_unlock(&cifs_inode->open_file_lock);
 			cifsFileInfo_put(inv_file);
 			++refind;
 			inv_file = NULL;
-			spin_lock(&tcon->open_file_lock);
+			spin_lock(&cifs_inode->open_file_lock);
 			goto refind_writable;
 		}
 	}
@@ -2137,6 +2145,7 @@ static int cifs_writepages(struct address_space *mapping,
 	pgoff_t end, index;
 	struct cifs_writedata *wdata;
 	int rc = 0;
+	int saved_rc = 0;
 
 	/*
 	 * If wsize is smaller than the page cache size, default to writing
@@ -2163,8 +2172,10 @@ static int cifs_writepages(struct address_space *mapping,
 
 		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
 						   &wsize, &credits);
-		if (rc)
+		if (rc != 0) {
+			done = true;
 			break;
+		}
 
 		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
 
@@ -2172,6 +2183,7 @@ static int cifs_writepages(struct address_space *mapping,
 						  &found_pages);
 		if (!wdata) {
 			rc = -ENOMEM;
+			done = true;
 			add_credits_and_wake_if(server, credits, 0);
 			break;
 		}
@@ -2200,7 +2212,7 @@ static int cifs_writepages(struct address_space *mapping,
 		if (rc != 0) {
 			add_credits_and_wake_if(server, wdata->credits, 0);
 			for (i = 0; i < nr_pages; ++i) {
-				if (rc == -EAGAIN)
+				if (is_retryable_error(rc))
 					redirty_page_for_writepage(wbc,
 							   wdata->pages[i]);
 				else
@@ -2208,7 +2220,7 @@ static int cifs_writepages(struct address_space *mapping,
 				end_page_writeback(wdata->pages[i]);
 				put_page(wdata->pages[i]);
 			}
-			if (rc != -EAGAIN)
+			if (!is_retryable_error(rc))
 				mapping_set_error(mapping, rc);
 		}
 		kref_put(&wdata->refcount, cifs_writedata_release);
@@ -2218,6 +2230,15 @@ static int cifs_writepages(struct address_space *mapping,
 			continue;
 		}
 
+		/* Return immediately if we received a signal during writing */
+		if (is_interrupt_error(rc)) {
+			done = true;
+			break;
+		}
+
+		if (rc != 0 && saved_rc == 0)
+			saved_rc = rc;
+
 		wbc->nr_to_write -= nr_pages;
 		if (wbc->nr_to_write <= 0)
 			done = true;
@@ -2235,6 +2256,9 @@ static int cifs_writepages(struct address_space *mapping,
 		goto retry;
 	}
 
+	if (saved_rc != 0)
+		rc = saved_rc;
+
 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
 		mapping->writeback_index = index;
 
@@ -2266,8 +2290,8 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
 	set_page_writeback(page);
 retry_write:
 	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
-	if (rc == -EAGAIN) {
-		if (wbc->sync_mode == WB_SYNC_ALL)
+	if (is_retryable_error(rc)) {
+		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
 			goto retry_write;
 		redirty_page_for_writepage(wbc, page);
 	} else if (rc != 0) {
@@ -3980,17 +4004,15 @@ static int cifs_readpage(struct file *file, struct page *page)
 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
 {
 	struct cifsFileInfo *open_file;
-	struct cifs_tcon *tcon =
-		cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
 
-	spin_lock(&tcon->open_file_lock);
+	spin_lock(&cifs_inode->open_file_lock);
 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
-			spin_unlock(&tcon->open_file_lock);
+			spin_unlock(&cifs_inode->open_file_lock);
 			return 1;
 		}
 	}
-	spin_unlock(&tcon->open_file_lock);
+	spin_unlock(&cifs_inode->open_file_lock);
 	return 0;
 }
 
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 1fadd314..26154db 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -410,6 +410,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
 		/* if uniqueid is different, return error */
 		if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
 		    CIFS_I(*pinode)->uniqueid != fattr.cf_uniqueid)) {
+			CIFS_I(*pinode)->time = 0; /* force reval */
 			rc = -ESTALE;
 			goto cgiiu_exit;
 		}
@@ -417,6 +418,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
 		/* if filetype is different, return error */
 		if (unlikely(((*pinode)->i_mode & S_IFMT) !=
 		    (fattr.cf_mode & S_IFMT))) {
+			CIFS_I(*pinode)->time = 0; /* force reval */
 			rc = -ESTALE;
 			goto cgiiu_exit;
 		}
@@ -926,6 +928,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
 		/* if uniqueid is different, return error */
 		if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
 		    CIFS_I(*inode)->uniqueid != fattr.cf_uniqueid)) {
+			CIFS_I(*inode)->time = 0; /* force reval */
 			rc = -ESTALE;
 			goto cgii_exit;
 		}
@@ -933,6 +936,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
 		/* if filetype is different, return error */
 		if (unlikely(((*inode)->i_mode & S_IFMT) !=
 		    (fattr.cf_mode & S_IFMT))) {
+			CIFS_I(*inode)->time = 0; /* force reval */
 			rc = -ESTALE;
 			goto cgii_exit;
 		}
@@ -2261,6 +2265,11 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
 	 * the flush returns error?
 	 */
 	rc = filemap_write_and_wait(inode->i_mapping);
+	if (is_interrupt_error(rc)) {
+		rc = -ERESTARTSYS;
+		goto out;
+	}
+
 	mapping_set_error(inode->i_mapping, rc);
 	rc = 0;
 
@@ -2404,6 +2413,11 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
 	 * the flush returns error?
 	 */
 	rc = filemap_write_and_wait(inode->i_mapping);
+	if (is_interrupt_error(rc)) {
+		rc = -ERESTARTSYS;
+		goto cifs_setattr_exit;
+	}
+
 	mapping_set_error(inode->i_mapping, rc);
 	rc = 0;
 
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index facc94e1..e45f8e3 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -523,6 +523,7 @@ cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
 {
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
 		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
+		cifs_sb->mnt_cifs_serverino_autodisabled = true;
 		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s. This server doesn't seem to support them properly. Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n",
 			 cifs_sb_master_tcon(cifs_sb)->treeName);
 	}
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 47db8eb..c7f0c85 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -183,6 +183,9 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
 	/* we do not want to loop forever */
 	last_mid = cur_mid;
 	cur_mid++;
+	/* avoid 0xFFFF MID */
+	if (cur_mid == 0xffff)
+		cur_mid++;
 
 	/*
 	 * This nested loop looks more expensive than it is.
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 0ccf8f9..f0d966d 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -553,7 +553,50 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
 	oparams.fid = pfid;
 	oparams.reconnect = false;
 
+	/*
+	 * We do not hold the lock for the open because, in case
+	 * SMB2_open needs to reconnect, it will end up calling
+	 * cifs_mark_open_files_invalid(), which takes the lock again,
+	 * thus causing a deadlock.
+	 */
+	mutex_unlock(&tcon->crfid.fid_mutex);
 	rc = SMB2_open(xid, &oparams, &srch_path, &oplock, NULL, NULL, NULL);
+	mutex_lock(&tcon->crfid.fid_mutex);
+
+	/*
+	 * Now we need to check again as the cached root might have
+	 * been successfully re-opened by a concurrent process.
+	 */
+
+	if (tcon->crfid.is_valid) {
+		/* work was already done */
+
+		/* stash fids for close() later */
+		struct cifs_fid fid = {
+			.persistent_fid = pfid->persistent_fid,
+			.volatile_fid = pfid->volatile_fid,
+		};
+
+		/*
+		 * The caller expects this function to set pfid to a valid
+		 * cached root, so we copy the existing one and get a
+		 * reference.
+		 */
+		memcpy(pfid, tcon->crfid.fid, sizeof(*pfid));
+		kref_get(&tcon->crfid.refcount);
+
+		mutex_unlock(&tcon->crfid.fid_mutex);
+
+		if (rc == 0) {
+			/* close extra handle outside of critical section */
+			SMB2_close(xid, tcon, fid.persistent_fid,
+				   fid.volatile_fid);
+		}
+		return 0;
+	}
+
+	/* Cached root is still invalid, continue normally */
+
 	if (rc == 0) {
 		memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
 		tcon->crfid.tcon = tcon;
@@ -561,6 +604,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
 		kref_init(&tcon->crfid.refcount);
 		kref_get(&tcon->crfid.refcount);
 	}
+
 	mutex_unlock(&tcon->crfid.fid_mutex);
 	return rc;
 }
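
The open_shroot() change above is the standard drop-lock / blocking-call / relock / recheck discipline: the fid mutex cannot be held across SMB2_open() because a reconnect inside it re-takes the same mutex, so after re-acquiring the mutex the function must recheck whether a concurrent opener already validated the cache and, if so, give back its own extra handle outside the critical section. Schematic sketch with pseudonames (not the cifs API):

	mutex_lock(&cache->lock);
	/* fast path elided: return the cached handle if already valid */
	mutex_unlock(&cache->lock);

	rc = blocking_open(&handle);	/* may re-enter cache->lock */

	mutex_lock(&cache->lock);
	if (cache->valid) {
		/* lost the race: use the concurrent opener's handle */
		copy_and_ref(cache, out);
		mutex_unlock(&cache->lock);
		if (rc == 0)
			close_handle(&handle);	/* drop our duplicate, unlocked */
		return 0;
	}
	if (rc == 0)
		publish(cache, &handle);	/* won the race: install ours */
	mutex_unlock(&cache->lock);
	return rc;
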
@@ -2354,6 +2398,11 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
 	if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
 		return;
 
+	/* Check if the server granted an oplock rather than a lease */
+	if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
+		return smb2_set_oplock_level(cinode, oplock, epoch,
+					     purge_cache);
+
 	if (oplock & SMB2_LEASE_READ_CACHING_HE) {
 		new_oplock |= CIFS_CACHE_READ_FLG;
 		strcat(message, "R");
@@ -2545,7 +2594,15 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
 static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
 				   unsigned int buflen)
 {
-	sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
+	void *addr;
+	/*
+	 * VMAP_STACK (at least) puts the stack into the vmalloc address space
+	 */
+	if (is_vmalloc_addr(buf))
+		addr = vmalloc_to_page(buf);
+	else
+		addr = virt_to_page(buf);
+	sg_set_page(sg, addr, buflen, offset_in_page(buf));
 }
 
 /* Assumes the first rqst has a transform header as the first iov.
@@ -3121,7 +3178,6 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
 {
 	int ret, length;
 	char *buf = server->smallbuf;
-	char *tmpbuf;
 	struct smb2_sync_hdr *shdr;
 	unsigned int pdu_length = server->pdu_size;
 	unsigned int buf_size;
@@ -3151,18 +3207,15 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
 		return length;
 
 	next_is_large = server->large_buf;
- one_more:
+one_more:
 	shdr = (struct smb2_sync_hdr *)buf;
 	if (shdr->NextCommand) {
-		if (next_is_large) {
-			tmpbuf = server->bigbuf;
+		if (next_is_large)
 			next_buffer = (char *)cifs_buf_get();
-		} else {
-			tmpbuf = server->smallbuf;
+		else
 			next_buffer = (char *)cifs_small_buf_get();
-		}
 		memcpy(next_buffer,
-		       tmpbuf + le32_to_cpu(shdr->NextCommand),
+		       buf + le32_to_cpu(shdr->NextCommand),
 		       pdu_length - le32_to_cpu(shdr->NextCommand));
 	}
 
@@ -3191,12 +3244,21 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
 		pdu_length -= le32_to_cpu(shdr->NextCommand);
 		server->large_buf = next_is_large;
 		if (next_is_large)
-			server->bigbuf = next_buffer;
+			server->bigbuf = buf = next_buffer;
 		else
-			server->smallbuf = next_buffer;
-
-		buf += le32_to_cpu(shdr->NextCommand);
+			server->smallbuf = buf = next_buffer;
 		goto one_more;
+	} else if (ret != 0) {
+		/*
+		 * ret != 0 here means that we didn't get to handle_mid(), thus
+		 * server->smallbuf and server->bigbuf are still valid. We need
+		 * to free next_buffer because it is not going to be used
+		 * anywhere.
+		 */
+		if (next_is_large)
+			free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
+		else
+			free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
 	}
 
 	return ret;
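
The smb2_sg_set_buf() change exists because virt_to_page() is only valid for directly-mapped (linear-mapping) addresses; with CONFIG_VMAP_STACK the kernel stack lives in the vmalloc area, so an on-stack buffer needs vmalloc_to_page() instead. A minimal sketch of the translation (the helper name is ours):

#include <linux/mm.h>
#include <linux/vmalloc.h>

static struct page *any_kaddr_to_page(const void *buf)
{
	/* vmalloc addresses are not covered by the linear mapping */
	if (is_vmalloc_addr(buf))
		return vmalloc_to_page(buf);
	return virt_to_page(buf);
}

The in-page offset is the same either way, which is why the original offset_in_page(buf) argument to sg_set_page() stays as-is.
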
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 2bc47eb..b1f5d0d 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -712,6 +712,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
 		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
 			/* ops set to 3.0 by default for default so update */
 			ses->server->ops = &smb21_operations;
+			ses->server->vals = &smb21_values;
 		}
 	} else if (le16_to_cpu(rsp->DialectRevision) !=
 				ses->server->vals->protocol_id) {
@@ -2191,7 +2192,7 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
 	iov[1].iov_len = uni_path_len;
 	iov[1].iov_base = path;
 
-	if (!server->oplocks)
+	if ((!server->oplocks) || (tcon->no_lease))
 		*oplock = SMB2_OPLOCK_LEVEL_NONE;
 
 	if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index 5fdb9a5..1959931 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -2090,7 +2090,8 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
  * rqst: the data to write
  * return value: 0 if successfully write, otherwise error code
  */
-int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+int smbd_send(struct TCP_Server_Info *server,
+	int num_rqst, struct smb_rqst *rqst_array)
 {
 	struct smbd_connection *info = server->smbd_conn;
 	struct kvec vec;
@@ -2102,6 +2103,8 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 		info->max_send_size - sizeof(struct smbd_data_transfer);
 	struct kvec *iov;
 	int rc;
+	struct smb_rqst *rqst;
+	int rqst_idx;
 
 	info->smbd_send_pending++;
 	if (info->transport_status != SMBD_CONNECTED) {
@@ -2110,46 +2113,40 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 	}
 
 	/*
-	 * Skip the RFC1002 length defined in MS-SMB2 section 2.1
-	 * It is used only for TCP transport in the iov[0]
-	 * In future we may want to add a transport layer under protocol
-	 * layer so this will only be issued to TCP transport
-	 */
-
-	if (rqst->rq_iov[0].iov_len != 4) {
-		log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
-		return -EINVAL;
-	}
-
-	/*
 	 * Add in the page array if there is one. The caller needs to set
 	 * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
 	 * ends at page boundary
 	 */
-	buflen = smb_rqst_len(server, rqst);
+	remaining_data_length = 0;
+	for (i = 0; i < num_rqst; i++)
+		remaining_data_length += smb_rqst_len(server, &rqst_array[i]);
 
-	if (buflen + sizeof(struct smbd_data_transfer) >
+	if (remaining_data_length + sizeof(struct smbd_data_transfer) >
 		info->max_fragmented_send_size) {
 		log_write(ERR, "payload size %d > max size %d\n",
-			buflen, info->max_fragmented_send_size);
+			remaining_data_length, info->max_fragmented_send_size);
 		rc = -EINVAL;
 		goto done;
 	}
 
-	iov = &rqst->rq_iov[1];
+	rqst_idx = 0;
 
-	cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen);
-	for (i = 0; i < rqst->rq_nvec-1; i++)
+next_rqst:
+	rqst = &rqst_array[rqst_idx];
+	iov = rqst->rq_iov;
+
+	cifs_dbg(FYI, "Sending smb (RDMA): idx=%d smb_len=%lu\n",
+		rqst_idx, smb_rqst_len(server, rqst));
+	for (i = 0; i < rqst->rq_nvec; i++)
 		dump_smb(iov[i].iov_base, iov[i].iov_len);
 
-	remaining_data_length = buflen;
 
-	log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
-		"rq_tailsz=%d buflen=%d\n",
-		rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
-		rqst->rq_tailsz, buflen);
+	log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
+		"rq_tailsz=%d buflen=%lu\n",
+		rqst_idx, rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
+		rqst->rq_tailsz, smb_rqst_len(server, rqst));
 
-	start = i = iov[0].iov_len ? 0 : 1;
+	start = i = 0;
 	buflen = 0;
 	while (true) {
 		buflen += iov[i].iov_len;
@@ -2197,14 +2194,14 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 						goto done;
 				}
 				i++;
-				if (i == rqst->rq_nvec-1)
+				if (i == rqst->rq_nvec)
 					break;
 			}
 			start = i;
 			buflen = 0;
 		} else {
 			i++;
-			if (i == rqst->rq_nvec-1) {
+			if (i == rqst->rq_nvec) {
 				/* send out all remaining vecs */
 				remaining_data_length -= buflen;
 				log_write(INFO,
@@ -2248,6 +2245,10 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 		}
 	}
 
+	rqst_idx++;
+	if (rqst_idx < num_rqst)
+		goto next_rqst;
+
 done:
 	/*
 	 * As an optimization, we don't wait for individual I/O to finish
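
The smbd_send() rework above changes the SMB-direct transport to accept an array of requests, matching what __smb_send_rqst() already passes for compound operations over TCP. Because callers no longer place the 4-byte RFC1002 length in iov[0], the special-casing of the first vector disappears and the loops run to rq_nvec instead of rq_nvec - 1; the aggregate length of all requests is validated before anything is transmitted. A minimal sketch of that contract, with struct rqst and xmit() as simplified stand-ins:

/* Sketch only: validate the total length up front, then send each request. */
#include <stdio.h>
#include <stddef.h>

struct rqst { size_t len; };

static int xmit(const struct rqst *r) { printf("send %zu\n", r->len); return 0; }

static int send_many(struct rqst *arr, int n, size_t max_total)
{
	size_t total = 0;
	int i, rc;

	for (i = 0; i < n; i++)
		total += arr[i].len;
	if (total > max_total)
		return -1;		/* -EINVAL in the kernel */
	for (i = 0; i < n; i++) {
		rc = xmit(&arr[i]);
		if (rc)
			return rc;
	}
	return 0;
}

int main(void)
{
	struct rqst a[2] = { { 100 }, { 200 } };

	return send_many(a, 2, 4096);
}
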
diff --git a/fs/cifs/smbdirect.h b/fs/cifs/smbdirect.h
index a110962..b5c240f 100644
--- a/fs/cifs/smbdirect.h
+++ b/fs/cifs/smbdirect.h
@@ -292,7 +292,8 @@ void smbd_destroy(struct smbd_connection *info);
 
 /* Interface for carrying upper layer I/O through send/recv */
 int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
-int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst);
+int smbd_send(struct TCP_Server_Info *server,
+	int num_rqst, struct smb_rqst *rqst);
 
 enum mr_state {
 	MR_READY,
@@ -332,7 +333,7 @@ static inline void *smbd_get_connection(
 static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
 static inline void smbd_destroy(struct smbd_connection *info) {}
 static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
-static inline int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst) {return -1; }
+static inline int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) {return -1; }
 #endif
 
 #endif
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index f2938bd..fe77f41 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -287,7 +287,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
 	__be32 rfc1002_marker;
 
 	if (cifs_rdma_enabled(server) && server->smbd_conn) {
-		rc = smbd_send(server, rqst);
+		rc = smbd_send(server, num_rqst, rqst);
 		goto smbd_done;
 	}
 	if (ssocket == NULL)
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 50ddb79..a2db401 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -31,7 +31,7 @@
 #include "cifs_fs_sb.h"
 #include "cifs_unicode.h"
 
-#define MAX_EA_VALUE_SIZE 65535
+#define MAX_EA_VALUE_SIZE CIFSMaxBufSize
 #define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
 #define CIFS_XATTR_ATTRIB "cifs.dosattrib"  /* full name: user.cifs.dosattrib */
 #define CIFS_XATTR_CREATETIME "cifs.creationtime"  /* user.cifs.creationtime */
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 913061c..7edc817 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -38,6 +38,7 @@ int __init ext4_init_system_zone(void)
 
 void ext4_exit_system_zone(void)
 {
+	rcu_barrier();
 	kmem_cache_destroy(ext4_system_zone_cachep);
 }
 
@@ -49,17 +50,26 @@ static inline int can_merge(struct ext4_system_zone *entry1,
 	return 0;
 }
 
+static void release_system_zone(struct ext4_system_blocks *system_blks)
+{
+	struct ext4_system_zone	*entry, *n;
+
+	rbtree_postorder_for_each_entry_safe(entry, n,
+				&system_blks->root, node)
+		kmem_cache_free(ext4_system_zone_cachep, entry);
+}
+
 /*
  * Mark a range of blocks as belonging to the "system zone" --- that
  * is, filesystem metadata blocks which should never be used by
  * inodes.
  */
-static int add_system_zone(struct ext4_sb_info *sbi,
+static int add_system_zone(struct ext4_system_blocks *system_blks,
 			   ext4_fsblk_t start_blk,
 			   unsigned int count)
 {
 	struct ext4_system_zone *new_entry = NULL, *entry;
-	struct rb_node **n = &sbi->system_blks.rb_node, *node;
+	struct rb_node **n = &system_blks->root.rb_node, *node;
 	struct rb_node *parent = NULL, *new_node = NULL;
 
 	while (*n) {
@@ -91,7 +101,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
 		new_node = &new_entry->node;
 
 		rb_link_node(new_node, parent, n);
-		rb_insert_color(new_node, &sbi->system_blks);
+		rb_insert_color(new_node, &system_blks->root);
 	}
 
 	/* Can we merge to the left? */
@@ -101,7 +111,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
 		if (can_merge(entry, new_entry)) {
 			new_entry->start_blk = entry->start_blk;
 			new_entry->count += entry->count;
-			rb_erase(node, &sbi->system_blks);
+			rb_erase(node, &system_blks->root);
 			kmem_cache_free(ext4_system_zone_cachep, entry);
 		}
 	}
@@ -112,7 +122,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
 		entry = rb_entry(node, struct ext4_system_zone, node);
 		if (can_merge(new_entry, entry)) {
 			new_entry->count += entry->count;
-			rb_erase(node, &sbi->system_blks);
+			rb_erase(node, &system_blks->root);
 			kmem_cache_free(ext4_system_zone_cachep, entry);
 		}
 	}
@@ -126,7 +136,7 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
 	int first = 1;
 
 	printk(KERN_INFO "System zones: ");
-	node = rb_first(&sbi->system_blks);
+	node = rb_first(&sbi->system_blks->root);
 	while (node) {
 		entry = rb_entry(node, struct ext4_system_zone, node);
 		printk(KERN_CONT "%s%llu-%llu", first ? "" : ", ",
@@ -137,68 +147,18 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
 	printk(KERN_CONT "\n");
 }
 
-int ext4_setup_system_zone(struct super_block *sb)
-{
-	ext4_group_t ngroups = ext4_get_groups_count(sb);
-	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	struct ext4_group_desc *gdp;
-	ext4_group_t i;
-	int flex_size = ext4_flex_bg_size(sbi);
-	int ret;
-
-	if (!test_opt(sb, BLOCK_VALIDITY)) {
-		if (sbi->system_blks.rb_node)
-			ext4_release_system_zone(sb);
-		return 0;
-	}
-	if (sbi->system_blks.rb_node)
-		return 0;
-
-	for (i=0; i < ngroups; i++) {
-		if (ext4_bg_has_super(sb, i) &&
-		    ((i < 5) || ((i % flex_size) == 0)))
-			add_system_zone(sbi, ext4_group_first_block_no(sb, i),
-					ext4_bg_num_gdb(sb, i) + 1);
-		gdp = ext4_get_group_desc(sb, i, NULL);
-		ret = add_system_zone(sbi, ext4_block_bitmap(sb, gdp), 1);
-		if (ret)
-			return ret;
-		ret = add_system_zone(sbi, ext4_inode_bitmap(sb, gdp), 1);
-		if (ret)
-			return ret;
-		ret = add_system_zone(sbi, ext4_inode_table(sb, gdp),
-				sbi->s_itb_per_group);
-		if (ret)
-			return ret;
-	}
-
-	if (test_opt(sb, DEBUG))
-		debug_print_tree(sbi);
-	return 0;
-}
-
-/* Called when the filesystem is unmounted */
-void ext4_release_system_zone(struct super_block *sb)
-{
-	struct ext4_system_zone	*entry, *n;
-
-	rbtree_postorder_for_each_entry_safe(entry, n,
-			&EXT4_SB(sb)->system_blks, node)
-		kmem_cache_free(ext4_system_zone_cachep, entry);
-
-	EXT4_SB(sb)->system_blks = RB_ROOT;
-}
-
 /*
  * Returns 1 if the passed-in block region (start_blk,
  * start_blk+count) is valid; 0 if some part of the block region
  * overlaps with filesystem metadata blocks.
  */
-int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
-			  unsigned int count)
+static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
+				     struct ext4_system_blocks *system_blks,
+				     ext4_fsblk_t start_blk,
+				     unsigned int count)
 {
 	struct ext4_system_zone *entry;
-	struct rb_node *n = sbi->system_blks.rb_node;
+	struct rb_node *n;
 
 	if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
 	    (start_blk + count < start_blk) ||
@@ -206,6 +166,11 @@ int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
 		sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
 		return 0;
 	}
+
+	if (system_blks == NULL)
+		return 1;
+
+	n = system_blks->root.rb_node;
 	while (n) {
 		entry = rb_entry(n, struct ext4_system_zone, node);
 		if (start_blk + count - 1 < entry->start_blk)
@@ -220,6 +185,177 @@ int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
 	return 1;
 }
 
+static int ext4_protect_reserved_inode(struct super_block *sb,
+				       struct ext4_system_blocks *system_blks,
+				       u32 ino)
+{
+	struct inode *inode;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	struct ext4_map_blocks map;
+	u32 i = 0, num;
+	int err = 0, n;
+
+	if ((ino < EXT4_ROOT_INO) ||
+	    (ino > le32_to_cpu(sbi->s_es->s_inodes_count)))
+		return -EINVAL;
+	inode = ext4_iget(sb, ino, EXT4_IGET_SPECIAL);
+	if (IS_ERR(inode))
+		return PTR_ERR(inode);
+	num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
+	while (i < num) {
+		map.m_lblk = i;
+		map.m_len = num - i;
+		n = ext4_map_blocks(NULL, inode, &map, 0);
+		if (n < 0) {
+			err = n;
+			break;
+		}
+		if (n == 0) {
+			i++;
+		} else {
+			if (!ext4_data_block_valid_rcu(sbi, system_blks,
+						map.m_pblk, n)) {
+				ext4_error(sb, "blocks %llu-%llu from inode %u "
+					   "overlap system zone", map.m_pblk,
+					   map.m_pblk + map.m_len - 1, ino);
+				err = -EFSCORRUPTED;
+				break;
+			}
+			err = add_system_zone(system_blks, map.m_pblk, n);
+			if (err < 0)
+				break;
+			i += n;
+		}
+	}
+	iput(inode);
+	return err;
+}
+
+static void ext4_destroy_system_zone(struct rcu_head *rcu)
+{
+	struct ext4_system_blocks *system_blks;
+
+	system_blks = container_of(rcu, struct ext4_system_blocks, rcu);
+	release_system_zone(system_blks);
+	kfree(system_blks);
+}
+
+/*
+ * Build system zone rbtree which is used for block validity checking.
+ *
+ * The update of system_blks pointer in this function is protected by
+ * sb->s_umount semaphore. However we have to be careful as we can be
+ * racing with ext4_data_block_valid() calls reading system_blks rbtree
+ * protected only by RCU. That's why we first build the rbtree and then
+ * swap it in place.
+ */
+int ext4_setup_system_zone(struct super_block *sb)
+{
+	ext4_group_t ngroups = ext4_get_groups_count(sb);
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	struct ext4_system_blocks *system_blks;
+	struct ext4_group_desc *gdp;
+	ext4_group_t i;
+	int flex_size = ext4_flex_bg_size(sbi);
+	int ret;
+
+	if (!test_opt(sb, BLOCK_VALIDITY)) {
+		if (sbi->system_blks)
+			ext4_release_system_zone(sb);
+		return 0;
+	}
+	if (sbi->system_blks)
+		return 0;
+
+	system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
+	if (!system_blks)
+		return -ENOMEM;
+
+	for (i=0; i < ngroups; i++) {
+		if (ext4_bg_has_super(sb, i) &&
+		    ((i < 5) || ((i % flex_size) == 0)))
+			add_system_zone(system_blks,
+					ext4_group_first_block_no(sb, i),
+					ext4_bg_num_gdb(sb, i) + 1);
+		gdp = ext4_get_group_desc(sb, i, NULL);
+		ret = add_system_zone(system_blks,
+				ext4_block_bitmap(sb, gdp), 1);
+		if (ret)
+			goto err;
+		ret = add_system_zone(system_blks,
+				ext4_inode_bitmap(sb, gdp), 1);
+		if (ret)
+			goto err;
+		ret = add_system_zone(system_blks,
+				ext4_inode_table(sb, gdp),
+				sbi->s_itb_per_group);
+		if (ret)
+			goto err;
+	}
+	if (ext4_has_feature_journal(sb) && sbi->s_es->s_journal_inum) {
+		ret = ext4_protect_reserved_inode(sb, system_blks,
+				le32_to_cpu(sbi->s_es->s_journal_inum));
+		if (ret)
+			goto err;
+	}
+
+	/*
+	 * The system blks rbtree is now complete; publish it only once to
+	 * avoid racing with ext4_data_block_valid() readers accessing the
+	 * rbtree at the same time.
+	 */
+	rcu_assign_pointer(sbi->system_blks, system_blks);
+
+	if (test_opt(sb, DEBUG))
+		debug_print_tree(sbi);
+	return 0;
+err:
+	release_system_zone(system_blks);
+	kfree(system_blks);
+	return ret;
+}
+
+/*
+ * Called when the filesystem is unmounted or when remounting it with
+ * noblock_validity specified.
+ *
+ * The update of system_blks pointer in this function is protected by
+ * sb->s_umount semaphore. However we have to be careful as we can be
+ * racing with ext4_data_block_valid() calls reading system_blks rbtree
+ * protected only by RCU. So we first clear the system_blks pointer and
+ * then free the rbtree only after RCU grace period expires.
+ */
+void ext4_release_system_zone(struct super_block *sb)
+{
+	struct ext4_system_blocks *system_blks;
+
+	system_blks = rcu_dereference_protected(EXT4_SB(sb)->system_blks,
+					lockdep_is_held(&sb->s_umount));
+	rcu_assign_pointer(EXT4_SB(sb)->system_blks, NULL);
+
+	if (system_blks)
+		call_rcu(&system_blks->rcu, ext4_destroy_system_zone);
+}
+
+int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
+			  unsigned int count)
+{
+	struct ext4_system_blocks *system_blks;
+	int ret;
+
+	/*
+	 * Lock the system zone to prevent it from being released while we
+	 * are checking, e.g. by a remount that inverts the current
+	 * "[no]block_validity" mount option.
+	 */
+	rcu_read_lock();
+	system_blks = rcu_dereference(sbi->system_blks);
+	ret = ext4_data_block_valid_rcu(sbi, system_blks, start_blk,
+					count);
+	rcu_read_unlock();
+	return ret;
+}
+
 int ext4_check_blockref(const char *function, unsigned int line,
 			struct inode *inode, __le32 *p, unsigned int max)
 {
@@ -227,6 +363,11 @@ int ext4_check_blockref(const char *function, unsigned int line,
 	__le32 *bref = p;
 	unsigned int blk;
 
+	if (ext4_has_feature_journal(inode->i_sb) &&
+	    (inode->i_ino ==
+	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
+		return 0;
+
 	while (bref < p+max) {
 		blk = le32_to_cpu(*bref++);
 		if (blk &&
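
The block_validity.c rewrite above replaces the bare rb_root in the superblock info with an RCU-managed struct ext4_system_blocks: the tree is built fully in private memory, published with rcu_assign_pointer(), and torn down via call_rcu() only after readers are done, so ext4_data_block_valid() can run locklessly even across remounts that toggle block_validity. It also adds the journal inode's blocks to the system zone (ext4_protect_reserved_inode()) while exempting the journal inode itself from the checks, since its blocks legitimately live in that zone. The publish/read ordering, modeled here with C11 atomics (an analogy only; real RCU additionally defers the free until a grace period):

/* Analogy for rcu_assign_pointer()/rcu_dereference(); not kernel code. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct zones { int nr; };

static _Atomic(struct zones *) system_blks;

static void setup(void)
{
	struct zones *z = malloc(sizeof(*z));

	z->nr = 42;			/* fully build before publishing */
	atomic_store_explicit(&system_blks, z, memory_order_release);
}

static int block_valid(void)
{
	struct zones *z = atomic_load_explicit(&system_blks,
					       memory_order_acquire);

	if (!z)				/* no tree yet: treat as valid */
		return 1;
	return z->nr > 0;
}

int main(void)
{
	printf("before setup: %d\n", block_valid());
	setup();
	printf("after setup: %d\n", block_valid());
	return 0;
}
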
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0e07137..73789fb 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -194,6 +194,14 @@ struct ext4_map_blocks {
 };
 
 /*
+ * Block validity checking, system zone rbtree.
+ */
+struct ext4_system_blocks {
+	struct rb_root root;
+	struct rcu_head rcu;
+};
+
+/*
  * Flags for ext4_io_end->flags
  */
 #define	EXT4_IO_END_UNWRITTEN	0x0001
@@ -1411,7 +1419,7 @@ struct ext4_sb_info {
 	int s_jquota_fmt;			/* Format of quota to use */
 #endif
 	unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
-	struct rb_root system_blks;
+	struct ext4_system_blocks __rcu *system_blks;
 
 #ifdef EXTENTS_STATS
 	/* ext4 extents stats */
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 9b0ea2b..cf6bd0d 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -518,10 +518,14 @@ __read_extent_tree_block(const char *function, unsigned int line,
 	}
 	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
 		return bh;
-	err = __ext4_ext_check(function, line, inode,
-			       ext_block_hdr(bh), depth, pblk);
-	if (err)
-		goto errout;
+	if (!ext4_has_feature_journal(inode->i_sb) ||
+	    (inode->i_ino !=
+	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) {
+		err = __ext4_ext_check(function, line, inode,
+				       ext_block_hdr(bh), depth, pblk);
+		if (err)
+			goto errout;
+	}
 	set_buffer_verified(bh);
 	/*
 	 * If this is a leaf block, cache all of its entries
@@ -3744,8 +3748,8 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
 	 * illegal.
 	 */
 	if (ee_block != map->m_lblk || ee_len > map->m_len) {
-#ifdef EXT4_DEBUG
-		ext4_warning("Inode (%ld) finished: extent logical block %llu,"
+#ifdef CONFIG_EXT4_DEBUG
+		ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
 			     " len %u; IO logical block %llu, len %u",
 			     inode->i_ino, (unsigned long long)ee_block, ee_len,
 			     (unsigned long long)map->m_lblk, map->m_len);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9b95ec7..0ed91c3 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -400,6 +400,10 @@ static int __check_block_validity(struct inode *inode, const char *func,
 				unsigned int line,
 				struct ext4_map_blocks *map)
 {
+	if (ext4_has_feature_journal(inode->i_sb) &&
+	    (inode->i_ino ==
+	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
+		return 0;
 	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
 				   map->m_len)) {
 		ext4_error_inode(inode, func, line, map->m_pblk,
@@ -4327,6 +4331,15 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 
 	trace_ext4_punch_hole(inode, offset, length, 0);
 
+	ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+	if (ext4_has_inline_data(inode)) {
+		down_write(&EXT4_I(inode)->i_mmap_sem);
+		ret = ext4_convert_inline_data(inode);
+		up_write(&EXT4_I(inode)->i_mmap_sem);
+		if (ret)
+			return ret;
+	}
+
 	/*
 	 * Write out all dirty pages to avoid race conditions
 	 * Then release them.
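
The ext4_punch_hole() hunk above converts inline data to a block-mapped layout (under i_mmap_sem) before any blocks are punched, because the code that follows assumes extent or block-mapped storage. The shape of that guard, with stand-in helpers:

/* Shape of the inline-data guard; helpers are hypothetical stand-ins. */
#include <stdio.h>

static int has_inline;			/* models ext4_has_inline_data() */

static int convert_inline(void) { has_inline = 0; return 0; }

static int punch_hole(long off, long len)
{
	if (has_inline) {
		int ret = convert_inline();	/* done under i_mmap_sem */

		if (ret)
			return ret;
	}
	printf("punching %ld+%ld on block-mapped data\n", off, len);
	return 0;
}

int main(void)
{
	has_inline = 1;
	return punch_hole(0, 4096);
}
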
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 9c07b71..548a747 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -905,6 +905,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
 	unsigned int cp_blks = 1 + __cp_payload(sbi);
 	block_t cp_blk_no;
 	int i;
+	int err;
 
 	sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks),
 				 GFP_KERNEL);
@@ -932,6 +933,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
 	} else if (cp2) {
 		cur_page = cp2;
 	} else {
+		err = -EFSCORRUPTED;
 		goto fail_no_cp;
 	}
 
@@ -944,8 +946,10 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
 		sbi->cur_cp_pack = 2;
 
 	/* Sanity checking of checkpoint */
-	if (f2fs_sanity_check_ckpt(sbi))
+	if (f2fs_sanity_check_ckpt(sbi)) {
+		err = -EFSCORRUPTED;
 		goto free_fail_no_cp;
+	}
 
 	if (cp_blks <= 1)
 		goto done;
@@ -959,8 +963,10 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
 		unsigned char *ckpt = (unsigned char *)sbi->ckpt;
 
 		cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
-		if (IS_ERR(cur_page))
+		if (IS_ERR(cur_page)) {
+			err = PTR_ERR(cur_page);
 			goto free_fail_no_cp;
+		}
 		sit_bitmap_ptr = page_address(cur_page);
 		memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
 		f2fs_put_page(cur_page, 1);
@@ -975,7 +981,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
 	f2fs_put_page(cp2, 1);
 fail_no_cp:
 	kvfree(sbi->ckpt);
-	return -EINVAL;
+	return err;
 }
 
 static void __add_dirty_inode(struct inode *inode, enum inode_type type)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index e8ae480..f3653e1 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -492,7 +492,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
 	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
 			fio->is_por ? META_POR : (__is_meta_io(fio) ?
 			META_GENERIC : DATA_GENERIC_ENHANCE)))
-		return -EFAULT;
+		return -EFSCORRUPTED;
 
 	trace_f2fs_submit_page_bio(page, fio);
 	f2fs_trace_ios(fio, 0);
@@ -1165,7 +1165,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 
 	if (__is_valid_data_blkaddr(blkaddr) &&
 		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
-		err = -EFAULT;
+		err = -EFSCORRUPTED;
 		goto sync_out;
 	}
 
@@ -1931,7 +1931,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
 
 		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
 						DATA_GENERIC_ENHANCE))
-			return -EFAULT;
+			return -EFSCORRUPTED;
 
 		ipu_force = true;
 		fio->need_lock = LOCK_DONE;
@@ -1958,7 +1958,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
 	if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
 		!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
 						DATA_GENERIC_ENHANCE)) {
-		err = -EFAULT;
+		err = -EFSCORRUPTED;
 		goto out_writepage;
 	}
 	/*
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index b28baf4..20ac8ea 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -3725,3 +3725,7 @@ static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
 }
 
 #endif /* _LINUX_F2FS_H */
+
+#define EFSBADCRC	EBADMSG		/* Bad CRC detected */
+#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */
+
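
The two defines above introduce f2fs's filesystem-corruption errnos as aliases of existing values, mirroring what ext4 and xfs use, so the many -EINVAL/-EFAULT returns converted throughout this series surface as one consistent error to callers. A quick check of what userspace will observe:

/* Standalone check of the errno aliases; compiles on Linux. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define EFSBADCRC	EBADMSG		/* Bad CRC detected */
#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

int main(void)
{
	printf("EFSCORRUPTED = %d (%s)\n", EFSCORRUPTED, strerror(EFSCORRUPTED));
	printf("EFSBADCRC    = %d (%s)\n", EFSBADCRC, strerror(EFSBADCRC));
	return 0;
}
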
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 3b7988cb..9b561f1 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1745,6 +1745,8 @@ static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
 static int f2fs_ioc_start_atomic_write(struct file *filp)
 {
 	struct inode *inode = file_inode(filp);
+	struct f2fs_inode_info *fi = F2FS_I(inode);
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	int ret;
 
 	if (!inode_owner_or_capable(inode))
@@ -1785,6 +1787,12 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 		goto out;
 	}
 
+	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
+	if (list_empty(&fi->inmem_ilist))
+		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
+	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
+
+	/* add the inode to the inmem list first, then set the atomic-file flag */
 	set_inode_flag(inode, FI_ATOMIC_FILE);
 	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -1826,11 +1834,8 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
 			goto err_out;
 
 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
-		if (!ret) {
-			clear_inode_flag(inode, FI_ATOMIC_FILE);
-			F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
-			stat_dec_atomic_write(inode);
-		}
+		if (!ret)
+			f2fs_drop_inmem_pages(inode);
 	} else {
 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
 	}
@@ -2173,9 +2178,9 @@ static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
 		return -EROFS;
 
 	end = range.start + range.len;
-	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) {
+	if (end < range.start || range.start < MAIN_BLKADDR(sbi) ||
+					end >= MAX_BLKADDR(sbi))
 		return -EINVAL;
-	}
 
 	ret = mnt_want_write_file(filp);
 	if (ret)
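
Two fixes in the f2fs file.c hunks above: atomic-write registration now links the inode into sbi->inode_list[ATOMIC_FILE] before FI_ATOMIC_FILE is set (with the matching removal moved into f2fs_drop_inmem_pages()), and f2fs_ioc_gc_range() gains an integer-overflow check, since a user-controlled range.len can wrap end around a naive bounds test. The overflow check in isolation:

/* Why "end < range.start" matters: unsigned wraparound. Sketch only. */
#include <stdio.h>

static int check_range(unsigned long long start, unsigned long long len,
		       unsigned long long main_blk, unsigned long long max_blk)
{
	unsigned long long end = start + len;

	if (end < start || start < main_blk || end >= max_blk)
		return -22;		/* -EINVAL */
	return 0;
}

int main(void)
{
	/* a huge len wraps end around to a small value */
	printf("%d\n", check_range(4096, ~0ULL, 1024, 1 << 20));
	printf("%d\n", check_range(4096, 100, 1024, 1 << 20));
	return 0;
}
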
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 963fb45..f953085 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -676,7 +676,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
 	}
 	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
 						DATA_GENERIC_ENHANCE))) {
-		err = -EFAULT;
+		err = -EFSCORRUPTED;
 		goto put_page;
 	}
 got_it:
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index d67622c..b65c056 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -162,7 +162,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
 			"%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
 			"run fsck to fix.",
 			__func__, dn->inode->i_ino, dn->data_blkaddr);
-		return -EINVAL;
+		return -EFSCORRUPTED;
 	}
 
 	f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
@@ -405,7 +405,7 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
 			"%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
 			"run fsck to fix.",
 			__func__, dir->i_ino, dn.data_blkaddr);
-		err = -EINVAL;
+		err = -EFSCORRUPTED;
 		goto out;
 	}
 
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index ccb0222..e269959 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -74,7 +74,7 @@ static int __written_first_block(struct f2fs_sb_info *sbi,
 	if (!__is_valid_data_blkaddr(addr))
 		return 1;
 	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE))
-		return -EFAULT;
+		return -EFSCORRUPTED;
 	return 0;
 }
 
@@ -374,7 +374,7 @@ static int do_read_inode(struct inode *inode)
 
 	if (!sanity_check_inode(inode, node_page)) {
 		f2fs_put_page(node_page, 1);
-		return -EINVAL;
+		return -EFSCORRUPTED;
 	}
 
 	/* check data exist */
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 56fe5b97..8410408 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -37,7 +37,7 @@ int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
 		f2fs_msg(sbi->sb, KERN_WARNING,
 				"%s: out-of-range nid=%x, run fsck to fix.",
 				__func__, nid);
-		return -EINVAL;
+		return -EFSCORRUPTED;
 	}
 	return 0;
 }
@@ -1289,7 +1289,7 @@ static int read_node_page(struct page *page, int op_flags)
 	if (PageUptodate(page)) {
 		if (!f2fs_inode_chksum_verify(sbi, page)) {
 			ClearPageUptodate(page);
-			return -EBADMSG;
+			return -EFSBADCRC;
 		}
 		return LOCKED_PAGE;
 	}
@@ -1375,7 +1375,7 @@ static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
 	}
 
 	if (!f2fs_inode_chksum_verify(sbi, page)) {
-		err = -EBADMSG;
+		err = -EFSBADCRC;
 		goto out_err;
 	}
 page_hit:
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index e04f82b..8579a0b 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -557,7 +557,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
 			"Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
 			inode->i_ino, ofs_of_node(dn.node_page),
 			ofs_of_node(page));
-		err = -EFAULT;
+		err = -EFSCORRUPTED;
 		goto err;
 	}
 
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index ac824f6..3231ced 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -185,8 +185,6 @@ bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
 
 void f2fs_register_inmem_page(struct inode *inode, struct page *page)
 {
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct inmem_pages *new;
 
 	f2fs_trace_pid(page);
@@ -200,15 +198,11 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page)
 	INIT_LIST_HEAD(&new->list);
 
 	/* increase reference count with clean state */
-	mutex_lock(&fi->inmem_lock);
 	get_page(page);
-	list_add_tail(&new->list, &fi->inmem_pages);
-	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
-	if (list_empty(&fi->inmem_ilist))
-		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
-	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
+	mutex_lock(&F2FS_I(inode)->inmem_lock);
+	list_add_tail(&new->list, &F2FS_I(inode)->inmem_pages);
 	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
-	mutex_unlock(&fi->inmem_lock);
+	mutex_unlock(&F2FS_I(inode)->inmem_lock);
 
 	trace_f2fs_register_inmem_page(page, INMEM);
 }
@@ -330,19 +324,17 @@ void f2fs_drop_inmem_pages(struct inode *inode)
 		mutex_lock(&fi->inmem_lock);
 		__revoke_inmem_pages(inode, &fi->inmem_pages,
 						true, false, true);
-
-		if (list_empty(&fi->inmem_pages)) {
-			spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
-			if (!list_empty(&fi->inmem_ilist))
-				list_del_init(&fi->inmem_ilist);
-			spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
-		}
 		mutex_unlock(&fi->inmem_lock);
 	}
 
 	clear_inode_flag(inode, FI_ATOMIC_FILE);
 	fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
 	stat_dec_atomic_write(inode);
+
+	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
+	if (!list_empty(&fi->inmem_ilist))
+		list_del_init(&fi->inmem_ilist);
+	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
 }
 
 void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
@@ -471,11 +463,6 @@ int f2fs_commit_inmem_pages(struct inode *inode)
 
 	mutex_lock(&fi->inmem_lock);
 	err = __f2fs_commit_inmem_pages(inode);
-
-	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
-	if (!list_empty(&fi->inmem_ilist))
-		list_del_init(&fi->inmem_ilist);
-	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
 	mutex_unlock(&fi->inmem_lock);
 
 	clear_inode_flag(inode, FI_ATOMIC_COMMIT);
@@ -2774,7 +2761,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
 		f2fs_msg(sbi->sb, KERN_WARNING,
 			"Found FS corruption, run fsck to fix.");
-		return -EIO;
+		return -EFSCORRUPTED;
 	}
 
 	/* start/end segment number in main_area */
@@ -3197,7 +3184,7 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
 
 	if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
-		return -EFAULT;
+		return -EFSCORRUPTED;
 	}
 
 	stat_inc_inplace_blocks(fio->sbi);
@@ -3393,11 +3380,6 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
 		seg_i = CURSEG_I(sbi, i);
 		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
-		if (blk_off > ENTRIES_IN_SUM) {
-			f2fs_bug_on(sbi, 1);
-			f2fs_put_page(page, 1);
-			return -EFAULT;
-		}
 		seg_i->next_segno = segno;
 		reset_curseg(sbi, i, 0);
 		seg_i->alloc_type = ckpt->alloc_type[i];
@@ -4105,7 +4087,7 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
 					"Wrong journal entry on segno %u",
 					start);
 			set_sbi_flag(sbi, SBI_NEED_FSCK);
-			err = -EINVAL;
+			err = -EFSCORRUPTED;
 			break;
 		}
 
@@ -4146,7 +4128,7 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
 			"SIT is corrupted node# %u vs %u",
 			total_node_blocks, valid_node_count(sbi));
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
-		err = -EINVAL;
+		err = -EFSCORRUPTED;
 	}
 
 	return err;
@@ -4237,6 +4219,41 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi)
 	return init_victim_secmap(sbi);
 }
 
+static int sanity_check_curseg(struct f2fs_sb_info *sbi)
+{
+	int i;
+
+	/*
+	 * In an LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
+	 * in an LFS curseg, all blkaddrs after .next_blkoff should be unused.
+	 */
+	for (i = 0; i < NO_CHECK_TYPE; i++) {
+		struct curseg_info *curseg = CURSEG_I(sbi, i);
+		struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
+		unsigned int blkofs = curseg->next_blkoff;
+
+		if (f2fs_test_bit(blkofs, se->cur_valid_map))
+			goto out;
+
+		if (curseg->alloc_type == SSR)
+			continue;
+
+		for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
+			if (!f2fs_test_bit(blkofs, se->cur_valid_map))
+				continue;
+out:
+			f2fs_msg(sbi->sb, KERN_ERR,
+				"Current segment's next free block offset is "
+				"inconsistent with bitmap, logtype:%u, "
+				"segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
+				i, curseg->segno, curseg->alloc_type,
+				curseg->next_blkoff, blkofs);
+			return -EFSCORRUPTED;
+		}
+	}
+	return 0;
+}
+
 /*
  * Update min, max modified time for cost-benefit GC algorithm
  */
@@ -4332,6 +4349,10 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
 	if (err)
 		return err;
 
+	err = sanity_check_curseg(sbi);
+	if (err)
+		return err;
+
 	init_min_max_mtime(sbi);
 	return 0;
 }
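
sanity_check_curseg(), added above, cross-checks each active segment's next_blkoff against that segment's validity bitmap at mount time: the claimed next free block must be clear, and in an LFS segment everything after it must be clear too, otherwise the checkpoint is self-contradictory and the mount fails with -EFSCORRUPTED. The core test in miniature (test_bit() mimics f2fs_test_bit()'s MSB-first bit order):

/* Miniature of the curseg-vs-bitmap consistency check. */
#include <stdio.h>

static int test_bit(unsigned int nr, const unsigned char *map)
{
	return (map[nr >> 3] >> (7 - (nr & 7))) & 1;
}

int main(void)
{
	unsigned char cur_valid_map[8] = { 0xE0 };	/* blocks 0-2 in use */
	unsigned int next_blkoff = 3;

	if (test_bit(next_blkoff, cur_valid_map)) {
		puts("inconsistent: next_blkoff already allocated");
		return 1;		/* -EFSCORRUPTED in the kernel */
	}
	puts("curseg consistent");
	return 0;
}
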
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 429007b..4bd151f 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -697,7 +697,7 @@ static inline int check_block_count(struct f2fs_sb_info *sbi,
 				"Mismatch valid blocks %d vs. %d",
 					GET_SIT_VBLOCKS(raw_sit), valid_blocks);
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
-		return -EINVAL;
+		return -EFSCORRUPTED;
 	}
 
 	/* check segment usage, and check boundary of a given segment number */
@@ -707,7 +707,7 @@ static inline int check_block_count(struct f2fs_sb_info *sbi,
 				"Wrong valid blocks %d or segno %u",
 					GET_SIT_VBLOCKS(raw_sit), segno);
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
-		return -EINVAL;
+		return -EFSCORRUPTED;
 	}
 	return 0;
 }
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index ac24cff6..32dc7a5 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -2478,11 +2478,11 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 		}
 	}
 
-	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
+	if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
 		f2fs_msg(sb, KERN_INFO,
 			"Magic Mismatch, valid(0x%x) - read(0x%x)",
 			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
-		return 1;
+		return -EINVAL;
 	}
 
 	/* Currently, support only 4KB page cache size */
@@ -2490,7 +2490,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 		f2fs_msg(sb, KERN_INFO,
 			"Invalid page_cache_size (%lu), supports only 4KB",
 			PAGE_SIZE);
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	/* Currently, support only 4KB block size */
@@ -2499,7 +2499,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 		f2fs_msg(sb, KERN_INFO,
 			"Invalid blocksize (%u), supports only 4KB",
 			blocksize);
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	/* check log blocks per segment */
@@ -2507,7 +2507,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 		f2fs_msg(sb, KERN_INFO,
 			"Invalid log blocks per segment (%u)",
 			le32_to_cpu(raw_super->log_blocks_per_seg));
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	/* Currently, support 512/1024/2048/4096 bytes sector size */
@@ -2517,7 +2517,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 				F2FS_MIN_LOG_SECTOR_SIZE) {
 		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
 			le32_to_cpu(raw_super->log_sectorsize));
-		return 1;
+		return -EFSCORRUPTED;
 	}
 	if (le32_to_cpu(raw_super->log_sectors_per_block) +
 		le32_to_cpu(raw_super->log_sectorsize) !=
@@ -2526,7 +2526,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 			"Invalid log sectors per block(%u) log sectorsize(%u)",
 			le32_to_cpu(raw_super->log_sectors_per_block),
 			le32_to_cpu(raw_super->log_sectorsize));
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	segment_count = le32_to_cpu(raw_super->segment_count);
@@ -2542,7 +2542,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 		f2fs_msg(sb, KERN_INFO,
 			"Invalid segment count (%u)",
 			segment_count);
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	if (total_sections > segment_count ||
@@ -2551,28 +2551,28 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 		f2fs_msg(sb, KERN_INFO,
 			"Invalid segment/section count (%u, %u x %u)",
 			segment_count, total_sections, segs_per_sec);
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	if ((segment_count / segs_per_sec) < total_sections) {
 		f2fs_msg(sb, KERN_INFO,
 			"Small segment_count (%u < %u * %u)",
 			segment_count, segs_per_sec, total_sections);
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
 		f2fs_msg(sb, KERN_INFO,
 			"Wrong segment_count / block_count (%u > %llu)",
 			segment_count, le64_to_cpu(raw_super->block_count));
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	if (secs_per_zone > total_sections || !secs_per_zone) {
 		f2fs_msg(sb, KERN_INFO,
 			"Wrong secs_per_zone / total_sections (%u, %u)",
 			secs_per_zone, total_sections);
-		return 1;
+		return -EFSCORRUPTED;
 	}
 	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
 			raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
@@ -2583,7 +2583,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 			le32_to_cpu(raw_super->extension_count),
 			raw_super->hot_ext_count,
 			F2FS_MAX_EXTENSION);
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	if (le32_to_cpu(raw_super->cp_payload) >
@@ -2592,7 +2592,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 			"Insane cp_payload (%u > %u)",
 			le32_to_cpu(raw_super->cp_payload),
 			blocks_per_seg - F2FS_CP_PACKS);
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	/* check reserved ino info */
@@ -2604,12 +2604,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 			le32_to_cpu(raw_super->node_ino),
 			le32_to_cpu(raw_super->meta_ino),
 			le32_to_cpu(raw_super->root_ino));
-		return 1;
+		return -EFSCORRUPTED;
 	}
 
 	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
 	if (sanity_check_area_boundary(sbi, bh))
-		return 1;
+		return -EFSCORRUPTED;
 
 	return 0;
 }
@@ -2714,11 +2714,11 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 		}
 	}
 	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
-		for (j = i; j < NR_CURSEG_DATA_TYPE; j++) {
+		for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
 			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
 				le32_to_cpu(ckpt->cur_data_segno[j])) {
 				f2fs_msg(sbi->sb, KERN_ERR,
-					"Data segment (%u) and Data segment (%u)"
+					"Node segment (%u) and Data segment (%u)"
 					" has the same segno: %u", i, j,
 					le32_to_cpu(ckpt->cur_node_segno[i]));
 				return 1;
@@ -2925,11 +2925,11 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
 		}
 
 		/* sanity checking of raw super */
-		if (sanity_check_raw_super(sbi, bh)) {
+		err = sanity_check_raw_super(sbi, bh);
+		if (err) {
 			f2fs_msg(sb, KERN_ERR,
 				"Can't find valid F2FS filesystem in %dth superblock",
 				block + 1);
-			err = -EINVAL;
 			brelse(bh);
 			continue;
 		}
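
The super.c hunks above make sanity_check_raw_super() return a negative errno instead of 1, so read_raw_super_block() can propagate the real reason (-EINVAL for a foreign magic, -EFSCORRUPTED for a damaged f2fs layout) rather than flattening everything to -EINVAL; the j = 0 loop fix also makes the node-vs-data segment overlap check compare every pair instead of only a triangular subset. The return-code pattern, sketched:

/* Return-code pattern only; the layout checks themselves are elided. */
#include <errno.h>
#include <stdio.h>

#define EFSCORRUPTED EUCLEAN	/* damaged f2fs layout would use this */

static int sanity_check(unsigned int magic)
{
	if (magic != 0xF2F52010)	/* F2FS_SUPER_MAGIC */
		return -EINVAL;		/* wrong fs: let other drivers try */
	/* ...layout checks would return -EFSCORRUPTED here... */
	return 0;
}

int main(void)
{
	int err = sanity_check(0xdeadbeef);

	printf("err = %d\n", err);	/* caller forwards this as-is */
	return err ? 1 : 0;
}
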
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index e791741..9632420 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -346,7 +346,7 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
 
 	*xe = __find_xattr(cur_addr, last_txattr_addr, index, len, name);
 	if (!*xe) {
-		err = -EFAULT;
+		err = -EFSCORRUPTED;
 		goto out;
 	}
 check:
@@ -622,7 +622,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
 	/* find entry with wanted name. */
 	here = __find_xattr(base_addr, last_base_addr, index, len, name);
 	if (!here) {
-		error = -EFAULT;
+		error = -EFSCORRUPTED;
 		goto exit;
 	}
 
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 7f5f369..de60c05 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -1097,8 +1097,11 @@ static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used,
 			err = -ENOMEM;
 			goto error;
 		}
+		/* Avoid race with userspace read via bdev */
+		lock_buffer(bhs[n]);
 		memset(bhs[n]->b_data, 0, sb->s_blocksize);
 		set_buffer_uptodate(bhs[n]);
+		unlock_buffer(bhs[n]);
 		mark_buffer_dirty_inode(bhs[n], dir);
 
 		n++;
@@ -1155,6 +1158,8 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
 	fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
 
 	de = (struct msdos_dir_entry *)bhs[0]->b_data;
+	/* Avoid race with userspace read via bdev */
+	lock_buffer(bhs[0]);
 	/* filling the new directory slots ("." and ".." entries) */
 	memcpy(de[0].name, MSDOS_DOT, MSDOS_NAME);
 	memcpy(de[1].name, MSDOS_DOTDOT, MSDOS_NAME);
@@ -1177,6 +1182,7 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
 	de[0].size = de[1].size = 0;
 	memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de));
 	set_buffer_uptodate(bhs[0]);
+	unlock_buffer(bhs[0]);
 	mark_buffer_dirty_inode(bhs[0], dir);
 
 	err = fat_zeroed_cluster(dir, blknr, 1, bhs, MAX_BUF_PER_PAGE);
@@ -1234,11 +1240,14 @@ static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
 
 			/* fill the directory entry */
 			copy = min(size, sb->s_blocksize);
+			/* Avoid race with userspace read via bdev */
+			lock_buffer(bhs[n]);
 			memcpy(bhs[n]->b_data, slots, copy);
+			set_buffer_uptodate(bhs[n]);
+			unlock_buffer(bhs[n]);
+			mark_buffer_dirty_inode(bhs[n], dir);
 			slots += copy;
 			size -= copy;
-			set_buffer_uptodate(bhs[n]);
-			mark_buffer_dirty_inode(bhs[n], dir);
 			if (!size)
 				break;
 			n++;
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index f58c0ca..4c6c635 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -390,8 +390,11 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
 				err = -ENOMEM;
 				goto error;
 			}
+			/* Avoid race with userspace read via bdev */
+			lock_buffer(c_bh);
 			memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
 			set_buffer_uptodate(c_bh);
+			unlock_buffer(c_bh);
 			mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
 			if (sb->s_flags & SB_SYNCHRONOUS)
 				err = sync_dirty_buffer(c_bh);
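
The three FAT hunks above bracket buffer initialization with lock_buffer()/unlock_buffer() and only mark the buffer uptodate inside the lock, so a concurrent read of the underlying block device never observes a half-initialized directory or FAT block. Modeled with a pthread mutex standing in for the buffer lock:

/* pthread mutex as a stand-in for lock_buffer()/unlock_buffer(). */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static char b_data[512];

static void fill_block(const char *src, size_t n)
{
	pthread_mutex_lock(&buf_lock);		/* lock_buffer(bh) */
	memset(b_data, 0, sizeof(b_data));
	memcpy(b_data, src, n);			/* readers can't see this state */
	pthread_mutex_unlock(&buf_lock);	/* unlock_buffer(bh) */
}

int main(void)
{
	fill_block("dir entries", 11);
	printf("%s\n", b_data);
	return 0;
}
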
diff --git a/fs/fs_struct.c b/fs/fs_struct.c
index 987c95b..97c2c52 100644
--- a/fs/fs_struct.c
+++ b/fs/fs_struct.c
@@ -45,7 +45,7 @@ void set_fs_pwd(struct fs_struct *fs, const struct path *path)
 	if (old_pwd.dentry)
 		path_put(&old_pwd);
 }
-EXPORT_SYMBOL(set_fs_pwd);
+EXPORT_SYMBOL_GPL(set_fs_pwd);
 
 static inline int replace_path(struct path *p, const struct path *old, const struct path *new)
 {
@@ -91,7 +91,7 @@ void free_fs_struct(struct fs_struct *fs)
 	path_put(&fs->pwd);
 	kmem_cache_free(fs_cachep, fs);
 }
-EXPORT_SYMBOL(free_fs_struct);
+EXPORT_SYMBOL_GPL(free_fs_struct);
 
 void exit_fs(struct task_struct *tsk)
 {
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 8f68181..f057c21 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -518,6 +518,7 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
 	rc = cuse_send_init(cc);
 	if (rc) {
 		fuse_dev_free(fud);
+		fuse_conn_put(&cc->fc);
 		return rc;
 	}
 	file->private_data = fud;
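
The one-line cuse fix above plugs a reference leak: when cuse_send_init() fails, the connection reference taken for the new channel was never dropped. The shape of the fix, with get/put as simplified stand-ins for the fuse_conn refcounting:

/* Refcount shape of the fix; types and helpers are stand-ins. */
#include <stdio.h>
#include <stdlib.h>

struct conn { int refs; };

static void put(struct conn *c)
{
	if (--c->refs == 0) {
		puts("conn freed");
		free(c);
	}
}

static int channel_open(struct conn *c, int send_init_fails)
{
	c->refs++;			/* reference for the new channel */
	if (send_init_fails) {
		put(c);			/* the added fuse_conn_put() */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct conn *c = calloc(1, sizeof(*c));

	c->refs = 1;
	channel_open(c, 1);		/* fails: extra ref is dropped */
	put(c);				/* owner's final put frees it */
	return 0;
}
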
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index c14211a..24a6bc8 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -332,7 +332,7 @@ static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
 	req->in.h.len = sizeof(struct fuse_in_header) +
 		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
 	list_add_tail(&req->list, &fiq->pending);
-	wake_up_locked(&fiq->waitq);
+	wake_up(&fiq->waitq);
 	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 }
 
@@ -344,16 +344,16 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
 	forget->forget_one.nodeid = nodeid;
 	forget->forget_one.nlookup = nlookup;
 
-	spin_lock(&fiq->waitq.lock);
+	spin_lock(&fiq->lock);
 	if (fiq->connected) {
 		fiq->forget_list_tail->next = forget;
 		fiq->forget_list_tail = forget;
-		wake_up_locked(&fiq->waitq);
+		wake_up(&fiq->waitq);
 		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 	} else {
 		kfree(forget);
 	}
-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);
 }
 
 static void flush_bg_queue(struct fuse_conn *fc)
@@ -366,10 +366,10 @@ static void flush_bg_queue(struct fuse_conn *fc)
 		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
 		list_del(&req->list);
 		fc->active_background++;
-		spin_lock(&fiq->waitq.lock);
+		spin_lock(&fiq->lock);
 		req->in.h.unique = fuse_get_unique(fiq);
 		queue_request(fiq, req);
-		spin_unlock(&fiq->waitq.lock);
+		spin_unlock(&fiq->lock);
 	}
 }
 
@@ -388,9 +388,9 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 	if (test_and_set_bit(FR_FINISHED, &req->flags))
 		goto put_request;
 
-	spin_lock(&fiq->waitq.lock);
+	spin_lock(&fiq->lock);
 	list_del_init(&req->intr_entry);
-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);
 	WARN_ON(test_bit(FR_PENDING, &req->flags));
 	WARN_ON(test_bit(FR_SENT, &req->flags));
 	if (test_bit(FR_BACKGROUND, &req->flags)) {
@@ -428,16 +428,16 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 
 static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
-	spin_lock(&fiq->waitq.lock);
+	spin_lock(&fiq->lock);
 	if (test_bit(FR_FINISHED, &req->flags)) {
-		spin_unlock(&fiq->waitq.lock);
+		spin_unlock(&fiq->lock);
 		return;
 	}
 	if (list_empty(&req->intr_entry)) {
 		list_add_tail(&req->intr_entry, &fiq->interrupts);
-		wake_up_locked(&fiq->waitq);
+		wake_up(&fiq->waitq);
 	}
-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);
 	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 }
 
@@ -467,16 +467,16 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 		if (!err)
 			return;
 
-		spin_lock(&fiq->waitq.lock);
+		spin_lock(&fiq->lock);
 		/* Request is not yet in userspace, bail out */
 		if (test_bit(FR_PENDING, &req->flags)) {
 			list_del(&req->list);
-			spin_unlock(&fiq->waitq.lock);
+			spin_unlock(&fiq->lock);
 			__fuse_put_request(req);
 			req->out.h.error = -EINTR;
 			return;
 		}
-		spin_unlock(&fiq->waitq.lock);
+		spin_unlock(&fiq->lock);
 	}
 
 	/*
@@ -491,9 +491,9 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 	struct fuse_iqueue *fiq = &fc->iq;
 
 	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
-	spin_lock(&fiq->waitq.lock);
+	spin_lock(&fiq->lock);
 	if (!fiq->connected) {
-		spin_unlock(&fiq->waitq.lock);
+		spin_unlock(&fiq->lock);
 		req->out.h.error = -ENOTCONN;
 	} else {
 		req->in.h.unique = fuse_get_unique(fiq);
@@ -501,7 +501,7 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 		/* acquire extra reference, since request is still needed
 		   after request_end() */
 		__fuse_get_request(req);
-		spin_unlock(&fiq->waitq.lock);
+		spin_unlock(&fiq->lock);
 
 		request_wait_answer(fc, req);
 		/* Pairs with smp_wmb() in request_end() */
@@ -634,12 +634,12 @@ static int fuse_request_send_notify_reply(struct fuse_conn *fc,
 
 	__clear_bit(FR_ISREPLY, &req->flags);
 	req->in.h.unique = unique;
-	spin_lock(&fiq->waitq.lock);
+	spin_lock(&fiq->lock);
 	if (fiq->connected) {
 		queue_request(fiq, req);
 		err = 0;
 	}
-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);
 
 	return err;
 }
@@ -1083,12 +1083,12 @@ static int request_pending(struct fuse_iqueue *fiq)
  * Unlike other requests this is assembled on demand, without a need
  * to allocate a separate fuse_req structure.
  *
- * Called with fiq->waitq.lock held, releases it
+ * Called with fiq->lock held, releases it
  */
 static int fuse_read_interrupt(struct fuse_iqueue *fiq,
 			       struct fuse_copy_state *cs,
 			       size_t nbytes, struct fuse_req *req)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
 {
 	struct fuse_in_header ih;
 	struct fuse_interrupt_in arg;
@@ -1104,7 +1104,7 @@ __releases(fiq->waitq.lock)
 	ih.unique = req->intr_unique;
 	arg.unique = req->in.h.unique;
 
-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);
 	if (nbytes < reqsize)
 		return -EINVAL;
 
@@ -1141,7 +1141,7 @@ static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
 static int fuse_read_single_forget(struct fuse_iqueue *fiq,
 				   struct fuse_copy_state *cs,
 				   size_t nbytes)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
 {
 	int err;
 	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
@@ -1155,7 +1155,7 @@ __releases(fiq->waitq.lock)
 		.len = sizeof(ih) + sizeof(arg),
 	};
 
-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);
 	kfree(forget);
 	if (nbytes < ih.len)
 		return -EINVAL;
@@ -1173,7 +1173,7 @@ __releases(fiq->waitq.lock)
 
 static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
 				   struct fuse_copy_state *cs, size_t nbytes)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
 {
 	int err;
 	unsigned max_forgets;
@@ -1187,13 +1187,13 @@ __releases(fiq->waitq.lock)
 	};
 
 	if (nbytes < ih.len) {
-		spin_unlock(&fiq->waitq.lock);
+		spin_unlock(&fiq->lock);
 		return -EINVAL;
 	}
 
 	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
 	head = dequeue_forget(fiq, max_forgets, &count);
-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);
 
 	arg.count = count;
 	ih.len += count * sizeof(struct fuse_forget_one);
@@ -1223,7 +1223,7 @@ __releases(fiq->waitq.lock)
 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
 			    struct fuse_copy_state *cs,
 			    size_t nbytes)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
 {
 	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
 		return fuse_read_single_forget(fiq, cs, nbytes);
@@ -1252,16 +1252,19 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
 	unsigned reqsize;
 
  restart:
-	spin_lock(&fiq->waitq.lock);
-	err = -EAGAIN;
-	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
-	    !request_pending(fiq))
-		goto err_unlock;
+	for (;;) {
+		spin_lock(&fiq->lock);
+		if (!fiq->connected || request_pending(fiq))
+			break;
+		spin_unlock(&fiq->lock);
 
-	err = wait_event_interruptible_exclusive_locked(fiq->waitq,
+		if (file->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+		err = wait_event_interruptible_exclusive(fiq->waitq,
 				!fiq->connected || request_pending(fiq));
-	if (err)
-		goto err_unlock;
+		if (err)
+			return err;
+	}
 
 	if (!fiq->connected) {
 		err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
@@ -1285,7 +1288,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
 	req = list_entry(fiq->pending.next, struct fuse_req, list);
 	clear_bit(FR_PENDING, &req->flags);
 	list_del_init(&req->list);
-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);
 
 	in = &req->in;
 	reqsize = in->h.len;
@@ -1342,7 +1345,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
 	return err;
 
  err_unlock:
-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);
 	return err;
 }
 
@@ -2061,12 +2064,12 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
 	fiq = &fud->fc->iq;
 	poll_wait(file, &fiq->waitq, wait);
 
-	spin_lock(&fiq->waitq.lock);
+	spin_lock(&fiq->lock);
 	if (!fiq->connected)
 		mask = EPOLLERR;
 	else if (request_pending(fiq))
 		mask |= EPOLLIN | EPOLLRDNORM;
-	spin_unlock(&fiq->waitq.lock);
+	spin_unlock(&fiq->lock);
 
 	return mask;
 }
@@ -2157,15 +2160,15 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
 		fc->max_background = UINT_MAX;
 		flush_bg_queue(fc);
 
-		spin_lock(&fiq->waitq.lock);
+		spin_lock(&fiq->lock);
 		fiq->connected = 0;
 		list_for_each_entry(req, &fiq->pending, list)
 			clear_bit(FR_PENDING, &req->flags);
 		list_splice_tail_init(&fiq->pending, &to_end);
 		while (forget_pending(fiq))
 			kfree(dequeue_forget(fiq, 1, NULL));
-		wake_up_all_locked(&fiq->waitq);
-		spin_unlock(&fiq->waitq.lock);
+		wake_up_all(&fiq->waitq);
+		spin_unlock(&fiq->lock);
 		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 		end_polls(fc);
 		wake_up_all(&fc->blocked_waitq);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 4cfcb20..04a0cfc 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1700,6 +1700,7 @@ static int fuse_writepage(struct page *page, struct writeback_control *wbc)
 		WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
 
 		redirty_page_for_writepage(wbc, page);
+		unlock_page(page);
 		return 0;
 	}
 
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 9cb45a5..5440c81 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -391,6 +391,9 @@ struct fuse_iqueue {
 	/** Connection established */
 	unsigned connected;
 
+	/** Lock protecting accesses to members of this structure */
+	spinlock_t lock;
+
 	/** Readers of the connection are waiting on this */
 	wait_queue_head_t waitq;
 
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index db9e60b..cb01831 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -585,6 +585,7 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
 static void fuse_iqueue_init(struct fuse_iqueue *fiq)
 {
 	memset(fiq, 0, sizeof(struct fuse_iqueue));
+	spin_lock_init(&fiq->lock);
 	init_waitqueue_head(&fiq->waitq);
 	INIT_LIST_HEAD(&fiq->pending);
 	INIT_LIST_HEAD(&fiq->interrupts);
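
The fuse changes above give the input queue its own spinlock (fiq->lock) instead of piggybacking on fiq->waitq.lock. That lets wakeups use plain wake_up() after the lock is dropped and lets fuse_dev_do_read() sleep in wait_event_interruptible_exclusive() without holding any lock, at the cost of re-checking the queue state after each wakeup. A userspace model of the producer/consumer shape (a pthread condvar is only an analogy for the kernel waitqueue):

/* Analogy only: separate queue lock from the wait mechanism. */
#include <pthread.h>
#include <stdio.h>

struct iqueue {
	pthread_mutex_t lock;		/* the new fiq->lock */
	pthread_cond_t waitq;		/* stands in for fiq->waitq */
	int pending;
};

static void queue_request(struct iqueue *q)
{
	pthread_mutex_lock(&q->lock);
	q->pending++;
	pthread_mutex_unlock(&q->lock);
	pthread_cond_signal(&q->waitq);	/* wake_up() after unlock */
}

static void read_request(struct iqueue *q)
{
	pthread_mutex_lock(&q->lock);
	while (!q->pending)		/* re-check state after wakeup */
		pthread_cond_wait(&q->waitq, &q->lock);
	q->pending--;
	pthread_mutex_unlock(&q->lock);
	puts("got one");
}

int main(void)
{
	struct iqueue q = { PTHREAD_MUTEX_INITIALIZER,
			    PTHREAD_COND_INITIALIZER, 0 };

	queue_request(&q);
	read_request(&q);
	return 0;
}
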
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index d14d71d..52fecced 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1630,6 +1630,7 @@ static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
 			brelse(dibh);
 			up_write(&ip->i_rw_mutex);
 			gfs2_trans_end(sdp);
+			buf_in_tr = false;
 		}
 		gfs2_glock_dq_uninit(rd_gh);
 		cond_resched();
diff --git a/fs/libfs.c b/fs/libfs.c
index 0fb590d..bd2d193d 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -86,58 +86,47 @@ int dcache_dir_close(struct inode *inode, struct file *file)
 EXPORT_SYMBOL(dcache_dir_close);
 
 /* parent is locked at least shared */
-static struct dentry *next_positive(struct dentry *parent,
-				    struct list_head *from,
-				    int count)
+/*
+ * Returns an element of the siblings' list.
+ * We are looking for the <count>th positive dentry after <p>; if
+ * found, the dentry is grabbed and passed to the caller via *<res>.
+ * If no such element exists, the anchor of the list is returned
+ * and *<res> is set to NULL.
+ */
+static struct list_head *scan_positives(struct dentry *cursor,
+					struct list_head *p,
+					loff_t count,
+					struct dentry **res)
 {
-	unsigned *seq = &parent->d_inode->i_dir_seq, n;
-	struct dentry *res;
-	struct list_head *p;
-	bool skipped;
-	int i;
+	struct dentry *dentry = cursor->d_parent, *found = NULL;
 
-retry:
-	i = count;
-	skipped = false;
-	n = smp_load_acquire(seq) & ~1;
-	res = NULL;
-	rcu_read_lock();
-	for (p = from->next; p != &parent->d_subdirs; p = p->next) {
+	spin_lock(&dentry->d_lock);
+	while ((p = p->next) != &dentry->d_subdirs) {
 		struct dentry *d = list_entry(p, struct dentry, d_child);
-		if (!simple_positive(d)) {
-			skipped = true;
-		} else if (!--i) {
-			res = d;
-			break;
+		// we must at least skip cursors, to avoid livelocks
+		if (d->d_flags & DCACHE_DENTRY_CURSOR)
+			continue;
+		if (simple_positive(d) && !--count) {
+			spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+			if (simple_positive(d))
+				found = dget_dlock(d);
+			spin_unlock(&d->d_lock);
+			if (likely(found))
+				break;
+			count = 1;
+		}
+		if (need_resched()) {
+			list_move(&cursor->d_child, p);
+			p = &cursor->d_child;
+			spin_unlock(&dentry->d_lock);
+			cond_resched();
+			spin_lock(&dentry->d_lock);
 		}
 	}
-	rcu_read_unlock();
-	if (skipped) {
-		smp_rmb();
-		if (unlikely(*seq != n))
-			goto retry;
-	}
-	return res;
-}
-
-static void move_cursor(struct dentry *cursor, struct list_head *after)
-{
-	struct dentry *parent = cursor->d_parent;
-	unsigned n, *seq = &parent->d_inode->i_dir_seq;
-	spin_lock(&parent->d_lock);
-	for (;;) {
-		n = *seq;
-		if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
-			break;
-		cpu_relax();
-	}
-	__list_del(cursor->d_child.prev, cursor->d_child.next);
-	if (after)
-		list_add(&cursor->d_child, after);
-	else
-		list_add_tail(&cursor->d_child, &parent->d_subdirs);
-	smp_store_release(seq, n + 2);
-	spin_unlock(&parent->d_lock);
+	spin_unlock(&dentry->d_lock);
+	dput(*res);
+	*res = found;
+	return p;
 }
 
 loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
@@ -153,17 +142,28 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
 			return -EINVAL;
 	}
 	if (offset != file->f_pos) {
-		file->f_pos = offset;
-		if (file->f_pos >= 2) {
-			struct dentry *cursor = file->private_data;
-			struct dentry *to;
-			loff_t n = file->f_pos - 2;
+		struct dentry *cursor = file->private_data;
+		struct dentry *to = NULL;
+		struct list_head *p;
 
-			inode_lock_shared(dentry->d_inode);
-			to = next_positive(dentry, &dentry->d_subdirs, n);
-			move_cursor(cursor, to ? &to->d_child : NULL);
-			inode_unlock_shared(dentry->d_inode);
+		file->f_pos = offset;
+		inode_lock_shared(dentry->d_inode);
+
+		if (file->f_pos > 2) {
+			p = scan_positives(cursor, &dentry->d_subdirs,
+					   file->f_pos - 2, &to);
+			spin_lock(&dentry->d_lock);
+			list_move(&cursor->d_child, p);
+			spin_unlock(&dentry->d_lock);
+		} else {
+			spin_lock(&dentry->d_lock);
+			list_del_init(&cursor->d_child);
+			spin_unlock(&dentry->d_lock);
 		}
+
+		dput(to);
+
+		inode_unlock_shared(dentry->d_inode);
 	}
 	return offset;
 }
@@ -185,25 +185,29 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
 {
 	struct dentry *dentry = file->f_path.dentry;
 	struct dentry *cursor = file->private_data;
-	struct list_head *p = &cursor->d_child;
-	struct dentry *next;
-	bool moved = false;
+	struct list_head *anchor = &dentry->d_subdirs;
+	struct dentry *next = NULL;
+	struct list_head *p;
 
 	if (!dir_emit_dots(file, ctx))
 		return 0;
 
 	if (ctx->pos == 2)
-		p = &dentry->d_subdirs;
-	while ((next = next_positive(dentry, p, 1)) != NULL) {
+		p = anchor;
+	else
+		p = &cursor->d_child;
+
+	while ((p = scan_positives(cursor, p, 1, &next)) != anchor) {
 		if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
 			      d_inode(next)->i_ino, dt_type(d_inode(next))))
 			break;
-		moved = true;
-		p = &next->d_child;
 		ctx->pos++;
 	}
-	if (moved)
-		move_cursor(cursor, p);
+	spin_lock(&dentry->d_lock);
+	list_move_tail(&cursor->d_child, p);
+	spin_unlock(&dentry->d_lock);
+	dput(next);
+
 	return 0;
 }
 EXPORT_SYMBOL(dcache_readdir);
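
The libfs rewrite above retires the lockless next_positive()/move_cursor() scheme (and its i_dir_seq retry loop) in favor of scan_positives(): a scan under the parent's d_lock that takes a proper reference on each positive dentry it returns, parks the cursor in place before cond_resched() so long directories don't monopolize the lock, and skips other cursors to avoid livelock between concurrent readers. The core of the scan in miniature, with simplified stand-in types:

/* Core of the scan: skip cursors, stop at the Nth positive entry. */
#include <stdio.h>

struct node {
	struct node *next;
	int is_cursor;
	int positive;
	const char *name;
};

static struct node *scan(struct node *anchor, struct node *p, int count,
			 struct node **res)
{
	*res = NULL;
	while ((p = p->next) != anchor) {
		if (p->is_cursor)
			continue;	/* never count other cursors */
		if (p->positive && !--count) {
			*res = p;	/* kernel: dget_dlock(d) here */
			break;
		}
	}
	return p;			/* anchor if nothing was found */
}

int main(void)
{
	struct node anchor = {0}, a = {0}, cur = {0}, b = {0};
	struct node *found;

	anchor.next = &a; a.next = &cur; cur.next = &b; b.next = &anchor;
	a.positive = 0; a.name = "negative";
	cur.is_cursor = 1;
	b.positive = 1; b.name = "file1";

	scan(&anchor, &anchor, 1, &found);
	printf("found: %s\n", found ? found->name : "(none)");
	return 0;
}
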
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 75fe92e..1624618 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -153,7 +153,7 @@ static int nfs_delegation_claim_opens(struct inode *inode,
 		/* Block nfs4_proc_unlck */
 		mutex_lock(&sp->so_delegreturn_mutex);
 		seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
-		err = nfs4_open_delegation_recall(ctx, state, stateid, type);
+		err = nfs4_open_delegation_recall(ctx, state, stateid);
 		if (!err)
 			err = nfs_delegation_claim_locks(ctx, state, stateid);
 		if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index bb1ef8c..c954778 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -61,7 +61,7 @@ void nfs_reap_expired_delegations(struct nfs_client *clp);
 
 /* NFSv4 delegation-related procedures */
 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync);
-int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type);
+int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid);
 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
 bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, struct rpc_cred **cred);
 bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 3baeed0..afdf55a 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1487,7 +1487,7 @@ static int nfs_finish_open(struct nfs_open_context *ctx,
 	if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
 		nfs_file_set_open_context(file, ctx);
 	else
-		err = -ESTALE;
+		err = -EOPENSTALE;
 out:
 	return err;
 }
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 33824a0..29b7033 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -122,32 +122,49 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
 }
 
 static void
-nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
+nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
+			    const struct nfs_pgio_header *hdr,
+			    ssize_t dreq_len)
 {
-	int i;
-	ssize_t count;
+	struct nfs_direct_mirror *mirror = &dreq->mirrors[hdr->pgio_mirror_idx];
 
-	WARN_ON_ONCE(dreq->count >= dreq->max_count);
+	if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
+	      test_bit(NFS_IOHDR_EOF, &hdr->flags)))
+		return;
+	if (dreq->max_count >= dreq_len) {
+		dreq->max_count = dreq_len;
+		if (dreq->count > dreq_len)
+			dreq->count = dreq_len;
 
-	if (dreq->mirror_count == 1) {
-		dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
-		dreq->count += hdr->good_bytes;
-	} else {
-		/* mirrored writes */
-		count = dreq->mirrors[hdr->pgio_mirror_idx].count;
-		if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
-			count = hdr->io_start + hdr->good_bytes - dreq->io_start;
-			dreq->mirrors[hdr->pgio_mirror_idx].count = count;
-		}
-		/* update the dreq->count by finding the minimum agreed count from all
-		 * mirrors */
-		count = dreq->mirrors[0].count;
-
-		for (i = 1; i < dreq->mirror_count; i++)
-			count = min(count, dreq->mirrors[i].count);
-
-		dreq->count = count;
+		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
+			dreq->error = hdr->error;
+		else /* Clear outstanding error if this is EOF */
+			dreq->error = 0;
 	}
+	if (mirror->count > dreq_len)
+		mirror->count = dreq_len;
+}
+
+static void
+nfs_direct_count_bytes(struct nfs_direct_req *dreq,
+		       const struct nfs_pgio_header *hdr)
+{
+	struct nfs_direct_mirror *mirror = &dreq->mirrors[hdr->pgio_mirror_idx];
+	loff_t hdr_end = hdr->io_start + hdr->good_bytes;
+	ssize_t dreq_len = 0;
+
+	if (hdr_end > dreq->io_start)
+		dreq_len = hdr_end - dreq->io_start;
+
+	nfs_direct_handle_truncated(dreq, hdr, dreq_len);
+
+	if (dreq_len > dreq->max_count)
+		dreq_len = dreq->max_count;
+
+	if (mirror->count < dreq_len)
+		mirror->count = dreq_len;
+	if (dreq->count < dreq_len)
+		dreq->count = dreq_len;
 }
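
nfs_direct_count_bytes() above reduces the old per-mirror bookkeeping to a
monotonic high-water mark measured from io_start, with
nfs_direct_handle_truncated() clamping max_count (and count) when an error or
EOF shortens the request. A self-contained sketch of just that arithmetic;
struct dreq here is a stripped-down stand-in, not the kernel's nfs_direct_req:

	#include <stdio.h>

	struct dreq {
		long long io_start;	/* offset where the O_DIRECT request began */
		long long count;	/* bytes completed so far (high-water mark) */
		long long max_count;	/* most bytes the request may complete */
	};

	/* Account one completed sub-I/O covering [io_start, io_start + good_bytes). */
	static void count_bytes(struct dreq *d, long long io_start,
				long long good_bytes, int is_eof)
	{
		long long hdr_end = io_start + good_bytes;
		long long dreq_len = hdr_end > d->io_start ? hdr_end - d->io_start : 0;

		/* A short read or EOF truncates the request: clamp the ceiling. */
		if (is_eof && d->max_count >= dreq_len) {
			d->max_count = dreq_len;
			if (d->count > dreq_len)
				d->count = dreq_len;
		}

		if (dreq_len > d->max_count)
			dreq_len = d->max_count;
		if (d->count < dreq_len)
			d->count = dreq_len;	/* only ever grows */
	}

	int main(void)
	{
		struct dreq d = { .io_start = 4096, .count = 0, .max_count = 8192 };

		count_bytes(&d, 4096, 4096, 0);	/* first page completes */
		count_bytes(&d, 8192, 1024, 1);	/* short read at EOF */
		printf("count=%lld max_count=%lld\n", d.count, d.max_count);
		return 0;	/* prints count=5120 max_count=5120 */
	}
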
 
 /*
@@ -400,15 +417,13 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
 	unsigned long bytes = 0;
 	struct nfs_direct_req *dreq = hdr->dreq;
 
-	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
-		goto out_put;
-
 	spin_lock(&dreq->lock);
-	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
-		dreq->error = hdr->error;
-	else
-		nfs_direct_good_bytes(dreq, hdr);
+	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+		spin_unlock(&dreq->lock);
+		goto out_put;
+	}
 
+	nfs_direct_count_bytes(dreq, hdr);
 	spin_unlock(&dreq->lock);
 
 	while (!list_empty(&hdr->pages)) {
@@ -428,7 +443,7 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
 	hdr->release(hdr);
 }
 
-static void nfs_read_sync_pgio_error(struct list_head *head)
+static void nfs_read_sync_pgio_error(struct list_head *head, int error)
 {
 	struct nfs_page *req;
 
@@ -645,6 +660,9 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
 
 	dreq->count = 0;
+	dreq->max_count = 0;
+	list_for_each_entry(req, &reqs, wb_list)
+		dreq->max_count += req->wb_bytes;
 	dreq->verf.committed = NFS_INVALID_STABLE_HOW;
 	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
 	for (i = 0; i < dreq->mirror_count; i++)
@@ -664,8 +682,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 
 	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
 		if (!nfs_pageio_add_request(&desc, req)) {
-			nfs_list_remove_request(req);
-			nfs_list_add_request(req, &failed);
+			nfs_list_move_request(req, &failed);
 			spin_lock(&cinfo.inode->i_lock);
 			dreq->flags = 0;
 			if (desc.pg_error < 0)
@@ -775,17 +792,16 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
 	bool request_commit = false;
 	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
 
-	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
-		goto out_put;
-
 	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 
 	spin_lock(&dreq->lock);
+	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+		spin_unlock(&dreq->lock);
+		goto out_put;
+	}
 
-	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
-		dreq->error = hdr->error;
-	if (dreq->error == 0) {
-		nfs_direct_good_bytes(dreq, hdr);
+	nfs_direct_count_bytes(dreq, hdr);
+	if (hdr->good_bytes != 0) {
 		if (nfs_write_need_commit(hdr)) {
 			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
 				request_commit = true;
@@ -821,7 +837,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
 	hdr->release(hdr);
 }
 
-static void nfs_write_sync_pgio_error(struct list_head *head)
+static void nfs_write_sync_pgio_error(struct list_head *head, int error)
 {
 	struct nfs_page *req;
 
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 4dc8878..a7bc4e0 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -118,6 +118,10 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
 	struct rb_node **p, *parent;
 	int diff;
 
+	nfss->fscache_key = NULL;
+	nfss->fscache = NULL;
+	if (!(nfss->options & NFS_OPTION_FSCACHE))
+		return;
 	if (!uniq) {
 		uniq = "";
 		ulen = 1;
@@ -230,10 +234,11 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
 void nfs_fscache_init_inode(struct inode *inode)
 {
 	struct nfs_fscache_inode_auxdata auxdata;
+	struct nfs_server *nfss = NFS_SERVER(inode);
 	struct nfs_inode *nfsi = NFS_I(inode);
 
 	nfsi->fscache = NULL;
-	if (!S_ISREG(inode->i_mode))
+	if (!(nfss->fscache && S_ISREG(inode->i_mode)))
 		return;
 
 	memset(&auxdata, 0, sizeof(auxdata));
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index 161ba2e..6363ea9 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -186,7 +186,7 @@ static inline void nfs_fscache_wait_on_invalidate(struct inode *inode)
  */
 static inline const char *nfs_server_fscache_state(struct nfs_server *server)
 {
-	if (server->fscache && (server->options & NFS_OPTION_FSCACHE))
+	if (server->fscache)
 		return "yes";
 	return "no ";
 }
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 63287d9..5b61520 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -469,7 +469,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
 
 extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t);
 extern void nfs4_put_state_owner(struct nfs4_state_owner *);
-extern void nfs4_purge_state_owners(struct nfs_server *);
+extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
+extern void nfs4_free_state_owners(struct list_head *head);
 extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
 extern void nfs4_put_open_state(struct nfs4_state *);
 extern void nfs4_close_state(struct nfs4_state *, fmode_t);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 8f53455..86991bc 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -754,9 +754,12 @@ int nfs41_walk_client_list(struct nfs_client *new,
 
 static void nfs4_destroy_server(struct nfs_server *server)
 {
+	LIST_HEAD(freeme);
+
 	nfs_server_return_all_delegations(server);
 	unset_pnfs_layoutdriver(server);
-	nfs4_purge_state_owners(server);
+	nfs4_purge_state_owners(server, &freeme);
+	nfs4_free_state_owners(&freeme);
 }
 
 /*
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 61abbb0..75d3cf8 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -73,13 +73,13 @@ nfs4_file_open(struct inode *inode, struct file *filp)
 	if (IS_ERR(inode)) {
 		err = PTR_ERR(inode);
 		switch (err) {
-		case -EPERM:
-		case -EACCES:
-		case -EDQUOT:
-		case -ENOSPC:
-		case -EROFS:
-			goto out_put_ctx;
 		default:
+			goto out_put_ctx;
+		case -ENOENT:
+		case -ESTALE:
+		case -EISDIR:
+		case -ENOTDIR:
+		case -ELOOP:
 			goto out_drop;
 		}
 	}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 31ae3bd..621e3cf 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2113,12 +2113,10 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
 		case -NFS4ERR_BAD_HIGH_SLOT:
 		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
 		case -NFS4ERR_DEADSESSION:
-			set_bit(NFS_DELEGATED_STATE, &state->flags);
 			nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
 			return -EAGAIN;
 		case -NFS4ERR_STALE_CLIENTID:
 		case -NFS4ERR_STALE_STATEID:
-			set_bit(NFS_DELEGATED_STATE, &state->flags);
 			/* Don't recall a delegation if it was lost */
 			nfs4_schedule_lease_recovery(server->nfs_client);
 			return -EAGAIN;
@@ -2139,7 +2137,6 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
 			return -EAGAIN;
 		case -NFS4ERR_DELAY:
 		case -NFS4ERR_GRACE:
-			set_bit(NFS_DELEGATED_STATE, &state->flags);
 			ssleep(1);
 			return -EAGAIN;
 		case -ENOMEM:
@@ -2155,8 +2152,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
 }
 
 int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
-		struct nfs4_state *state, const nfs4_stateid *stateid,
-		fmode_t type)
+		struct nfs4_state *state, const nfs4_stateid *stateid)
 {
 	struct nfs_server *server = NFS_SERVER(state->inode);
 	struct nfs4_opendata *opendata;
@@ -2167,20 +2163,23 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
 	if (IS_ERR(opendata))
 		return PTR_ERR(opendata);
 	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
-	nfs_state_clear_delegation(state);
-	switch (type & (FMODE_READ|FMODE_WRITE)) {
-	case FMODE_READ|FMODE_WRITE:
-	case FMODE_WRITE:
+	if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
 		err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
 		if (err)
-			break;
+			goto out;
+	}
+	if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
 		err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
 		if (err)
-			break;
-		/* Fall through */
-	case FMODE_READ:
-		err = nfs4_open_recover_helper(opendata, FMODE_READ);
+			goto out;
 	}
+	if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
+		err = nfs4_open_recover_helper(opendata, FMODE_READ);
+		if (err)
+			goto out;
+	}
+	nfs_state_clear_delegation(state);
+out:
 	nfs4_opendata_put(opendata);
 	return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
 }
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 3ba2087..c36ef75 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -628,24 +628,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
 /**
  * nfs4_purge_state_owners - Release all cached state owners
  * @server: nfs_server with cached state owners to release
+ * @head: resulting list of state owners
  *
  * Called at umount time.  Remaining state owners will be on
  * the LRU with ref count of zero.
+ * Note that the state owners are not freed, but are added
+ * to the list @head, which can later be used as an argument
+ * to nfs4_free_state_owners.
  */
-void nfs4_purge_state_owners(struct nfs_server *server)
+void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
 {
 	struct nfs_client *clp = server->nfs_client;
 	struct nfs4_state_owner *sp, *tmp;
-	LIST_HEAD(doomed);
 
 	spin_lock(&clp->cl_lock);
 	list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
-		list_move(&sp->so_lru, &doomed);
+		list_move(&sp->so_lru, head);
 		nfs4_remove_state_owner_locked(sp);
 	}
 	spin_unlock(&clp->cl_lock);
+}
 
-	list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
+/**
+ * nfs4_free_state_owners - Free a list of cached state owners
+ * @head: list of state owners to free
+ *
+ * Frees a list of state owners that was generated by
+ * nfs4_purge_state_owners
+ */
+void nfs4_free_state_owners(struct list_head *head)
+{
+	struct nfs4_state_owner *sp, *tmp;
+
+	list_for_each_entry_safe(sp, tmp, head, so_lru) {
 		list_del(&sp->so_lru);
 		nfs4_free_state_owner(sp);
 	}
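
Splitting nfs4_purge_state_owners() into a purge phase (unlink onto a
caller-supplied list under cl_lock) plus a separate nfs4_free_state_owners()
lets callers free outside the lock; the nfs4_do_reclaim() hunk below relies on
this, since it walks the superblock list under rcu_read_lock() where freeing
must not happen. A userspace analogue of the move-under-lock, free-outside-lock
pattern, with an invented singly-linked list and a pthread mutex for cl_lock:

	#include <stdio.h>
	#include <stdlib.h>
	#include <pthread.h>

	struct owner {
		struct owner *next;
		int id;
	};

	static pthread_mutex_t cl_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct owner *lru;	/* LRU of zero-ref owners */

	/* Phase 1: unlink everything while holding the lock; do not free yet. */
	static struct owner *purge_owners(void)
	{
		struct owner *head;

		pthread_mutex_lock(&cl_lock);
		head = lru;
		lru = NULL;
		pthread_mutex_unlock(&cl_lock);
		return head;
	}

	/* Phase 2: free with no locks held (in the kernel analogue the
	 * freeing path may sleep, so it cannot run under the spinlock). */
	static void free_owners(struct owner *head)
	{
		while (head) {
			struct owner *next = head->next;
			printf("freeing owner %d\n", head->id);
			free(head);
			head = next;
		}
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++) {
			struct owner *o = malloc(sizeof(*o));
			o->id = i;
			o->next = lru;
			lru = o;
		}
		free_owners(purge_owners());
		return 0;
	}
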
@@ -1843,12 +1858,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
 	struct nfs4_state_owner *sp;
 	struct nfs_server *server;
 	struct rb_node *pos;
+	LIST_HEAD(freeme);
 	int status = 0;
 
 restart:
 	rcu_read_lock();
 	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
-		nfs4_purge_state_owners(server);
+		nfs4_purge_state_owners(server, &freeme);
 		spin_lock(&clp->cl_lock);
 		for (pos = rb_first(&server->state_owners);
 		     pos != NULL;
@@ -1877,6 +1893,7 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
 		spin_unlock(&clp->cl_lock);
 	}
 	rcu_read_unlock();
+	nfs4_free_state_owners(&freeme);
 	return 0;
 }
 
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index b7bde12..1c0227c 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1171,7 +1171,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
 		} else
 			*p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
 	}
-	if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
+	if (label && (bmval[2] & FATTR4_WORD2_SECURITY_LABEL)) {
 		*p++ = cpu_to_be32(label->lfs);
 		*p++ = cpu_to_be32(label->pi);
 		*p++ = cpu_to_be32(label->len);
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 0ec6bce..9cf59e2 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -567,7 +567,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
 	}
 
 	hdr->res.fattr   = &hdr->fattr;
-	hdr->res.count   = count;
+	hdr->res.count   = 0;
 	hdr->res.eof     = 0;
 	hdr->res.verf    = &hdr->verf;
 	nfs_fattr_init(&hdr->fattr);
@@ -769,8 +769,7 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
 	pageused = 0;
 	while (!list_empty(head)) {
 		req = nfs_list_entry(head->next);
-		nfs_list_remove_request(req);
-		nfs_list_add_request(req, &hdr->pages);
+		nfs_list_move_request(req, &hdr->pages);
 
 		if (!last_page || last_page != req->wb_page) {
 			pageused++;
@@ -962,8 +961,7 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
 	}
 	if (!nfs_can_coalesce_requests(prev, req, desc))
 		return 0;
-	nfs_list_remove_request(req);
-	nfs_list_add_request(req, &mirror->pg_list);
+	nfs_list_move_request(req, &mirror->pg_list);
 	mirror->pg_count += req->wb_bytes;
 	return 1;
 }
@@ -995,9 +993,8 @@ nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
 {
 	LIST_HEAD(head);
 
-	nfs_list_remove_request(req);
-	nfs_list_add_request(req, &head);
-	desc->pg_completion_ops->error_cleanup(&head);
+	nfs_list_move_request(req, &head);
+	desc->pg_completion_ops->error_cleanup(&head, desc->pg_error);
 }
 
 /**
@@ -1133,7 +1130,8 @@ static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
 
 	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
 		mirror = &desc->pg_mirrors[midx];
-		desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
+		desc->pg_completion_ops->error_cleanup(&mirror->pg_list,
+				desc->pg_error);
 	}
 }
 
@@ -1235,21 +1233,23 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
 int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
 		      struct nfs_pgio_header *hdr)
 {
-	LIST_HEAD(failed);
+	LIST_HEAD(pages);
 
 	desc->pg_io_completion = hdr->io_completion;
 	desc->pg_dreq = hdr->dreq;
-	while (!list_empty(&hdr->pages)) {
-		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+	list_splice_init(&hdr->pages, &pages);
+	while (!list_empty(&pages)) {
+		struct nfs_page *req = nfs_list_entry(pages.next);
 
-		nfs_list_remove_request(req);
 		if (!nfs_pageio_add_request(desc, req))
-			nfs_list_add_request(req, &failed);
+			break;
 	}
 	nfs_pageio_complete(desc);
-	if (!list_empty(&failed)) {
-		list_move(&failed, &hdr->pages);
-		return desc->pg_error < 0 ? desc->pg_error : -EIO;
+	if (!list_empty(&pages)) {
+		int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
+		hdr->completion_ops->error_cleanup(&pages, err);
+		nfs_set_pgio_error(hdr, err, hdr->io_start);
+		return err;
 	}
 	return 0;
 }
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 4931c3a..c818f98 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1426,10 +1426,15 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
 	const nfs4_stateid *res_stateid = NULL;
 	struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
 
-	if (ret == 0) {
-		arg_stateid = &args->stateid;
+	switch (ret) {
+	case -NFS4ERR_NOMATCHING_LAYOUT:
+		break;
+	case 0:
 		if (res->lrs_present)
 			res_stateid = &res->stateid;
+		/* Fallthrough */
+	default:
+		arg_stateid = &args->stateid;
 	}
 	pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
 			res_stateid);
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index e0c257b..0e0335e 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -594,7 +594,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
 		/* Emulate the eof flag, which isn't normally needed in NFSv2
 		 * as it is guaranteed to always return the file attributes
 		 */
-		if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
+		if ((hdr->res.count == 0 && hdr->args.count > 0) ||
+		    hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
 			hdr->res.eof = 1;
 	}
 	return 0;
@@ -615,8 +616,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,
 
 static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
 {
-	if (task->tk_status >= 0)
+	if (task->tk_status >= 0) {
+		hdr->res.count = hdr->args.count;
 		nfs_writeback_update_inode(hdr);
+	}
 	return 0;
 }
 
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 42dbf4f..afd447a 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -205,7 +205,7 @@ static void nfs_initiate_read(struct nfs_pgio_header *hdr,
 }
 
 static void
-nfs_async_read_error(struct list_head *head)
+nfs_async_read_error(struct list_head *head, int error)
 {
 	struct nfs_page	*req;
 
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 6df9b85..d90efde 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2239,6 +2239,7 @@ nfs_compare_remount_data(struct nfs_server *nfss,
 	    data->acdirmin != nfss->acdirmin / HZ ||
 	    data->acdirmax != nfss->acdirmax / HZ ||
 	    data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) ||
+	    (data->options & NFS_OPTION_FSCACHE) != (nfss->options & NFS_OPTION_FSCACHE) ||
 	    data->nfs_server.port != nfss->port ||
 	    data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen ||
 	    !rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address,
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 51d0b79..5ab9979 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1394,20 +1394,27 @@ static void nfs_redirty_request(struct nfs_page *req)
 	nfs_release_request(req);
 }
 
-static void nfs_async_write_error(struct list_head *head)
+static void nfs_async_write_error(struct list_head *head, int error)
 {
 	struct nfs_page	*req;
 
 	while (!list_empty(head)) {
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
+		if (nfs_error_is_fatal(error)) {
+			nfs_context_set_write_error(req->wb_context, error);
+			if (nfs_error_is_fatal_on_server(error)) {
+				nfs_write_error_remove_page(req);
+				continue;
+			}
+		}
 		nfs_redirty_request(req);
 	}
 }
 
 static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
 {
-	nfs_async_write_error(&hdr->pages);
+	nfs_async_write_error(&hdr->pages, 0);
 	filemap_fdatawrite_range(hdr->inode->i_mapping, hdr->args.offset,
 			hdr->args.offset + hdr->args.count - 1);
 }
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 63d701c..c8e9b70 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -105,7 +105,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
 	enum dlm_status status;
 	int actions = 0;
 	int in_use;
-        u8 owner;
+	u8 owner;
+	int recovery_wait = 0;
 
 	mlog(0, "master_node = %d, valblk = %d\n", master_node,
 	     flags & LKM_VALBLK);
@@ -208,9 +209,12 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
 		}
 		if (flags & LKM_CANCEL)
 			lock->cancel_pending = 0;
-		else
-			lock->unlock_pending = 0;
-
+		else {
+			if (!lock->unlock_pending)
+				recovery_wait = 1;
+			else
+				lock->unlock_pending = 0;
+		}
 	}
 
 	/* get an extra ref on lock.  if we are just switching
@@ -244,6 +248,17 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
 	spin_unlock(&res->spinlock);
 	wake_up(&res->wq);
 
+	if (recovery_wait) {
+		spin_lock(&res->spinlock);
+		/* Unlock request will directly succeed after owner dies,
+		 * and the lock is already removed from grant list. We have to
+		 * wait for RECOVERING to finish, or we miss the chance to
+		 * purge it, since removal is much faster than the RECOVERING
+		 * process.
+		 */
+		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING);
+		spin_unlock(&res->spinlock);
+	}
+
 	/* let the caller's final dlm_lock_put handle the actual kfree */
 	if (actions & DLM_UNLOCK_FREE_LOCK) {
 		/* this should always be coupled with list removal */
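
The recovery_wait path above re-takes res->spinlock only after the earlier
unlock and wake_up, so __dlm_wait_on_lockres_flags() can wait for
DLM_LOCK_RES_RECOVERING to clear without the unlock-path locks held. A
userspace sketch of the wait-for-flag-to-clear idea, using a condition
variable in place of the DLM wait queue (names invented):

	#include <stdio.h>
	#include <pthread.h>
	#include <unistd.h>

	#define RES_RECOVERING 0x1

	static pthread_mutex_t res_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t res_cond = PTHREAD_COND_INITIALIZER;
	static unsigned res_flags = RES_RECOVERING;

	/* Analogue of __dlm_wait_on_lockres_flags(): sleep until flags clear. */
	static void wait_on_res_flags(unsigned flags)
	{
		pthread_mutex_lock(&res_lock);
		while (res_flags & flags)
			pthread_cond_wait(&res_cond, &res_lock);
		pthread_mutex_unlock(&res_lock);
	}

	static void *recovery_thread(void *arg)
	{
		(void)arg;
		sleep(1);			/* pretend recovery takes a while */
		pthread_mutex_lock(&res_lock);
		res_flags &= ~RES_RECOVERING;	/* recovery done */
		pthread_cond_broadcast(&res_cond);
		pthread_mutex_unlock(&res_lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, recovery_thread, NULL);
		wait_on_res_flags(RES_RECOVERING);	/* unlock path waits here */
		puts("RECOVERING cleared, safe to purge lockres");
		pthread_join(t, NULL);
		return 0;
	}
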
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index bd34756..c492cbb 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -231,7 +231,8 @@ void ocfs2_recovery_exit(struct ocfs2_super *osb)
 	/* At this point, we know that no more recovery threads can be
 	 * launched, so wait for any recovery completion work to
 	 * complete. */
-	flush_workqueue(osb->ocfs2_wq);
+	if (osb->ocfs2_wq)
+		flush_workqueue(osb->ocfs2_wq);
 
 	/*
 	 * Now that recovery is shut down, and the osb is about to be
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 3020823..a46aff7 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -391,7 +391,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
 	struct ocfs2_dinode *alloc = NULL;
 
 	cancel_delayed_work(&osb->la_enable_wq);
-	flush_workqueue(osb->ocfs2_wq);
+	if (osb->ocfs2_wq)
+		flush_workqueue(osb->ocfs2_wq);
 
 	if (osb->local_alloc_state == OCFS2_LA_UNUSED)
 		goto out;
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 3a24ce3..c146e12 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -3833,7 +3833,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
 	u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
 	int low_bucket = 0, bucket, high_bucket;
 	struct ocfs2_xattr_bucket *search;
-	u32 last_hash;
 	u64 blkno, lower_blkno = 0;
 
 	search = ocfs2_xattr_bucket_new(inode);
@@ -3877,8 +3876,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
 		if (xh->xh_count)
 			xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
 
-		last_hash = le32_to_cpu(xe->xe_name_hash);
-
 		/* record lower_blkno which may be the insert place. */
 		lower_blkno = blkno;
 
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index d0a8920..57cf4b6 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -540,7 +540,7 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
 			      struct ovl_cattr *attr, bool origin)
 {
 	int err;
-	const struct cred *old_cred;
+	const struct cred *old_cred, *hold_cred = NULL;
 	struct cred *override_cred;
 	struct dentry *parent = dentry->d_parent;
 
@@ -575,7 +575,7 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
 				goto out_revert_creds;
 			}
 		}
-		put_cred(override_creds(override_cred));
+		hold_cred = override_creds(override_cred);
 		put_cred(override_cred);
 
 		if (!ovl_dentry_is_whiteout(dentry))
@@ -584,7 +584,9 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
 			err = ovl_create_over_whiteout(dentry, inode, attr);
 	}
 out_revert_creds:
-	ovl_revert_creds(old_cred);
+	ovl_revert_creds(old_cred ?: hold_cred);
+	if (old_cred && hold_cred)
+		put_cred(hold_cred);
 	return err;
 }
 
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index 54e5d17..6fe3038 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -230,9 +230,8 @@ static int ovl_d_to_fh(struct dentry *dentry, char *buf, int buflen)
 	/* Encode an upper or lower file handle */
 	fh = ovl_encode_real_fh(enc_lower ? ovl_dentry_lower(dentry) :
 				ovl_dentry_upper(dentry), !enc_lower);
-	err = PTR_ERR(fh);
 	if (IS_ERR(fh))
-		goto fail;
+		return PTR_ERR(fh);
 
 	err = -EOVERFLOW;
 	if (fh->len > buflen)
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 08881de..be6463e 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -401,7 +401,8 @@ static bool ovl_can_list(const char *s)
 		return true;
 
 	/* Never list trusted.overlay, list other trusted for superuser only */
-	return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
+	return !ovl_is_private_xattr(s) &&
+	       ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
 }
 
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index c5a78db..dc06228 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -70,6 +70,7 @@ struct ovl_fs {
 	bool workdir_locked;
 	/* Traps in ovl inode cache */
 	struct inode *upperdir_trap;
+	struct inode *workbasedir_trap;
 	struct inode *workdir_trap;
 	struct inode *indexdir_trap;
 	/* Inode numbers in all layers do not use the high xino_bits */
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 5fcdab4..e4f7298 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -222,6 +222,7 @@ static void ovl_free_fs(struct ovl_fs *ofs)
 {
 	unsigned i;
 
+	iput(ofs->workbasedir_trap);
 	iput(ofs->indexdir_trap);
 	iput(ofs->workdir_trap);
 	iput(ofs->upperdir_trap);
@@ -1046,6 +1047,25 @@ static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
 	return 0;
 }
 
+/*
+ * Determine how we treat concurrent use of upperdir/workdir based on the
+ * index feature. This papers over mount leaks in container runtimes: for
+ * example, an old overlay mount is leaked, and its upperdir is then used
+ * as a lower layer in a new overlay mount.
+ */
+static int ovl_report_in_use(struct ovl_fs *ofs, const char *name)
+{
+	if (ofs->config.index) {
+		pr_err("overlayfs: %s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n",
+		       name);
+		return -EBUSY;
+	} else {
+		pr_warn("overlayfs: %s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n",
+			name);
+		return 0;
+	}
+}
+
 static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
 			 struct path *upperpath)
 {
@@ -1083,14 +1103,12 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
 	upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME);
 	ofs->upper_mnt = upper_mnt;
 
-	err = -EBUSY;
 	if (ovl_inuse_trylock(ofs->upper_mnt->mnt_root)) {
 		ofs->upperdir_locked = true;
-	} else if (ofs->config.index) {
-		pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
-		goto out;
 	} else {
-		pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
+		err = ovl_report_in_use(ofs, "upperdir");
+		if (err)
+			goto out;
 	}
 
 	err = 0;
@@ -1200,16 +1218,19 @@ static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs,
 
 	ofs->workbasedir = dget(workpath.dentry);
 
-	err = -EBUSY;
 	if (ovl_inuse_trylock(ofs->workbasedir)) {
 		ofs->workdir_locked = true;
-	} else if (ofs->config.index) {
-		pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n");
-		goto out;
 	} else {
-		pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
+		err = ovl_report_in_use(ofs, "workdir");
+		if (err)
+			goto out;
 	}
 
+	err = ovl_setup_trap(sb, ofs->workbasedir, &ofs->workbasedir_trap,
+			     "workdir");
+	if (err)
+		goto out;
+
 	err = ovl_make_workdir(sb, ofs, &workpath);
 
 out:
@@ -1328,16 +1349,16 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
 		if (err < 0)
 			goto out;
 
-		err = -EBUSY;
-		if (ovl_is_inuse(stack[i].dentry)) {
-			pr_err("overlayfs: lowerdir is in-use as upperdir/workdir\n");
-			goto out;
-		}
-
 		err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir");
 		if (err)
 			goto out;
 
+		if (ovl_is_inuse(stack[i].dentry)) {
+			err = ovl_report_in_use(ofs, "lowerdir");
+			if (err)
+				goto out;
+		}
+
 		mnt = clone_private_mount(&stack[i]);
 		err = PTR_ERR(mnt);
 		if (IS_ERR(mnt)) {
@@ -1484,8 +1505,8 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
  * - another layer of this overlayfs instance
  * - upper/work dir of any overlayfs instance
  */
-static int ovl_check_layer(struct super_block *sb, struct dentry *dentry,
-			   const char *name)
+static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
+			   struct dentry *dentry, const char *name)
 {
 	struct dentry *next = dentry, *parent;
 	int err = 0;
@@ -1497,13 +1518,11 @@ static int ovl_check_layer(struct super_block *sb, struct dentry *dentry,
 
 	/* Walk back ancestors to root (inclusive) looking for traps */
 	while (!err && parent != next) {
-		if (ovl_is_inuse(parent)) {
-			err = -EBUSY;
-			pr_err("overlayfs: %s path overlapping in-use upperdir/workdir\n",
-			       name);
-		} else if (ovl_lookup_trap_inode(sb, parent)) {
+		if (ovl_lookup_trap_inode(sb, parent)) {
 			err = -ELOOP;
 			pr_err("overlayfs: overlapping %s path\n", name);
+		} else if (ovl_is_inuse(parent)) {
+			err = ovl_report_in_use(ofs, name);
 		}
 		next = parent;
 		parent = dget_parent(next);
@@ -1524,7 +1543,8 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
 	int i, err;
 
 	if (ofs->upper_mnt) {
-		err = ovl_check_layer(sb, ofs->upper_mnt->mnt_root, "upperdir");
+		err = ovl_check_layer(sb, ofs, ofs->upper_mnt->mnt_root,
+				      "upperdir");
 		if (err)
 			return err;
 
@@ -1535,13 +1555,14 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
 		 * workbasedir.  In that case, we already have their traps in
 		 * inode cache and we will catch that case on lookup.
 		 */
-		err = ovl_check_layer(sb, ofs->workbasedir, "workdir");
+		err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir");
 		if (err)
 			return err;
 	}
 
 	for (i = 0; i < ofs->numlower; i++) {
-		err = ovl_check_layer(sb, ofs->lower_layers[i].mnt->mnt_root,
+		err = ovl_check_layer(sb, ofs,
+				      ofs->lower_layers[i].mnt->mnt_root,
 				      "lowerdir");
 		if (err)
 			return err;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index ad4370e..d538123 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -552,7 +552,7 @@ static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns,
 	unsigned long totalpages = totalram_pages + total_swap_pages;
 	unsigned long points = 0;
 
-	points = oom_badness(task, NULL, NULL, totalpages) *
+	points = oom_badness(task, NULL, NULL, totalpages, false) *
 					1000 / totalpages;
 	seq_printf(m, "%lu\n", points);
 
@@ -3070,7 +3070,7 @@ static ssize_t proc_sched_task_boost_write(struct file *file,
 	err = kstrtoint(strstrip(buffer), 0, &sched_boost);
 	if (err)
 		goto out;
-	if (sched_boost < 0 || sched_boost > 2) {
+	if (sched_boost < TASK_BOOST_NONE || sched_boost >= TASK_BOOST_END) {
 		err = -EINVAL;
 		goto out;
 	}
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 792c78a..64293df 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -42,10 +42,12 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
 		return -EINVAL;
 
 	while (count > 0) {
-		if (pfn_valid(pfn))
-			ppage = pfn_to_page(pfn);
-		else
-			ppage = NULL;
+		/*
+		 * TODO: ZONE_DEVICE support requires identifying
+		 * memmaps that were actually initialized.
+		 */
+		ppage = pfn_to_online_page(pfn);
+
 		if (!ppage || PageSlab(ppage))
 			pcount = 0;
 		else
@@ -216,10 +218,11 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
 		return -EINVAL;
 
 	while (count > 0) {
-		if (pfn_valid(pfn))
-			ppage = pfn_to_page(pfn);
-		else
-			ppage = NULL;
+		/*
+		 * TODO: ZONE_DEVICE support requires identifying
+		 * memmaps that were actually initialized.
+		 */
+		ppage = pfn_to_online_page(pfn);
 
 		if (put_user(stable_page_flags(ppage), out)) {
 			ret = -EFAULT;
@@ -261,10 +264,11 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
 		return -EINVAL;
 
 	while (count > 0) {
-		if (pfn_valid(pfn))
-			ppage = pfn_to_page(pfn);
-		else
-			ppage = NULL;
+		/*
+		 * TODO: ZONE_DEVICE support requires identifying
+		 * memmaps that were actually initialized.
+		 */
+		ppage = pfn_to_online_page(pfn);
 
 		if (ppage)
 			ino = page_cgroup_ino(ppage);
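
All three /proc readers above (kpagecount, kpageflags, kpagecgroup) switch from
pfn_valid() plus pfn_to_page() to the kernel's pfn_to_online_page(), which
folds validity and "is this memmap actually initialized/online" into one lookup
that returns NULL on failure. The shape of the change as a sketch, with a
hypothetical lookup standing in for the real helper:

	#include <stdio.h>
	#include <stddef.h>

	struct page { unsigned long flags; };

	static struct page pages[4];		/* pretend memmap */
	static int online[4] = { 1, 1, 0, 1 };	/* pfn 2 valid but not online */

	/* Stand-in for pfn_to_online_page(): NULL unless the pfn is usable. */
	static struct page *pfn_to_online_page(unsigned long pfn)
	{
		if (pfn >= 4 || !online[pfn])
			return NULL;
		return &pages[pfn];
	}

	int main(void)
	{
		for (unsigned long pfn = 0; pfn < 5; pfn++) {
			struct page *p = pfn_to_online_page(pfn);

			/* Callers test the single NULL result instead of
			 * pfn_valid() followed by an unconditional deref. */
			printf("pfn %lu: %s\n", pfn, p ? "online" : "skipped");
		}
		return 0;
	}
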
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 8cf2218..6f90d91 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -330,10 +330,6 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
 		goto fail;
 	inode->i_mode = S_IFREG | 0444;
 	inode->i_fop = &pstore_file_operations;
-	private = kzalloc(sizeof(*private), GFP_KERNEL);
-	if (!private)
-		goto fail_alloc;
-	private->record = record;
 
 	switch (record->type) {
 	case PSTORE_TYPE_DMESG:
@@ -383,12 +379,16 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
 		break;
 	}
 
+	private = kzalloc(sizeof(*private), GFP_KERNEL);
+	if (!private)
+		goto fail_inode;
+
 	dentry = d_alloc_name(root, name);
 	if (!dentry)
 		goto fail_private;
 
+	private->record = record;
 	inode->i_size = private->total_size = size;
-
 	inode->i_private = private;
 
 	if (record->time.tv_sec)
@@ -404,7 +404,7 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
 
 fail_private:
 	free_pstore_private(private);
-fail_alloc:
+fail_inode:
 	iput(inode);
 
 fail:
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 316c164..015d74e 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -162,6 +162,7 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
 	if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu-%c\n%n",
 		   (time64_t *)&time->tv_sec, &time->tv_nsec, &data_type,
 		   &header_length) == 3) {
+		time->tv_nsec *= 1000;
 		if (data_type == 'C')
 			*compressed = true;
 		else
@@ -169,6 +170,7 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
 	} else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu\n%n",
 			  (time64_t *)&time->tv_sec, &time->tv_nsec,
 			  &header_length) == 2) {
+		time->tv_nsec *= 1000;
 		*compressed = false;
 	} else {
 		time->tv_sec = 0;
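
The ramoops kmsg header appears to store the sub-second field with microsecond
precision (the writer divides tv_nsec by 1000), which is why both sscanf()
branches above now scale the parsed value by 1000 before storing it back into
tv_nsec. A round-trip demonstration under that assumption:

	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec written = { .tv_sec = 1570000000,
					    .tv_nsec = 123456789 };
		char header[64];
		long long sec;
		unsigned long frac;
		struct timespec parsed;

		/* Writer stores microseconds (tv_nsec / 1000) after the dot. */
		snprintf(header, sizeof(header), "%lld.%06lu",
			 (long long)written.tv_sec, written.tv_nsec / 1000);

		sscanf(header, "%lld.%lu", &sec, &frac);
		parsed.tv_sec = (time_t)sec;
		parsed.tv_nsec = frac * 1000;	/* the fix: usec back to nsec */

		printf("header=%s parsed tv_nsec=%ld\n", header, parsed.tv_nsec);
		return 0;
	}
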
diff --git a/fs/read_write.c b/fs/read_write.c
index 40b9b07..2f86e55 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -460,7 +460,7 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
 	return ret;
 }
 
-EXPORT_SYMBOL(vfs_read);
+EXPORT_SYMBOL_GPL(vfs_read);
 
 static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
 {
@@ -560,7 +560,7 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
 	return ret;
 }
 
-EXPORT_SYMBOL(vfs_write);
+EXPORT_SYMBOL_GPL(vfs_write);
 
 static inline loff_t file_pos_read(struct file *file)
 {
@@ -1892,10 +1892,7 @@ int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
 }
 EXPORT_SYMBOL(vfs_clone_file_range);
 
-/*
- * Read a page's worth of file data into the page cache.  Return the page
- * locked.
- */
+/* Read a page's worth of file data into the page cache. */
 static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
 {
 	struct address_space *mapping;
@@ -1911,11 +1908,33 @@ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
 		put_page(page);
 		return ERR_PTR(-EIO);
 	}
-	lock_page(page);
 	return page;
 }
 
 /*
+ * Lock two pages, ensuring that we lock in offset order if the pages are from
+ * the same file.
+ */
+static void vfs_lock_two_pages(struct page *page1, struct page *page2)
+{
+	/* Always lock in order of increasing index. */
+	if (page1->index > page2->index)
+		swap(page1, page2);
+
+	lock_page(page1);
+	if (page1 != page2)
+		lock_page(page2);
+}
+
+/* Unlock two pages, being careful not to unlock the same page twice. */
+static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
+{
+	unlock_page(page1);
+	if (page1 != page2)
+		unlock_page(page2);
+}
+
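
vfs_lock_two_pages() avoids an ABBA deadlock between two dedupe callers by
always taking the page locks in increasing index order, and never locking the
same page twice. The same discipline in a userspace sketch, ordering by
address instead of page index:

	#include <stdio.h>
	#include <pthread.h>

	static void lock_two(pthread_mutex_t *a, pthread_mutex_t *b)
	{
		/* Always lock in one global order (here: ascending address),
		 * so two threads locking the same pair cannot deadlock. */
		if (a > b) {
			pthread_mutex_t *tmp = a;
			a = b;
			b = tmp;
		}
		pthread_mutex_lock(a);
		if (a != b)
			pthread_mutex_lock(b);
	}

	static void unlock_two(pthread_mutex_t *a, pthread_mutex_t *b)
	{
		pthread_mutex_unlock(a);
		if (a != b)
			pthread_mutex_unlock(b);
	}

	int main(void)
	{
		pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
		pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

		lock_two(&m2, &m1);	/* caller's argument order is irrelevant */
		puts("both locked");
		unlock_two(&m2, &m1);
		return 0;
	}
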
+/*
  * Compare extents of two files to see if they are the same.
  * Caller must have locked both inodes to prevent write races.
  */
@@ -1952,10 +1971,24 @@ int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
 		dest_page = vfs_dedupe_get_page(dest, destoff);
 		if (IS_ERR(dest_page)) {
 			error = PTR_ERR(dest_page);
-			unlock_page(src_page);
 			put_page(src_page);
 			goto out_error;
 		}
+
+		vfs_lock_two_pages(src_page, dest_page);
+
+		/*
+		 * Now that we've locked both pages, make sure they're still
+		 * mapped to the file data we're interested in.  If not,
+		 * someone is invalidating pages on us and we lose.
+		 */
+		if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
+		    src_page->mapping != src->i_mapping ||
+		    dest_page->mapping != dest->i_mapping) {
+			same = false;
+			goto unlock;
+		}
+
 		src_addr = kmap_atomic(src_page);
 		dest_addr = kmap_atomic(dest_page);
 
@@ -1967,8 +2000,8 @@ int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
 
 		kunmap_atomic(dest_addr);
 		kunmap_atomic(src_addr);
-		unlock_page(dest_page);
-		unlock_page(src_page);
+unlock:
+		vfs_unlock_two_pages(src_page, dest_page);
 		put_page(dest_page);
 		put_page(src_page);
 
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 1dea7a8..05e58b5 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset)
 		}
 		if (seq_has_overflowed(m))
 			goto Eoverflow;
+		p = m->op->next(m, p, &m->index);
 		if (pos + m->count > offset) {
 			m->from = offset - pos;
 			m->count -= m->from;
@@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset)
 		}
 		pos += m->count;
 		m->count = 0;
-		p = m->op->next(m, p, &m->index);
 		if (pos == offset)
 			break;
 	}
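
Moving the op->next() call above the "pos + m->count > offset" test makes
m->index point past the record that traverse() leaves buffered, so a later
read continues with the following record instead of replaying the current one.
A miniature of that loop over an invented record array (not the seq_file API):

	#include <stdio.h>
	#include <string.h>

	static const char *records[] = { "alpha\n", "beta\n", "gamma\n" };
	#define NRECS 3

	/* Find the record and intra-record offset matching "offset", and
	 * advance the iterator index past the buffered record (the fix). */
	static void traverse(long offset, int *index, int *from)
	{
		long pos = 0;

		for (int i = 0; i < NRECS; ) {
			long len = (long)strlen(records[i]);

			i++;			/* op->next() before the check */
			if (pos + len > offset) {
				*index = i;	/* next read resumes here... */
				*from = (int)(offset - pos); /* ...after the tail */
				return;
			}
			pos += len;
		}
		*index = NRECS;
		*from = 0;
	}

	int main(void)
	{
		int index, from;

		traverse(8, &index, &from);	/* seek into "beta\n" */
		printf("index=%d from=%d\n", index, from); /* index=2 from=2 */
		return 0;
	}
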
diff --git a/fs/statfs.c b/fs/statfs.c
index f021662..56f655f 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -304,19 +304,10 @@ COMPAT_SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct compat_statfs __user *,
 static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf)
 {
 	struct compat_statfs64 buf;
-	if (sizeof(ubuf->f_bsize) == 4) {
-		if ((kbuf->f_type | kbuf->f_bsize | kbuf->f_namelen |
-		     kbuf->f_frsize | kbuf->f_flags) & 0xffffffff00000000ULL)
-			return -EOVERFLOW;
-		/* f_files and f_ffree may be -1; it's okay
-		 * to stuff that into 32 bits */
-		if (kbuf->f_files != 0xffffffffffffffffULL
-		 && (kbuf->f_files & 0xffffffff00000000ULL))
-			return -EOVERFLOW;
-		if (kbuf->f_ffree != 0xffffffffffffffffULL
-		 && (kbuf->f_ffree & 0xffffffff00000000ULL))
-			return -EOVERFLOW;
-	}
+
+	if ((kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL)
+		return -EOVERFLOW;
+
 	memset(&buf, 0, sizeof(struct compat_statfs64));
 	buf.f_type = kbuf->f_type;
 	buf.f_bsize = kbuf->f_bsize;
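
The simplified put_compat_statfs64() above only rejects values whose upper 32
bits are set in f_bsize and f_frsize, the fields that remain 32-bit in the
compat layout and so must round-trip exactly; the remaining fields are either
64-bit wide or tolerate truncation. The mask test in isolation:

	#include <stdio.h>
	#include <stdint.h>

	/* True if v cannot be represented in 32 bits. */
	static int overflows_u32(uint64_t v)
	{
		return (v & 0xffffffff00000000ULL) != 0;
	}

	int main(void)
	{
		uint64_t f_bsize = 4096;
		uint64_t f_frsize = 1ULL << 33; /* would be mangled by truncation */

		/* OR-ing the fields tests them all with a single branch,
		 * as in put_compat_statfs64(). */
		if (overflows_u32(f_bsize | f_frsize))
			puts("-EOVERFLOW");
		else
			puts("ok");
		return 0;
	}
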
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index bf416e5..f15ac37 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -1165,8 +1165,8 @@ static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
  *   o exact match, i.e. the found zero-level znode contains key @key, then %1
  *     is returned and slot number of the matched branch is stored in @n;
  *   o not exact match, which means that zero-level znode does not contain
- *     @key, then %0 is returned and slot number of the closest branch is stored
- *     in @n;
+ *     @key, then %0 is returned and the slot number of the closest branch or
+ *     %-1 is stored in @n; in this case calling tnc_next() is mandatory.
  *   o @key is so small that it is even less than the lowest key of the
  *     leftmost zero-level node, then %0 is returned and %0 is stored in @n.
  *
@@ -1883,13 +1883,19 @@ int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
 
 static int search_dh_cookie(struct ubifs_info *c, const union ubifs_key *key,
 			    struct ubifs_dent_node *dent, uint32_t cookie,
-			    struct ubifs_znode **zn, int *n)
+			    struct ubifs_znode **zn, int *n, int exact)
 {
 	int err;
 	struct ubifs_znode *znode = *zn;
 	struct ubifs_zbranch *zbr;
 	union ubifs_key *dkey;
 
+	if (!exact) {
+		err = tnc_next(c, &znode, n);
+		if (err)
+			return err;
+	}
+
 	for (;;) {
 		zbr = &znode->zbranch[*n];
 		dkey = &zbr->key;
@@ -1931,7 +1937,7 @@ static int do_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
 	if (unlikely(err < 0))
 		goto out_unlock;
 
-	err = search_dh_cookie(c, key, dent, cookie, &znode, &n);
+	err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err);
 
 out_unlock:
 	mutex_unlock(&c->tnc_mutex);
@@ -2718,7 +2724,7 @@ int ubifs_tnc_remove_dh(struct ubifs_info *c, const union ubifs_key *key,
 		if (unlikely(err < 0))
 			goto out_free;
 
-		err = search_dh_cookie(c, key, dent, cookie, &znode, &n);
+		err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err);
 		if (err)
 			goto out_free;
 	}
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 7468c96..5a519bc 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -884,6 +884,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
 	/* len == 0 means wake all */
 	struct userfaultfd_wake_range range = { .len = 0, };
 	unsigned long new_flags;
+	bool still_valid;
 
 	WRITE_ONCE(ctx->released, true);
 
@@ -899,8 +900,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
 	 * taking the mmap_sem for writing.
 	 */
 	down_write(&mm->mmap_sem);
-	if (!mmget_still_valid(mm))
-		goto skip_mm;
+	still_valid = mmget_still_valid(mm);
 	prev = NULL;
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		cond_resched();
@@ -911,22 +911,23 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
 			continue;
 		}
 		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
-		prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
-				 new_flags, vma->anon_vma,
-				 vma->vm_file, vma->vm_pgoff,
-				 vma_policy(vma),
-				 NULL_VM_UFFD_CTX,
-				 vma_get_anon_name(vma));
-		if (prev)
-			vma = prev;
-		else
-			prev = vma;
+		if (still_valid) {
+			prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
+					 new_flags, vma->anon_vma,
+					 vma->vm_file, vma->vm_pgoff,
+					 vma_policy(vma),
+					 NULL_VM_UFFD_CTX,
+					 vma_get_anon_name(vma));
+			if (prev)
+				vma = prev;
+			else
+				prev = vma;
+		}
 		vm_write_begin(vma);
 		WRITE_ONCE(vma->vm_flags, new_flags);
 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
 		vm_write_end(vma);
 	}
-skip_mm:
 	up_write(&mm->mmap_sem);
 	mmput(mm);
 wakeup:
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index c6299f8..6410d3e 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -191,6 +191,121 @@ xfs_attr_calc_size(
 	return nblks;
 }
 
+STATIC int
+xfs_attr_try_sf_addname(
+	struct xfs_inode	*dp,
+	struct xfs_da_args	*args)
+{
+	struct xfs_mount	*mp = dp->i_mount;
+	int			error, error2;
+
+	error = xfs_attr_shortform_addname(args);
+	if (error == -ENOSPC)
+		return error;
+
+	/*
+	 * Commit the shortform mods, and we're done.
+	 * NOTE: this is also the error path (EEXIST, etc).
+	 */
+	if (!error && (args->flags & ATTR_KERNOTIME) == 0)
+		xfs_trans_ichgtime(args->trans, dp, XFS_ICHGTIME_CHG);
+
+	if (mp->m_flags & XFS_MOUNT_WSYNC)
+		xfs_trans_set_sync(args->trans);
+
+	error2 = xfs_trans_commit(args->trans);
+	args->trans = NULL;
+	return error ? error : error2;
+}
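
xfs_attr_try_sf_addname() commits the transaction even when the shortform add
failed with something other than -ENOSPC (EEXIST and friends, per the NOTE),
and the final "error ? error : error2" reports the first failure while still
surfacing a commit error on an otherwise clean add. The pattern, reduced to a
standalone sketch with invented stand-in functions:

	#include <stdio.h>
	#include <errno.h>

	static int do_op(int fail) { return fail ? -EEXIST : 0; }
	static int do_commit(void) { return 0; }	/* commit may fail too */

	/* Run the operation, always commit, and report the first error. */
	static int op_and_commit(int fail)
	{
		int error = do_op(fail);
		int error2;

		if (error == -ENOSPC)
			return error;	/* caller retries another strategy */

		error2 = do_commit();	/* commit doubles as the error path */
		return error ? error : error2;
	}

	int main(void)
	{
		printf("clean:  %d\n", op_and_commit(0));
		printf("exists: %d\n", op_and_commit(1)); /* -EEXIST preserved */
		return 0;
	}
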
+
+/*
+ * Set the attribute specified in @args.
+ */
+int
+xfs_attr_set_args(
+	struct xfs_da_args	*args)
+{
+	struct xfs_inode	*dp = args->dp;
+	struct xfs_buf          *leaf_bp = NULL;
+	int			error;
+
+	/*
+	 * If the attribute list is non-existent or a shortform list,
+	 * upgrade it to a single-leaf-block attribute list.
+	 */
+	if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
+	    (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+	     dp->i_d.di_anextents == 0)) {
+
+		/*
+		 * Build initial attribute list (if required).
+		 */
+		if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
+			xfs_attr_shortform_create(args);
+
+		/*
+		 * Try to add the attr to the attribute list in the inode.
+		 */
+		error = xfs_attr_try_sf_addname(dp, args);
+		if (error != -ENOSPC)
+			return error;
+
+		/*
+		 * It won't fit in the shortform, transform to a leaf block.
+		 * GROT: another possible req'mt for a double-split btree op.
+		 */
+		error = xfs_attr_shortform_to_leaf(args, &leaf_bp);
+		if (error)
+			return error;
+
+		/*
+		 * Prevent the leaf buffer from being unlocked so that a
+		 * concurrent AIL push cannot grab the half-baked leaf
+		 * buffer and run into problems with the write verifier.
+		 * Once we're done rolling the transaction we can release
+		 * the hold and add the attr to the leaf.
+		 */
+		xfs_trans_bhold(args->trans, leaf_bp);
+		error = xfs_defer_finish(&args->trans);
+		xfs_trans_bhold_release(args->trans, leaf_bp);
+		if (error) {
+			xfs_trans_brelse(args->trans, leaf_bp);
+			return error;
+		}
+	}
+
+	if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
+		error = xfs_attr_leaf_addname(args);
+	else
+		error = xfs_attr_node_addname(args);
+	return error;
+}
+
+/*
+ * Remove the attribute specified in @args.
+ */
+int
+xfs_attr_remove_args(
+	struct xfs_da_args      *args)
+{
+	struct xfs_inode	*dp = args->dp;
+	int			error;
+
+	if (!xfs_inode_hasattr(dp)) {
+		error = -ENOATTR;
+	} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+		ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
+		error = xfs_attr_shortform_remove(args);
+	} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
+		error = xfs_attr_leaf_removename(args);
+	} else {
+		error = xfs_attr_node_removename(args);
+	}
+
+	return error;
+}
+
 int
 xfs_attr_set(
 	struct xfs_inode	*dp,
@@ -200,11 +315,10 @@ xfs_attr_set(
 	int			flags)
 {
 	struct xfs_mount	*mp = dp->i_mount;
-	struct xfs_buf		*leaf_bp = NULL;
 	struct xfs_da_args	args;
 	struct xfs_trans_res	tres;
 	int			rsvd = (flags & ATTR_ROOT) != 0;
-	int			error, err2, local;
+	int			error, local;
 
 	XFS_STATS_INC(mp, xs_attr_set);
 
@@ -255,93 +369,17 @@ xfs_attr_set(
 	error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0,
 				rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
 				       XFS_QMOPT_RES_REGBLKS);
-	if (error) {
-		xfs_iunlock(dp, XFS_ILOCK_EXCL);
-		xfs_trans_cancel(args.trans);
-		return error;
-	}
+	if (error)
+		goto out_trans_cancel;
 
 	xfs_trans_ijoin(args.trans, dp, 0);
-
-	/*
-	 * If the attribute list is non-existent or a shortform list,
-	 * upgrade it to a single-leaf-block attribute list.
-	 */
-	if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
-	    (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
-	     dp->i_d.di_anextents == 0)) {
-
-		/*
-		 * Build initial attribute list (if required).
-		 */
-		if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
-			xfs_attr_shortform_create(&args);
-
-		/*
-		 * Try to add the attr to the attribute list in
-		 * the inode.
-		 */
-		error = xfs_attr_shortform_addname(&args);
-		if (error != -ENOSPC) {
-			/*
-			 * Commit the shortform mods, and we're done.
-			 * NOTE: this is also the error path (EEXIST, etc).
-			 */
-			ASSERT(args.trans != NULL);
-
-			/*
-			 * If this is a synchronous mount, make sure that
-			 * the transaction goes to disk before returning
-			 * to the user.
-			 */
-			if (mp->m_flags & XFS_MOUNT_WSYNC)
-				xfs_trans_set_sync(args.trans);
-
-			if (!error && (flags & ATTR_KERNOTIME) == 0) {
-				xfs_trans_ichgtime(args.trans, dp,
-							XFS_ICHGTIME_CHG);
-			}
-			err2 = xfs_trans_commit(args.trans);
-			xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
-			return error ? error : err2;
-		}
-
-		/*
-		 * It won't fit in the shortform, transform to a leaf block.
-		 * GROT: another possible req'mt for a double-split btree op.
-		 */
-		error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);
-		if (error)
-			goto out;
-		/*
-		 * Prevent the leaf buffer from being unlocked so that a
-		 * concurrent AIL push cannot grab the half-baked leaf
-		 * buffer and run into problems with the write verifier.
-		 */
-		xfs_trans_bhold(args.trans, leaf_bp);
-		error = xfs_defer_finish(&args.trans);
-		if (error)
-			goto out;
-
-		/*
-		 * Commit the leaf transformation.  We'll need another (linked)
-		 * transaction to add the new attribute to the leaf, which
-		 * means that we have to hold & join the leaf buffer here too.
-		 */
-		error = xfs_trans_roll_inode(&args.trans, dp);
-		if (error)
-			goto out;
-		xfs_trans_bjoin(args.trans, leaf_bp);
-		leaf_bp = NULL;
-	}
-
-	if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
-		error = xfs_attr_leaf_addname(&args);
-	else
-		error = xfs_attr_node_addname(&args);
+	error = xfs_attr_set_args(&args);
 	if (error)
-		goto out;
+		goto out_trans_cancel;
+	if (!args.trans) {
+		/* shortform attribute has already been committed */
+		goto out_unlock;
+	}
 
 	/*
 	 * If this is a synchronous mount, make sure that the
@@ -358,17 +396,14 @@ xfs_attr_set(
 	 */
 	xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
 	error = xfs_trans_commit(args.trans);
+out_unlock:
 	xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
 	return error;
 
-out:
-	if (leaf_bp)
-		xfs_trans_brelse(args.trans, leaf_bp);
+out_trans_cancel:
 	if (args.trans)
 		xfs_trans_cancel(args.trans);
-	xfs_iunlock(dp, XFS_ILOCK_EXCL);
-	return error;
+	goto out_unlock;
 }
 
 /*
@@ -423,17 +458,7 @@ xfs_attr_remove(
 	 */
 	xfs_trans_ijoin(args.trans, dp, 0);
 
-	if (!xfs_inode_hasattr(dp)) {
-		error = -ENOATTR;
-	} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
-		ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
-		error = xfs_attr_shortform_remove(&args);
-	} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
-		error = xfs_attr_leaf_removename(&args);
-	} else {
-		error = xfs_attr_node_removename(&args);
-	}
-
+	error = xfs_attr_remove_args(&args);
 	if (error)
 		goto out;
 
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
similarity index 97%
rename from fs/xfs/xfs_attr.h
rename to fs/xfs/libxfs/xfs_attr.h
index 033ff8c..cc04ee0 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/libxfs/xfs_attr.h
@@ -140,7 +140,9 @@ int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
 		 unsigned char *value, int *valuelenp, int flags);
 int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
 		 unsigned char *value, int valuelen, int flags);
+int xfs_attr_set_args(struct xfs_da_args *args);
 int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
+int xfs_attr_remove_args(struct xfs_da_args *args);
 int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
 		  int flags, struct attrlist_cursor_kern *cursor);
 
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 3a496ff..38dc0b4 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1019,6 +1019,34 @@ xfs_bmap_add_attrfork_local(
 	return -EFSCORRUPTED;
 }
 
+/* Set an inode attr fork off based on the format */
+int
+xfs_bmap_set_attrforkoff(
+	struct xfs_inode	*ip,
+	int			size,
+	int			*version)
+{
+	switch (ip->i_d.di_format) {
+	case XFS_DINODE_FMT_DEV:
+		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
+		break;
+	case XFS_DINODE_FMT_LOCAL:
+	case XFS_DINODE_FMT_EXTENTS:
+	case XFS_DINODE_FMT_BTREE:
+		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
+		if (!ip->i_d.di_forkoff)
+			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
+		else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version)
+			*version = 2;
+		break;
+	default:
+		ASSERT(0);
+		return -EINVAL;
+	}
+
+	return 0;
+}
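
The di_forkoff values set above are stored in units of 8 bytes, hence the
roundup(..., 8) >> 3 in the DEV case; with a 4-byte xfs_dev_t that yields 1,
meaning the attr fork starts 8 bytes into the data area. A standalone check of
the arithmetic, with roundup() reproduced as in the kernel's kernel.h:

	#include <stdio.h>

	/* Same definition as the kernel's roundup() for positive y. */
	#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

	int main(void)
	{
		unsigned long dev_size = 4;	/* sizeof(xfs_dev_t) is 4 bytes */

		/* Fork offset is kept in 8-byte units: round up, then >> 3. */
		unsigned long forkoff = roundup(dev_size, 8UL) >> 3;

		printf("di_forkoff = %lu (x 8 bytes = %lu)\n",
		       forkoff, forkoff << 3);
		return 0;
	}
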
+
 /*
  * Convert inode from non-attributed to attributed.
  * Must not be in a transaction, ip must not be locked.
@@ -1070,26 +1098,9 @@ xfs_bmap_add_attrfork(
 
 	xfs_trans_ijoin(tp, ip, 0);
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
-	switch (ip->i_d.di_format) {
-	case XFS_DINODE_FMT_DEV:
-		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
-		break;
-	case XFS_DINODE_FMT_LOCAL:
-	case XFS_DINODE_FMT_EXTENTS:
-	case XFS_DINODE_FMT_BTREE:
-		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
-		if (!ip->i_d.di_forkoff)
-			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
-		else if (mp->m_flags & XFS_MOUNT_ATTR2)
-			version = 2;
-		break;
-	default:
-		ASSERT(0);
-		error = -EINVAL;
+	error = xfs_bmap_set_attrforkoff(ip, size, &version);
+	if (error)
 		goto trans_cancel;
-	}
-
 	ASSERT(ip->i_afp == NULL);
 	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
 	ip->i_afp->if_flags = XFS_IFEXTENTS;
@@ -1178,7 +1189,10 @@ xfs_iread_extents(
 	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
 	 */
 	level = be16_to_cpu(block->bb_level);
-	ASSERT(level > 0);
+	if (unlikely(level == 0)) {
+		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+		return -EFSCORRUPTED;
+	}
 	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
 	bno = be64_to_cpu(*pp);
 
@@ -3827,15 +3841,28 @@ xfs_bmapi_read(
 	XFS_STATS_INC(mp, xs_blk_mapr);
 
 	ifp = XFS_IFORK_PTR(ip, whichfork);
+	if (!ifp) {
+		/* No CoW fork?  Return a hole. */
+		if (whichfork == XFS_COW_FORK) {
+			mval->br_startoff = bno;
+			mval->br_startblock = HOLESTARTBLOCK;
+			mval->br_blockcount = len;
+			mval->br_state = XFS_EXT_NORM;
+			*nmap = 1;
+			return 0;
+		}
 
-	/* No CoW fork?  Return a hole. */
-	if (whichfork == XFS_COW_FORK && !ifp) {
-		mval->br_startoff = bno;
-		mval->br_startblock = HOLESTARTBLOCK;
-		mval->br_blockcount = len;
-		mval->br_state = XFS_EXT_NORM;
-		*nmap = 1;
-		return 0;
+		/*
+		 * A missing attr ifork implies that the inode says we're in
+		 * extents or btree format but failed to pass the inode fork
+		 * verifier while trying to load it.  Treat that as file
+		 * corruption too.
+		 */
+#ifdef DEBUG
+		xfs_alert(mp, "%s: inode %llu missing fork %d",
+				__func__, ip->i_ino, whichfork);
+#endif /* DEBUG */
+		return -EFSCORRUPTED;
 	}
 
 	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index b6e9b63..488dc88 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -183,6 +183,7 @@ void	xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
 		xfs_filblks_t len);
 void	xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
 int	xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
+int	xfs_bmap_set_attrforkoff(struct xfs_inode *ip, int size, int *version);
 void	xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
 void	__xfs_bmap_add_free(struct xfs_trans *tp, xfs_fsblock_t bno,
 		xfs_filblks_t len, struct xfs_owner_info *oinfo,
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index e792b16..c52beee 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -266,13 +266,15 @@ xfs_defer_trans_roll(
 
 	trace_xfs_defer_trans_roll(tp, _RET_IP_);
 
-	/* Roll the transaction. */
+	/*
+	 * Roll the transaction.  Rolling always gives a new transaction (even
+	 * if committing the old one fails!) to hand back to the caller, so we
+	 * join the held resources to the new transaction so that we always
+	 * return with the held resources joined to @tpp, no matter what
+	 * happened.
+	 */
 	error = xfs_trans_roll(tpp);
 	tp = *tpp;
-	if (error) {
-		trace_xfs_defer_trans_roll_error(tp, error);
-		return error;
-	}
 
 	/* Rejoin the joined inodes. */
 	for (i = 0; i < ipcount; i++)
@@ -284,6 +286,8 @@ xfs_defer_trans_roll(
 		xfs_trans_bhold(tp, bplist[i]);
 	}
 
+	if (error)
+		trace_xfs_defer_trans_roll_error(tp, error);
 	return error;
 }
 
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 87e6dd53..a1af984 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -277,7 +277,8 @@ xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
 
 /*
  * Ensure that the given in-core dquot has a buffer on disk backing it, and
- * return the buffer. This is called when the bmapi finds a hole.
+ * return the buffer locked and held. This is called when the bmapi finds a
+ * hole.
  */
 STATIC int
 xfs_dquot_disk_alloc(
@@ -355,13 +356,14 @@ xfs_dquot_disk_alloc(
 	 * If everything succeeds, the caller of this function is returned a
 	 * buffer that is locked and held to the transaction.  The caller
 	 * is responsible for unlocking any buffer passed back, either
-	 * manually or by committing the transaction.
+	 * manually or by committing the transaction.  On error, the buffer is
+	 * released and not passed back.
 	 */
 	xfs_trans_bhold(tp, bp);
 	error = xfs_defer_finish(tpp);
-	tp = *tpp;
 	if (error) {
-		xfs_buf_relse(bp);
+		xfs_trans_bhold_release(*tpp, bp);
+		xfs_trans_brelse(*tpp, bp);
 		return error;
 	}
 	*bpp = bp;
@@ -521,7 +523,6 @@ xfs_qm_dqread_alloc(
 	struct xfs_buf		**bpp)
 {
 	struct xfs_trans	*tp;
-	struct xfs_buf		*bp;
 	int			error;
 
 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
@@ -529,7 +530,7 @@ xfs_qm_dqread_alloc(
 	if (error)
 		goto err;
 
-	error = xfs_dquot_disk_alloc(&tp, dqp, &bp);
+	error = xfs_dquot_disk_alloc(&tp, dqp, bpp);
 	if (error)
 		goto err_cancel;
 
@@ -539,10 +540,10 @@ xfs_qm_dqread_alloc(
 		 * Buffer was held to the transaction, so we have to unlock it
 		 * manually here because we're not passing it back.
 		 */
-		xfs_buf_relse(bp);
+		xfs_buf_relse(*bpp);
+		*bpp = NULL;
 		goto err;
 	}
-	*bpp = bp;
 	return 0;
 
 err_cancel:
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 74047bd..e427ad0 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -803,6 +803,7 @@ xfs_setattr_nonsize(
 
 out_cancel:
 	xfs_trans_cancel(tp);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out_dqrele:
 	xfs_qm_dqrele(udqp);
 	xfs_qm_dqrele(gdqp);
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index c50ef7e..1d4ef06 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -1472,8 +1472,11 @@ struct acpi_pptt_processor {
 
 /* Flags */
 
-#define ACPI_PPTT_PHYSICAL_PACKAGE          (1)	/* Physical package */
-#define ACPI_PPTT_ACPI_PROCESSOR_ID_VALID   (2)	/* ACPI Processor ID valid */
+#define ACPI_PPTT_PHYSICAL_PACKAGE          (1)
+#define ACPI_PPTT_ACPI_PROCESSOR_ID_VALID   (1<<1)
+#define ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD  (1<<2)	/* ACPI 6.3 */
+#define ACPI_PPTT_ACPI_LEAF_NODE            (1<<3)	/* ACPI 6.3 */
+#define ACPI_PPTT_ACPI_IDENTICAL            (1<<4)	/* ACPI 6.3 */
 
 /* 1: Cache Type Structure */
 
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
index c64bea7..e9f20b8 100644
--- a/include/asm-generic/getorder.h
+++ b/include/asm-generic/getorder.h
@@ -7,24 +7,6 @@
 #include <linux/compiler.h>
 #include <linux/log2.h>
 
-/*
- * Runtime evaluation of get_order()
- */
-static inline __attribute_const__
-int __get_order(unsigned long size)
-{
-	int order;
-
-	size--;
-	size >>= PAGE_SHIFT;
-#if BITS_PER_LONG == 32
-	order = fls(size);
-#else
-	order = fls64(size);
-#endif
-	return order;
-}
-
 /**
  * get_order - Determine the allocation order of a memory size
  * @size: The size for which to get the order
@@ -43,19 +25,27 @@ int __get_order(unsigned long size)
  * to hold an object of the specified size.
  *
  * The result is undefined if the size is 0.
- *
- * This function may be used to initialise variables with compile time
- * evaluations of constants.
  */
-#define get_order(n)						\
-(								\
-	__builtin_constant_p(n) ? (				\
-		((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT :	\
-		(((n) < (1UL << PAGE_SHIFT)) ? 0 :		\
-		 ilog2((n) - 1) - PAGE_SHIFT + 1)		\
-	) :							\
-	__get_order(n)						\
-)
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+	if (__builtin_constant_p(size)) {
+		if (!size)
+			return BITS_PER_LONG - PAGE_SHIFT;
+
+		if (size < (1UL << PAGE_SHIFT))
+			return 0;
+
+		return ilog2(size - 1) - PAGE_SHIFT + 1;
+	}
+
+	size--;
+	size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+	return fls(size);
+#else
+	return fls64(size);
+#endif
+}
 
 #endif	/* __ASSEMBLY__ */
 
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index f9c6e0e..fa117e1 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -174,7 +174,13 @@ struct drm_device {
 	 * races and imprecision over longer time periods, hence exposing a
 	 * hardware vblank counter is always recommended.
 	 *
-	 * If non-zeor, &drm_crtc_funcs.get_vblank_counter must be set.
+	 * This is the statically configured device wide maximum. The driver
+	 * can instead choose to use a runtime configurable per-crtc value
+	 * &drm_vblank_crtc.max_vblank_count, in which case @max_vblank_count
+	 * must be left at zero. See drm_crtc_set_max_vblank_count() on how
+	 * to use the per-crtc value.
+	 *
+	 * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set.
 	 */
 	u32 max_vblank_count;           /**< size of vblank counter register */
 
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index decd918..e0d711e 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1166,6 +1166,7 @@ struct drm_dp_aux {
 	struct device *dev;
 	struct drm_crtc *crtc;
 	struct mutex hw_mutex;
+	struct mutex i2c_mutex;
 	struct work_struct crc_work;
 	u8 crc_count;
 	ssize_t (*transfer)(struct drm_dp_aux *aux,
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index d25a960..e9c6763 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -129,6 +129,26 @@ struct drm_vblank_crtc {
 	 */
 	u32 last;
 	/**
+	 * @max_vblank_count:
+	 *
+	 * Maximum value of the vblank register for this crtc. One past this
+	 * value wraps the register around to zero; the vblank core uses it
+	 * to handle those wrap-arounds.
+	 *
+	 * If set to zero, the vblank core will use high-precision timestamps
+	 * to estimate how many vblanks elapsed while the vblank interrupt was
+	 * disabled. That approach suffers from small races and imprecision
+	 * over longer time periods, hence exposing a hardware vblank counter
+	 * is always recommended.
+	 *
+	 * This is the runtime configurable per-crtc maximum set through
+	 * drm_crtc_set_max_vblank_count(). If this is used the driver
+	 * must leave the device wide &drm_device.max_vblank_count at zero.
+	 *
+	 * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set.
+	 */
+	u32 max_vblank_count;
+	/**
 	 * @inmodeset: Tracks whether the vblank is disabled due to a modeset.
 	 * For legacy driver bit 2 additionally tracks whether an additional
 	 * temporary vblank reference has been acquired to paper over the
@@ -206,4 +226,6 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
 void drm_calc_timestamping_constants(struct drm_crtc *crtc,
 				     const struct drm_display_mode *mode);
 wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc);
+void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
+				   u32 max_vblank_count);
 #endif
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index fbf5cfc9b3..fd965ff 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -386,6 +386,7 @@
 	INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
 	INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
 	INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
+	INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \
 	INTEL_VGA_DEVICE(0x3E9A, info)  /* SRV GT2 */
 
 /* CFL H */
diff --git a/include/dt-bindings/clock/qcom,camcc-lagoon.h b/include/dt-bindings/clock/qcom,camcc-lagoon.h
new file mode 100644
index 0000000..a1733f5
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,camcc-lagoon.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_LAGOON_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_LAGOON_H
+
+/* CAM_CC clocks */
+#define CAM_CC_PLL0			0
+#define CAM_CC_PLL0_OUT_EVEN		1
+#define CAM_CC_PLL1			2
+#define CAM_CC_PLL1_OUT_EVEN		3
+#define CAM_CC_PLL2			4
+#define CAM_CC_PLL2_OUT_MAIN		5
+#define CAM_CC_PLL3			6
+#define CAM_CC_BPS_AHB_CLK		7
+#define CAM_CC_BPS_AREG_CLK		8
+#define CAM_CC_BPS_AXI_CLK		9
+#define CAM_CC_BPS_CLK			10
+#define CAM_CC_BPS_CLK_SRC		11
+#define CAM_CC_CAMNOC_ATB_CLK		12
+#define CAM_CC_CAMNOC_AXI_CLK		13
+#define CAM_CC_CCI_0_CLK		14
+#define CAM_CC_CCI_0_CLK_SRC		15
+#define CAM_CC_CCI_1_CLK		16
+#define CAM_CC_CCI_1_CLK_SRC		17
+#define CAM_CC_CORE_AHB_CLK		18
+#define CAM_CC_CPAS_AHB_CLK		19
+#define CAM_CC_CPHY_RX_CLK_SRC		20
+#define CAM_CC_CSI0PHYTIMER_CLK		21
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC	22
+#define CAM_CC_CSI1PHYTIMER_CLK		23
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC	24
+#define CAM_CC_CSI2PHYTIMER_CLK		25
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC	26
+#define CAM_CC_CSI3PHYTIMER_CLK		27
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC	28
+#define CAM_CC_CSIPHY0_CLK		29
+#define CAM_CC_CSIPHY1_CLK		30
+#define CAM_CC_CSIPHY2_CLK		31
+#define CAM_CC_CSIPHY3_CLK		32
+#define CAM_CC_FAST_AHB_CLK_SRC		33
+#define CAM_CC_ICP_APB_CLK		34
+#define CAM_CC_ICP_ATB_CLK		35
+#define CAM_CC_ICP_CLK			36
+#define CAM_CC_ICP_CLK_SRC		37
+#define CAM_CC_ICP_CTI_CLK		38
+#define CAM_CC_ICP_TS_CLK		39
+#define CAM_CC_IFE_0_AXI_CLK		40
+#define CAM_CC_IFE_0_CLK		41
+#define CAM_CC_IFE_0_CLK_SRC		42
+#define CAM_CC_IFE_0_CPHY_RX_CLK	43
+#define CAM_CC_IFE_0_CSID_CLK		44
+#define CAM_CC_IFE_0_CSID_CLK_SRC	45
+#define CAM_CC_IFE_0_DSP_CLK		46
+#define CAM_CC_IFE_1_AXI_CLK		47
+#define CAM_CC_IFE_1_CLK		48
+#define CAM_CC_IFE_1_CLK_SRC		49
+#define CAM_CC_IFE_1_CPHY_RX_CLK	50
+#define CAM_CC_IFE_1_CSID_CLK		51
+#define CAM_CC_IFE_1_CSID_CLK_SRC	52
+#define CAM_CC_IFE_1_DSP_CLK		53
+#define CAM_CC_IFE_2_AXI_CLK		54
+#define CAM_CC_IFE_2_CLK		55
+#define CAM_CC_IFE_2_CLK_SRC		56
+#define CAM_CC_IFE_2_CPHY_RX_CLK	57
+#define CAM_CC_IFE_2_CSID_CLK		58
+#define CAM_CC_IFE_2_CSID_CLK_SRC	59
+#define CAM_CC_IFE_2_DSP_CLK		60
+#define CAM_CC_IFE_LITE_CLK		61
+#define CAM_CC_IFE_LITE_CLK_SRC		62
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK	63
+#define CAM_CC_IFE_LITE_CSID_CLK	64
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC	65
+#define CAM_CC_IPE_0_AHB_CLK		66
+#define CAM_CC_IPE_0_AREG_CLK		67
+#define CAM_CC_IPE_0_AXI_CLK		68
+#define CAM_CC_IPE_0_CLK		69
+#define CAM_CC_IPE_0_CLK_SRC		70
+#define CAM_CC_JPEG_CLK			71
+#define CAM_CC_JPEG_CLK_SRC		72
+#define CAM_CC_LRME_CLK			73
+#define CAM_CC_LRME_CLK_SRC		74
+#define CAM_CC_MCLK0_CLK		75
+#define CAM_CC_MCLK0_CLK_SRC		76
+#define CAM_CC_MCLK1_CLK		77
+#define CAM_CC_MCLK1_CLK_SRC		78
+#define CAM_CC_MCLK2_CLK		79
+#define CAM_CC_MCLK2_CLK_SRC		80
+#define CAM_CC_MCLK3_CLK		81
+#define CAM_CC_MCLK3_CLK_SRC		82
+#define CAM_CC_MCLK4_CLK		83
+#define CAM_CC_MCLK4_CLK_SRC		84
+#define CAM_CC_SLOW_AHB_CLK_SRC		85
+#define CAM_CC_SOC_AHB_CLK		86
+#define CAM_CC_SYS_TMR_CLK		87
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-lagoon.h b/include/dt-bindings/clock/qcom,dispcc-lagoon.h
new file mode 100644
index 0000000..e833be47
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-lagoon.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_LAGOON_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_LAGOON_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0				0
+#define DISP_CC_MDSS_AHB_CLK			1
+#define DISP_CC_MDSS_AHB_CLK_SRC		2
+#define DISP_CC_MDSS_BYTE0_CLK			3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC		4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC		5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK		6
+#define DISP_CC_MDSS_DP_AUX_CLK			7
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC		8
+#define DISP_CC_MDSS_DP_CRYPTO_CLK		9
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC		10
+#define DISP_CC_MDSS_DP_LINK_CLK		11
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC		12
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC	13
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK		14
+#define DISP_CC_MDSS_DP_PIXEL_CLK		15
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC		16
+#define DISP_CC_MDSS_ESC0_CLK			17
+#define DISP_CC_MDSS_ESC0_CLK_SRC		18
+#define DISP_CC_MDSS_MDP_CLK			19
+#define DISP_CC_MDSS_MDP_CLK_SRC		20
+#define DISP_CC_MDSS_MDP_LUT_CLK		21
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK		22
+#define DISP_CC_MDSS_PCLK0_CLK			23
+#define DISP_CC_MDSS_PCLK0_CLK_SRC		24
+#define DISP_CC_MDSS_ROT_CLK			25
+#define DISP_CC_MDSS_ROT_CLK_SRC		26
+#define DISP_CC_MDSS_RSCC_AHB_CLK		27
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK		28
+#define DISP_CC_MDSS_VSYNC_CLK			29
+#define DISP_CC_MDSS_VSYNC_CLK_SRC		30
+#define DISP_CC_SLEEP_CLK			31
+#define DISP_CC_XO_CLK				32
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-bengal.h b/include/dt-bindings/clock/qcom,gcc-bengal.h
index 6e07413..4da52fc 100644
--- a/include/dt-bindings/clock/qcom,gcc-bengal.h
+++ b/include/dt-bindings/clock/qcom,gcc-bengal.h
@@ -173,6 +173,8 @@
 #define GCC_CAMSS_CPHY_0_CLK					165
 #define GCC_CAMSS_CPHY_1_CLK					166
 #define GCC_CAMSS_CPHY_2_CLK					167
+#define GCC_UFS_CLKREF_CLK					168
+#define GCC_DISP_GPLL0_CLK_SRC					169
 
 /* GCC resets */
 #define GCC_QUSB2PHY_PRIM_BCR					0
@@ -183,5 +185,7 @@
 #define GCC_VCODEC0_BCR						6
 #define GCC_VENUS_BCR						7
 #define GCC_VIDEO_INTERFACE_BCR					8
+#define GCC_USB3PHY_PHY_PRIM_SP0_BCR				9
+#define GCC_USB3_PHY_PRIM_SP0_BCR				10
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,gcc-lagoon.h b/include/dt-bindings/clock/qcom,gcc-lagoon.h
new file mode 100644
index 0000000..c3aeb8d
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-lagoon.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_LAGOON_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_LAGOON_H
+
+/* GCC clocks */
+#define GPLL0					0
+#define GPLL0_OUT_EVEN				1
+#define GPLL0_OUT_ODD				2
+#define GPLL6					3
+#define GPLL6_OUT_EVEN				4
+#define GPLL7					5
+#define GCC_AGGRE_CNOC_PERIPH_CENTER_AHB_CLK	6
+#define GCC_AGGRE_NOC_CENTER_AHB_CLK		7
+#define GCC_AGGRE_NOC_PCIE_SF_AXI_CLK		8
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK		9
+#define GCC_AGGRE_NOC_WLAN_AXI_CLK		10
+#define GCC_AGGRE_UFS_PHY_AXI_CLK		11
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK		12
+#define GCC_BOOT_ROM_AHB_CLK			13
+#define GCC_CAMERA_AHB_CLK			14
+#define GCC_CAMERA_AXI_CLK			15
+#define GCC_CAMERA_THROTTLE_NRT_AXI_CLK		16
+#define GCC_CAMERA_THROTTLE_RT_AXI_CLK		17
+#define GCC_CAMERA_XO_CLK			18
+#define GCC_CE1_AHB_CLK				19
+#define GCC_CE1_AXI_CLK				20
+#define GCC_CE1_CLK				21
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK		22
+#define GCC_CPUSS_AHB_CLK			23
+#define GCC_CPUSS_AHB_CLK_SRC			24
+#define GCC_CPUSS_AHB_DIV_CLK_SRC		25
+#define GCC_CPUSS_GNOC_CLK			26
+#define GCC_CPUSS_RBCPR_CLK			27
+#define GCC_DDRSS_GPU_AXI_CLK			28
+#define GCC_DISP_AHB_CLK			29
+#define GCC_DISP_AXI_CLK			30
+#define GCC_DISP_CC_SLEEP_CLK			31
+#define GCC_DISP_CC_XO_CLK			32
+#define GCC_DISP_GPLL0_CLK			33
+#define GCC_DISP_THROTTLE_AXI_CLK		34
+#define GCC_DISP_XO_CLK				35
+#define GCC_GP1_CLK				36
+#define GCC_GP1_CLK_SRC				37
+#define GCC_GP2_CLK				38
+#define GCC_GP2_CLK_SRC				39
+#define GCC_GP3_CLK				40
+#define GCC_GP3_CLK_SRC				41
+#define GCC_GPU_CFG_AHB_CLK			42
+#define GCC_GPU_GPLL0_CLK			43
+#define GCC_GPU_GPLL0_DIV_CLK			44
+#define GCC_GPU_MEMNOC_GFX_CLK			45
+#define GCC_GPU_SNOC_DVM_GFX_CLK		46
+#define GCC_NPU_AXI_CLK				47
+#define GCC_NPU_BWMON_AXI_CLK			48
+#define GCC_NPU_BWMON_DMA_CFG_AHB_CLK		49
+#define GCC_NPU_BWMON_DSP_CFG_AHB_CLK		50
+#define GCC_NPU_CFG_AHB_CLK			51
+#define GCC_NPU_DMA_CLK				52
+#define GCC_NPU_GPLL0_CLK			53
+#define GCC_NPU_GPLL0_DIV_CLK			54
+#define GCC_PCIE_0_AUX_CLK			55
+#define GCC_PCIE_0_AUX_CLK_SRC			56
+#define GCC_PCIE_0_CFG_AHB_CLK			57
+#define GCC_PCIE_0_MSTR_AXI_CLK			58
+#define GCC_PCIE_0_PIPE_CLK			59
+#define GCC_PCIE_0_SLV_AXI_CLK			60
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK		61
+#define GCC_PCIE_PHY_RCHNG_CLK			62
+#define GCC_PCIE_PHY_RCHNG_CLK_SRC		63
+#define GCC_PDM2_CLK				64
+#define GCC_PDM2_CLK_SRC			65
+#define GCC_PDM_AHB_CLK				66
+#define GCC_PDM_XO4_CLK				67
+#define GCC_PRNG_AHB_CLK			68
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK		69
+#define GCC_QUPV3_WRAP0_CORE_CLK		70
+#define GCC_QUPV3_WRAP0_S0_CLK			71
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC		72
+#define GCC_QUPV3_WRAP0_S1_CLK			73
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC		74
+#define GCC_QUPV3_WRAP0_S2_CLK			75
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC		76
+#define GCC_QUPV3_WRAP0_S3_CLK			77
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC		78
+#define GCC_QUPV3_WRAP0_S4_CLK			79
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC		80
+#define GCC_QUPV3_WRAP0_S5_CLK			81
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC		82
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK		83
+#define GCC_QUPV3_WRAP1_CORE_CLK		84
+#define GCC_QUPV3_WRAP1_S0_CLK			85
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC		86
+#define GCC_QUPV3_WRAP1_S1_CLK			87
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC		88
+#define GCC_QUPV3_WRAP1_S2_CLK			89
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC		90
+#define GCC_QUPV3_WRAP1_S3_CLK			91
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC		92
+#define GCC_QUPV3_WRAP1_S4_CLK			93
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC		94
+#define GCC_QUPV3_WRAP1_S5_CLK			95
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC		96
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK		97
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK		98
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK		99
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK		100
+#define GCC_SDCC1_AHB_CLK			101
+#define GCC_SDCC1_APPS_CLK			102
+#define GCC_SDCC1_APPS_CLK_SRC			103
+#define GCC_SDCC1_ICE_CORE_CLK			104
+#define GCC_SDCC1_ICE_CORE_CLK_SRC		105
+#define GCC_SDCC2_AHB_CLK			106
+#define GCC_SDCC2_APPS_CLK			107
+#define GCC_SDCC2_APPS_CLK_SRC			108
+#define GCC_SYS_NOC_CPUSS_AHB_CLK		109
+#define GCC_UFS_MEM_CLKREF_CLK			110
+#define GCC_UFS_PHY_AHB_CLK			111
+#define GCC_UFS_PHY_AXI_CLK			112
+#define GCC_UFS_PHY_AXI_CLK_SRC			113
+#define GCC_UFS_PHY_ICE_CORE_CLK		114
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC		115
+#define GCC_UFS_PHY_PHY_AUX_CLK			116
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC		117
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK		118
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK		119
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK		120
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK		121
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC		122
+#define GCC_USB30_PRIM_MASTER_CLK		123
+#define GCC_USB30_PRIM_MASTER_CLK_SRC		124
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK		125
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC	126
+#define GCC_USB30_PRIM_MOCK_UTMI_DIV_CLK_SRC	127
+#define GCC_USB3_PRIM_CLKREF_CLK		128
+#define GCC_USB30_PRIM_SLEEP_CLK		129
+#define GCC_USB3_PRIM_PHY_AUX_CLK		130
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC		131
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK		132
+#define GCC_USB3_PRIM_PHY_PIPE_CLK		133
+#define GCC_VIDEO_AHB_CLK			134
+#define GCC_VIDEO_AXI_CLK			135
+#define GCC_VIDEO_THROTTLE_AXI_CLK		136
+#define GCC_VIDEO_XO_CLK			137
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK		138
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK		139
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK	140
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK	141
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK		142
+
+/* GCC resets */
+#define GCC_QUSB2PHY_PRIM_BCR			0
+#define GCC_QUSB2PHY_SEC_BCR			1
+#define GCC_SDCC1_BCR				2
+#define GCC_SDCC2_BCR				3
+#define GCC_UFS_PHY_BCR				4
+#define GCC_USB30_PRIM_BCR			5
+#define GCC_PCIE_0_BCR				6
+#define GCC_PCIE_0_PHY_BCR			7
+#define GCC_QUPV3_WRAPPER_0_BCR			8
+#define GCC_QUPV3_WRAPPER_1_BCR			9
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-lagoon.h b/include/dt-bindings/clock/qcom,gpucc-lagoon.h
new file mode 100644
index 0000000..6ad5527
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-lagoon.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_LAGOON_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_LAGOON_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0						0
+#define GPU_CC_PLL0_OUT_EVEN					1
+#define GPU_CC_PLL1						2
+#define GPU_CC_PLL1_OUT_EVEN					3
+#define GPU_CC_ACD_AHB_CLK					4
+#define GPU_CC_ACD_CXO_CLK					5
+#define GPU_CC_AHB_CLK						6
+#define GPU_CC_CRC_AHB_CLK					7
+#define GPU_CC_CX_GFX3D_CLK					8
+#define GPU_CC_CX_GFX3D_SLV_CLK					9
+#define GPU_CC_CX_GMU_CLK					10
+#define GPU_CC_CX_SNOC_DVM_CLK					11
+#define GPU_CC_CXO_AON_CLK					12
+#define GPU_CC_CXO_CLK						13
+#define GPU_CC_GMU_CLK_SRC					14
+#define GPU_CC_GX_CXO_CLK					15
+#define GPU_CC_GX_GFX3D_CLK					16
+#define GPU_CC_GX_GFX3D_CLK_SRC					17
+#define GPU_CC_GX_GMU_CLK					18
+#define GPU_CC_GX_VSENSE_CLK					19
+#define GPU_CC_SLEEP_CLK					20
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,npucc-lagoon.h b/include/dt-bindings/clock/qcom,npucc-lagoon.h
new file mode 100644
index 0000000..bb9c49c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,npucc-lagoon.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_NPU_CC_LAGOON_H
+#define _DT_BINDINGS_CLK_QCOM_NPU_CC_LAGOON_H
+
+/* NPU_CC clocks */
+#define NPU_CC_PLL0			0
+#define NPU_CC_PLL0_OUT_EVEN		1
+#define NPU_CC_PLL1			2
+#define NPU_CC_PLL1_OUT_EVEN		3
+#define NPU_CC_BTO_CORE_CLK		5
+#define NPU_CC_BWMON_CLK		6
+#define NPU_CC_CAL_HM0_CDC_CLK		7
+#define NPU_CC_CAL_HM0_CLK		8
+#define NPU_CC_CAL_HM0_CLK_SRC		9
+#define NPU_CC_CAL_HM0_PERF_CNT_CLK	10
+#define NPU_CC_CORE_CLK			11
+#define NPU_CC_CORE_CLK_SRC		12
+#define NPU_CC_DSP_AHBM_CLK		13
+#define NPU_CC_DSP_AHBS_CLK		14
+#define NPU_CC_DSP_AXI_CLK		15
+#define NPU_CC_NOC_AHB_CLK		16
+#define NPU_CC_NOC_AXI_CLK		17
+#define NPU_CC_NOC_DMA_CLK		18
+#define NPU_CC_RSC_XO_CLK		19
+#define NPU_CC_S2P_CLK			20
+#define NPU_CC_XO_CLK			21
+#define NPU_CC_XO_CLK_SRC		22
+#define NPU_DSP_CORE_CLK_SRC		23
+#define NPU_Q6SS_PLL			24
+
+/* NPU_CC resets */
+#define NPU_CC_CAL_HM0_BCR		0
+#define NPU_CC_CORE_BCR			1
+#define NPU_CC_DSP_BCR			2
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
index c0846fb..bdef14d 100644
--- a/include/dt-bindings/clock/qcom,rpmcc.h
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -47,43 +47,43 @@
 #define RPM_XO_A2				29
 
 /* SMD RPM clocks */
-#define RPM_SMD_XO_CLK_SRC				0
+#define RPM_SMD_XO_CLK_SRC			0
 #define RPM_SMD_XO_A_CLK_SRC			1
-#define RPM_SMD_PCNOC_CLK				2
-#define RPM_SMD_PCNOC_A_CLK				3
-#define RPM_SMD_SNOC_CLK				4
-#define RPM_SMD_SNOC_A_CLK				5
-#define RPM_SMD_BIMC_CLK				6
-#define RPM_SMD_BIMC_A_CLK				7
-#define RPM_SMD_QDSS_CLK				8
-#define RPM_SMD_QDSS_A_CLK				9
+#define RPM_SMD_PCNOC_CLK			2
+#define RPM_SMD_PCNOC_A_CLK			3
+#define RPM_SMD_SNOC_CLK			4
+#define RPM_SMD_SNOC_A_CLK			5
+#define RPM_SMD_BIMC_CLK			6
+#define RPM_SMD_BIMC_A_CLK			7
+#define RPM_SMD_QDSS_CLK			8
+#define RPM_SMD_QDSS_A_CLK			9
 #define RPM_SMD_IPA_CLK				10
-#define RPM_SMD_IPA_A_CLK				11
+#define RPM_SMD_IPA_A_CLK			11
 #define RPM_SMD_QUP_CLK				12
-#define RPM_SMD_QUP_A_CLK				13
-#define RPM_SMD_MMRT_CLK				14
-#define RPM_SMD_MMRT_A_CLK				15
-#define RPM_SMD_MMNRT_CLK				16
-#define RPM_SMD_MMNRT_A_CLK				17
-#define RPM_SMD_SNOC_PERIPH_CLK				18
-#define RPM_SMD_SNOC_PERIPH_A_CLK				19
-#define RPM_SMD_SNOC_LPASS_CLK				20
-#define RPM_SMD_SNOC_LPASS_A_CLK				21
+#define RPM_SMD_QUP_A_CLK			13
+#define RPM_SMD_MMRT_CLK			14
+#define RPM_SMD_MMRT_A_CLK			15
+#define RPM_SMD_MMNRT_CLK			16
+#define RPM_SMD_MMNRT_A_CLK			17
+#define RPM_SMD_SNOC_PERIPH_CLK			18
+#define RPM_SMD_SNOC_PERIPH_A_CLK		19
+#define RPM_SMD_SNOC_LPASS_CLK			20
+#define RPM_SMD_SNOC_LPASS_A_CLK		21
 #define RPM_SMD_BB_CLK1				22
-#define RPM_SMD_BB_CLK1_A				23
-#define RPM_SMD_BB_CLK2					24
-#define RPM_SMD_BB_CLK2_A				25
+#define RPM_SMD_BB_CLK1_A			23
+#define RPM_SMD_BB_CLK2				24
+#define RPM_SMD_BB_CLK2_A			25
 #define RPM_SMD_RF_CLK1				26
-#define RPM_SMD_RF_CLK1_A				27
+#define RPM_SMD_RF_CLK1_A			27
 #define RPM_SMD_RF_CLK2				28
-#define RPM_SMD_RF_CLK2_A				29
-#define RPM_SMD_BB_CLK1_PIN				30
+#define RPM_SMD_RF_CLK2_A			29
+#define RPM_SMD_BB_CLK1_PIN			30
 #define RPM_SMD_BB_CLK1_A_PIN			31
-#define RPM_SMD_BB_CLK2_PIN				32
+#define RPM_SMD_BB_CLK2_PIN			32
 #define RPM_SMD_BB_CLK2_A_PIN			33
-#define RPM_SMD_RF_CLK1_PIN				34
+#define RPM_SMD_RF_CLK1_PIN			34
 #define RPM_SMD_RF_CLK1_A_PIN			35
-#define RPM_SMD_RF_CLK2_PIN				36
+#define RPM_SMD_RF_CLK2_PIN			36
 #define RPM_SMD_RF_CLK2_A_PIN			37
 #define RPM_SMD_PNOC_CLK			38
 #define RPM_SMD_PNOC_A_CLK			39
@@ -109,8 +109,10 @@
 #define RPM_SMD_DIV_A_CLK1			59
 #define RPM_SMD_DIV_CLK2			60
 #define RPM_SMD_DIV_A_CLK2			61
-#define RPM_SMD_DIFF_CLK			62
-#define RPM_SMD_DIFF_A_CLK			63
+#define RPM_SMD_DIV_CLK3			61
+#define RPM_SMD_DIV_A_CLK3			62
+#define RPM_SMD_DIFF_CLK			63
+#define RPM_SMD_DIFF_A_CLK			64
 #define RPM_SMD_CXO_D0_PIN			64
 #define RPM_SMD_CXO_D0_A_PIN			65
 #define RPM_SMD_CXO_D1_PIN			66
@@ -125,8 +127,8 @@
 #define RPM_SMD_QPIC_A_CLK			75
 #define RPM_SMD_CE1_CLK				76
 #define RPM_SMD_CE1_A_CLK			77
-#define RPM_SMD_BIMC_GPU_CLK				78
-#define RPM_SMD_BIMC_GPU_A_CLK				79
+#define RPM_SMD_BIMC_GPU_CLK			78
+#define RPM_SMD_BIMC_GPU_A_CLK			79
 #define RPM_SMD_LN_BB_CLK			80
 #define RPM_SMD_LN_BB_CLK_A			81
 #define RPM_SMD_LN_BB_CLK_PIN			82
@@ -135,72 +137,78 @@
 #define RPM_SMD_RF_CLK3_A			85
 #define RPM_SMD_RF_CLK3_PIN			86
 #define RPM_SMD_RF_CLK3_A_PIN			87
-#define RPM_SMD_LN_BB_CLK1				88
-#define RPM_SMD_LN_BB_CLK1_A				89
-#define RPM_SMD_LN_BB_CLK2				90
-#define RPM_SMD_LN_BB_CLK2_A				91
-#define RPM_SMD_LN_BB_CLK3				92
-#define RPM_SMD_LN_BB_CLK3_A				93
-#define PNOC_MSMBUS_CLK				94
-#define PNOC_MSMBUS_A_CLK			95
-#define PNOC_KEEPALIVE_A_CLK			96
-#define SNOC_MSMBUS_CLK				97
-#define SNOC_MSMBUS_A_CLK			98
-#define BIMC_MSMBUS_CLK				99
-#define BIMC_MSMBUS_A_CLK			100
-#define PNOC_USB_CLK				101
-#define PNOC_USB_A_CLK				102
-#define SNOC_USB_CLK				103
-#define SNOC_USB_A_CLK				104
-#define BIMC_USB_CLK				105
-#define BIMC_USB_A_CLK				106
-#define SNOC_WCNSS_A_CLK			107
-#define BIMC_WCNSS_A_CLK			108
-#define MCD_CE1_CLK				109
-#define QCEDEV_CE1_CLK				110
-#define QCRYPTO_CE1_CLK				111
-#define QSEECOM_CE1_CLK				112
-#define SCM_CE1_CLK				113
-#define CXO_SMD_OTG_CLK				114
-#define CXO_SMD_LPM_CLK				115
-#define CXO_SMD_PIL_PRONTO_CLK			116
-#define CXO_SMD_PIL_MSS_CLK			117
-#define CXO_SMD_WLAN_CLK			118
-#define CXO_SMD_PIL_LPASS_CLK			119
-#define CXO_SMD_PIL_CDSP_CLK			120
-#define CNOC_MSMBUS_CLK				121
-#define CNOC_MSMBUS_A_CLK				122
-#define CNOC_KEEPALIVE_A_CLK				123
-#define SNOC_KEEPALIVE_A_CLK				124
-#define CPP_MMNRT_MSMBUS_CLK				125
-#define CPP_MMNRT_MSMBUS_A_CLK				126
-#define JPEG_MMNRT_MSMBUS_CLK				127
-#define JPEG_MMNRT_MSMBUS_A_CLK				128
-#define VENUS_MMNRT_MSMBUS_CLK				129
-#define VENUS_MMNRT_MSMBUS_A_CLK			130
-#define ARM9_MMNRT_MSMBUS_CLK				131
-#define ARM9_MMNRT_MSMBUS_A_CLK				132
-#define MDP_MMRT_MSMBUS_CLK				133
-#define MDP_MMRT_MSMBUS_A_CLK				134
-#define VFE_MMRT_MSMBUS_CLK				135
-#define VFE_MMRT_MSMBUS_A_CLK				136
-#define QUP0_MSMBUS_SNOC_PERIPH_CLK			137
-#define QUP0_MSMBUS_SNOC_PERIPH_A_CLK			138
-#define QUP1_MSMBUS_SNOC_PERIPH_CLK			139
-#define QUP1_MSMBUS_SNOC_PERIPH_A_CLK			140
-#define QUP2_MSMBUS_SNOC_PERIPH_CLK                     141
-#define QUP2_MSMBUS_SNOC_PERIPH_A_CLK                   142
-#define DAP_MSMBUS_SNOC_PERIPH_CLK			143
-#define DAP_MSMBUS_SNOC_PERIPH_A_CLK			144
-#define SDC1_MSMBUS_SNOC_PERIPH_CLK			145
-#define SDC1_MSMBUS_SNOC_PERIPH_A_CLK			146
-#define SDC2_MSMBUS_SNOC_PERIPH_CLK			147
-#define SDC2_MSMBUS_SNOC_PERIPH_A_CLK			148
-#define CRYPTO_MSMBUS_SNOC_PERIPH_CLK			149
-#define CRYPTO_MSMBUS_SNOC_PERIPH_A_CLK			150
-#define SDC1_SLV_MSMBUS_SNOC_PERIPH_CLK			151
-#define SDC1_SLV_MSMBUS_SNOC_PERIPH_A_CLK		152
-#define SDC2_SLV_MSMBUS_SNOC_PERIPH_CLK			153
-#define SDC2_SLV_MSMBUS_SNOC_PERIPH_A_CLK		154
+#define RPM_SMD_LN_BB_CLK1			88
+#define RPM_SMD_LN_BB_CLK1_A			89
+#define RPM_SMD_LN_BB_CLK2			90
+#define RPM_SMD_LN_BB_CLK2_A			91
+#define RPM_SMD_LN_BB_CLK3			92
+#define RPM_SMD_LN_BB_CLK3_A			93
+#define RPM_SMD_MMAXI_CLK			94
+#define RPM_SMD_MMAXI_A_CLK			95
+#define RPM_SMD_AGGR1_NOC_CLK			96
+#define RPM_SMD_AGGR1_NOC_A_CLK			97
+#define RPM_SMD_AGGR2_NOC_CLK			98
+#define RPM_SMD_AGGR2_NOC_A_CLK			99
+#define PNOC_MSMBUS_CLK				100
+#define PNOC_MSMBUS_A_CLK			101
+#define PNOC_KEEPALIVE_A_CLK			102
+#define SNOC_MSMBUS_CLK				103
+#define SNOC_MSMBUS_A_CLK			104
+#define BIMC_MSMBUS_CLK				105
+#define BIMC_MSMBUS_A_CLK			106
+#define PNOC_USB_CLK				107
+#define PNOC_USB_A_CLK				108
+#define SNOC_USB_CLK				109
+#define SNOC_USB_A_CLK				110
+#define BIMC_USB_CLK				111
+#define BIMC_USB_A_CLK				112
+#define SNOC_WCNSS_A_CLK			113
+#define BIMC_WCNSS_A_CLK			114
+#define MCD_CE1_CLK				115
+#define QCEDEV_CE1_CLK				116
+#define QCRYPTO_CE1_CLK				117
+#define QSEECOM_CE1_CLK				118
+#define SCM_CE1_CLK				119
+#define CXO_SMD_OTG_CLK				120
+#define CXO_SMD_LPM_CLK				121
+#define CXO_SMD_PIL_PRONTO_CLK			122
+#define CXO_SMD_PIL_MSS_CLK			123
+#define CXO_SMD_WLAN_CLK			124
+#define CXO_SMD_PIL_LPASS_CLK			125
+#define CXO_SMD_PIL_CDSP_CLK			126
+#define CNOC_MSMBUS_CLK				127
+#define CNOC_MSMBUS_A_CLK			128
+#define CNOC_KEEPALIVE_A_CLK			129
+#define SNOC_KEEPALIVE_A_CLK			130
+#define CPP_MMNRT_MSMBUS_CLK			131
+#define CPP_MMNRT_MSMBUS_A_CLK			132
+#define JPEG_MMNRT_MSMBUS_CLK			133
+#define JPEG_MMNRT_MSMBUS_A_CLK			134
+#define VENUS_MMNRT_MSMBUS_CLK			135
+#define VENUS_MMNRT_MSMBUS_A_CLK		136
+#define ARM9_MMNRT_MSMBUS_CLK			137
+#define ARM9_MMNRT_MSMBUS_A_CLK			138
+#define MDP_MMRT_MSMBUS_CLK			139
+#define MDP_MMRT_MSMBUS_A_CLK			140
+#define VFE_MMRT_MSMBUS_CLK			141
+#define VFE_MMRT_MSMBUS_A_CLK			142
+#define QUP0_MSMBUS_SNOC_PERIPH_CLK		143
+#define QUP0_MSMBUS_SNOC_PERIPH_A_CLK		144
+#define QUP1_MSMBUS_SNOC_PERIPH_CLK		145
+#define QUP1_MSMBUS_SNOC_PERIPH_A_CLK		146
+#define QUP2_MSMBUS_SNOC_PERIPH_CLK		147
+#define QUP2_MSMBUS_SNOC_PERIPH_A_CLK		148
+#define DAP_MSMBUS_SNOC_PERIPH_CLK		149
+#define DAP_MSMBUS_SNOC_PERIPH_A_CLK		150
+#define SDC1_MSMBUS_SNOC_PERIPH_CLK		151
+#define SDC1_MSMBUS_SNOC_PERIPH_A_CLK		152
+#define SDC2_MSMBUS_SNOC_PERIPH_CLK		153
+#define SDC2_MSMBUS_SNOC_PERIPH_A_CLK		154
+#define CRYPTO_MSMBUS_SNOC_PERIPH_CLK		155
+#define CRYPTO_MSMBUS_SNOC_PERIPH_A_CLK		156
+#define SDC1_SLV_MSMBUS_SNOC_PERIPH_CLK		157
+#define SDC1_SLV_MSMBUS_SNOC_PERIPH_A_CLK	158
+#define SDC2_SLV_MSMBUS_SNOC_PERIPH_CLK		159
+#define SDC2_SLV_MSMBUS_SNOC_PERIPH_A_CLK	160
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,videocc-lagoon.h b/include/dt-bindings/clock/qcom,videocc-lagoon.h
new file mode 100644
index 0000000..0508249
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,videocc-lagoon.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_LAGOON_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_LAGOON_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_PLL0			0
+#define VIDEO_PLL0_OUT_EVEN		1
+#define VIDEO_CC_IRIS_AHB_CLK		2
+#define VIDEO_CC_IRIS_CLK_SRC		3
+#define VIDEO_CC_MVS0_AXI_CLK		4
+#define VIDEO_CC_MVS0_CORE_CLK		5
+#define VIDEO_CC_MVSC_CORE_CLK		6
+#define VIDEO_CC_MVSC_CTL_AXI_CLK	7
+#define VIDEO_CC_SLEEP_CLK		8
+#define VIDEO_CC_SLEEP_CLK_SRC		9
+#define VIDEO_CC_VENUS_AHB_CLK		10
+#define VIDEO_CC_XO_CLK			11
+#define VIDEO_CC_XO_CLK_SRC		12
+
+#endif
diff --git a/include/dt-bindings/iio/qcom,spmi-vadc.h b/include/dt-bindings/iio/qcom,spmi-vadc.h
index f47edbd..38abbee 100644
--- a/include/dt-bindings/iio/qcom,spmi-vadc.h
+++ b/include/dt-bindings/iio/qcom,spmi-vadc.h
@@ -222,4 +222,22 @@
 
 #define ADC_MAX_CHANNEL				0xc0
 
+/* VADC scale function index */
+#define ADC_SCALE_DEFAULT			0x0
+#define ADC_SCALE_THERM_100K_PULLUP		0x1
+#define ADC_SCALE_PMIC_THERM			0x2
+#define ADC_SCALE_XOTHERM			0x3
+#define ADC_SCALE_PMI_CHG_TEMP			0x4
+#define ADC_SCALE_HW_CALIB_DEFAULT		0x5
+#define ADC_SCALE_HW_CALIB_THERM_100K_PULLUP	0x6
+#define ADC_SCALE_HW_CALIB_XOTHERM		0x7
+#define ADC_SCALE_HW_CALIB_PMIC_THERM		0x8
+#define ADC_SCALE_HW_CALIB_CUR			0x9
+#define ADC_SCALE_HW_CALIB_PM5_CHG_TEMP		0xA
+#define ADC_SCALE_HW_CALIB_PM5_SMB_TEMP		0xB
+#define ADC_SCALE_HW_CALIB_BATT_THERM_100K	0xC
+#define ADC_SCALE_HW_CALIB_BATT_THERM_30K	0xD
+#define ADC_SCALE_HW_CALIB_BATT_THERM_400K	0xE
+#define ADC_SCALE_HW_CALIB_PM5_SMB1398_TEMP	0xF
+
 #endif /* _DT_BINDINGS_QCOM_SPMI_VADC_H */
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
index e38cc59..7bfba5b 100644
--- a/include/dt-bindings/msm/msm-bus-ids.h
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -41,6 +41,9 @@
 #define	MSM_BUS_FAB_GEM_NOC 6156
 #define	MSM_BUS_FAB_NPU_NOC 6157
 #define	MSM_BUS_FAB_QUP_VIRT 6158
+#define	MSM_BUS_FAB_GPU_VIRT 6159
+#define	MSM_BUS_FAB_MMNRT_VIRT 6160
+#define	MSM_BUS_FAB_MMRT_VIRT 6161
 
 #define	MSM_BUS_FAB_MC_VIRT_DISPLAY 26000
 #define	MSM_BUS_FAB_MEM_NOC_DISPLAY 26001
@@ -285,6 +288,10 @@
 #define	MSM_BUS_MASTER_ANOC_PCIE_GEM_NOC 175
 #define	MSM_BUS_MASTER_QUP_CORE_0 176
 #define	MSM_BUS_MASTER_QUP_CORE_1 177
+#define	MSM_BUS_MASTER_SNOC_BIMC_RT 178
+#define	MSM_BUS_MASTER_SNOC_BIMC_NRT 179
+#define	MSM_BUS_MASTER_GPU_CDSP_PROC 180
+#define	MSM_BUS_MASTER_ANOC_SNOC 181
 
 #define	MSM_BUS_MASTER_LLCC_DISPLAY 20000
 #define	MSM_BUS_MASTER_MNOC_HF_MEM_NOC_DISPLAY 20001
@@ -678,6 +685,16 @@
 #define	MSM_BUS_SLAVE_QUP_CORE_0 823
 #define	MSM_BUS_SLAVE_QUP_CORE_1 824
 #define	MSM_BUS_SLAVE_EMMC_CFG 825
+#define	MSM_BUS_SLAVE_CDSP_THROTTLE_CFG 826
+#define	MSM_BUS_SLAVE_CAMERA_NRT_THROTTLE_CFG 827
+#define	MSM_BUS_SLAVE_CAMERA_RT_THROTTLE_CFG 828
+#define	MSM_BUS_SLAVE_GPU_CFG 829
+#define	MSM_BUS_SLAVE_GPU_THROTTLE_CFG 830
+#define	MSM_BUS_SLAVE_QM_MPU_CFG 831
+#define	MSM_BUS_SLAVE_SNOC_BIMC_NRT 832
+#define	MSM_BUS_SLAVE_SNOC_BIMC_RT 833
+#define	MSM_BUS_SLAVE_ANOC_SNOC 834
+#define	MSM_BUS_SLAVE_GPU_CDSP_BIMC 835
 
 #define	MSM_BUS_SLAVE_EBI_CH0_DISPLAY 20512
 #define	MSM_BUS_SLAVE_LLCC_DISPLAY 20513
@@ -865,6 +882,16 @@
 #define	ICBID_MASTER_LPASS_LPAIF 159
 #define	ICBID_MASTER_LPASS_LEC 160
 #define	ICBID_MASTER_LPASS_ANOC_BIMC 161
+#define	ICBID_MASTER_SNOC_BIMC_RT 163
+#define	ICBID_MASTER_SNOC_BIMC_NRT 164
+#define	ICBID_MASTER_VIDEO_PROC 168
+#define	ICBID_MASTER_QUP_CORE_0 170
+#define	ICBID_MASTER_QUP_CORE_1 171
+#define	ICBID_MASTER_GPU_CDSP_PROC 165
+#define	ICBID_MASTER_QUP_0 166
+#define	ICBID_MASTER_UFS_MEM 167
+#define	ICBID_MASTER_CAMNOC_SF 172
+#define	ICBID_MASTER_CAMNOC_HF 173
 
 #define	ICBID_SLAVE_EBI1 0
 #define	ICBID_SLAVE_APPSS_L2 1
@@ -1125,4 +1152,23 @@
 #define	ICBID_SLAVE_PCNOC_S_10 245
 #define	ICBID_SLAVE_PCNOC_S_11 246
 #define	ICBID_SLAVE_LPASS_ANOC_BIMC 247
+#define	ICBID_SLAVE_SNOC_BIMC_NRT 248
+#define	ICBID_SLAVE_SNOC_BIMC_RT 249
+#define	ICBID_SLAVE_QUP_0 250
+#define	ICBID_SLAVE_UFS_MEM_CFG 251
+#define	ICBID_SLAVE_VSENSE_CTRL_CFG 252
+#define	ICBID_SLAVE_QUP_CORE_0 253
+#define	ICBID_SLAVE_QUP_CORE_1 254
+#define	ICBID_SLAVE_GPU_CDSP_BIMC 255
+#define	ICBID_SLAVE_AHB2PHY_USB 256
+#define	ICBID_SLAVE_APSS_THROTTLE_CFG 257
+#define	ICBID_SLAVE_CAMERA_NRT_THROTTLE_CFG 258
+#define	ICBID_SLAVE_CDSP_THROTTLE_CFG 259
+#define	ICBID_SLAVE_DDR_PHY_CFG 260
+#define	ICBID_SLAVE_DDR_SS_CFG 261
+#define	ICBID_SLAVE_GPU_CFG 262
+#define	ICBID_SLAVE_GPU_THROTTLE_CFG 263
+#define	ICBID_SLAVE_MAPSS 264
+#define	ICBID_SLAVE_MDSP_MPU_CFG 265
+#define	ICBID_SLAVE_CAMERA_RT_THROTTLE_CFG 266
 #endif
diff --git a/include/dt-bindings/phy/qcom,usb3-11nm-qmp-combo.h b/include/dt-bindings/phy/qcom,usb3-11nm-qmp-combo.h
new file mode 100644
index 0000000..47a4a78
--- /dev/null
+++ b/include/dt-bindings/phy/qcom,usb3-11nm-qmp-combo.h
@@ -0,0 +1,643 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_PHY_QCOM_11NM_QMP_COMBO_USB_H
+#define _DT_BINDINGS_PHY_QCOM_11NM_QMP_COMBO_USB_H
+
+#define USB3PHY_QSERDES_COM_ATB_SEL1				0x0000
+#define USB3PHY_QSERDES_COM_ATB_SEL2				0x0004
+#define USB3PHY_QSERDES_COM_FREQ_UPDATE				0x0008
+#define USB3PHY_QSERDES_COM_BG_TIMER				0x000C
+#define USB3PHY_QSERDES_COM_SSC_EN_CENTER			0x0010
+#define USB3PHY_QSERDES_COM_SSC_ADJ_PER1			0x0014
+#define USB3PHY_QSERDES_COM_SSC_ADJ_PER2			0x0018
+#define USB3PHY_QSERDES_COM_SSC_PER1				0x001C
+#define USB3PHY_QSERDES_COM_SSC_PER2				0x0020
+#define USB3PHY_QSERDES_COM_SSC_STEP_SIZE1			0x0024
+#define USB3PHY_QSERDES_COM_SSC_STEP_SIZE2			0x0028
+#define USB3PHY_QSERDES_COM_POST_DIV				0x002C
+#define USB3PHY_QSERDES_COM_POST_DIV_MUX			0x0030
+#define USB3PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN			0x0034
+#define USB3PHY_QSERDES_COM_CLK_ENABLE1				0x0038
+#define USB3PHY_QSERDES_COM_SYS_CLK_CTRL			0x003C
+#define USB3PHY_QSERDES_COM_SYSCLK_BUF_ENABLE			0x0040
+#define USB3PHY_QSERDES_COM_PLL_EN				0x0044
+#define USB3PHY_QSERDES_COM_PLL_IVCO				0x0048
+#define USB3PHY_QSERDES_COM_LOCK_CMP1_MODE0			0x004C
+#define USB3PHY_QSERDES_COM_LOCK_CMP2_MODE0			0x0050
+#define USB3PHY_QSERDES_COM_LOCK_CMP3_MODE0			0x0054
+#define USB3PHY_QSERDES_COM_LOCK_CMP1_MODE1			0x0058
+#define USB3PHY_QSERDES_COM_LOCK_CMP2_MODE1			0x005C
+#define USB3PHY_QSERDES_COM_LOCK_CMP3_MODE1			0x0060
+#define USB3PHY_QSERDES_COM_CMN_RSVD0				0x0064
+#define USB3PHY_QSERDES_COM_EP_CLOCK_DETECT_CTRL		0x0068
+#define USB3PHY_QSERDES_COM_SYSCLK_DET_COMP_STATUS		0x006C
+#define USB3PHY_QSERDES_COM_BG_TRIM				0x0070
+#define USB3PHY_QSERDES_COM_CLK_EP_DIV				0x0074
+#define USB3PHY_QSERDES_COM_CP_CTRL_MODE0			0x0078
+#define USB3PHY_QSERDES_COM_CP_CTRL_MODE1			0x007C
+#define USB3PHY_QSERDES_COM_CMN_RSVD1				0x0080
+#define USB3PHY_QSERDES_COM_PLL_RCTRL_MODE0			0x0084
+#define USB3PHY_QSERDES_COM_PLL_RCTRL_MODE1			0x0088
+#define USB3PHY_QSERDES_COM_CMN_RSVD2				0x008C
+#define USB3PHY_QSERDES_COM_PLL_CCTRL_MODE0			0x0090
+#define USB3PHY_QSERDES_COM_PLL_CCTRL_MODE1			0x0094
+#define USB3PHY_QSERDES_COM_CMN_RSVD3				0x0098
+#define USB3PHY_QSERDES_COM_PLL_CNTRL				0x009C
+#define USB3PHY_QSERDES_COM_PHASE_SEL_CTRL			0x00A0
+#define USB3PHY_QSERDES_COM_PHASE_SEL_DC			0x00A4
+#define USB3PHY_QSERDES_COM_BIAS_EN_CTRL_BY_PSM			0x00A8
+#define USB3PHY_QSERDES_COM_SYSCLK_EN_SEL			0x00AC
+#define USB3PHY_QSERDES_COM_CML_SYSCLK_SEL			0x00B0
+#define USB3PHY_QSERDES_COM_RESETSM_CNTRL			0x00B4
+#define USB3PHY_QSERDES_COM_RESETSM_CNTRL2			0x00B8
+#define USB3PHY_QSERDES_COM_RESTRIM_CTRL			0x00BC
+#define USB3PHY_QSERDES_COM_RESTRIM_CTRL2			0x00C0
+#define USB3PHY_QSERDES_COM_RESCODE_DIV_NUM			0x00C4
+#define USB3PHY_QSERDES_COM_LOCK_CMP_EN				0x00C8
+#define USB3PHY_QSERDES_COM_LOCK_CMP_CFG			0x00CC
+#define USB3PHY_QSERDES_COM_DEC_START_MODE0			0x00D0
+#define USB3PHY_QSERDES_COM_DEC_START_MODE1			0x00D4
+#define USB3PHY_QSERDES_COM_VCOCAL_DEADMAN_CTRL			0x00D8
+#define USB3PHY_QSERDES_COM_DIV_FRAC_START1_MODE0		0x00DC
+#define USB3PHY_QSERDES_COM_DIV_FRAC_START2_MODE0		0x00E0
+#define USB3PHY_QSERDES_COM_DIV_FRAC_START3_MODE0		0x00E4
+#define USB3PHY_QSERDES_COM_DIV_FRAC_START1_MODE1		0x00E8
+#define USB3PHY_QSERDES_COM_DIV_FRAC_START2_MODE1		0x00EC
+#define USB3PHY_QSERDES_COM_DIV_FRAC_START3_MODE1		0x00F0
+#define USB3PHY_QSERDES_COM_VCO_TUNE_MINVAL1			0x00F4
+#define USB3PHY_QSERDES_COM_VCO_TUNE_MINVAL2			0x00F8
+#define USB3PHY_QSERDES_COM_CMN_RSVD4				0x00FC
+#define USB3PHY_QSERDES_COM_INTEGLOOP_INITVAL			0x0100
+#define USB3PHY_QSERDES_COM_INTEGLOOP_EN			0x0104
+#define USB3PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0		0x0108
+#define USB3PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0		0x010C
+#define USB3PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE1		0x0110
+#define USB3PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE1		0x0114
+#define USB3PHY_QSERDES_COM_VCO_TUNE_MAXVAL1			0x0118
+#define USB3PHY_QSERDES_COM_VCO_TUNE_MAXVAL2			0x011C
+#define USB3PHY_QSERDES_COM_RES_TRIM_CONTROL2			0x0120
+#define USB3PHY_QSERDES_COM_VCO_TUNE_CTRL			0x0124
+#define USB3PHY_QSERDES_COM_VCO_TUNE_MAP			0x0128
+#define USB3PHY_QSERDES_COM_VCO_TUNE1_MODE0			0x012C
+#define USB3PHY_QSERDES_COM_VCO_TUNE2_MODE0			0x0130
+#define USB3PHY_QSERDES_COM_VCO_TUNE1_MODE1			0x0134
+#define USB3PHY_QSERDES_COM_VCO_TUNE2_MODE1			0x0138
+#define USB3PHY_QSERDES_COM_VCO_TUNE_INITVAL1			0x013C
+#define USB3PHY_QSERDES_COM_VCO_TUNE_INITVAL2			0x0140
+#define USB3PHY_QSERDES_COM_VCO_TUNE_TIMER1			0x0144
+#define USB3PHY_QSERDES_COM_VCO_TUNE_TIMER2			0x0148
+#define USB3PHY_QSERDES_COM_SAR					0x014C
+#define USB3PHY_QSERDES_COM_SAR_CLK				0x0150
+#define USB3PHY_QSERDES_COM_SAR_CODE_OUT_STATUS			0x0154
+#define USB3PHY_QSERDES_COM_SAR_CODE_READY_STATUS		0x0158
+#define USB3PHY_QSERDES_COM_CMN_STATUS				0x015C
+#define USB3PHY_QSERDES_COM_RESET_SM_STATUS			0x0160
+#define USB3PHY_QSERDES_COM_RESTRIM_CODE_STATUS			0x0164
+#define USB3PHY_QSERDES_COM_PLLCAL_CODE1_STATUS			0x0168
+#define USB3PHY_QSERDES_COM_PLLCAL_CODE2_STATUS			0x016C
+#define USB3PHY_QSERDES_COM_BG_CTRL				0x0170
+#define USB3PHY_QSERDES_COM_CLK_SELECT				0x0174
+#define USB3PHY_QSERDES_COM_HSCLK_SEL				0x0178
+#define USB3PHY_QSERDES_COM_INTEGLOOP_BINCODE_STATUS		0x017C
+#define USB3PHY_QSERDES_COM_PLL_ANALOG				0x0180
+#define USB3PHY_QSERDES_COM_CORECLK_DIV				0x0184
+#define USB3PHY_QSERDES_COM_SW_RESET				0x0188
+#define USB3PHY_QSERDES_COM_CORE_CLK_EN				0x018C
+#define USB3PHY_QSERDES_COM_C_READY_STATUS			0x0190
+#define USB3PHY_QSERDES_COM_CMN_CONFIG				0x0194
+#define USB3PHY_QSERDES_COM_CMN_RATE_OVERRIDE			0x0198
+#define USB3PHY_QSERDES_COM_SVS_MODE_CLK_SEL			0x019C
+#define USB3PHY_QSERDES_COM_DEBUG_BUS0				0x01A0
+#define USB3PHY_QSERDES_COM_DEBUG_BUS1				0x01A4
+#define USB3PHY_QSERDES_COM_DEBUG_BUS2				0x01A8
+#define USB3PHY_QSERDES_COM_DEBUG_BUS3				0x01AC
+#define USB3PHY_QSERDES_COM_DEBUG_BUS_SEL			0x01B0
+#define USB3PHY_QSERDES_COM_CMN_MISC1				0x01B4
+#define USB3PHY_QSERDES_COM_CMN_MISC2				0x01B8
+#define USB3PHY_QSERDES_COM_CORECLK_DIV_MODE1			0x01BC
+#define USB3PHY_QSERDES_COM_CMN_RSVD5				0x01C0
+#define USB3PHY_QSERDES_TXA_BIST_MODE_LANENO			0x0200
+#define USB3PHY_QSERDES_TXA_BIST_INVERT				0x0204
+#define USB3PHY_QSERDES_TXA_CLKBUF_ENABLE			0x0208
+#define USB3PHY_QSERDES_TXA_TX_EMP_POST1_LVL			0x020C
+#define USB3PHY_QSERDES_TXA_TX_POST2_EMPH			0x0210
+#define USB3PHY_QSERDES_TXA_TX_BOOST_LVL_UP_DN			0x0214
+#define USB3PHY_QSERDES_TXA_TX_IDLE_LVL_LARGE_AMP		0x0218
+#define USB3PHY_QSERDES_TXA_TX_DRV_LVL				0x021C
+#define USB3PHY_QSERDES_TXA_TX_DRV_LVL_OFFSET			0x0220
+#define USB3PHY_QSERDES_TXA_RESET_TSYNC_EN			0x0224
+#define USB3PHY_QSERDES_TXA_PRE_STALL_LDO_BOOST_EN		0x0228
+#define USB3PHY_QSERDES_TXA_TX_BAND				0x022C
+#define USB3PHY_QSERDES_TXA_SLEW_CNTL				0x0230
+#define USB3PHY_QSERDES_TXA_INTERFACE_SELECT			0x0234
+#define USB3PHY_QSERDES_TXA_LPB_EN				0x0238
+#define USB3PHY_QSERDES_TXA_RES_CODE_LANE_TX			0x023C
+#define USB3PHY_QSERDES_TXA_RES_CODE_LANE_RX			0x0240
+#define USB3PHY_QSERDES_TXA_RES_CODE_LANE_OFFSET_TX		0x0244
+#define USB3PHY_QSERDES_TXA_RES_CODE_LANE_OFFSET_RX		0x0248
+#define USB3PHY_QSERDES_TXA_PERL_LENGTH1			0x024C
+#define USB3PHY_QSERDES_TXA_PERL_LENGTH2			0x0250
+#define USB3PHY_QSERDES_TXA_SERDES_BYP_EN_OUT			0x0254
+#define USB3PHY_QSERDES_TXA_DEBUG_BUS_SEL			0x0258
+#define USB3PHY_QSERDES_TXA_TRANSCEIVER_BIAS_EN			0x025C
+#define USB3PHY_QSERDES_TXA_HIGHZ_DRVR_EN			0x0260
+#define USB3PHY_QSERDES_TXA_TX_POL_INV				0x0264
+#define USB3PHY_QSERDES_TXA_PARRATE_REC_DETECT_IDLE_EN		0x0268
+#define USB3PHY_QSERDES_TXA_BIST_PATTERN1			0x026C
+#define USB3PHY_QSERDES_TXA_BIST_PATTERN2			0x0270
+#define USB3PHY_QSERDES_TXA_BIST_PATTERN3			0x0274
+#define USB3PHY_QSERDES_TXA_BIST_PATTERN4			0x0278
+#define USB3PHY_QSERDES_TXA_BIST_PATTERN5			0x027C
+#define USB3PHY_QSERDES_TXA_BIST_PATTERN6			0x0280
+#define USB3PHY_QSERDES_TXA_BIST_PATTERN7			0x0284
+#define USB3PHY_QSERDES_TXA_BIST_PATTERN8			0x0288
+#define USB3PHY_QSERDES_TXA_LANE_MODE_1				0x028C
+#define USB3PHY_QSERDES_TXA_LANE_MODE_2				0x0290
+#define USB3PHY_QSERDES_TXA_LANE_MODE_3				0x0294
+#define USB3PHY_QSERDES_TXA_ATB_SEL1				0x0298
+#define USB3PHY_QSERDES_TXA_ATB_SEL2				0x029C
+#define USB3PHY_QSERDES_TXA_RCV_DETECT_LVL			0x02A0
+#define USB3PHY_QSERDES_TXA_RCV_DETECT_LVL_2			0x02A4
+#define USB3PHY_QSERDES_TXA_PRBS_SEED1				0x02A8
+#define USB3PHY_QSERDES_TXA_PRBS_SEED2				0x02AC
+#define USB3PHY_QSERDES_TXA_PRBS_SEED3				0x02B0
+#define USB3PHY_QSERDES_TXA_PRBS_SEED4				0x02B4
+#define USB3PHY_QSERDES_TXA_RESET_GEN				0x02B8
+#define USB3PHY_QSERDES_TXA_RESET_GEN_MUXES			0x02BC
+#define USB3PHY_QSERDES_TXA_TRAN_DRVR_EMP_EN			0x02C0
+#define USB3PHY_QSERDES_TXA_TX_INTERFACE_MODE			0x02C4
+#define USB3PHY_QSERDES_TXA_PWM_CTRL				0x02C8
+#define USB3PHY_QSERDES_TXA_PWM_ENCODED_OR_DATA			0x02CC
+#define USB3PHY_QSERDES_TXA_PWM_GEAR_1_DIVIDER_BAND2		0x02D0
+#define USB3PHY_QSERDES_TXA_PWM_GEAR_2_DIVIDER_BAND2		0x02D4
+#define USB3PHY_QSERDES_TXA_PWM_GEAR_3_DIVIDER_BAND2		0x02D8
+#define USB3PHY_QSERDES_TXA_PWM_GEAR_4_DIVIDER_BAND2		0x02DC
+#define USB3PHY_QSERDES_TXA_PWM_GEAR_1_DIVIDER_BAND0_1		0x02E0
+#define USB3PHY_QSERDES_TXA_PWM_GEAR_2_DIVIDER_BAND0_1		0x02E4
+#define USB3PHY_QSERDES_TXA_PWM_GEAR_3_DIVIDER_BAND0_1		0x02E8
+#define USB3PHY_QSERDES_TXA_PWM_GEAR_4_DIVIDER_BAND0_1		0x02EC
+#define USB3PHY_QSERDES_TXA_VMODE_CTRL1				0x02F0
+#define USB3PHY_QSERDES_TXA_ALOG_OBSV_BUS_CTRL_1		0x02F4
+#define USB3PHY_QSERDES_TXA_BIST_STATUS				0x02F8
+#define USB3PHY_QSERDES_TXA_BIST_ERROR_COUNT1			0x02FC
+#define USB3PHY_QSERDES_TXA_BIST_ERROR_COUNT2			0x0300
+#define USB3PHY_QSERDES_TXA_ALOG_OBSV_BUS_STATUS_1		0x0304
+#define USB3PHY_QSERDES_TXA_DIG_BKUP_CTRL			0x0308
+#define USB3PHY_QSERDES_RXA_UCDR_FO_GAIN_HALF			0x0400
+#define USB3PHY_QSERDES_RXA_UCDR_FO_GAIN_QUARTER		0x0404
+#define USB3PHY_QSERDES_RXA_UCDR_FO_GAIN			0x0408
+#define USB3PHY_QSERDES_RXA_UCDR_SO_GAIN_HALF			0x040C
+#define USB3PHY_QSERDES_RXA_UCDR_SO_GAIN_QUARTER		0x0410
+#define USB3PHY_QSERDES_RXA_UCDR_SO_GAIN			0x0414
+#define USB3PHY_QSERDES_RXA_UCDR_SVS_FO_GAIN_HALF		0x0418
+#define USB3PHY_QSERDES_RXA_UCDR_SVS_FO_GAIN_QUARTER		0x041C
+#define USB3PHY_QSERDES_RXA_UCDR_SVS_FO_GAIN			0x0420
+#define USB3PHY_QSERDES_RXA_UCDR_SVS_SO_GAIN_HALF		0x0424
+#define USB3PHY_QSERDES_RXA_UCDR_SVS_SO_GAIN_QUARTER		0x0428
+#define USB3PHY_QSERDES_RXA_UCDR_SVS_SO_GAIN			0x042C
+#define USB3PHY_QSERDES_RXA_UCDR_FASTLOCK_FO_GAIN		0x0430
+#define USB3PHY_QSERDES_RXA_UCDR_SO_SATURATION_AND_ENABLE	0x0434
+#define USB3PHY_QSERDES_RXA_UCDR_FO_TO_SO_DELAY			0x0438
+#define USB3PHY_QSERDES_RXA_UCDR_FASTLOCK_COUNT_LOW		0x043C
+#define USB3PHY_QSERDES_RXA_UCDR_FASTLOCK_COUNT_HIGH		0x0440
+#define USB3PHY_QSERDES_RXA_UCDR_PI_CONTROLS			0x0444
+#define USB3PHY_QSERDES_RXA_UCDR_SB2_THRESH1			0x0448
+#define USB3PHY_QSERDES_RXA_UCDR_SB2_THRESH2			0x044C
+#define USB3PHY_QSERDES_RXA_UCDR_SB2_GAIN1			0x0450
+#define USB3PHY_QSERDES_RXA_UCDR_SB2_GAIN2			0x0454
+#define USB3PHY_QSERDES_RXA_AUX_CONTROL				0x0458
+#define USB3PHY_QSERDES_RXA_AUX_DATA_TCOARSE_TFINE		0x045C
+#define USB3PHY_QSERDES_RXA_RCLK_AUXDATA_SEL			0x0460
+#define USB3PHY_QSERDES_RXA_AC_JTAG_ENABLE			0x0464
+#define USB3PHY_QSERDES_RXA_AC_JTAG_INITP			0x0468
+#define USB3PHY_QSERDES_RXA_AC_JTAG_INITN			0x046C
+#define USB3PHY_QSERDES_RXA_AC_JTAG_LVL				0x0470
+#define USB3PHY_QSERDES_RXA_AC_JTAG_MODE			0x0474
+#define USB3PHY_QSERDES_RXA_AC_JTAG_RESET			0x0478
+#define USB3PHY_QSERDES_RXA_RX_TERM_BW				0x047C
+#define USB3PHY_QSERDES_RXA_RX_RCVR_IQ_EN			0x0480
+#define USB3PHY_QSERDES_RXA_RX_IDAC_I_DC_OFFSETS		0x0484
+#define USB3PHY_QSERDES_RXA_RX_IDAC_IBAR_DC_OFFSETS		0x0488
+#define USB3PHY_QSERDES_RXA_RX_IDAC_Q_DC_OFFSETS		0x048C
+#define USB3PHY_QSERDES_RXA_RX_IDAC_QBAR_DC_OFFSETS		0x0490
+#define USB3PHY_QSERDES_RXA_RX_IDAC_A_DC_OFFSETS		0x0494
+#define USB3PHY_QSERDES_RXA_RX_IDAC_ABAR_DC_OFFSETS		0x0498
+#define USB3PHY_QSERDES_RXA_RX_IDAC_EN				0x049C
+#define USB3PHY_QSERDES_RXA_RX_IDAC_ENABLES			0x04A0
+#define USB3PHY_QSERDES_RXA_RX_IDAC_SIGN			0x04A4
+#define USB3PHY_QSERDES_RXA_RX_HIGHZ_HIGHRATE			0x04A8
+#define USB3PHY_QSERDES_RXA_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET	0x04AC
+#define USB3PHY_QSERDES_RXA_DFE_1				0x04B0
+#define USB3PHY_QSERDES_RXA_DFE_2				0x04B4
+#define USB3PHY_QSERDES_RXA_DFE_3				0x04B8
+#define USB3PHY_QSERDES_RXA_VGA_CAL_CNTRL1			0x04BC
+#define USB3PHY_QSERDES_RXA_VGA_CAL_CNTRL2			0x04C0
+#define USB3PHY_QSERDES_RXA_GM_CAL				0x04C4
+#define USB3PHY_QSERDES_RXA_RX_EQ_GAIN2_LSB			0x04C8
+#define USB3PHY_QSERDES_RXA_RX_EQ_GAIN2_MSB			0x04CC
+#define USB3PHY_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL1		0x04D0
+#define USB3PHY_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL2		0x04D4
+#define USB3PHY_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL3		0x04D8
+#define USB3PHY_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL4		0x04DC
+#define USB3PHY_QSERDES_RXA_RX_IDAC_TSETTLE_LOW			0x04E0
+#define USB3PHY_QSERDES_RXA_RX_IDAC_TSETTLE_HIGH		0x04E4
+#define USB3PHY_QSERDES_RXA_RX_IDAC_MEASURE_TIME		0x04E8
+#define USB3PHY_QSERDES_RXA_RX_IDAC_ACCUMULATOR			0x04EC
+#define USB3PHY_QSERDES_RXA_RX_EQ_OFFSET_LSB			0x04F0
+#define USB3PHY_QSERDES_RXA_RX_EQ_OFFSET_MSB			0x04F4
+#define USB3PHY_QSERDES_RXA_RX_EQ_OFFSET_ADAPTOR_CNTRL1		0x04F8
+#define USB3PHY_QSERDES_RXA_RX_OFFSET_ADAPTOR_CNTRL2		0x04FC
+#define USB3PHY_QSERDES_RXA_SIGDET_ENABLES			0x0500
+#define USB3PHY_QSERDES_RXA_SIGDET_CNTRL			0x0504
+#define USB3PHY_QSERDES_RXA_SIGDET_LVL				0x0508
+#define USB3PHY_QSERDES_RXA_SIGDET_DEGLITCH_CNTRL		0x050C
+#define USB3PHY_QSERDES_RXA_RX_BAND				0x0510
+#define USB3PHY_QSERDES_RXA_CDR_FREEZE_UP_DN			0x0514
+#define USB3PHY_QSERDES_RXA_CDR_RESET_OVERRIDE			0x0518
+#define USB3PHY_QSERDES_RXA_RX_INTERFACE_MODE			0x051C
+#define USB3PHY_QSERDES_RXA_JITTER_GEN_MODE			0x0520
+#define USB3PHY_QSERDES_RXA_BUJ_AMP				0x0524
+#define USB3PHY_QSERDES_RXA_SJ_AMP1				0x0528
+#define USB3PHY_QSERDES_RXA_SJ_AMP2				0x052C
+#define USB3PHY_QSERDES_RXA_SJ_PER1				0x0530
+#define USB3PHY_QSERDES_RXA_SJ_PER2				0x0534
+#define USB3PHY_QSERDES_RXA_BUJ_STEP_FREQ1			0x0538
+#define USB3PHY_QSERDES_RXA_BUJ_STEP_FREQ2			0x053C
+#define USB3PHY_QSERDES_RXA_PPM_OFFSET1				0x0540
+#define USB3PHY_QSERDES_RXA_PPM_OFFSET2				0x0544
+#define USB3PHY_QSERDES_RXA_SIGN_PPM_PERIOD1			0x0548
+#define USB3PHY_QSERDES_RXA_SIGN_PPM_PERIOD2			0x054C
+#define USB3PHY_QSERDES_RXA_RX_PWM_ENABLE_AND_DATA		0x0550
+#define USB3PHY_QSERDES_RXA_RX_PWM_GEAR1_TIMEOUT_COUNT		0x0554
+#define USB3PHY_QSERDES_RXA_RX_PWM_GEAR2_TIMEOUT_COUNT		0x0558
+#define USB3PHY_QSERDES_RXA_RX_PWM_GEAR3_TIMEOUT_COUNT		0x055C
+#define USB3PHY_QSERDES_RXA_RX_PWM_GEAR4_TIMEOUT_COUNT		0x0560
+#define USB3PHY_QSERDES_RXA_RX_MODE_00				0x0564
+#define USB3PHY_QSERDES_RXA_RX_MODE_01				0x0568
+#define USB3PHY_QSERDES_RXA_RX_MODE_10				0x056C
+#define USB3PHY_QSERDES_RXA_ALOG_OBSV_BUS_CTRL_1		0x0570
+#define USB3PHY_QSERDES_RXA_PI_CTRL1				0x0574
+#define USB3PHY_QSERDES_RXA_PI_CTRL2				0x0578
+#define USB3PHY_QSERDES_RXA_PI_QUAD				0x057C
+#define USB3PHY_QSERDES_RXA_IDATA1				0x0580
+#define USB3PHY_QSERDES_RXA_IDATA2				0x0584
+#define USB3PHY_QSERDES_RXA_AUX_DATA1				0x0588
+#define USB3PHY_QSERDES_RXA_AUX_DATA2				0x058C
+#define USB3PHY_QSERDES_RXA_AC_JTAG_OUTP			0x0590
+#define USB3PHY_QSERDES_RXA_AC_JTAG_OUTN			0x0594
+#define USB3PHY_QSERDES_RXA_RX_SIGDET				0x0598
+#define USB3PHY_QSERDES_RXA_IDAC_STATUS_I			0x059C
+#define USB3PHY_QSERDES_RXA_IDAC_STATUS_IBAR			0x05A0
+#define USB3PHY_QSERDES_RXA_IDAC_STATUS_Q			0x05A4
+#define USB3PHY_QSERDES_RXA_IDAC_STATUS_QBAR			0x05A8
+#define USB3PHY_QSERDES_RXA_IDAC_STATUS_A			0x05AC
+#define USB3PHY_QSERDES_RXA_IDAC_STATUS_ABAR			0x05B0
+#define USB3PHY_QSERDES_RXA_IDAC_STATUS_SM_ON			0x05B4
+#define USB3PHY_QSERDES_RXA_IDAC_STATUS_CAL_DONE		0x05B8
+#define USB3PHY_QSERDES_RXA_IDAC_STATUS_SIGNERROR		0x05BC
+#define USB3PHY_QSERDES_RXA_READ_EQCODE				0x05C0
+#define USB3PHY_QSERDES_RXA_READ_OFFSETCODE			0x05C4
+#define USB3PHY_QSERDES_RXA_IA_ERROR_COUNTER_LOW		0x05C8
+#define USB3PHY_QSERDES_RXA_IA_ERROR_COUNTER_HIGH		0x05CC
+#define USB3PHY_QSERDES_RXA_VGA_READ_CODE			0x05D0
+#define USB3PHY_QSERDES_RXA_DFE_TAP1_READ_CODE			0x05D4
+#define USB3PHY_QSERDES_RXA_DFE_TAP2_READ_CODE			0x05D8
+#define USB3PHY_QSERDES_RXA_ALOG_OBSV_BUS_STATUS_1		0x05DC
+#define USB3PHY_QSERDES_TXB_BIST_MODE_LANENO			0x0600
+#define USB3PHY_QSERDES_TXB_BIST_INVERT				0x0604
+#define USB3PHY_QSERDES_TXB_CLKBUF_ENABLE			0x0608
+#define USB3PHY_QSERDES_TXB_TX_EMP_POST1_LVL			0x060C
+#define USB3PHY_QSERDES_TXB_TX_POST2_EMPH			0x0610
+#define USB3PHY_QSERDES_TXB_TX_BOOST_LVL_UP_DN			0x0614
+#define USB3PHY_QSERDES_TXB_TX_IDLE_LVL_LARGE_AMP		0x0618
+#define USB3PHY_QSERDES_TXB_TX_DRV_LVL				0x061C
+#define USB3PHY_QSERDES_TXB_TX_DRV_LVL_OFFSET			0x0620
+#define USB3PHY_QSERDES_TXB_RESET_TSYNC_EN			0x0624
+#define USB3PHY_QSERDES_TXB_PRE_STALL_LDO_BOOST_EN		0x0628
+#define USB3PHY_QSERDES_TXB_TX_BAND				0x062C
+#define USB3PHY_QSERDES_TXB_SLEW_CNTL				0x0630
+#define USB3PHY_QSERDES_TXB_INTERFACE_SELECT			0x0634
+#define USB3PHY_QSERDES_TXB_LPB_EN				0x0638
+#define USB3PHY_QSERDES_TXB_RES_CODE_LANE_TX			0x063C
+#define USB3PHY_QSERDES_TXB_RES_CODE_LANE_RX			0x0640
+#define USB3PHY_QSERDES_TXB_RES_CODE_LANE_OFFSET_TX		0x0644
+#define USB3PHY_QSERDES_TXB_RES_CODE_LANE_OFFSET_RX		0x0648
+#define USB3PHY_QSERDES_TXB_PERL_LENGTH1			0x064C
+#define USB3PHY_QSERDES_TXB_PERL_LENGTH2			0x0650
+#define USB3PHY_QSERDES_TXB_SERDES_BYP_EN_OUT			0x0654
+#define USB3PHY_QSERDES_TXB_DEBUG_BUS_SEL			0x0658
+#define USB3PHY_QSERDES_TXB_TRANSCEIVER_BIAS_EN			0x065C
+#define USB3PHY_QSERDES_TXB_HIGHZ_DRVR_EN			0x0660
+#define USB3PHY_QSERDES_TXB_TX_POL_INV				0x0664
+#define USB3PHY_QSERDES_TXB_PARRATE_REC_DETECT_IDLE_EN		0x0668
+#define USB3PHY_QSERDES_TXB_BIST_PATTERN1			0x066C
+#define USB3PHY_QSERDES_TXB_BIST_PATTERN2			0x0670
+#define USB3PHY_QSERDES_TXB_BIST_PATTERN3			0x0674
+#define USB3PHY_QSERDES_TXB_BIST_PATTERN4			0x0678
+#define USB3PHY_QSERDES_TXB_BIST_PATTERN5			0x067C
+#define USB3PHY_QSERDES_TXB_BIST_PATTERN6			0x0680
+#define USB3PHY_QSERDES_TXB_BIST_PATTERN7			0x0684
+#define USB3PHY_QSERDES_TXB_BIST_PATTERN8			0x0688
+#define USB3PHY_QSERDES_TXB_LANE_MODE_1				0x068C
+#define USB3PHY_QSERDES_TXB_LANE_MODE_2				0x0690
+#define USB3PHY_QSERDES_TXB_LANE_MODE_3				0x0694
+#define USB3PHY_QSERDES_TXB_ATB_SEL1				0x0698
+#define USB3PHY_QSERDES_TXB_ATB_SEL2				0x069C
+#define USB3PHY_QSERDES_TXB_RCV_DETECT_LVL			0x06A0
+#define USB3PHY_QSERDES_TXB_RCV_DETECT_LVL_2			0x06A4
+#define USB3PHY_QSERDES_TXB_PRBS_SEED1				0x06A8
+#define USB3PHY_QSERDES_TXB_PRBS_SEED2				0x06AC
+#define USB3PHY_QSERDES_TXB_PRBS_SEED3				0x06B0
+#define USB3PHY_QSERDES_TXB_PRBS_SEED4				0x06B4
+#define USB3PHY_QSERDES_TXB_RESET_GEN				0x06B8
+#define USB3PHY_QSERDES_TXB_RESET_GEN_MUXES			0x06BC
+#define USB3PHY_QSERDES_TXB_TRAN_DRVR_EMP_EN			0x06C0
+#define USB3PHY_QSERDES_TXB_TX_INTERFACE_MODE			0x06C4
+#define USB3PHY_QSERDES_TXB_PWM_CTRL				0x06C8
+#define USB3PHY_QSERDES_TXB_PWM_ENCODED_OR_DATA			0x06CC
+#define USB3PHY_QSERDES_TXB_PWM_GEAR_1_DIVIDER_BAND2		0x06D0
+#define USB3PHY_QSERDES_TXB_PWM_GEAR_2_DIVIDER_BAND2		0x06D4
+#define USB3PHY_QSERDES_TXB_PWM_GEAR_3_DIVIDER_BAND2		0x06D8
+#define USB3PHY_QSERDES_TXB_PWM_GEAR_4_DIVIDER_BAND2		0x06DC
+#define USB3PHY_QSERDES_TXB_PWM_GEAR_1_DIVIDER_BAND0_1		0x06E0
+#define USB3PHY_QSERDES_TXB_PWM_GEAR_2_DIVIDER_BAND0_1		0x06E4
+#define USB3PHY_QSERDES_TXB_PWM_GEAR_3_DIVIDER_BAND0_1		0x06E8
+#define USB3PHY_QSERDES_TXB_PWM_GEAR_4_DIVIDER_BAND0_1		0x06EC
+#define USB3PHY_QSERDES_TXB_VMODE_CTRL1				0x06F0
+#define USB3PHY_QSERDES_TXB_ALOG_OBSV_BUS_CTRL_1		0x06F4
+#define USB3PHY_QSERDES_TXB_BIST_STATUS				0x06F8
+#define USB3PHY_QSERDES_TXB_BIST_ERROR_COUNT1			0x06FC
+#define USB3PHY_QSERDES_TXB_BIST_ERROR_COUNT2			0x0700
+#define USB3PHY_QSERDES_TXB_ALOG_OBSV_BUS_STATUS_1		0x0704
+#define USB3PHY_QSERDES_TXB_DIG_BKUP_CTRL			0x0708
+#define USB3PHY_QSERDES_RXB_UCDR_FO_GAIN_HALF			0x0800
+#define USB3PHY_QSERDES_RXB_UCDR_FO_GAIN_QUARTER		0x0804
+#define USB3PHY_QSERDES_RXB_UCDR_FO_GAIN			0x0808
+#define USB3PHY_QSERDES_RXB_UCDR_SO_GAIN_HALF			0x080C
+#define USB3PHY_QSERDES_RXB_UCDR_SO_GAIN_QUARTER		0x0810
+#define USB3PHY_QSERDES_RXB_UCDR_SO_GAIN			0x0814
+#define USB3PHY_QSERDES_RXB_UCDR_SVS_FO_GAIN_HALF		0x0818
+#define USB3PHY_QSERDES_RXB_UCDR_SVS_FO_GAIN_QUARTER		0x081C
+#define USB3PHY_QSERDES_RXB_UCDR_SVS_FO_GAIN			0x0820
+#define USB3PHY_QSERDES_RXB_UCDR_SVS_SO_GAIN_HALF		0x0824
+#define USB3PHY_QSERDES_RXB_UCDR_SVS_SO_GAIN_QUARTER		0x0828
+#define USB3PHY_QSERDES_RXB_UCDR_SVS_SO_GAIN			0x082C
+#define USB3PHY_QSERDES_RXB_UCDR_FASTLOCK_FO_GAIN		0x0830
+#define USB3PHY_QSERDES_RXB_UCDR_SO_SATURATION_AND_ENABLE	0x0834
+#define USB3PHY_QSERDES_RXB_UCDR_FO_TO_SO_DELAY			0x0838
+#define USB3PHY_QSERDES_RXB_UCDR_FASTLOCK_COUNT_LOW		0x083C
+#define USB3PHY_QSERDES_RXB_UCDR_FASTLOCK_COUNT_HIGH		0x0840
+#define USB3PHY_QSERDES_RXB_UCDR_PI_CONTROLS			0x0844
+#define USB3PHY_QSERDES_RXB_UCDR_SB2_THRESH1			0x0848
+#define USB3PHY_QSERDES_RXB_UCDR_SB2_THRESH2			0x084C
+#define USB3PHY_QSERDES_RXB_UCDR_SB2_GAIN1			0x0850
+#define USB3PHY_QSERDES_RXB_UCDR_SB2_GAIN2			0x0854
+#define USB3PHY_QSERDES_RXB_AUX_CONTROL				0x0858
+#define USB3PHY_QSERDES_RXB_AUX_DATA_TCOARSE_TFINE		0x085C
+#define USB3PHY_QSERDES_RXB_RCLK_AUXDATA_SEL			0x0860
+#define USB3PHY_QSERDES_RXB_AC_JTAG_ENABLE			0x0864
+#define USB3PHY_QSERDES_RXB_AC_JTAG_INITP			0x0868
+#define USB3PHY_QSERDES_RXB_AC_JTAG_INITN			0x086C
+#define USB3PHY_QSERDES_RXB_AC_JTAG_LVL				0x0870
+#define USB3PHY_QSERDES_RXB_AC_JTAG_MODE			0x0874
+#define USB3PHY_QSERDES_RXB_AC_JTAG_RESET			0x0878
+#define USB3PHY_QSERDES_RXB_RX_TERM_BW				0x087C
+#define USB3PHY_QSERDES_RXB_RX_RCVR_IQ_EN			0x0880
+#define USB3PHY_QSERDES_RXB_RX_IDAC_I_DC_OFFSETS		0x0884
+#define USB3PHY_QSERDES_RXB_RX_IDAC_IBAR_DC_OFFSETS		0x0888
+#define USB3PHY_QSERDES_RXB_RX_IDAC_Q_DC_OFFSETS		0x088C
+#define USB3PHY_QSERDES_RXB_RX_IDAC_QBAR_DC_OFFSETS		0x0890
+#define USB3PHY_QSERDES_RXB_RX_IDAC_A_DC_OFFSETS		0x0894
+#define USB3PHY_QSERDES_RXB_RX_IDAC_ABAR_DC_OFFSETS		0x0898
+#define USB3PHY_QSERDES_RXB_RX_IDAC_EN				0x089C
+#define USB3PHY_QSERDES_RXB_RX_IDAC_ENABLES			0x08A0
+#define USB3PHY_QSERDES_RXB_RX_IDAC_SIGN			0x08A4
+#define USB3PHY_QSERDES_RXB_RX_HIGHZ_HIGHRATE			0x08A8
+#define USB3PHY_QSERDES_RXB_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET	0x08AC
+#define USB3PHY_QSERDES_RXB_DFE_1				0x08B0
+#define USB3PHY_QSERDES_RXB_DFE_2				0x08B4
+#define USB3PHY_QSERDES_RXB_DFE_3				0x08B8
+#define USB3PHY_QSERDES_RXB_VGA_CAL_CNTRL1			0x08BC
+#define USB3PHY_QSERDES_RXB_VGA_CAL_CNTRL2			0x08C0
+#define USB3PHY_QSERDES_RXB_GM_CAL				0x08C4
+#define USB3PHY_QSERDES_RXB_RX_EQ_GAIN2_LSB			0x08C8
+#define USB3PHY_QSERDES_RXB_RX_EQ_GAIN2_MSB			0x08CC
+#define USB3PHY_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL1		0x08D0
+#define USB3PHY_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL2		0x08D4
+#define USB3PHY_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL3		0x08D8
+#define USB3PHY_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL4		0x08DC
+#define USB3PHY_QSERDES_RXB_RX_IDAC_TSETTLE_LOW			0x08E0
+#define USB3PHY_QSERDES_RXB_RX_IDAC_TSETTLE_HIGH		0x08E4
+#define USB3PHY_QSERDES_RXB_RX_IDAC_MEASURE_TIME		0x08E8
+#define USB3PHY_QSERDES_RXB_RX_IDAC_ACCUMULATOR			0x08EC
+#define USB3PHY_QSERDES_RXB_RX_EQ_OFFSET_LSB			0x08F0
+#define USB3PHY_QSERDES_RXB_RX_EQ_OFFSET_MSB			0x08F4
+#define USB3PHY_QSERDES_RXB_RX_EQ_OFFSET_ADAPTOR_CNTRL1		0x08F8
+#define USB3PHY_QSERDES_RXB_RX_OFFSET_ADAPTOR_CNTRL2		0x08FC
+#define USB3PHY_QSERDES_RXB_SIGDET_ENABLES			0x0900
+#define USB3PHY_QSERDES_RXB_SIGDET_CNTRL			0x0904
+#define USB3PHY_QSERDES_RXB_SIGDET_LVL				0x0908
+#define USB3PHY_QSERDES_RXB_SIGDET_DEGLITCH_CNTRL		0x090C
+#define USB3PHY_QSERDES_RXB_RX_BAND				0x0910
+#define USB3PHY_QSERDES_RXB_CDR_FREEZE_UP_DN			0x0914
+#define USB3PHY_QSERDES_RXB_CDR_RESET_OVERRIDE			0x0918
+#define USB3PHY_QSERDES_RXB_RX_INTERFACE_MODE			0x091C
+#define USB3PHY_QSERDES_RXB_JITTER_GEN_MODE			0x0920
+#define USB3PHY_QSERDES_RXB_BUJ_AMP				0x0924
+#define USB3PHY_QSERDES_RXB_SJ_AMP1				0x0928
+#define USB3PHY_QSERDES_RXB_SJ_AMP2				0x092C
+#define USB3PHY_QSERDES_RXB_SJ_PER1				0x0930
+#define USB3PHY_QSERDES_RXB_SJ_PER2				0x0934
+#define USB3PHY_QSERDES_RXB_BUJ_STEP_FREQ1			0x0938
+#define USB3PHY_QSERDES_RXB_BUJ_STEP_FREQ2			0x093C
+#define USB3PHY_QSERDES_RXB_PPM_OFFSET1				0x0940
+#define USB3PHY_QSERDES_RXB_PPM_OFFSET2				0x0944
+#define USB3PHY_QSERDES_RXB_SIGN_PPM_PERIOD1			0x0948
+#define USB3PHY_QSERDES_RXB_SIGN_PPM_PERIOD2			0x094C
+#define USB3PHY_QSERDES_RXB_RX_PWM_ENABLE_AND_DATA		0x0950
+#define USB3PHY_QSERDES_RXB_RX_PWM_GEAR1_TIMEOUT_COUNT		0x0954
+#define USB3PHY_QSERDES_RXB_RX_PWM_GEAR2_TIMEOUT_COUNT		0x0958
+#define USB3PHY_QSERDES_RXB_RX_PWM_GEAR3_TIMEOUT_COUNT		0x095C
+#define USB3PHY_QSERDES_RXB_RX_PWM_GEAR4_TIMEOUT_COUNT		0x0960
+#define USB3PHY_QSERDES_RXB_RX_MODE_00				0x0964
+#define USB3PHY_QSERDES_RXB_RX_MODE_01				0x0968
+#define USB3PHY_QSERDES_RXB_RX_MODE_10				0x096C
+#define USB3PHY_QSERDES_RXB_ALOG_OBSV_BUS_CTRL_1		0x0970
+#define USB3PHY_QSERDES_RXB_PI_CTRL1				0x0974
+#define USB3PHY_QSERDES_RXB_PI_CTRL2				0x0978
+#define USB3PHY_QSERDES_RXB_PI_QUAD				0x097C
+#define USB3PHY_QSERDES_RXB_IDATA1				0x0980
+#define USB3PHY_QSERDES_RXB_IDATA2				0x0984
+#define USB3PHY_QSERDES_RXB_AUX_DATA1				0x0988
+#define USB3PHY_QSERDES_RXB_AUX_DATA2				0x098C
+#define USB3PHY_QSERDES_RXB_AC_JTAG_OUTP			0x0990
+#define USB3PHY_QSERDES_RXB_AC_JTAG_OUTN			0x0994
+#define USB3PHY_QSERDES_RXB_RX_SIGDET				0x0998
+#define USB3PHY_QSERDES_RXB_IDAC_STATUS_I			0x099C
+#define USB3PHY_QSERDES_RXB_IDAC_STATUS_IBAR			0x09A0
+#define USB3PHY_QSERDES_RXB_IDAC_STATUS_Q			0x09A4
+#define USB3PHY_QSERDES_RXB_IDAC_STATUS_QBAR			0x09A8
+#define USB3PHY_QSERDES_RXB_IDAC_STATUS_A			0x09AC
+#define USB3PHY_QSERDES_RXB_IDAC_STATUS_ABAR			0x09B0
+#define USB3PHY_QSERDES_RXB_IDAC_STATUS_SM_ON			0x09B4
+#define USB3PHY_QSERDES_RXB_IDAC_STATUS_CAL_DONE		0x09B8
+#define USB3PHY_QSERDES_RXB_IDAC_STATUS_SIGNERROR		0x09BC
+#define USB3PHY_QSERDES_RXB_READ_EQCODE				0x09C0
+#define USB3PHY_QSERDES_RXB_READ_OFFSETCODE			0x09C4
+#define USB3PHY_QSERDES_RXB_IA_ERROR_COUNTER_LOW		0x09C8
+#define USB3PHY_QSERDES_RXB_IA_ERROR_COUNTER_HIGH		0x09CC
+#define USB3PHY_QSERDES_RXB_VGA_READ_CODE			0x09D0
+#define USB3PHY_QSERDES_RXB_DFE_TAP1_READ_CODE			0x09D4
+#define USB3PHY_QSERDES_RXB_DFE_TAP2_READ_CODE			0x09D8
+#define USB3PHY_QSERDES_RXB_ALOG_OBSV_BUS_STATUS_1		0x09DC
+#define USB3PHY_PCS_MISC_TYPEC_CTRL				0x0A00
+#define USB3PHY_PCS_MISC_TYPEC_STATUS				0x0A04
+#define USB3PHY_PCS_MISC_DEBUG_BUS_BYTE0_INDEX			0x0A08
+#define USB3PHY_PCS_MISC_DEBUG_BUS_BYTE1_INDEX			0x0A0C
+#define USB3PHY_PCS_MISC_DEBUG_BUS_BYTE2_INDEX			0x0A10
+#define USB3PHY_PCS_MISC_DEBUG_BUS_BYTE3_INDEX			0x0A14
+#define USB3PHY_PCS_MISC_PLACEHOLDER_STATUS			0x0A18
+#define USB3PHY_PCS_MISC_DEBUG_BUS_0_STATUS			0x0A1C
+#define USB3PHY_PCS_MISC_DEBUG_BUS_1_STATUS			0x0A20
+#define USB3PHY_PCS_MISC_DEBUG_BUS_2_STATUS			0x0A24
+#define USB3PHY_PCS_MISC_DEBUG_BUS_3_STATUS			0x0A28
+#define USB3PHY_PCS_MISC_BIST_CTRL				0x0A2C
+#define USB3PHY_PCS_SW_RESET					0x0C00
+#define USB3PHY_PCS_POWER_DOWN_CONTROL				0x0C04
+#define USB3PHY_PCS_START_CONTROL				0x0C08
+#define USB3PHY_PCS_TXMGN_V0					0x0C0C
+#define USB3PHY_PCS_TXMGN_V1					0x0C10
+#define USB3PHY_PCS_TXMGN_V2					0x0C14
+#define USB3PHY_PCS_TXMGN_V3					0x0C18
+#define USB3PHY_PCS_TXMGN_V4					0x0C1C
+#define USB3PHY_PCS_TXMGN_LS					0x0C20
+#define USB3PHY_PCS_TXDEEMPH_M6DB_V0				0x0C24
+#define USB3PHY_PCS_TXDEEMPH_M3P5DB_V0				0x0C28
+#define USB3PHY_PCS_TXDEEMPH_M6DB_V1				0x0C2C
+#define USB3PHY_PCS_TXDEEMPH_M3P5DB_V1				0x0C30
+#define USB3PHY_PCS_TXDEEMPH_M6DB_V2				0x0C34
+#define USB3PHY_PCS_TXDEEMPH_M3P5DB_V2				0x0C38
+#define USB3PHY_PCS_TXDEEMPH_M6DB_V3				0x0C3C
+#define USB3PHY_PCS_TXDEEMPH_M3P5DB_V3				0x0C40
+#define USB3PHY_PCS_TXDEEMPH_M6DB_V4				0x0C44
+#define USB3PHY_PCS_TXDEEMPH_M3P5DB_V4				0x0C48
+#define USB3PHY_PCS_TXDEEMPH_M6DB_LS				0x0C4C
+#define USB3PHY_PCS_TXDEEMPH_M3P5DB_LS				0x0C50
+#define USB3PHY_PCS_ENDPOINT_REFCLK_DRIVE			0x0C54
+#define USB3PHY_PCS_RX_IDLE_DTCT_CNTRL				0x0C58
+#define USB3PHY_PCS_RATE_SLEW_CNTRL				0x0C5C
+#define USB3PHY_PCS_POWER_STATE_CONFIG1				0x0C60
+#define USB3PHY_PCS_POWER_STATE_CONFIG2				0x0C64
+#define USB3PHY_PCS_POWER_STATE_CONFIG3				0x0C68
+#define USB3PHY_PCS_POWER_STATE_CONFIG4				0x0C6C
+#define USB3PHY_PCS_RCVR_DTCT_DLY_P1U2_L			0x0C70
+#define USB3PHY_PCS_RCVR_DTCT_DLY_P1U2_H			0x0C74
+#define USB3PHY_PCS_RCVR_DTCT_DLY_U3_L				0x0C78
+#define USB3PHY_PCS_RCVR_DTCT_DLY_U3_H				0x0C7C
+#define USB3PHY_PCS_LOCK_DETECT_CONFIG1				0x0C80
+#define USB3PHY_PCS_LOCK_DETECT_CONFIG2				0x0C84
+#define USB3PHY_PCS_LOCK_DETECT_CONFIG3				0x0C88
+#define USB3PHY_PCS_TSYNC_RSYNC_TIME				0x0C8C
+#define USB3PHY_PCS_SIGDET_LOW_2_IDLE_TIME			0x0C90
+#define USB3PHY_PCS_BEACON_2_IDLE_TIME_L			0x0C94
+#define USB3PHY_PCS_BEACON_2_IDLE_TIME_H			0x0C98
+#define USB3PHY_PCS_PWRUP_RESET_DLY_TIME_SYSCLK			0x0C9C
+#define USB3PHY_PCS_PWRUP_RESET_DLY_TIME_AUXCLK			0x0CA0
+#define USB3PHY_PCS_LP_WAKEUP_DLY_TIME_AUXCLK			0x0CA4
+#define USB3PHY_PCS_PLL_LOCK_CHK_DLY_TIME			0x0CA8
+#define USB3PHY_PCS_LFPS_DET_HIGH_COUNT_VAL			0x0CAC
+#define USB3PHY_PCS_LFPS_TX_ECSTART_EQTLOCK			0x0CB0
+#define USB3PHY_PCS_LFPS_TX_END_CNT_P2U3_START			0x0CB4
+#define USB3PHY_PCS_RXEQTRAINING_WAIT_TIME			0x0CB8
+#define USB3PHY_PCS_RXEQTRAINING_RUN_TIME			0x0CBC
+#define USB3PHY_PCS_TXONESZEROS_RUN_LENGTH			0x0CC0
+#define USB3PHY_PCS_FLL_CNTRL1					0x0CC4
+#define USB3PHY_PCS_FLL_CNTRL2					0x0CC8
+#define USB3PHY_PCS_FLL_CNT_VAL_L				0x0CCC
+#define USB3PHY_PCS_FLL_CNT_VAL_H_TOL				0x0CD0
+#define USB3PHY_PCS_FLL_MAN_CODE				0x0CD4
+#define USB3PHY_PCS_AUTONOMOUS_MODE_CTRL			0x0CD8
+#define USB3PHY_PCS_LFPS_RXTERM_IRQ_CLEAR			0x0CDC
+#define USB3PHY_PCS_ARCVR_DTCT_EN_PERIOD			0x0CE0
+#define USB3PHY_PCS_ARCVR_DTCT_CM_DLY				0x0CE4
+#define USB3PHY_PCS_ALFPS_DEGLITCH_VAL				0x0CE8
+#define USB3PHY_PCS_INSIG_SW_CTRL1				0x0CEC
+#define USB3PHY_PCS_INSIG_SW_CTRL2				0x0CF0
+#define USB3PHY_PCS_INSIG_SW_CTRL3				0x0CF4
+#define USB3PHY_PCS_INSIG_MX_CTRL1				0x0CF8
+#define USB3PHY_PCS_INSIG_MX_CTRL2				0x0CFC
+#define USB3PHY_PCS_INSIG_MX_CTRL3				0x0D00
+#define USB3PHY_PCS_OUTSIG_SW_CTRL1				0x0D04
+#define USB3PHY_PCS_OUTSIG_MX_CTRL1				0x0D08
+#define USB3PHY_PCS_CLK_DEBUG_BYPASS_CTRL			0x0D0C
+#define USB3PHY_PCS_TEST_CONTROL				0x0D10
+#define USB3PHY_PCS_TEST_CONTROL2				0x0D14
+#define USB3PHY_PCS_TEST_CONTROL3				0x0D18
+#define USB3PHY_PCS_TEST_CONTROL4				0x0D1C
+#define USB3PHY_PCS_TEST_CONTROL5				0x0D20
+#define USB3PHY_PCS_TEST_CONTROL6				0x0D24
+#define USB3PHY_PCS_TEST_CONTROL7				0x0D28
+#define USB3PHY_PCS_COM_RESET_CONTROL				0x0D2C
+#define USB3PHY_PCS_BIST_CTRL					0x0D30
+#define USB3PHY_PCS_PRBS_POLY0					0x0D34
+#define USB3PHY_PCS_PRBS_POLY1					0x0D38
+#define USB3PHY_PCS_PRBS_SEED0					0x0D3C
+#define USB3PHY_PCS_PRBS_SEED1					0x0D40
+#define USB3PHY_PCS_FIXED_PAT_CTRL				0x0D44
+#define USB3PHY_PCS_FIXED_PAT0					0x0D48
+#define USB3PHY_PCS_FIXED_PAT1					0x0D4C
+#define USB3PHY_PCS_FIXED_PAT2					0x0D50
+#define USB3PHY_PCS_FIXED_PAT3					0x0D54
+#define USB3PHY_PCS_COM_CLK_SWITCH_CTRL				0x0D58
+#define USB3PHY_PCS_ELECIDLE_DLY_SEL				0x0D5C
+#define USB3PHY_PCS_SPARE1					0x0D60
+#define USB3PHY_PCS_BIST_CHK_ERR_CNT_L_STATUS			0x0D64
+#define USB3PHY_PCS_BIST_CHK_ERR_CNT_H_STATUS			0x0D68
+#define USB3PHY_PCS_BIST_CHK_STATUS				0x0D6C
+#define USB3PHY_PCS_LFPS_RXTERM_IRQ_SOURCE_STATUS		0x0D70
+#define USB3PHY_PCS_PCS_STATUS					0x0D74
+#define USB3PHY_PCS_PCS_STATUS2					0x0D78
+#define USB3PHY_PCS_PCS_STATUS3					0x0D7C
+#define USB3PHY_PCS_COM_RESET_STATUS				0x0D80
+#define USB3PHY_PCS_OSC_DTCT_STATUS				0x0D84
+#define USB3PHY_PCS_REVISION_ID0				0x0D88
+#define USB3PHY_PCS_REVISION_ID1				0x0D8C
+#define USB3PHY_PCS_REVISION_ID2				0x0D90
+#define USB3PHY_PCS_REVISION_ID3				0x0D94
+#define USB3PHY_PCS_DEBUG_BUS_0_STATUS				0x0D98
+#define USB3PHY_PCS_DEBUG_BUS_1_STATUS				0x0D9C
+#define USB3PHY_PCS_DEBUG_BUS_2_STATUS				0x0DA0
+#define USB3PHY_PCS_DEBUG_BUS_3_STATUS				0x0DA4
+#define USB3PHY_PCS_LP_WAKEUP_DLY_TIME_AUXCLK_MSB		0x0DA8
+#define USB3PHY_PCS_OSC_DTCT_ACTIONS				0x0DAC
+#define USB3PHY_PCS_SIGDET_CNTRL				0x0DB0
+#define USB3PHY_PCS_IDAC_CAL_CNTRL				0x0DB4
+#define USB3PHY_PCS_CMN_ACK_OUT_SEL				0x0DB8
+#define USB3PHY_PCS_PLL_LOCK_CHK_DLY_TIME_SYSCLK		0x0DBC
+#define USB3PHY_PCS_AUTONOMOUS_MODE_STATUS			0x0DC0
+#define USB3PHY_PCS_ENDPOINT_REFCLK_CNTRL			0x0DC4
+#define USB3PHY_PCS_EPCLK_PRE_PLL_LOCK_DLY_SYSCLK		0x0DC8
+#define USB3PHY_PCS_EPCLK_PRE_PLL_LOCK_DLY_AUXCLK		0x0DCC
+#define USB3PHY_PCS_EPCLK_DLY_COUNT_VAL_L			0x0DD0
+#define USB3PHY_PCS_EPCLK_DLY_COUNT_VAL_H			0x0DD4
+#define USB3PHY_PCS_RX_SIGDET_LVL				0x0DD8
+#define USB3PHY_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB		0x0DDC
+#define USB3PHY_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB		0x0DE0
+#define USB3PHY_PCS_AUTONOMOUS_MODE_CTRL2			0x0DE4
+#define USB3PHY_PCS_RXTERMINATION_DLY_SEL			0x0DE8
+#define USB3PHY_PCS_LFPS_PER_TIMER_VAL				0x0DEC
+#define USB3PHY_PCS_SIGDET_STARTUP_TIMER_VAL			0x0DF0
+#define USB3PHY_PCS_LOCK_DETECT_CONFIG4				0x0DF4
+#define USB3PHY_PCS_RX_SIGDET_DTCT_CNTRL			0x0DF8
+#define USB3PHY_PCS_PCS_STATUS4					0x0DFC
+#define USB3PHY_PCS_PCS_STATUS4_CLEAR				0x0E00
+#define USB3PHY_PCS_DEC_ERROR_COUNT_STATUS			0x0E04
+#define USB3PHY_PCS_COMMA_POS_STATUS				0x0E08
+#define USB3PHY_PCS_REFGEN_REQ_CONFIG1				0x0E0C
+#define USB3PHY_PCS_REFGEN_REQ_CONFIG2				0x0E10
+#define USB3PHY_PCS_REFGEN_REQ_CONFIG3				0x0E14
+
+#endif /* _DT_BINDINGS_PHY_QCOM_11NM_QMP_COMBO_USB_H */
diff --git a/include/dt-bindings/regulator/qcom,rpm-smd-regulator.h b/include/dt-bindings/regulator/qcom,rpm-smd-regulator.h
new file mode 100644
index 0000000..c50659e
--- /dev/null
+++ b/include/dt-bindings/regulator/qcom,rpm-smd-regulator.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015, 2017, 2019, The Linux Foundation. All rights reserved. */
+
+#ifndef __QCOM_RPM_SMD_REGULATOR_H
+#define __QCOM_RPM_SMD_REGULATOR_H
+
+#define RPM_SMD_REGULATOR_LEVEL_NONE		0
+#define RPM_SMD_REGULATOR_LEVEL_RETENTION	16
+#define RPM_SMD_REGULATOR_LEVEL_RETENTION_PLUS	32
+#define RPM_SMD_REGULATOR_LEVEL_MIN_SVS		48
+#define RPM_SMD_REGULATOR_LEVEL_LOW_SVS		64
+#define RPM_SMD_REGULATOR_LEVEL_SVS		128
+#define RPM_SMD_REGULATOR_LEVEL_SVS_PLUS	192
+#define RPM_SMD_REGULATOR_LEVEL_NOM		256
+#define RPM_SMD_REGULATOR_LEVEL_NOM_PLUS	320
+#define RPM_SMD_REGULATOR_LEVEL_TURBO		384
+#define RPM_SMD_REGULATOR_LEVEL_TURBO_NO_CPR	416
+#define RPM_SMD_REGULATOR_LEVEL_BINNING		512
+
+#endif
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 90ac450..561fefc 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -361,6 +361,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
 
 void kvm_vgic_load(struct kvm_vcpu *vcpu);
 void kvm_vgic_put(struct kvm_vcpu *vcpu);
+void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
 
 #define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k)	((k)->arch.vgic.initialized)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index b4d23b3..59a416d 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1291,10 +1291,15 @@ static inline int lpit_read_residency_count_address(u64 *address)
 #endif
 
 #ifdef CONFIG_ACPI_PPTT
+int acpi_pptt_cpu_is_thread(unsigned int cpu);
 int find_acpi_cpu_topology(unsigned int cpu, int level);
 int find_acpi_cpu_topology_package(unsigned int cpu);
 int find_acpi_cpu_cache_topology(unsigned int cpu, int level);
 #else
+static inline int acpi_pptt_cpu_is_thread(unsigned int cpu)
+{
+	return -EINVAL;
+}
 static inline int find_acpi_cpu_topology(unsigned int cpu, int level)
 {
 	return -EINVAL;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 0801ef9..efa15cf 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -435,6 +435,7 @@ extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
 extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
 extern void bio_put(struct bio *);
 
+extern void bio_clone_crypt_key(struct bio *dst, const struct bio *src);
 extern void __bio_clone_fast(struct bio *, struct bio *);
 extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
 
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 1da59c1..2885dce 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -114,6 +114,7 @@ typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
 typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
 typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
+typedef void (cleanup_rq_fn)(struct request *);
 
 
 struct blk_mq_ops {
@@ -165,6 +166,12 @@ struct blk_mq_ops {
 	/* Called from inside blk_get_request() */
 	void (*initialize_rq_fn)(struct request *rq);
 
+	/*
+	 * Called before freeing a request that has not completed yet;
+	 * typically used to free the driver's private data.
+	 */
+	cleanup_rq_fn		*cleanup_rq;
+
 	map_queues_fn		*map_queues;
 
 #ifdef CONFIG_BLK_DEBUG_FS
@@ -324,4 +331,10 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
 	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
 	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
 
+static inline void blk_mq_cleanup_rq(struct request *rq)
+{
+	if (rq->q->mq_ops->cleanup_rq)
+		rq->q->mq_ops->cleanup_rq(rq);
+}
+
 #endif
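
A hedged sketch of how a driver might use the new hook: cleanup_rq frees
driver-private request data before the request itself is freed. The driver
and field names below are hypothetical; only blk_mq_ops, blk_mq_rq_to_pdu()
and the cleanup_rq callback come from the hunk above.

	/* Hypothetical per-request driver data stored in the request PDU. */
	struct my_rq_data {
		void *payload;		/* driver-private allocation */
	};

	static void my_cleanup_rq(struct request *rq)
	{
		struct my_rq_data *data = blk_mq_rq_to_pdu(rq);

		kfree(data->payload);	/* release before the request is freed */
		data->payload = NULL;
	}

	static const struct blk_mq_ops my_mq_ops = {
		/* .queue_rq and other mandatory callbacks omitted */
		.cleanup_rq	= my_cleanup_rq,	/* run via blk_mq_cleanup_rq() */
	};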
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 71aef72..6fb7f29 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -47,6 +47,11 @@ void generic_bug_clear_once(void);
 
 #else	/* !CONFIG_GENERIC_BUG */
 
+static inline void *find_bug(unsigned long bugaddr)
+{
+	return NULL;
+}
+
 static inline enum bug_trap_type report_bug(unsigned long bug_addr,
 					    struct pt_regs *regs)
 {
diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
index 5e58bb2..11cdc7c 100644
--- a/include/linux/ceph/buffer.h
+++ b/include/linux/ceph/buffer.h
@@ -30,7 +30,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
 
 static inline void ceph_buffer_put(struct ceph_buffer *b)
 {
-	kref_put(&b->kref, ceph_buffer_release);
+	if (b)
+		kref_put(&b->kref, ceph_buffer_release);
 }
 
 extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 37d9611..6e44890 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -429,6 +429,7 @@ static inline void cpufreq_resume(void) {}
 /* Policy Notifiers  */
 #define CPUFREQ_ADJUST			(0)
 #define CPUFREQ_NOTIFY			(1)
+#define CPUFREQ_INCOMPATIBLE	(6)
 
 #ifdef CONFIG_CPU_FREQ
 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 707235f..2a73593 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -99,6 +99,7 @@ enum cpuhp_state {
 	CPUHP_AP_SCHED_STARTING,
 	CPUHP_AP_RCUTREE_DYING,
 	CPUHP_AP_IRQ_GIC_STARTING,
+	CPUHP_AP_EDAC_PMU_STARTING,
 	CPUHP_AP_IRQ_HIP04_STARTING,
 	CPUHP_AP_IRQ_ARMADA_XP_STARTING,
 	CPUHP_AP_IRQ_BCM2836_STARTING,
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 659164d..2f3d54e 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -62,7 +62,8 @@ typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
 					    struct request *rq,
 					    union map_info *map_context,
 					    struct request **clone);
-typedef void (*dm_release_clone_request_fn) (struct request *clone);
+typedef void (*dm_release_clone_request_fn) (struct request *clone,
+					     union map_info *map_context);
 
 /*
  * Returns:
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 3b908e5..2cf6fa6 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -142,7 +142,7 @@
  * a new RANGE of SSIDs to the msg_mask_tbl.
  */
 #define MSG_MASK_TBL_CNT		26
-#define APPS_EVENT_LAST_ID		0xCAA
+#define APPS_EVENT_LAST_ID		0xCB4
 
 #define MSG_SSID_0			0
 #define MSG_SSID_0_LAST			130
@@ -916,7 +916,7 @@ static const uint32_t msg_bld_masks_25[] = {
 /* LOG CODES */
 static const uint32_t log_code_last_tbl[] = {
 	0x0,	/* EQUIP ID 0 */
-	0x1C9A,	/* EQUIP ID 1 */
+	0x1CB2,	/* EQUIP ID 1 */
 	0x0,	/* EQUIP ID 2 */
 	0x0,	/* EQUIP ID 3 */
 	0x4910,	/* EQUIP ID 4 */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index a2bf4a6b..d10cc29 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -119,6 +119,7 @@ struct elevator_mq_ops {
 	struct request *(*next_request)(struct request_queue *, struct request *);
 	void (*init_icq)(struct io_cq *);
 	void (*exit_icq)(struct io_cq *);
+	void (*elevator_registered_fn)(struct request_queue *q);
 };
 
 #define ELV_NAME_MAX	(16)
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index aa027f7..cd649b1 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -83,6 +83,9 @@ static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
 	struct em_cap_state *cs;
 	int i, cpu;
 
+	if (!sum_util)
+		return 0;
+
 	/*
 	 * In order to predict the capacity state, map the utilization of the
 	 * most utilized CPU of the performance domain to a requested frequency,
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 3705c6f..89557544 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -466,7 +466,12 @@ struct sock_fprog_kern {
 	struct sock_filter	*filter;
 };
 
+#define BPF_BINARY_HEADER_MAGIC	0x05de0e82
+
 struct bpf_binary_header {
+#ifdef CONFIG_CFI_CLANG
+	u32 magic;
+#endif
 	u32 pages;
 	/* Some arches need word alignment for their instructions */
 	u8 image[] __aligned(4);
@@ -506,7 +511,62 @@ struct sk_filter {
 	struct bpf_prog	*prog;
 };
 
-#define BPF_PROG_RUN(filter, ctx)  (*(filter)->bpf_func)(ctx, (filter)->insnsi)
+#if IS_ENABLED(CONFIG_BPF_JIT) && IS_ENABLED(CONFIG_CFI_CLANG)
+/*
+ * With JIT, the kernel makes an indirect call to dynamically generated
+ * code. Use bpf_call_func to perform additional validation of the call
+ * target to narrow down attack surface. Architectures implementing BPF
+ * JIT can override arch_bpf_jit_check_func for arch-specific checking.
+ */
+extern bool arch_bpf_jit_check_func(const struct bpf_prog *prog);
+
+static inline unsigned int __bpf_call_func(const struct bpf_prog *prog,
+					   const void *ctx)
+{
+	/* Call interpreter with CFI checking. */
+	return prog->bpf_func(ctx, prog->insnsi);
+}
+
+static inline struct bpf_binary_header *
+bpf_jit_binary_hdr(const struct bpf_prog *fp);
+
+static inline unsigned int __nocfi bpf_call_func(const struct bpf_prog *prog,
+						 const void *ctx)
+{
+	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
+
+	if (!IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) && !prog->jited)
+		return __bpf_call_func(prog, ctx);
+
+	/*
+	 * We are about to call dynamically generated code. Check that the
+	 * page has bpf_binary_header with a valid magic to limit possible
+	 * call targets.
+	 */
+	BUG_ON(hdr->magic != BPF_BINARY_HEADER_MAGIC ||
+		!arch_bpf_jit_check_func(prog));
+
+	/* Call jited function without CFI checking. */
+	return prog->bpf_func(ctx, prog->insnsi);
+}
+
+static inline void bpf_jit_set_header_magic(struct bpf_binary_header *hdr)
+{
+	hdr->magic = BPF_BINARY_HEADER_MAGIC;
+}
+#else
+static inline unsigned int bpf_call_func(const struct bpf_prog *prog,
+					 const void *ctx)
+{
+	return prog->bpf_func(ctx, prog->insnsi);
+}
+
+static inline void bpf_jit_set_header_magic(struct bpf_binary_header *hdr)
+{
+}
+#endif
+
+#define BPF_PROG_RUN(filter, ctx)  bpf_call_func(filter, ctx)
 
 #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
 
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 39745b8..b3115d1 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -240,30 +240,6 @@ static inline int irq_to_gpio(unsigned irq)
 	return -EINVAL;
 }
 
-static inline int
-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
-		       unsigned int gpio_offset, unsigned int pin_offset,
-		       unsigned int npins)
-{
-	WARN_ON(1);
-	return -EINVAL;
-}
-
-static inline int
-gpiochip_add_pingroup_range(struct gpio_chip *chip,
-			struct pinctrl_dev *pctldev,
-			unsigned int gpio_offset, const char *pin_group)
-{
-	WARN_ON(1);
-	return -EINVAL;
-}
-
-static inline void
-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
-{
-	WARN_ON(1);
-}
-
 static inline int devm_gpio_request(struct device *dev, unsigned gpio,
 				    const char *label)
 {
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index acc4279..412098b 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -222,7 +222,7 @@ static inline void gpiod_put(struct gpio_desc *desc)
 	might_sleep();
 
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 }
 
 static inline void gpiod_put_array(struct gpio_descs *descs)
@@ -230,7 +230,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
 	might_sleep();
 
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(descs);
 }
 
 static inline struct gpio_desc *__must_check
@@ -283,7 +283,7 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
 	might_sleep();
 
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 }
 
 static inline void devm_gpiod_put_array(struct device *dev,
@@ -292,32 +292,32 @@ static inline void devm_gpiod_put_array(struct device *dev,
 	might_sleep();
 
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(descs);
 }
 
 
 static inline int gpiod_get_direction(const struct gpio_desc *desc)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 	return -ENOSYS;
 }
 static inline int gpiod_direction_input(struct gpio_desc *desc)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 	return -ENOSYS;
 }
 static inline int gpiod_direction_output(struct gpio_desc *desc, int value)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 	return -ENOSYS;
 }
 static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 	return -ENOSYS;
 }
 
@@ -325,7 +325,7 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
 static inline int gpiod_get_value(const struct gpio_desc *desc)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 	return 0;
 }
 static inline int gpiod_get_array_value(unsigned int array_size,
@@ -333,25 +333,25 @@ static inline int gpiod_get_array_value(unsigned int array_size,
 					int *value_array)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc_array);
 	return 0;
 }
 static inline void gpiod_set_value(struct gpio_desc *desc, int value)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 }
 static inline void gpiod_set_array_value(unsigned int array_size,
 					 struct gpio_desc **desc_array,
 					 int *value_array)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc_array);
 }
 static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 	return 0;
 }
 static inline int gpiod_get_raw_array_value(unsigned int array_size,
@@ -359,27 +359,27 @@ static inline int gpiod_get_raw_array_value(unsigned int array_size,
 					    int *value_array)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc_array);
 	return 0;
 }
 static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 }
 static inline int gpiod_set_raw_array_value(unsigned int array_size,
 					     struct gpio_desc **desc_array,
 					     int *value_array)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc_array);
 	return 0;
 }
 
 static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 	return 0;
 }
 static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
@@ -387,25 +387,25 @@ static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
 				     int *value_array)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc_array);
 	return 0;
 }
 static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 }
 static inline void gpiod_set_array_value_cansleep(unsigned int array_size,
 					    struct gpio_desc **desc_array,
 					    int *value_array)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc_array);
 }
 static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 	return 0;
 }
 static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
@@ -413,55 +413,55 @@ static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
 					       int *value_array)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc_array);
 	return 0;
 }
 static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
 						int value)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 }
 static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
 						struct gpio_desc **desc_array,
 						int *value_array)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc_array);
 	return 0;
 }
 
 static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 	return -ENOSYS;
 }
 
 static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 	return -ENOSYS;
 }
 
 static inline int gpiod_is_active_low(const struct gpio_desc *desc)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 	return 0;
 }
 static inline int gpiod_cansleep(const struct gpio_desc *desc)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 	return 0;
 }
 
 static inline int gpiod_to_irq(const struct gpio_desc *desc)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 	return -EINVAL;
 }
 
@@ -469,7 +469,7 @@ static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
 					  const char *name)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 	return -EINVAL;
 }
 
@@ -481,7 +481,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
 static inline int desc_to_gpio(const struct gpio_desc *desc)
 {
 	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 	return -EINVAL;
 }
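
The WARN_ON(1) -> WARN_ON(desc) change matters for optional GPIOs: with
GPIOLIB disabled, the gpiod_get_optional() stubs return NULL, and callers
passing that NULL descriptor through the accessors should stay silent. A
minimal hedged sketch of the pattern (device and label are hypothetical):

	static int my_probe(struct device *dev)
	{
		struct gpio_desc *reset;

		/* NULL, not an error, when the GPIO is absent or GPIOLIB is off */
		reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
		if (IS_ERR(reset))
			return PTR_ERR(reset);

		gpiod_set_value(reset, 1);	/* no-op, no warning, if reset is NULL */
		return 0;
	}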
 
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 8b3e5e8..d44a783 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -414,7 +414,6 @@ struct hid_global {
 
 struct hid_local {
 	unsigned usage[HID_MAX_USAGES]; /* usage array */
-	u8 usage_size[HID_MAX_USAGES]; /* usage size array */
 	unsigned collection_index[HID_MAX_USAGES]; /* collection index array */
 	unsigned usage_index;
 	unsigned usage_minimum;
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 9493d4a..8fde789 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -225,7 +225,7 @@ enum hwmon_power_attributes {
 #define HWMON_P_LABEL			BIT(hwmon_power_label)
 #define HWMON_P_ALARM			BIT(hwmon_power_alarm)
 #define HWMON_P_CAP_ALARM		BIT(hwmon_power_cap_alarm)
-#define HWMON_P_MIN_ALARM		BIT(hwmon_power_max_alarm)
+#define HWMON_P_MIN_ALARM		BIT(hwmon_power_min_alarm)
 #define HWMON_P_MAX_ALARM		BIT(hwmon_power_max_alarm)
 #define HWMON_P_LCRIT_ALARM		BIT(hwmon_power_lcrit_alarm)
 #define HWMON_P_CRIT_ALARM		BIT(hwmon_power_crit_alarm)
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index da3a837..c834782 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -3189,16 +3189,16 @@ struct element {
 	u8 id;
 	u8 datalen;
 	u8 data[];
-};
+} __packed;
 
 /* element iteration helpers */
-#define for_each_element(element, _data, _datalen)			\
-	for (element = (void *)(_data);					\
-	     (u8 *)(_data) + (_datalen) - (u8 *)element >=		\
-		sizeof(*element) &&					\
-	     (u8 *)(_data) + (_datalen) - (u8 *)element >=		\
-		sizeof(*element) + element->datalen;			\
-	     element = (void *)(element->data + element->datalen))
+#define for_each_element(_elem, _data, _datalen)			\
+	for (_elem = (const struct element *)(_data);			\
+	     (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >=	\
+		(int)sizeof(*_elem) &&					\
+	     (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >=	\
+		(int)sizeof(*_elem) + _elem->datalen;			\
+	     _elem = (const struct element *)(_elem->data + _elem->datalen))
 
 #define for_each_element_id(element, _id, data, datalen)		\
 	for_each_element(element, data, datalen)			\
@@ -3235,7 +3235,7 @@ struct element {
 static inline bool for_each_element_completed(const struct element *element,
 					      const void *data, size_t datalen)
 {
-	return (u8 *)element == (u8 *)data + datalen;
+	return (const u8 *)element == (const u8 *)data + datalen;
 }
 
 #endif /* LINUX_IEEE80211_H */
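
A hedged usage sketch of the now const-correct iterator; the buffer and
function names are hypothetical:

	static void dump_elements(const u8 *ies, size_t ies_len)
	{
		const struct element *elem;

		for_each_element(elem, ies, ies_len)
			pr_debug("IE id=%u len=%u\n", elem->id, elem->datalen);

		/* a truncated trailing element leaves the walk incomplete */
		if (!for_each_element_completed(elem, ies, ies_len))
			pr_warn("malformed IE buffer\n");
	}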
diff --git a/include/linux/init.h b/include/linux/init.h
index 847e853..a223531 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -314,6 +314,8 @@ void __init parse_early_options(char *cmdline);
 /* Data marked not to be saved by software suspend */
 #define __nosavedata __section(.data..nosave)
 
+#define __rticdata  __attribute__((section(".bss.rtic")))
+
 #ifdef MODULE
 #define __exit_p(x) x
 #else
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index 33fd4e9..dcab89e 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -13,6 +13,9 @@
 #include "linux/msm_gsi.h"
 
 #define IPA_APPS_MAX_BW_IN_MBPS 700
+#define IPA_BW_THRESHOLD_MAX 3
+
+#define IPA_MAX_CH_STATS_SUPPORTED 5
 /**
  * enum ipa_transport_type
  * transport type: either GSI or SPS
@@ -501,10 +504,13 @@ typedef void (*ipa_notify_cb)(void *priv, enum ipa_dp_evt_type evt,
  *			use ipa_get_wdi_sap_stats structure
  * @IPA_SET_WIFI_QUOTA: set quota limit on STA -
  *			use ipa_set_wifi_quota structure
+ * @IPA_INFORM_WLAN_BW: inform about wlan BW -
+ *			use ipa_inform_wlan_bw structure
  */
 enum ipa_wdi_meter_evt_type {
 	IPA_GET_WDI_SAP_STATS,
 	IPA_SET_WIFI_QUOTA,
+	IPA_INFORM_WLAN_BW,
 };
 
 struct ipa_get_wdi_sap_stats {
@@ -540,6 +546,19 @@ struct ipa_set_wifi_quota {
 	uint8_t set_valid;
 };
 
+/**
+ * struct ipa_inform_wlan_bw - structure used for IPA_INFORM_WLAN_BW
+ *
+ * @index:       indicates which bw threshold index was hit
+ * @throughput:  throughput usage
+ *
+ */
+struct ipa_inform_wlan_bw {
+	uint8_t  index;
+	uint64_t throughput;
+};
+
 typedef void (*ipa_wdi_meter_notifier_cb)(enum ipa_wdi_meter_evt_type evt,
 		       void *data);
 
@@ -893,6 +912,33 @@ struct IpaHwBamStats_t {
 } __packed;
 
 /**
+ * struct IpaOffloadStatschannel_info - channel info for uC
+ * stats
+ * @dir: direction of the channel: DIR_CONSUMER = 0,
+ * DIR_PRODUCER = 1
+ * @ch_id: GSI ch_id of the IPA endpoint for which stats are
+ * to be calculated; 0xFF means an invalid channel, or disables
+ * stats on a channel that already has stats enabled
+ */
+struct IpaOffloadStatschannel_info {
+	u8 dir;
+	u8 ch_id;
+} __packed;
+
+/**
+ * struct IpaHwOffloadStatsAllocCmdData_t - protocol info for uC
+ * stats start
+ * @protocol: Enum that indicates the protocol type
+ * @ch_id_info: GSI ch_id and dir of the IPA endpoint for which stats
+ * need to be calculated
+ */
+struct IpaHwOffloadStatsAllocCmdData_t {
+	u32 protocol;
+	struct IpaOffloadStatschannel_info
+		ch_id_info[IPA_MAX_CH_STATS_SUPPORTED];
+} __packed;
+
+/**
  * struct IpaHwRingStats_t - Structure holding the Ring statistics
  *
  * @ringFull : Number of times Transfer Ring got full - For In Ch: Good,
@@ -913,6 +959,17 @@ struct IpaHwRingStats_t {
 } __packed;
 
 /**
+ * struct ipa_uc_dbg_ring_stats - uC dbg stats info for each
+ * offloading protocol
+ * @ring: ring stats for each channel
+ * @num_ch: number of channels supported for the given protocol
+ */
+struct ipa_uc_dbg_ring_stats {
+	struct IpaHwRingStats_t ring[IPA_MAX_CH_STATS_SUPPORTED];
+	u8 num_ch;
+};
+
+/**
  * struct IpaHwStatsWDIRxInfoData_t - Structure holding the WDI Rx channel
  * structures
  *
@@ -1154,6 +1211,32 @@ struct ipa_wdi_buffer_info {
 };
 
 /**
+ * struct ipa_wdi_bw_info - bandwidth monitoring info passed by WLAN
+ * @threshold: throughput thresholds to be monitored
+ * @num: number of threshold entries
+ * @stop: true to stop monitoring
+ */
+struct ipa_wdi_bw_info {
+	uint64_t threshold[IPA_BW_THRESHOLD_MAX];
+	int num;
+	bool stop;
+};
+
+/**
+ * struct ipa_wdi_tx_info - sw tx info from WLAN
+ * @sta_tx: sw tx stats on the sta interface
+ * @ap_tx: sw tx stats on the ap interface
+ */
+struct ipa_wdi_tx_info {
+	uint64_t sta_tx;
+	uint64_t ap_tx;
+};
+
+/**
  * struct ipa_gsi_ep_config - IPA GSI endpoint configurations
  *
  * @ipa_ep_num: IPA EP pipe number
@@ -1421,6 +1504,8 @@ int ipa_disable_wdi_pipe(u32 clnt_hdl);
 int ipa_resume_wdi_pipe(u32 clnt_hdl);
 int ipa_suspend_wdi_pipe(u32 clnt_hdl);
 int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
+int ipa_uc_bw_monitor(struct ipa_wdi_bw_info *info);
+int ipa_set_wlan_tx_info(struct ipa_wdi_tx_info *info);
 u16 ipa_get_smem_restr_bytes(void);
 int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid,
 		uint64_t num_bytes);
@@ -1568,6 +1653,13 @@ enum ipa_transport_type ipa_get_transport_type(void);
 struct device *ipa_get_dma_dev(void);
 struct iommu_domain *ipa_get_smmu_domain(void);
 
+int ipa_uc_debug_stats_alloc(
+	struct IpaHwOffloadStatsAllocCmdData_t cmdinfo);
+int ipa_uc_debug_stats_dealloc(uint32_t protocol);
+void ipa_get_gsi_stats(int prot_id,
+	struct ipa_uc_dbg_ring_stats *stats);
+int ipa_get_prot_id(enum ipa_client_type client);
+
 int ipa_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count);
 
 const struct ipa_gsi_ep_config *ipa_get_gsi_ep_info
@@ -1625,6 +1717,11 @@ int ipa_get_smmu_params(struct ipa_smmu_in_params *in,
  * Returns: 0 on success, negative on failure
  */
 int ipa_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res);
+
+/**
+ * ipa_get_lan_rx_napi - returns true if NAPI is enabled in the LAN RX dp
+ */
+bool ipa_get_lan_rx_napi(void);
 #else /* (CONFIG_IPA || CONFIG_IPA3) */
 
 /*
@@ -2357,6 +2454,16 @@ static inline int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
 	return -EPERM;
 }
 
+static inline int ipa_uc_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+	return -EPERM;
+}
+
+static inline int ipa_set_wlan_tx_info(struct ipa_wdi_tx_info *info)
+{
+	return -EPERM;
+}
+
 static inline int ipa_get_ep_mapping(enum ipa_client_type client)
 {
 	return -EPERM;
@@ -2468,6 +2575,32 @@ static inline int ipa_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res)
 {
 	return -EPERM;
 }
+
+static inline int ipa_uc_debug_stats_alloc(
+	struct IpaHwOffloadStatsAllocCmdData_t cmdinfo)
+{
+	return -EPERM;
+}
+static inline int ipa_uc_debug_stats_dealloc(uint32_t protocol)
+{
+	return -EPERM;
+}
+
+static inline void ipa_get_gsi_stats(int prot_id,
+	struct ipa_uc_dbg_ring_stats *stats)
+{
+}
+
+static inline int ipa_get_prot_id(enum ipa_client_type client)
+{
+	return -EPERM;
+}
+
+static inline bool ipa_get_lan_rx_napi(void)
+{
+	return false;
+}
+
 #endif /* (CONFIG_IPA || CONFIG_IPA3) */
 
 #endif /* _IPA_H_ */
diff --git a/include/linux/ipa_wdi3.h b/include/linux/ipa_wdi3.h
index fed0082..c1497c5 100644
--- a/include/linux/ipa_wdi3.h
+++ b/include/linux/ipa_wdi3.h
@@ -347,6 +347,29 @@ int ipa_wdi_release_smmu_mapping(u32 num_buffers,
  */
 int ipa_wdi_get_stats(struct IpaHwStatsWDIInfoData_t *stats);
 
+
+/**
+ * ipa_wdi_bw_monitor() - set wdi BW monitoring
+ * @info:	[inout] info blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_wdi_bw_monitor(struct ipa_wdi_bw_info *info);
+
+/**
+ * ipa_wdi_sw_stats() - report WLAN sw tx stats
+ * @info:	[inout] info blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_wdi_sw_stats(struct ipa_wdi_tx_info *info);
+
 #else /* (CONFIG_IPA || CONFIG_IPA3) */
 
 static inline int ipa_wdi_init(struct ipa_wdi_init_in_params *in,
@@ -415,6 +438,16 @@ static inline int ipa_wdi_get_stats(struct IpaHwStatsWDIInfoData_t *stats)
 	return -EPERM;
 }
 
+static inline int ipa_wdi_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_sw_stats(struct ipa_wdi_tx_info *info)
+{
+	return -EPERM;
+}
+
 #endif /* CONFIG_IPA3 */
 
 #endif /* _IPA_WDI3_H_ */
diff --git a/include/linux/ipa_wigig.h b/include/linux/ipa_wigig.h
index a9fac9e..4a261d7 100644
--- a/include/linux/ipa_wigig.h
+++ b/include/linux/ipa_wigig.h
@@ -39,10 +39,12 @@ struct ipa_wigig_init_in_params {
  *
  * @is_uC_ready: is uC ready. No API should be called until uC is ready.
  * @uc_db_pa: physical address of IPA uC doorbell
+ * @lan_rx_napi_enable: true if NAPI is used in the LAN rx path
  */
 struct ipa_wigig_init_out_params {
 	bool is_uc_ready;
 	phys_addr_t uc_db_pa;
+	bool lan_rx_napi_enable;
 };
 
 /*
diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h
index cbd9d849..88e1e63 100644
--- a/include/linux/logic_pio.h
+++ b/include/linux/logic_pio.h
@@ -117,6 +117,7 @@ struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode);
 unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
 			resource_size_t hw_addr, resource_size_t size);
 int logic_pio_register_range(struct logic_pio_hwaddr *newrange);
+void logic_pio_unregister_range(struct logic_pio_hwaddr *range);
 resource_size_t logic_pio_to_hwaddr(unsigned long pio);
 unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr);
 
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 1c4b37c..83768b3 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -38,7 +38,7 @@ enum MHI_CB {
 };
 
 /**
- * enum MHI_DEBUG_LEVL - various debugging level
+ * enum MHI_DEBUG_LEVEL - various debugging levels
  */
 enum MHI_DEBUG_LEVEL {
 	MHI_MSG_LVL_VERBOSE,
@@ -46,6 +46,7 @@ enum MHI_DEBUG_LEVEL {
 	MHI_MSG_LVL_ERROR,
 	MHI_MSG_LVL_CRITICAL,
 	MHI_MSG_LVL_MASK_ALL,
+	MHI_MSG_LVL_MAX,
 };
 
 /**
@@ -138,6 +139,52 @@ struct image_info {
 	u32 entries;
 };
 
+/* rddm header info */
+
+#define MAX_RDDM_TABLE_SIZE 6
+
+/**
+ * struct rddm_table_info - rddm table info
+ * @base_address: start offset of the file
+ * @actual_phys_address: physical address offset of the file
+ * @size: size of the file
+ * @description: file description
+ * @file_name: name of the file
+ */
+struct rddm_table_info {
+	u64 base_address;
+	u64 actual_phys_address;
+	u64 size;
+	char description[20];
+	char file_name[20];
+};
+
+/**
+ * struct rddm_header - rddm header
+ * @version: header version
+ * @header_size: size of the header
+ * @table_info: array of rddm table info entries
+ */
+struct rddm_header {
+	u32 version;
+	u32 header_size;
+	struct rddm_table_info table_info[MAX_RDDM_TABLE_SIZE];
+};
+
+/**
+ * struct file_info - keeping track of file info while traversing the rddm
+ * table header
+ * @file_offset: current file offset
+ * @file_size: size of the current file
+ * @seg_idx: mhi buf seg array index
+ * @rem_seg_len: remaining length of the segment containing the current file
+ */
+struct file_info {
+	u8 *file_offset;
+	u32 file_size;
+	u32 seg_idx;
+	u32 rem_seg_len;
+};
+
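
For orientation, a hedged sketch of walking the table described by these
structures; field usage is inferred from the kernel-doc above, and treating
file_name as NUL-terminated is an assumption.

	static void walk_rddm_table(const struct rddm_header *hdr)
	{
		u32 i;

		for (i = 0; i < MAX_RDDM_TABLE_SIZE; i++) {
			const struct rddm_table_info *info = &hdr->table_info[i];

			if (!info->size)	/* skip empty slots */
				continue;
			pr_info("rddm file %s: base=0x%llx size=%llu\n",
				info->file_name, info->base_address, info->size);
		}
	}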
 /**
  * struct mhi_controller - Master controller structure for external modem
  * @dev: Device associated with this controller
@@ -252,6 +299,7 @@ struct mhi_controller {
 	struct mhi_ctxt *mhi_ctxt;
 
 	u32 timeout_ms;
+	u32 m2_timeout_ms; /* wait time for host to continue suspend after m2 */
 
 	/* caller should grab pm_mutex for suspend/resume operations */
 	struct mutex pm_mutex;
@@ -265,6 +313,7 @@ struct mhi_controller {
 	enum mhi_dev_state dev_state;
 	enum mhi_dev_state saved_dev_state;
 	bool wake_set;
+	bool ignore_override;
 	atomic_t dev_wake;
 	atomic_t alloc_size;
 	atomic_t pending_pkts;
@@ -281,7 +330,6 @@ struct mhi_controller {
 	/* worker for different state transitions */
 	struct work_struct st_worker;
 	struct work_struct fw_worker;
-	struct work_struct syserr_worker;
 	struct work_struct low_priority_worker;
 	wait_queue_head_t state_event;
 
@@ -325,6 +373,7 @@ struct mhi_controller {
 	enum MHI_DEBUG_LEVEL log_lvl;
 
 	/* controller specific data */
+	bool power_down;
 	void *priv_data;
 	void *log_buf;
 	struct dentry *dentry;
@@ -688,6 +737,12 @@ int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);
 int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);
 
 /**
+ * mhi_dump_sfr - Print SFR string from RDDM table.
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_dump_sfr(struct mhi_controller *mhi_cntrl);
+
+/**
  * mhi_get_remote_time_sync - Get external soc time relative to local soc time
  * using MMIO method.
  * @mhi_dev: Device associated with the channels
@@ -786,7 +841,12 @@ void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl);
 
 #else
 
-#define MHI_VERB(fmt, ...)
+#define MHI_VERB(fmt, ...) do { \
+		if (mhi_cntrl->log_buf && \
+		    (mhi_cntrl->log_lvl <= MHI_MSG_LVL_VERBOSE)) \
+			ipc_log_string(mhi_cntrl->log_buf, "[D][%s] " fmt, \
+				       __func__, ##__VA_ARGS__); \
+} while (0)
 
 #endif
 
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index a28262b..3387294 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -692,6 +692,15 @@ void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq);
 
 void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq);
 
+/*
+ * May be called from host driver's system/runtime suspend/resume callbacks,
+ * to know if SDIO IRQs has been claimed.
+ */
+static inline bool sdio_irq_claimed(struct mmc_host *host)
+{
+	return host->sdio_irqs > 0;
+}
+
 static inline void mmc_signal_sdio_irq(struct mmc_host *host)
 {
 	host->ops->enable_sdio_irq(host, 0);
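
A hedged sketch of the intended call site: a host driver's system-suspend
path checking whether a client still relies on SDIO IRQs. The function name
is hypothetical.

	static int my_host_suspend(struct device *dev)
	{
		struct mmc_host *host = dev_get_drvdata(dev);

		if (sdio_irq_claimed(host))
			return -EBUSY;	/* keep the card powered for the IRQ user */

		return 0;		/* proceed with normal power-down */
	}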
diff --git a/include/linux/msm_adreno_devfreq.h b/include/linux/msm_adreno_devfreq.h
index 2b6ea00..9091e52 100644
--- a/include/linux/msm_adreno_devfreq.h
+++ b/include/linux/msm_adreno_devfreq.h
@@ -56,6 +56,7 @@ struct devfreq_msm_adreno_tz_data {
 		s32 *p_down;
 		unsigned int *index;
 		uint64_t *ib;
+		bool floating;
 	} bus;
 	unsigned int device_id;
 	bool is_64;
diff --git a/include/linux/msm_drm_notify.h b/include/linux/msm_drm_notify.h
deleted file mode 100644
index 67bb67b..0000000
--- a/include/linux/msm_drm_notify.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
- */
-#ifndef _MSM_DRM_NOTIFY_H_
-#define _MSM_DRM_NOTIFY_H_
-
-#include <linux/notifier.h>
-
-/* A hardware display blank change occurred */
-#define MSM_DRM_EVENT_BLANK			0x01
-/* A hardware display blank early change occurred */
-#define MSM_DRM_EARLY_EVENT_BLANK		0x02
-
-enum {
-	/* panel: power on */
-	MSM_DRM_BLANK_UNBLANK,
-	/* panel: power off */
-	MSM_DRM_BLANK_POWERDOWN,
-};
-
-enum msm_drm_display_id {
-	/* primary display */
-	MSM_DRM_PRIMARY_DISPLAY,
-	/* external display */
-	MSM_DRM_EXTERNAL_DISPLAY,
-	MSM_DRM_DISPLAY_MAX
-};
-
-struct msm_drm_notifier {
-	enum msm_drm_display_id id;
-	void *data;
-};
-
-#ifdef CONFIG_DRM_MSM
-int msm_drm_register_client(struct notifier_block *nb);
-int msm_drm_unregister_client(struct notifier_block *nb);
-#else
-static inline int msm_drm_register_client(struct notifier_block *nb)
-{
-	return 0;
-}
-
-static inline int msm_drm_unregister_client(struct notifier_block *nb)
-{
-	return 0;
-}
-#endif
-#endif
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index e27572d..ad69430 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -164,6 +164,16 @@ nfs_list_add_request(struct nfs_page *req, struct list_head *head)
 	list_add_tail(&req->wb_list, head);
 }
 
+/**
+ * nfs_list_move_request - Move a request to a new list
+ * @req: request
+ * @head: head of list into which to insert the request.
+ */
+static inline void
+nfs_list_move_request(struct nfs_page *req, struct list_head *head)
+{
+	list_move_tail(&req->wb_list, head);
+}
 
 /**
  * nfs_list_remove_request - Remove a request from its wb_list
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index bd1c889..cab24a1 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1539,7 +1539,7 @@ struct nfs_commit_data {
 };
 
 struct nfs_pgio_completion_ops {
-	void	(*error_cleanup)(struct list_head *head);
+	void	(*error_cleanup)(struct list_head *head, int);
 	void	(*init_hdr)(struct nfs_pgio_header *hdr);
 	void	(*completion)(struct nfs_pgio_header *hdr);
 	void	(*reschedule_io)(struct nfs_pgio_header *hdr);
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 1c49829..92ea284c 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -38,6 +38,12 @@ struct oom_control {
 	 */
 	const int order;
 
+	/*
+	 * Only kill positive adj tasks. Used to behave more like Android's
+	 * lowmemorykiller.
+	 */
+	const bool only_positive_adj;
+
 	/* Used by oom implementation, do not set */
 	unsigned long totalpages;
 	struct task_struct *chosen;
@@ -99,7 +105,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm);
 
 extern unsigned long oom_badness(struct task_struct *p,
 		struct mem_cgroup *memcg, const nodemask_t *nodemask,
-		unsigned long totalpages);
+		unsigned long totalpages, bool only_positive_adj);
 
 extern bool out_of_memory(struct oom_control *oc);
 
@@ -117,14 +123,18 @@ extern void dump_tasks(struct mem_cgroup *memcg,
 		       const nodemask_t *nodemask);
 
 #ifdef CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER
-extern bool should_ulmk_retry(void);
+extern bool should_ulmk_retry(gfp_t gfp);
 extern void ulmk_update_last_kill(void);
+extern void ulmk_watchdog_fn(struct timer_list *t);
+extern void ulmk_watchdog_pet(struct timer_list *t);
 #else
-static inline bool should_ulmk_retry(void)
+static inline bool should_ulmk_retry(gfp_t gfp)
 {
 	return false;
 }
 static inline void ulmk_update_last_kill(void) {}
+static inline void ulmk_watchdog_fn(struct timer_list *t) {}
+static inline void ulmk_watchdog_pet(struct timer_list *t) {}
 #endif
 
 /* sysctls */
@@ -135,4 +145,6 @@ extern int sysctl_reap_mem_on_sigkill;
 
 /* calls for LMK reaper */
 extern void add_to_oom_reaper(struct task_struct *p);
+extern void check_panic_on_foreground_kill(struct task_struct *p);
+#define ULMK_MAGIC "lmkd"
 #endif /* _INCLUDE_LINUX_OOM_H */
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index f84f167..e01df427 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -19,6 +19,7 @@ struct page_ext_operations {
 enum page_ext_flags {
 	PAGE_EXT_DEBUG_GUARD,
 	PAGE_EXT_OWNER,
+	PAGE_EXT_PG_FREE,
 #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
 	PAGE_EXT_YOUNG,
 	PAGE_EXT_IDLE,
diff --git a/include/linux/pmic-voter.h b/include/linux/pmic-voter.h
index ea39053..a247274 100644
--- a/include/linux/pmic-voter.h
+++ b/include/linux/pmic-voter.h
@@ -21,6 +21,8 @@ enum votable_type {
 bool is_client_vote_enabled(struct votable *votable, const char *client_str);
 bool is_client_vote_enabled_locked(struct votable *votable,
 							const char *client_str);
+bool is_override_vote_enabled(struct votable *votable);
+bool is_override_vote_enabled_locked(struct votable *votable);
 int get_client_vote(struct votable *votable, const char *client_str);
 int get_client_vote_locked(struct votable *votable, const char *client_str);
 int get_effective_result(struct votable *votable);
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index d75eb6d..8188789 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -342,6 +342,8 @@ enum power_supply_property {
 	POWER_SUPPLY_PROP_COMP_CLAMP_LEVEL,
 	POWER_SUPPLY_PROP_ADAPTER_CC_MODE,
 	POWER_SUPPLY_PROP_SKIN_HEALTH,
+	POWER_SUPPLY_PROP_AICL_DONE,
+	POWER_SUPPLY_PROP_VOLTAGE_STEP,
 	/* Charge pump properties */
 	POWER_SUPPLY_PROP_CP_STATUS1,
 	POWER_SUPPLY_PROP_CP_STATUS2,
@@ -389,7 +391,6 @@ enum power_supply_type {
 	POWER_SUPPLY_TYPE_BMS,			/* Battery Monitor System */
 	POWER_SUPPLY_TYPE_PARALLEL,		/* Parallel Path */
 	POWER_SUPPLY_TYPE_MAIN,			/* Main Path */
-	POWER_SUPPLY_TYPE_WIPOWER,		/* Wipower */
 	POWER_SUPPLY_TYPE_UFP,			/* Type-C UFP */
 	POWER_SUPPLY_TYPE_DFP,			/* Type-C DFP */
 	POWER_SUPPLY_TYPE_CHARGE_PUMP,		/* Charge Pump */
diff --git a/include/linux/psi.h b/include/linux/psi.h
index 8096c4e..cb7cde3 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -24,6 +24,7 @@ void psi_memstall_leave(unsigned long *flags);
 int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
 
 void psi_emergency_trigger(void);
+bool psi_is_trigger_active(void);
 
 #ifdef CONFIG_CGROUPS
 int psi_cgroup_alloc(struct cgroup *cgrp);
@@ -46,6 +47,10 @@ static inline void psi_memstall_enter(unsigned long *flags) {}
 static inline void psi_memstall_leave(unsigned long *flags) {}
 
 static inline void psi_emergency_trigger(void){}
+static inline bool psi_is_trigger_active(void)
+{
+	return false;
+}
 
 #ifdef CONFIG_CGROUPS
 static inline int psi_cgroup_alloc(struct cgroup *cgrp)
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index 07aaf9b..538ec2b 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -6,6 +6,7 @@
 #include <linux/types.h>
 #include <linux/kref.h>
 #include <linux/wait.h>
+#include <linux/timer.h>
 
 #ifdef CONFIG_PSI
 
@@ -123,6 +124,10 @@ struct psi_trigger {
 
 	/* Refcounting to prevent premature destruction */
 	struct kref refcount;
+
+	/* Task that created the trigger */
+	char comm[TASK_COMM_LEN];
+	struct timer_list wdog_timer;
 };
 
 struct psi_group {
diff --git a/include/linux/qpnp/qpnp-revid.h b/include/linux/qpnp/qpnp-revid.h
index 255984e..4d809ef 100644
--- a/include/linux/qpnp/qpnp-revid.h
+++ b/include/linux/qpnp/qpnp-revid.h
@@ -184,9 +184,15 @@
 
 #define PM7250B_SUBTYPE		0x2E
 
+/* PM6125 SUBTYPE */
+#define PM6125_SUBTYPE		0x2D
+
 /* PMI632 */
 #define PMI632_SUBTYPE	0x25
 
+/* PM8008 SUBTYPE */
+#define PM8008_SUBTYPE	0x2C
+
 /* PMI8998 REV_ID */
 #define PMI8998_V1P0_REV1	0x00
 #define PMI8998_V1P0_REV2	0x00
@@ -279,6 +285,12 @@
 #define PM7250B_V1P0_REV3	0x00
 #define PM7250B_V1P0_REV4	0x01
 
+/* PM6125_REV_ID */
+#define PM6125_V1P0_REV1	0x00
+#define PM6125_V1P0_REV2	0x00
+#define PM6125_V1P0_REV3	0x00
+#define PM6125_V1P0_REV4	0x01
+
 /* PMI8998 FAB_ID */
 #define PMI8998_FAB_ID_SMIC	0x11
 #define PMI8998_FAB_ID_GF	0x30
@@ -297,6 +309,9 @@
 /* PMI8937 */
 #define PMI8937_SUBTYPE		0x37
 
+/* SMB1390 */
+#define SMB1390_SUBTYPE		0x23
+
 /* SMB1381 */
 #define SMB1381_SUBTYPE		0x17
 
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index dc905a4..185d948 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -22,7 +22,7 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
 /* i_mutex must being held */
 static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
 {
-	return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) ||
+	return (ia->ia_valid & ATTR_SIZE) ||
 		(ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) ||
 		(ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid));
 }
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 1f42332..63a0fbb 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -1176,6 +1176,7 @@ struct regmap_irq_chip {
 	unsigned int wake_base;
 	unsigned int type_base;
 	unsigned int irq_reg_stride;
+	unsigned int clear_ack;
 	bool mask_writeonly:1;
 	bool init_ack_masked:1;
 	bool mask_invert:1;
diff --git a/include/linux/regulator/rpm-smd-regulator.h b/include/linux/regulator/rpm-smd-regulator.h
new file mode 100644
index 0000000..901f084
--- /dev/null
+++ b/include/linux/regulator/rpm-smd-regulator.h
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2012-2013, 2015, 2017, 2019, The Linux Foundation. All rights reserved. */
+
+#ifndef _LINUX_REGULATOR_RPM_SMD_H
+#define _LINUX_REGULATOR_RPM_SMD_H
+
+#include <linux/device.h>
+
+struct rpm_regulator;
+
+/**
+ * enum rpm_regulator_voltage_corner - possible voltage corner values
+ *
+ * These should be used in regulator_set_voltage() and
+ * rpm_regulator_set_voltage() calls for corner type regulators as if they had
+ * units of uV.
+ *
+ * Note: the meaning of corner values is set by the RPM.  It is possible that
+ * future platforms will utilize different corner values.  The values specified
+ * in this enum correspond to MSM8974 for PMIC PM8841 SMPS 2 (VDD_Dig).
+ */
+enum rpm_regulator_voltage_corner {
+	RPM_REGULATOR_CORNER_NONE = 1,
+	RPM_REGULATOR_CORNER_RETENTION,
+	RPM_REGULATOR_CORNER_SVS_KRAIT,
+	RPM_REGULATOR_CORNER_SVS_SOC,
+	RPM_REGULATOR_CORNER_NORMAL,
+	RPM_REGULATOR_CORNER_TURBO,
+	RPM_REGULATOR_CORNER_SUPER_TURBO,
+};
+
+/**
+ * enum rpm_regulator_voltage_level - possible voltage level values
+ *
+ * These should be used in regulator_set_voltage() and
+ * rpm_regulator_set_voltage() calls for level type regulators as if they had
+ * units of uV.
+ *
+ * Note: the meaning of level values is set by the RPM.
+ */
+enum rpm_regulator_voltage_level {
+	RPM_REGULATOR_LEVEL_NONE		= 0,
+	RPM_REGULATOR_LEVEL_RETENTION		= 16,
+	RPM_REGULATOR_LEVEL_RETENTION_PLUS	= 32,
+	RPM_REGULATOR_LEVEL_MIN_SVS		= 48,
+	RPM_REGULATOR_LEVEL_LOW_SVS		= 64,
+	RPM_REGULATOR_LEVEL_SVS			= 128,
+	RPM_REGULATOR_LEVEL_SVS_PLUS		= 192,
+	RPM_REGULATOR_LEVEL_NOM			= 256,
+	RPM_REGULATOR_LEVEL_NOM_PLUS		= 320,
+	RPM_REGULATOR_LEVEL_TURBO		= 384,
+	RPM_REGULATOR_LEVEL_TURBO_NO_CPR	= 416,
+	RPM_REGULATOR_LEVEL_BINNING		= 512,
+	RPM_REGULATOR_LEVEL_MAX			= 65535,
+};
+
+/**
+ * enum rpm_regulator_mode - control mode for LDO or SMPS type regulators
+ * %RPM_REGULATOR_MODE_AUTO:	For SMPS type regulators, use SMPS auto mode so
+ *				that the hardware can automatically switch
+ *				between PFM and PWM modes based on realtime
+ *				load.
+ *				LDO type regulators do not support this mode.
+ * %RPM_REGULATOR_MODE_IPEAK:	For SMPS type regulators, use aggregated
+ *				software current requests to determine
+ *				usage of PFM or PWM mode.
+ *				For LDO type regulators, use aggregated
+ *				software current requests to determine
+ *				usage of LPM or HPM mode.
+ * %RPM_REGULATOR_MODE_HPM:	For SMPS type regulators, force the
+ *				usage of PWM mode.
+ *				For LDO type regulators, force the
+ *				usage of HPM mode.
+ *
+ * These values should be used in calls to rpm_regulator_set_mode().
+ */
+enum rpm_regulator_mode {
+	RPM_REGULATOR_MODE_AUTO,
+	RPM_REGULATOR_MODE_IPEAK,
+	RPM_REGULATOR_MODE_HPM,
+};
+
+#ifdef CONFIG_REGULATOR_RPM_SMD
+
+struct rpm_regulator *rpm_regulator_get(struct device *dev, const char *supply);
+
+void rpm_regulator_put(struct rpm_regulator *regulator);
+
+int rpm_regulator_enable(struct rpm_regulator *regulator);
+
+int rpm_regulator_disable(struct rpm_regulator *regulator);
+
+int rpm_regulator_set_voltage(struct rpm_regulator *regulator, int min_uV,
+			      int max_uV);
+
+int rpm_regulator_set_mode(struct rpm_regulator *regulator,
+				enum rpm_regulator_mode mode);
+
+int __init rpm_smd_regulator_driver_init(void);
+
+#else
+
+static inline struct rpm_regulator *rpm_regulator_get(struct device *dev,
+					const char *supply) { return NULL; }
+
+static inline void rpm_regulator_put(struct rpm_regulator *regulator) { }
+
+static inline int rpm_regulator_enable(struct rpm_regulator *regulator)
+			{ return 0; }
+
+static inline int rpm_regulator_disable(struct rpm_regulator *regulator)
+			{ return 0; }
+
+static inline int rpm_regulator_set_voltage(struct rpm_regulator *regulator,
+					int min_uV, int max_uV) { return 0; }
+
+static inline int rpm_regulator_set_mode(struct rpm_regulator *regulator,
+				enum rpm_regulator_mode mode) { return 0; }
+
+static inline int __init rpm_smd_regulator_driver_init(void) { return 0; }
+
+#endif /* CONFIG_REGULATOR_RPM_SMD */
+
+#ifdef CONFIG_DEBUG_FS
+
+static void rpm_vreg_create_debugfs(struct rpm_regulator *reg);
+
+#else
+
+static inline void rpm_vreg_create_debugfs(struct rpm_regulator *reg)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+#endif /* _LINUX_REGULATOR_RPM_SMD_H */
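A short consumer sketch for the rpm_regulator API declared above. The
supply name "vdd_dig" and the error handling are illustrative
assumptions; level values are passed as if they were microvolts, as the
kernel-doc above notes:

	#include <linux/err.h>
	#include <linux/regulator/rpm-smd-regulator.h>

	static int example_vote_nominal(struct device *dev)
	{
		struct rpm_regulator *reg;
		int rc;

		reg = rpm_regulator_get(dev, "vdd_dig"); /* assumed supply name */
		if (IS_ERR_OR_NULL(reg))
			return reg ? PTR_ERR(reg) : -ENODEV;

		/* corner/level enum values stand in for microvolts */
		rc = rpm_regulator_set_voltage(reg, RPM_REGULATOR_LEVEL_NOM,
					       RPM_REGULATOR_LEVEL_MAX);
		if (!rc)
			rc = rpm_regulator_enable(reg);
		if (rc)
			rpm_regulator_put(reg);
		return rc;
	}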
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6b99745..7d4fea4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -112,6 +112,14 @@ struct task_group;
 					 (task->flags & PF_FROZEN) == 0 && \
 					 (task->state & TASK_NOLOAD) == 0)
 
+enum task_boost_type {
+	TASK_BOOST_NONE = 0,
+	TASK_BOOST_ON_MID,
+	TASK_BOOST_ON_MAX,
+	TASK_BOOST_STRICT_MAX,
+	TASK_BOOST_END,
+};
+
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 
 /*
@@ -1963,7 +1971,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 # define vcpu_is_preempted(cpu)	false
 #endif
 
-extern long msm_sched_setaffinity(pid_t pid, struct cpumask *new_mask);
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 0d10b7c..e9d4e38 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -330,6 +330,8 @@ enum {
 
 static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
 {
+	if (current->mm != mm)
+		return;
 	if (likely(!(atomic_read(&mm->membarrier_state) &
 		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
 		return;
diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h
index 05370ef..8046ddd 100644
--- a/include/linux/sched/stat.h
+++ b/include/linux/sched/stat.h
@@ -22,10 +22,11 @@ extern unsigned long nr_iowait(void);
 extern unsigned long nr_iowait_cpu(int cpu);
 extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_SCHED_WALT
 extern void sched_update_nr_prod(int cpu, long delta, bool inc);
 extern unsigned int sched_get_cpu_util(int cpu);
-extern u64 sched_get_cpu_last_busy_time(int cpu);
+extern void sched_update_hyst_times(void);
+extern u64 sched_lpm_disallowed_time(int cpu);
 #else
 static inline void sched_update_nr_prod(int cpu, long delta, bool inc)
 {
@@ -34,7 +35,10 @@ static inline unsigned int sched_get_cpu_util(int cpu)
 {
 	return 0;
 }
-static inline u64 sched_get_cpu_last_busy_time(int cpu)
+static inline void sched_update_hyst_times(void)
+{
+}
+static inline u64 sched_lpm_disallowed_time(int cpu)
 {
 	return 0;
 }
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 3ab6949..7f7297e 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -34,21 +34,35 @@ extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS];
 extern unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS];
 #ifdef CONFIG_SCHED_WALT
+extern unsigned int sysctl_sched_user_hint;
+extern const int sched_user_hint_max;
 extern unsigned int sysctl_sched_cpu_high_irqload;
 extern unsigned int sysctl_sched_boost;
 extern unsigned int sysctl_sched_group_upmigrate_pct;
 extern unsigned int sysctl_sched_group_downmigrate_pct;
+extern unsigned int sysctl_sched_conservative_pl;
+extern unsigned int sysctl_sched_many_wakeup_threshold;
 extern unsigned int sysctl_sched_walt_rotate_big_tasks;
 extern unsigned int sysctl_sched_min_task_util_for_boost;
 extern unsigned int sysctl_sched_min_task_util_for_colocation;
 extern unsigned int sysctl_sched_asym_cap_sibling_freq_match_pct;
 extern unsigned int sysctl_sched_coloc_downmigrate_ns;
 extern unsigned int sysctl_sched_task_unfilter_nr_windows;
+extern unsigned int sysctl_sched_busy_hyst_enable_cpus;
+extern unsigned int sysctl_sched_busy_hyst;
+extern unsigned int sysctl_sched_coloc_busy_hyst_enable_cpus;
+extern unsigned int sysctl_sched_coloc_busy_hyst;
+extern unsigned int sysctl_sched_coloc_busy_hyst_max_ms;
+extern unsigned int sysctl_sched_window_stats_policy;
 
 extern int
 walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
 			 void __user *buffer, size_t *lenp,
 			 loff_t *ppos);
+extern int
+walt_proc_user_hint_handler(struct ctl_table *table, int write,
+			 void __user *buffer, size_t *lenp,
+			 loff_t *ppos);
 
 #endif
 
@@ -126,6 +140,5 @@ extern int sysctl_schedstats(struct ctl_table *table, int write,
 extern char sched_lib_name[LIB_PATH_LENGTH];
 extern unsigned int sched_lib_mask_force;
 extern bool is_sched_lib_based_app(pid_t pid);
-extern unsigned int sysctl_sched_busy_hysteresis_enable_cpus;
 
 #endif /* _LINUX_SCHED_SYSCTL_H */
diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h
index 9e4fdd8..fb24896 100644
--- a/include/linux/soc/qcom/smd-rpm.h
+++ b/include/linux/soc/qcom/smd-rpm.h
@@ -32,6 +32,8 @@ struct qcom_smd_rpm;
 #define QCOM_SMD_RPM_IPA_CLK	0x617069
 #define QCOM_SMD_RPM_CE_CLK	0x6563
 #define QCOM_SMD_RPM_AGGR_CLK	0x72676761
+#define QCOM_SMD_RPM_QUP_CLK	0x00707571
+#define QCOM_SMD_RPM_MMXI_CLK	0x69786D6D
 
 int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
 		       int state,
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 4374196..9af7497 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -232,7 +232,8 @@ struct tcp_sock {
 		fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
 		fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
 		is_sack_reneg:1,    /* in recovery from loss with SACK reneg? */
-		unused:2;
+		unused:1,
+		wqp_called:1;
 	u8	nonagle     : 4,/* Disable Nagle algorithm?             */
 		thin_lto    : 1,/* Use linear timeouts for thin streams */
 		recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */
diff --git a/include/linux/usb.h b/include/linux/usb.h
index eb51a27..20543db 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1832,6 +1832,8 @@ extern int usb_string(struct usb_device *dev, int index,
 extern int usb_clear_halt(struct usb_device *dev, int pipe);
 extern int usb_reset_configuration(struct usb_device *dev);
 extern int usb_set_interface(struct usb_device *dev, int ifnum, int alternate);
+extern int usb_set_interface_timeout(struct usb_device *dev, int ifnum,
+		int alternate, unsigned long timeout);
 extern void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr);
 
 /* this request isn't really synchronous, but it belongs with the others */
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index 3e697fe..2a44134 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -164,7 +164,6 @@ struct usb_phy {
 			enum usb_device_speed speed);
 	int	(*notify_disconnect)(struct usb_phy *x,
 			enum usb_device_speed speed);
-	int	(*link_training)(struct usb_phy *x, bool start);
 
 	/*
 	 * Charger detection method can be implemented if you need to
@@ -174,6 +173,7 @@ struct usb_phy {
 
 	/* reset the PHY clocks */
 	int     (*reset)(struct usb_phy *x);
+	int	(*drive_dp_pulse)(struct usb_phy *x, unsigned int pulse_width);
 };
 
 /* for board-specific init logic */
@@ -241,6 +241,15 @@ usb_phy_reset(struct usb_phy *x)
 	return 0;
 }
 
+static inline int
+usb_phy_drive_dp_pulse(struct usb_phy *x, unsigned int pulse_width)
+{
+	if (x && x->drive_dp_pulse)
+		return x->drive_dp_pulse(x, pulse_width);
+
+	return 0;
+}
+
 /* for usb host and peripheral controller drivers */
 #if IS_ENABLED(CONFIG_USB_PHY)
 extern struct usb_phy *usb_get_phy(enum usb_phy_type type);
@@ -354,24 +363,6 @@ usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed)
 }
 
 static inline int
-usb_phy_start_link_training(struct usb_phy *x)
-{
-	if (x && x->link_training)
-		return x->link_training(x, true);
-	else
-		return 0;
-}
-
-static inline int
-usb_phy_stop_link_training(struct usb_phy *x)
-{
-	if (x && x->link_training)
-		return x->link_training(x, false);
-	else
-		return 0;
-}
-
-static inline int
 usb_phy_notify_disconnect(struct usb_phy *x, enum usb_device_speed speed)
 {
 	if (x && x->notify_disconnect)
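The new drive_dp_pulse op follows the same NULL-tolerant wrapper pattern
as usb_phy_reset above. A minimal caller sketch (the pulse width value
is a placeholder; the header does not define its unit):

	static int example_remote_wakeup(struct usb_phy *phy)
	{
		/* returns 0 when the PHY does not implement the hook */
		return usb_phy_drive_dp_pulse(phy, 5000);
	}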
diff --git a/include/linux/usb/usbpd.h b/include/linux/usb/usbpd.h
index 286c566..87dff93f 100644
--- a/include/linux/usb/usbpd.h
+++ b/include/linux/usb/usbpd.h
@@ -37,7 +37,8 @@ struct usbpd_svid_handler {
 	u16 svid;
 
 	/* Notified when VDM session established/reset; must be implemented */
-	void (*connect)(struct usbpd_svid_handler *hdlr);
+	void (*connect)(struct usbpd_svid_handler *hdlr,
+			bool supports_usb_comm);
 	void (*disconnect)(struct usbpd_svid_handler *hdlr);
 
 	/* DP driver -> PE driver for requesting USB SS lanes */
diff --git a/include/media/cec.h b/include/media/cec.h
index dc4b412..59bf280 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -333,67 +333,6 @@ void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts);
 u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
 			   unsigned int *offset);
 
-/**
- * cec_set_edid_phys_addr() - find and set the physical address
- *
- * @edid:	pointer to the EDID data
- * @size:	size in bytes of the EDID data
- * @phys_addr:	the new physical address
- *
- * This function finds the location of the physical address in the EDID
- * and fills in the given physical address and updates the checksum
- * at the end of the EDID block. It does nothing if the EDID doesn't
- * contain a physical address.
- */
-void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr);
-
-/**
- * cec_phys_addr_for_input() - calculate the PA for an input
- *
- * @phys_addr:	the physical address of the parent
- * @input:	the number of the input port, must be between 1 and 15
- *
- * This function calculates a new physical address based on the input
- * port number. For example:
- *
- * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0
- *
- * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0
- *
- * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5
- *
- * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth.
- *
- * Return: the new physical address or CEC_PHYS_ADDR_INVALID.
- */
-u16 cec_phys_addr_for_input(u16 phys_addr, u8 input);
-
-/**
- * cec_phys_addr_validate() - validate a physical address from an EDID
- *
- * @phys_addr:	the physical address to validate
- * @parent:	if not %NULL, then this is filled with the parents PA.
- * @port:	if not %NULL, then this is filled with the input port.
- *
- * This validates a physical address as read from an EDID. If the
- * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end),
- * then it will return -EINVAL.
- *
- * The parent PA is passed into %parent and the input port is passed into
- * %port. For example:
- *
- * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0.
- *
- * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1.
- *
- * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2.
- *
- * PA = f.f.f.f: has parent f.f.f.f and input port 0.
- *
- * Return: 0 if the PA is valid, -EINVAL if not.
- */
-int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
-
 #else
 
 static inline int cec_register_adapter(struct cec_adapter *adap,
@@ -428,25 +367,6 @@ static inline u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
 	return CEC_PHYS_ADDR_INVALID;
 }
 
-static inline void cec_set_edid_phys_addr(u8 *edid, unsigned int size,
-					  u16 phys_addr)
-{
-}
-
-static inline u16 cec_phys_addr_for_input(u16 phys_addr, u8 input)
-{
-	return CEC_PHYS_ADDR_INVALID;
-}
-
-static inline int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
-{
-	if (parent)
-		*parent = phys_addr;
-	if (port)
-		*port = 0;
-	return 0;
-}
-
 #endif
 
 /**
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index 17cb27d..4e7732d 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -234,4 +234,10 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
 			 const struct hdmi_vendor_infoframe *hdmi,
 			 unsigned int height);
 
+u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size,
+			    unsigned int *offset);
+void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr);
+u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input);
+int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
+
 #endif
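The v4l2_* helpers added above take over the physical-address semantics
documented for the cec_* helpers removed from include/media/cec.h. A
worked sketch using the encoding from those docs (values illustrative):

	#include <linux/printk.h>
	#include <linux/types.h>
	#include <media/v4l2-dv-timings.h>

	static void example_phys_addr(void)
	{
		u16 parent, port;
		/* PA 3.2.1.0 (0x3210) with input 5 becomes 3.2.1.5 */
		u16 pa = v4l2_phys_addr_for_input(0x3210, 5);

		/* valid PA: returns 0, parent = 0x3210, port = 5 */
		if (v4l2_phys_addr_validate(pa, &parent, &port) == 0)
			pr_debug("pa=%04x parent=%04x port=%u\n",
				 pa, parent, port);
	}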
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 9703034..0c82d7e 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -15,6 +15,7 @@
 struct tcf_idrinfo {
 	spinlock_t	lock;
 	struct idr	action_idr;
+	struct net	*net;
 };
 
 struct tc_action_ops;
@@ -107,7 +108,7 @@ struct tc_action_net {
 };
 
 static inline
-int tc_action_net_init(struct tc_action_net *tn,
+int tc_action_net_init(struct net *net, struct tc_action_net *tn,
 		       const struct tc_action_ops *ops)
 {
 	int err = 0;
@@ -116,6 +117,7 @@ int tc_action_net_init(struct tc_action_net *tn,
 	if (!tn->idrinfo)
 		return -ENOMEM;
 	tn->ops = ops;
+	tn->idrinfo->net = net;
 	spin_lock_init(&tn->idrinfo->lock);
 	idr_init(&tn->idrinfo->action_idr);
 	return err;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 3424613..1495cd7 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -342,6 +342,60 @@ struct ieee80211_sband_iftype_data {
 };
 
 /**
+ * enum ieee80211_edmg_bw_config - allowed channel bandwidth configurations
+ *
+ * @IEEE80211_EDMG_BW_CONFIG_4: 2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_5: 2.16GHz and 4.32GHz
+ * @IEEE80211_EDMG_BW_CONFIG_6: 2.16GHz, 4.32GHz and 6.48GHz
+ * @IEEE80211_EDMG_BW_CONFIG_7: 2.16GHz, 4.32GHz, 6.48GHz and 8.64GHz
+ * @IEEE80211_EDMG_BW_CONFIG_8: 2.16GHz and 2.16GHz + 2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_9: 2.16GHz, 4.32GHz and 2.16GHz + 2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_10: 2.16GHz, 4.32GHz, 6.48GHz and 2.16GHz+2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_11: 2.16GHz, 4.32GHz, 6.48GHz, 8.64GHz and
+ *	2.16GHz+2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_12: 2.16GHz, 2.16GHz + 2.16GHz and
+ *	4.32GHz + 4.32GHz
+ * @IEEE80211_EDMG_BW_CONFIG_13: 2.16GHz, 4.32GHz, 2.16GHz + 2.16GHz and
+ *	4.32GHz + 4.32GHz
+ * @IEEE80211_EDMG_BW_CONFIG_14: 2.16GHz, 4.32GHz, 6.48GHz, 2.16GHz + 2.16GHz
+ *	and 4.32GHz + 4.32GHz
+ * @IEEE80211_EDMG_BW_CONFIG_15: 2.16GHz, 4.32GHz, 6.48GHz, 8.64GHz,
+ *	2.16GHz + 2.16GHz and 4.32GHz + 4.32GHz
+ */
+enum ieee80211_edmg_bw_config {
+	IEEE80211_EDMG_BW_CONFIG_4	= 4,
+	IEEE80211_EDMG_BW_CONFIG_5	= 5,
+	IEEE80211_EDMG_BW_CONFIG_6	= 6,
+	IEEE80211_EDMG_BW_CONFIG_7	= 7,
+	IEEE80211_EDMG_BW_CONFIG_8	= 8,
+	IEEE80211_EDMG_BW_CONFIG_9	= 9,
+	IEEE80211_EDMG_BW_CONFIG_10	= 10,
+	IEEE80211_EDMG_BW_CONFIG_11	= 11,
+	IEEE80211_EDMG_BW_CONFIG_12	= 12,
+	IEEE80211_EDMG_BW_CONFIG_13	= 13,
+	IEEE80211_EDMG_BW_CONFIG_14	= 14,
+	IEEE80211_EDMG_BW_CONFIG_15	= 15,
+};
+
+/**
+ * struct ieee80211_edmg - EDMG configuration
+ *
+ * This structure holds the essential parameters needed
+ * to describe an 802.11ay EDMG configuration.
+ *
+ * @channels: bitmap that indicates the 2.16 GHz channel(s)
+ *	that are allowed to be used for transmissions.
+ *	Bit 0 indicates channel 1, bit 1 indicates channel 2, etc.
+ *	Set to 0 to indicate that EDMG is not supported.
+ * @bw_config: Channel BW Configuration subfield encodes
+ *	the allowed channel bandwidth configurations
+ */
+struct ieee80211_edmg {
+	u8 channels;
+	enum ieee80211_edmg_bw_config bw_config;
+};
+
+/**
  * struct ieee80211_supported_band - frequency band definition
  *
  * This structure describes a frequency band a wiphy
@@ -357,6 +411,7 @@ struct ieee80211_sband_iftype_data {
  * @n_bitrates: Number of bitrates in @bitrates
  * @ht_cap: HT capabilities in this band
  * @vht_cap: VHT capabilities in this band
+ * @edmg_cap: EDMG capabilities in this band
  * @n_iftype_data: number of iftype data entries
  * @iftype_data: interface type data entries.  Note that the bits in
  *	@types_mask inside this structure cannot overlap (i.e. only
@@ -371,6 +426,7 @@ struct ieee80211_supported_band {
 	int n_bitrates;
 	struct ieee80211_sta_ht_cap ht_cap;
 	struct ieee80211_sta_vht_cap vht_cap;
+	struct ieee80211_edmg edmg_cap;
 	u16 n_iftype_data;
 	const struct ieee80211_sband_iftype_data *iftype_data;
 };
@@ -522,12 +578,17 @@ struct key_params {
  * @center_freq1: center frequency of first segment
  * @center_freq2: center frequency of second segment
  *	(only with 80+80 MHz)
+ * @edmg: define the EDMG channels configuration.
+ *	If edmg is requested (i.e. the .channels member is non-zero),
+ *	chan will define the primary channel and all other
+ *	parameters are ignored.
  */
 struct cfg80211_chan_def {
 	struct ieee80211_channel *chan;
 	enum nl80211_chan_width width;
 	u32 center_freq1;
 	u32 center_freq2;
+	struct ieee80211_edmg edmg;
 };
 
 /**
@@ -586,6 +647,19 @@ cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1,
 }
 
 /**
+ * cfg80211_chandef_is_edmg - check if chandef represents an EDMG channel
+ *
+ * @chandef: the channel definition
+ *
+ * Return: %true if EDMG defined, %false otherwise.
+ */
+static inline bool
+cfg80211_chandef_is_edmg(const struct cfg80211_chan_def *chandef)
+{
+	return chandef->edmg.channels || chandef->edmg.bw_config;
+}
+
+/**
  * cfg80211_chandef_compatible - check if two channel definitions are compatible
  * @chandef1: first channel definition
  * @chandef2: second channel definition
@@ -1124,15 +1198,17 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
  * @RATE_INFO_FLAGS_MCS: mcs field filled with HT MCS
  * @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS
  * @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval
- * @RATE_INFO_FLAGS_60G: 60GHz MCS
+ * @RATE_INFO_FLAGS_DMG: 60GHz MCS
  * @RATE_INFO_FLAGS_HE_MCS: HE MCS information
+ * @RATE_INFO_FLAGS_EDMG: 60GHz MCS in EDMG mode
  */
 enum rate_info_flags {
 	RATE_INFO_FLAGS_MCS			= BIT(0),
 	RATE_INFO_FLAGS_VHT_MCS			= BIT(1),
 	RATE_INFO_FLAGS_SHORT_GI		= BIT(2),
-	RATE_INFO_FLAGS_60G			= BIT(3),
+	RATE_INFO_FLAGS_DMG			= BIT(3),
 	RATE_INFO_FLAGS_HE_MCS			= BIT(4),
+	RATE_INFO_FLAGS_EDMG			= BIT(5),
 };
 
 /**
@@ -1172,6 +1248,7 @@ enum rate_info_bw {
  * @he_dcm: HE DCM value
  * @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc,
  *	only valid if bw is %RATE_INFO_BW_HE_RU)
+ * @n_bonded_ch: In case of EDMG the number of bonded channels (1-4)
  */
 struct rate_info {
 	u8 flags;
@@ -1182,6 +1259,7 @@ struct rate_info {
 	u8 he_gi;
 	u8 he_dcm;
 	u8 he_ru_alloc;
+	u8 n_bonded_ch;
 };
 
 /**
@@ -2347,6 +2425,9 @@ struct cfg80211_bss_selection {
  * @fils_erp_rrk_len: Length of @fils_erp_rrk in octets.
  * @want_1x: indicates user-space supports and wants to use 802.1X driver
  *	offload of 4-way handshake.
+ * @edmg: defines the EDMG channels.
+ *	This may specify multiple channels and bonding options for the driver
+ *	to choose from, based on BSS configuration.
  */
 struct cfg80211_connect_params {
 	struct ieee80211_channel *channel;
@@ -2380,6 +2461,7 @@ struct cfg80211_connect_params {
 	const u8 *fils_erp_rrk;
 	size_t fils_erp_rrk_len;
 	bool want_1x;
+	struct ieee80211_edmg edmg;
 };
 
 /**
@@ -5184,22 +5266,20 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
  * @bssid: transmitter BSSID
  * @max_bssid: max BSSID indicator, taken from Multiple BSSID element
  * @mbssid_index: BSSID index, taken from Multiple BSSID index element
- * @new_bssid_addr: address of the resulting BSSID
+ * @new_bssid: calculated nontransmitted BSSID
  */
 static inline void cfg80211_gen_new_bssid(const u8 *bssid, u8 max_bssid,
-					  u8 mbssid_index, u8 *new_bssid_addr)
+					  u8 mbssid_index, u8 *new_bssid)
 {
-	u64 bssid_tmp, new_bssid;
-	u64 lsb_n;
+	u64 bssid_u64 = ether_addr_to_u64(bssid);
+	u64 mask = GENMASK_ULL(max_bssid - 1, 0);
+	u64 new_bssid_u64;
 
-	bssid_tmp = ether_addr_to_u64(bssid);
+	new_bssid_u64 = bssid_u64 & ~mask;
 
-	lsb_n = bssid_tmp & ((1 << max_bssid) - 1);
-	new_bssid = bssid_tmp;
-	new_bssid &= ~((1 << max_bssid) - 1);
-	new_bssid |= (lsb_n + mbssid_index) % (1 << max_bssid);
+	new_bssid_u64 |= ((bssid_u64 & mask) + mbssid_index) & mask;
 
-	u64_to_ether_addr(new_bssid, new_bssid_addr);
+	u64_to_ether_addr(new_bssid_u64, new_bssid);
 }
 
 /**
@@ -6806,6 +6886,21 @@ int cfg80211_external_auth_request(struct net_device *netdev,
 				   struct cfg80211_external_auth_params *params,
 				   gfp_t gfp);
 
+/**
+ * cfg80211_iftype_allowed - check whether the interface type is allowed
+ * @wiphy: the wiphy
+ * @iftype: interface type
+ * @is_4addr: use_4addr flag, must be '0' when check_swif is '1'
+ * @check_swif: check iftype against software interfaces
+ *
+ * Check whether the interface is allowed to operate; additionally, this API
+ * can be used to check iftype against the software interfaces when
+ * check_swif is '1'.
+ */
+bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
+			     bool is_4addr, u8 check_swif);
+
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* wiphy_printk helpers, similar to dev_printk */
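A worked example of the rewritten cfg80211_gen_new_bssid() arithmetic,
with addresses chosen purely for illustration; the u64 mask keeps the
whole computation in 64 bits and makes the index wrap-around explicit:

	/*
	 * bssid = 00:11:22:33:44:1c, max_bssid = 4, mbssid_index = 5
	 *   mask            = GENMASK_ULL(3, 0)  = 0xf
	 *   bssid & ~mask   = 00:11:22:33:44:10
	 *   (0xc + 5) & 0xf = 0x1                (wraps within 4 bits)
	 *   new_bssid       = 00:11:22:33:44:11
	 */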
diff --git a/include/net/cnss2.h b/include/net/cnss2.h
index 892096b..ee6cbb7 100644
--- a/include/net/cnss2.h
+++ b/include/net/cnss2.h
@@ -166,6 +166,11 @@ extern int cnss_self_recovery(struct device *dev,
 			      enum cnss_recovery_reason reason);
 extern int cnss_force_fw_assert(struct device *dev);
 extern int cnss_force_collect_rddm(struct device *dev);
+extern int cnss_qmi_send_get(struct device *dev);
+extern int cnss_qmi_send_put(struct device *dev);
+extern int cnss_qmi_send(struct device *dev, int type, void *cmd,
+			 int cmd_len, void *cb_ctx,
+			 int (*cb)(void *ctx, void *event, int event_len));
 extern void *cnss_get_virt_ramdump_mem(struct device *dev, unsigned long *size);
 extern int cnss_get_fw_files_for_target(struct device *dev,
 					struct cnss_fw_files *pfw_files,
@@ -185,6 +190,9 @@ extern void cnss_request_pm_qos(struct device *dev, u32 qos_val);
 extern void cnss_remove_pm_qos(struct device *dev);
 extern void cnss_lock_pm_sem(struct device *dev);
 extern void cnss_release_pm_sem(struct device *dev);
+extern void cnss_pci_lock_reg_window(struct device *dev, unsigned long *flags);
+extern void cnss_pci_unlock_reg_window(struct device *dev,
+				       unsigned long *flags);
 extern int cnss_wlan_pm_control(struct device *dev, bool vote);
 extern int cnss_auto_suspend(struct device *dev);
 extern int cnss_auto_resume(struct device *dev);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index f2be5d0..7685cbd 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -418,8 +418,7 @@ struct nft_set {
 	unsigned char			*udata;
 	/* runtime data below here */
 	const struct nft_set_ops	*ops ____cacheline_aligned;
-	u16				flags:13,
-					bound:1,
+	u16				flags:14,
 					genmask:2;
 	u8				klen;
 	u8				dlen;
@@ -1337,12 +1336,15 @@ struct nft_trans_rule {
 struct nft_trans_set {
 	struct nft_set			*set;
 	u32				set_id;
+	bool				bound;
 };
 
 #define nft_trans_set(trans)	\
 	(((struct nft_trans_set *)trans->data)->set)
 #define nft_trans_set_id(trans)	\
 	(((struct nft_trans_set *)trans->data)->set_id)
+#define nft_trans_set_bound(trans)	\
+	(((struct nft_trans_set *)trans->data)->bound)
 
 struct nft_trans_chain {
 	bool				update;
@@ -1373,12 +1375,15 @@ struct nft_trans_table {
 struct nft_trans_elem {
 	struct nft_set			*set;
 	struct nft_set_elem		elem;
+	bool				bound;
 };
 
 #define nft_trans_elem_set(trans)	\
 	(((struct nft_trans_elem *)trans->data)->set)
 #define nft_trans_elem(trans)	\
 	(((struct nft_trans_elem *)trans->data)->elem)
+#define nft_trans_elem_set_bound(trans)	\
+	(((struct nft_trans_elem *)trans->data)->bound)
 
 struct nft_trans_obj {
 	struct nft_object		*obj;
diff --git a/include/net/psample.h b/include/net/psample.h
index 9b80f81..94cb37a 100644
--- a/include/net/psample.h
+++ b/include/net/psample.h
@@ -12,6 +12,7 @@ struct psample_group {
 	u32 group_num;
 	u32 refcount;
 	u32 seq;
+	struct rcu_head rcu;
 };
 
 struct psample_group *psample_group_get(struct net *net, u32 group_num);
diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
index 8a5f70c..5e69fba1 100644
--- a/include/net/sock_reuseport.h
+++ b/include/net/sock_reuseport.h
@@ -21,7 +21,8 @@ struct sock_reuseport {
 	unsigned int		synq_overflow_ts;
 	/* ID stays the same even after the size of socks[] grows. */
 	unsigned int		reuseport_id;
-	bool			bind_inany;
+	unsigned int		bind_inany:1;
+	unsigned int		has_conns:1;
 	struct bpf_prog __rcu	*prog;		/* optional BPF sock selector */
 	struct sock		*socks[0];	/* array of sock pointers */
 };
@@ -35,6 +36,24 @@ extern struct sock *reuseport_select_sock(struct sock *sk,
 					  struct sk_buff *skb,
 					  int hdr_len);
 extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
+
+static inline bool reuseport_has_conns(struct sock *sk, bool set)
+{
+	struct sock_reuseport *reuse;
+	bool ret = false;
+
+	rcu_read_lock();
+	reuse = rcu_dereference(sk->sk_reuseport_cb);
+	if (reuse) {
+		if (set)
+			reuse->has_conns = 1;
+		ret = reuse->has_conns;
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
 int reuseport_get_id(struct sock_reuseport *reuse);
 
 #endif  /* _SOCK_REUSEPORT_H */
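A sketch of the intended pattern for reuseport_has_conns(); the call
sites below are illustrative: a datagram connect path marks the group,
and a lookup path then skips reuseport load balancing in favor of a full
score comparison once any member is connected.

	/* on connect() of a reuseport datagram socket */
	static void example_mark_connected(struct sock *sk)
	{
		reuseport_has_conns(sk, true);	/* set and remember */
	}

	/* on lookup: only load-balance while no member is connected */
	static bool example_may_balance(struct sock *sk)
	{
		return !reuseport_has_conns(sk, false);
	}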
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 412c282..b7d63c3 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -71,6 +71,7 @@
 
 extern struct workqueue_struct *ib_wq;
 extern struct workqueue_struct *ib_comp_wq;
+extern struct workqueue_struct *ib_comp_unbound_wq;
 
 union ib_gid {
 	u8	raw[16];
@@ -1576,9 +1577,10 @@ struct ib_ah {
 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
 
 enum ib_poll_context {
-	IB_POLL_DIRECT,		/* caller context, no hw completions */
-	IB_POLL_SOFTIRQ,	/* poll from softirq context */
-	IB_POLL_WORKQUEUE,	/* poll from workqueue */
+	IB_POLL_DIRECT,		   /* caller context, no hw completions */
+	IB_POLL_SOFTIRQ,	   /* poll from softirq context */
+	IB_POLL_WORKQUEUE,	   /* poll from workqueue */
+	IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
 };
 
 struct ib_cq {
@@ -1595,6 +1597,7 @@ struct ib_cq {
 		struct irq_poll		iop;
 		struct work_struct	work;
 	};
+	struct workqueue_struct *comp_wq;
 	/*
 	 * Implementation details of the RDMA core, don't use in drivers:
 	 */
diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
index e03bd9d..7b196d2 100644
--- a/include/scsi/scsi_dbg.h
+++ b/include/scsi/scsi_dbg.h
@@ -6,8 +6,6 @@ struct scsi_cmnd;
 struct scsi_device;
 struct scsi_sense_hdr;
 
-#define SCSI_LOG_BUFSIZE 128
-
 extern void scsi_print_command(struct scsi_cmnd *);
 extern size_t __scsi_format_command(char *, size_t,
 				   const unsigned char *, size_t);
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 2b7e227..91f4033 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -32,6 +32,7 @@ extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
 struct scsi_eh_save {
 	/* saved state */
 	int result;
+	unsigned int resid_len;
 	int eh_eflags;
 	enum dma_data_direction data_direction;
 	unsigned underflow;
diff --git a/include/soc/qcom/minidump.h b/include/soc/qcom/minidump.h
index 94aaedf..e6bdf0e 100644
--- a/include/soc/qcom/minidump.h
+++ b/include/soc/qcom/minidump.h
@@ -30,6 +30,7 @@ struct md_region {
  */
 #ifdef CONFIG_QCOM_MINIDUMP
 extern int msm_minidump_add_region(const struct md_region *entry);
+extern int msm_minidump_remove_region(const struct md_region *entry);
 extern bool msm_minidump_enabled(void);
 extern void dump_stack_minidump(u64 sp);
 #else
@@ -38,6 +39,10 @@ static inline int msm_minidump_add_region(const struct md_region *entry)
 	/* Return quietly, if minidump is not supported */
 	return 0;
 }
+static inline int msm_minidump_remove_region(const struct md_region *entry)
+{
+	return 0;
+}
 static inline bool msm_minidump_enabled(void) { return false; }
 static inline void dump_stack_minidump(u64 sp) {}
 #endif
diff --git a/include/soc/qcom/mpm.h b/include/soc/qcom/mpm.h
new file mode 100644
index 0000000..835c94b
--- /dev/null
+++ b/include/soc/qcom/mpm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __QCOM_MPM_H__
+#define __QCOM_MPM_H__
+
+#include <linux/irq.h>
+#include <linux/device.h>
+
+struct mpm_pin {
+	int pin;
+	irq_hw_number_t hwirq;
+};
+
+extern const struct mpm_pin mpm_bengal_gic_chip_data[];
+#endif /* __QCOM_MPM_H__ */
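A hypothetical pin map in the style of the mpm_bengal_gic_chip_data
table declared above; the pin/hwirq pairs and the {-1, 0} terminator
convention are assumptions for illustration, not values from this patch:

	static const struct mpm_pin example_mpm_gic_chip_data[] = {
		{ 2, 216 },	/* MPM pin 2 wakes GIC hwirq 216 */
		{ 12, 45 },
		{ -1, 0 },	/* assumed end-of-table sentinel */
	};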
diff --git a/include/soc/qcom/ramdump.h b/include/soc/qcom/ramdump.h
index fe172db..11aa0d9 100644
--- a/include/soc/qcom/ramdump.h
+++ b/include/soc/qcom/ramdump.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2011-2014, 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2014, 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _RAMDUMP_HEADER
@@ -24,6 +24,8 @@ extern int do_elf_ramdump(void *handle, struct ramdump_segment *segments,
 		int nsegments);
 extern int do_minidump(void *handle, struct ramdump_segment *segments,
 		       int nsegments);
+extern int do_minidump_elf32(void *handle, struct ramdump_segment *segments,
+			     int nsegments);
 
 #else
 static inline void *create_ramdump_device(const char *dev_name,
diff --git a/include/soc/qcom/rmnet_ctl.h b/include/soc/qcom/rmnet_ctl.h
index 0080560..94177ec 100644
--- a/include/soc/qcom/rmnet_ctl.h
+++ b/include/soc/qcom/rmnet_ctl.h
@@ -10,6 +10,22 @@
 
 #include <linux/skbuff.h>
 
+enum rmnet_ctl_log_lvl {
+	RMNET_CTL_LOG_CRIT,
+	RMNET_CTL_LOG_ERR,
+	RMNET_CTL_LOG_INFO,
+	RMNET_CTL_LOG_DEBUG,
+};
+
+#define rmnet_ctl_log_err(msg, rc, data, len) \
+		rmnet_ctl_log(RMNET_CTL_LOG_ERR, msg, rc, data, len)
+
+#define rmnet_ctl_log_info(msg, data, len) \
+		rmnet_ctl_log(RMNET_CTL_LOG_INFO, msg, 0, data, len)
+
+#define rmnet_ctl_log_debug(msg, data, len) \
+		rmnet_ctl_log(RMNET_CTL_LOG_DEBUG, msg, 0, data, len)
+
 struct rmnet_ctl_client_hooks {
 	void (*ctl_dl_client_hook)(struct sk_buff *skb);
 };
@@ -19,6 +35,8 @@ struct rmnet_ctl_client_hooks {
 void *rmnet_ctl_register_client(struct rmnet_ctl_client_hooks *hook);
 int rmnet_ctl_unregister_client(void *handle);
 int rmnet_ctl_send_client(void *handle, struct sk_buff *skb);
+void rmnet_ctl_log(enum rmnet_ctl_log_lvl lvl, const char *msg,
+		   int rc, const void *data, unsigned int len);
 
 #else
 
@@ -38,6 +56,11 @@ static inline int rmnet_ctl_send_client(void *handle, struct sk_buff *skb)
 	return -EINVAL;
 }
 
+static inline void rmnet_ctl_log(enum rmnet_ctl_log_lvl lvl, const char *msg,
+				 int rc, const void *data, unsigned int len)
+{
+}
+
 #endif /* CONFIG_RMNET_CTL */
 
 #endif /* _RMNET_CTL_H_ */
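A short sketch of the logging wrappers above from a client driver's
point of view (the tags and buffer are illustrative); each macro expands
to rmnet_ctl_log() with the matching level, and the stub build compiles
the calls away to nothing:

	static void example_log_tx(const void *buf, unsigned int len, int rc)
	{
		rmnet_ctl_log_info("TX", buf, len);
		if (rc)
			rmnet_ctl_log_err("TXE", rc, buf, len);
	}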
diff --git a/include/soc/qcom/rpm-notifier.h b/include/soc/qcom/rpm-notifier.h
new file mode 100644
index 0000000..76af11e
--- /dev/null
+++ b/include/soc/qcom/rpm-notifier.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_RPM_NOTIF_H
+#define __ARCH_ARM_MACH_MSM_RPM_NOTIF_H
+
+struct msm_rpm_notifier_data {
+	uint32_t rsc_type;
+	uint32_t rsc_id;
+	uint32_t key;
+	uint32_t size;
+	uint8_t *value;
+};
+
+/**
+ * msm_rpm_register_notifier - Register for sleep set notifications
+ *
+ * @nb: notifier block to register
+ *
+ * return 0 on success, errno on failure.
+ */
+int msm_rpm_register_notifier(struct notifier_block *nb);
+
+/**
+ * msm_rpm_unregister_notifier - Unregister previously registered notifications
+ *
+ * @nb: notifier block to unregister
+ *
+ * return 0 on success, errno on failure.
+ */
+int msm_rpm_unregister_notifier(struct notifier_block *nb);
+
+/**
+ * msm_rpm_enter_sleep - Notify RPM driver to prepare for entering sleep
+ *
+ * @print: flag to enable printing the contents of the sleep buffer
+ * @cpumask: cpumask of the next wakeup cpu
+ *
+ * return 0 on success, errno on failure.
+ */
+int msm_rpm_enter_sleep(bool print, const struct cpumask *cpumask);
+
+/**
+ * msm_rpm_exit_sleep - Notify RPM driver about resuming from power collapse
+ */
+void msm_rpm_exit_sleep(void);
+
+/**
+ * msm_rpm_waiting_for_ack - Indicate if there is an RPM message
+ *				pending acknowledgment.
+ * returns true for pending messages and false otherwise
+ */
+bool msm_rpm_waiting_for_ack(void);
+
+#endif /*__ARCH_ARM_MACH_MSM_RPM_NOTIF_H */
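A hypothetical client of the notifier API above (the callback name and
the printed fields are illustrative; struct notifier_block comes from
<linux/notifier.h>, which this header relies on):

	#include <linux/notifier.h>

	static int example_rpm_sleep_cb(struct notifier_block *nb,
					unsigned long action, void *data)
	{
		struct msm_rpm_notifier_data *msg = data;

		pr_debug("rsc_type=%u rsc_id=%u key=%u size=%u\n",
			 msg->rsc_type, msg->rsc_id, msg->key, msg->size);
		return NOTIFY_OK;
	}

	static struct notifier_block example_rpm_nb = {
		.notifier_call = example_rpm_sleep_cb,
	};

	/* at probe/init time: msm_rpm_register_notifier(&example_rpm_nb); */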
diff --git a/include/soc/qcom/rpm-smd.h b/include/soc/qcom/rpm-smd.h
new file mode 100644
index 0000000..fc77bd4
--- /dev/null
+++ b/include/soc/qcom/rpm-smd.h
@@ -0,0 +1,303 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/platform_device.h>
+
+#ifndef __ARCH_ARM_MACH_MSM_RPM_SMD_H
+#define __ARCH_ARM_MACH_MSM_RPM_SMD_H
+
+/**
+ * enum msm_rpm_set - RPM enumerations for sleep/active set
+ * %MSM_RPM_CTX_ACTIVE_SET: Set resource parameters for active mode.
+ * %MSM_RPM_CTX_SLEEP_SET: Set resource parameters for sleep.
+ */
+enum msm_rpm_set {
+	MSM_RPM_CTX_ACTIVE_SET,
+	MSM_RPM_CTX_SLEEP_SET,
+};
+
+struct msm_rpm_request;
+
+struct msm_rpm_kvp {
+	uint32_t key;
+	uint32_t length;
+	uint8_t *data;
+};
+#ifdef CONFIG_MSM_RPM_SMD
+/**
+ * msm_rpm_create_request() - Creates a parent element to identify the
+ * resource on the RPM, that stores the KVPs for different fields modified
+ * for a hardware resource
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @num_elements: number of KVP pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_create_request_noirq() - Creates a parent element to identify the
+ * resource on the RPM, that stores the KVPs for different fields modified
+ * for a hardware resource. This function is similar to msm_rpm_create_request
+ * except that it has to be called with interrupts masked.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @num_elements: number of KVP pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request_noirq(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_add_kvp_data() - Adds a key-value pair to an existing RPM resource.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key:  unsigned integer identifying the parameter being modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size:   size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size);
+
+/**
+ * msm_rpm_add_kvp_data_noirq() - Adds a key-value pair to an existing RPM
+ * resource. This function is similar to msm_rpm_add_kvp_data except that it
+ * has to be called with interrupts masked.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key:  unsigned integer identifying the parameter being modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size:   size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size);
+
+/**
+ * msm_rpm_free_request() - clean up the RPM request handle created with
+ * msm_rpm_create_request
+ *
+ * @handle: RPM resource handle to be cleared.
+ */
+void msm_rpm_free_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request() - Send the RPM messages using SMD. The function
+ * assigns a message id before sending the data out to the RPM. RPM hardware
+ * uses the message id to acknowledge the messages.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
+ */
+int msm_rpm_send_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request_noack() - Send the RPM messages using SMD. The function
+ * assigns a message id before sending the data out to the RPM. RPM hardware
+ * uses the message id to acknowledge the messages, but this API does not wait
+ * on the ACK for this message id and it does not add the message id to the wait
+ * list.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns NULL on success and PTR_ERR on a failed transaction.
+ */
+void *msm_rpm_send_request_noack(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request_noirq() - Send the RPM messages using SMD. The
+ * function assigns a message id before sending the data out to the RPM.
+ * RPM hardware uses the message id to acknowledge the messages. This function
+ * is similar to msm_rpm_send_request except that it has to be called with
+ * interrupts masked.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
+ */
+int msm_rpm_send_request_noirq(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_wait_for_ack() - A blocking call that waits for acknowledgment of
+ * a message from RPM.
+ *
+ * @msg_id: the return from msm_rpm_send_request
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack(uint32_t msg_id);
+
+/**
+ * msm_rpm_wait_for_ack_noirq() - A blocking call that waits for acknowledgment
+ * of a message from RPM. This function is similar to msm_rpm_wait_for_ack
+ * except that it has to be called with interrupts masked.
+ *
+ * @msg_id: the return from msm_rpm_send_request
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack_noirq(uint32_t msg_id);
+
+/**
+ * msm_rpm_send_message() - Wrapper function for clients to send data given an
+ * array of key value pairs.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelems: number of KVP pairs associated with the message.
+ *
+ * returns  0 on success and errno on failure.
+ */
+int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_send_message_noack() - Wrapper function for clients to send data
+ * given an array of key value pairs without waiting for ack.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelems: number of KVP pairs associated with the message.
+ *
+ * returns  NULL on success and PTR_ERR(errno) on failure.
+ */
+void *msm_rpm_send_message_noack(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_send_message_noirq() - Wrapper function for clients to send data
+ * given an array of key value pairs. This function is similar to the
+ * msm_rpm_send_message() except that it has to be called with interrupts
+ * disabled. Clients should choose the irq version when possible for system
+ * performance.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelems: number of KVP pairs associated with the message.
+ *
+ * returns  0 on success and errno on failure.
+ */
+int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_driver_init() - Initialization function that registers for a
+ * rpm platform driver.
+ *
+ * returns 0 on success.
+ */
+int __init msm_rpm_driver_init(void);
+
+#else
+
+static inline struct msm_rpm_request *msm_rpm_create_request(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements)
+{
+	return NULL;
+}
+
+static inline struct msm_rpm_request *msm_rpm_create_request_noirq(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements)
+{
+	return NULL;
+}
+static inline int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size)
+{
+	return 0;
+}
+static inline int msm_rpm_add_kvp_data_noirq(
+		struct msm_rpm_request *handle, uint32_t key,
+		const uint8_t *data, int size)
+{
+	return 0;
+}
+
+static inline void msm_rpm_free_request(struct msm_rpm_request *handle)
+{
+}
+
+static inline int msm_rpm_send_request(struct msm_rpm_request *handle)
+{
+	return 0;
+}
+
+static inline int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
+{
+	return 0;
+}
+
+static inline void *msm_rpm_send_request_noack(struct msm_rpm_request *handle)
+{
+	return NULL;
+}
+
+static inline int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+	return 0;
+}
+
+static inline int msm_rpm_send_message_noirq(enum msm_rpm_set set,
+		uint32_t rsc_type, uint32_t rsc_id, struct msm_rpm_kvp *kvp,
+		int nelems)
+{
+	return 0;
+}
+
+static inline void *msm_rpm_send_message_noack(enum msm_rpm_set set,
+		uint32_t rsc_type, uint32_t rsc_id, struct msm_rpm_kvp *kvp,
+		int nelems)
+{
+	return NULL;
+}
+
+static inline int msm_rpm_wait_for_ack(uint32_t msg_id)
+{
+	return 0;
+}
+static inline int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
+{
+	return 0;
+}
+
+static inline int __init msm_rpm_driver_init(void)
+{
+	return 0;
+}
+#endif
+
+#endif /*__ARCH_ARM_MACH_MSM_RPM_SMD_H*/
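A minimal sketch of a one-KVP active-set request using the wrapper API
above; the resource type, resource id, and key values are placeholders,
since real identifiers are RPM-firmware specific:

	static int example_rpm_vote(void)
	{
		uint32_t value = 1;
		struct msm_rpm_kvp kvp = {
			.key = 0x62616e65,	/* placeholder key */
			.length = sizeof(value),
			.data = (uint8_t *)&value,
		};

		/* placeholder rsc_type/rsc_id; returns 0 on success */
		return msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
					    0x6b6c63, 0, &kvp, 1);
	}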
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
index 02713d1..82845d9 100644
--- a/include/soc/qcom/secure_buffer.h
+++ b/include/soc/qcom/secure_buffer.h
@@ -41,6 +41,18 @@ enum vmid {
 #define PERM_WRITE                      0x2
 #define PERM_EXEC			0x1
 
+struct dest_vm_and_perm_info {
+	u32 vm;
+	u32 perm;
+	u64 ctx;
+	u32 ctx_size;
+};
+
+struct mem_prot_info {
+	phys_addr_t addr;
+	u64 size;
+};
+
 #ifdef CONFIG_QCOM_SECURE_BUFFER
 int msm_secure_table(struct sg_table *table);
 int msm_unsecure_table(struct sg_table *table);
diff --git a/include/soc/qcom/subsystem_notif.h b/include/soc/qcom/subsystem_notif.h
index 79f8169..3c38c0d 100644
--- a/include/soc/qcom/subsystem_notif.h
+++ b/include/soc/qcom/subsystem_notif.h
@@ -28,6 +28,7 @@ enum subsys_notif_type {
 
 enum early_subsys_notif_type {
 	XPORT_LAYER_NOTIF,
+	PCIE_DRV_LAYER_NOTIF,
 	NUM_EARLY_NOTIFS
 };
 
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 21f1498..d183de5 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -358,6 +358,8 @@ struct device;
 #define SND_SOC_DAPM_WILL_PMD   0x80    /* called at start of sequence */
 #define SND_SOC_DAPM_PRE_POST_PMD \
 				(SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD)
+#define SND_SOC_DAPM_PRE_POST_PMU \
+				(SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU)
 
 /* convenience event type detection */
 #define SND_SOC_DAPM_EVENT_ON(e)	\
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index b401c4e..eb3f668 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1655,6 +1655,7 @@ TRACE_EVENT(qgroup_update_reserve,
 		__entry->qgid		= qgroup->qgroupid;
 		__entry->cur_reserved	= qgroup->rsv.values[type];
 		__entry->diff		= diff;
+		__entry->type		= type;
 	),
 
 	TP_printk_btrfs("qgid=%llu type=%s cur_reserved=%llu diff=%lld",
@@ -1677,6 +1678,7 @@ TRACE_EVENT(qgroup_meta_reserve,
 	TP_fast_assign_btrfs(root->fs_info,
 		__entry->refroot	= root->objectid;
 		__entry->diff		= diff;
+		__entry->type		= type;
 	),
 
 	TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld",
@@ -1693,7 +1695,6 @@ TRACE_EVENT(qgroup_meta_convert,
 	TP_STRUCT__entry_btrfs(
 		__field(	u64,	refroot			)
 		__field(	s64,	diff			)
-		__field(	int,	type			)
 	),
 
 	TP_fast_assign_btrfs(root->fs_info,
diff --git a/include/trace/events/dcvsh.h b/include/trace/events/dcvsh.h
new file mode 100644
index 0000000..37cb49a
--- /dev/null
+++ b/include/trace/events/dcvsh.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dcvsh
+
+#if !defined(_TRACE_DCVSH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DCVSH_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(dcvsh_freq,
+	TP_PROTO(unsigned long cpu, unsigned long freq),
+
+	TP_ARGS(cpu, freq),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, cpu)
+		__field(unsigned long, freq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->freq = freq;
+	),
+
+	TP_printk("cpu:%lu max frequency:%lu", __entry->cpu, __entry->freq)
+);
+
+#endif /* _TRACE_DCVSH_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/fastrpc.h b/include/trace/events/fastrpc.h
new file mode 100644
index 0000000..dbf6d8e
--- /dev/null
+++ b/include/trace/events/fastrpc.h
@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fastrpc
+
+#if !defined(_TRACE_FASTRPC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FASTRPC_H
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(fastrpc_rpmsg_send,
+
+	TP_PROTO(int cid, uint64_t smq_ctx,
+		uint64_t ctx, uint32_t handle,
+		uint32_t sc, uint64_t addr, uint64_t size),
+
+	TP_ARGS(cid, smq_ctx, ctx, handle, sc, addr, size),
+
+	TP_STRUCT__entry(
+		__field(int, cid)
+		__field(uint64_t, smq_ctx)
+		__field(uint64_t, ctx)
+		__field(uint32_t, handle)
+		__field(uint32_t, sc)
+		__field(uint64_t, addr)
+		__field(uint64_t, size)
+	),
+
+	TP_fast_assign(
+		__entry->cid = cid;
+		__entry->smq_ctx = smq_ctx;
+		__entry->ctx = ctx;
+		__entry->handle = handle;
+		__entry->sc = sc;
+		__entry->addr = addr;
+		__entry->size = size;
+	),
+
+	TP_printk("to cid %d: smq_ctx 0x%llx, ctx 0x%llx, handle 0x%x, sc 0x%x, addr 0x%llx, size %llu",
+		__entry->cid, __entry->smq_ctx, __entry->ctx, __entry->handle,
+		__entry->sc, __entry->addr, __entry->size)
+);
+
+TRACE_EVENT(fastrpc_rpmsg_response,
+
+	TP_PROTO(int cid, uint64_t ctx, int retval,
+		uint32_t rspFlags, uint32_t earlyWakeTime),
+
+	TP_ARGS(cid, ctx, retval, rspFlags, earlyWakeTime),
+
+	TP_STRUCT__entry(
+		__field(int, cid)
+		__field(uint64_t, ctx)
+		__field(int, retval)
+		__field(uint32_t, rspFlags)
+		__field(uint32_t, earlyWakeTime)
+	),
+
+	TP_fast_assign(
+		__entry->cid = cid;
+		__entry->ctx = ctx;
+		__entry->retval = retval;
+		__entry->rspFlags = rspFlags;
+		__entry->earlyWakeTime = earlyWakeTime;
+	),
+
+	TP_printk("from cid %d: ctx 0x%llx, retval 0x%x, rspFlags %u, earlyWakeTime %u",
+		__entry->cid, __entry->ctx, __entry->retval,
+		__entry->rspFlags, __entry->earlyWakeTime)
+);
+
+TRACE_EVENT(fastrpc_context_interrupt,
+
+	TP_PROTO(int cid, uint64_t smq_ctx, uint64_t ctx,
+		uint32_t handle, uint32_t sc),
+
+	TP_ARGS(cid, smq_ctx, ctx, handle, sc),
+
+	TP_STRUCT__entry(
+		__field(int, cid)
+		__field(uint64_t, smq_ctx)
+		__field(uint64_t, ctx)
+		__field(uint32_t, handle)
+		__field(uint32_t, sc)
+	),
+
+	TP_fast_assign(
+		__entry->cid = cid;
+		__entry->smq_ctx = smq_ctx;
+		__entry->ctx = ctx;
+		__entry->handle = handle;
+		__entry->sc = sc;
+	),
+
+	TP_printk("to cid %d: smq_ctx 0x%llx, ctx 0x%llx, handle 0x%x, sc 0x%x",
+		__entry->cid, __entry->smq_ctx,
+		__entry->ctx, __entry->handle, __entry->sc)
+);
+
+TRACE_EVENT(fastrpc_context_restore,
+
+	TP_PROTO(int cid, uint64_t smq_ctx, uint64_t ctx,
+		uint32_t handle, uint32_t sc),
+
+	TP_ARGS(cid, smq_ctx, ctx, handle, sc),
+
+	TP_STRUCT__entry(
+		__field(int, cid)
+		__field(uint64_t, smq_ctx)
+		__field(uint64_t, ctx)
+		__field(uint32_t, handle)
+		__field(uint32_t, sc)
+	),
+
+	TP_fast_assign(
+		__entry->cid = cid;
+		__entry->smq_ctx = smq_ctx;
+		__entry->ctx = ctx;
+		__entry->handle = handle;
+		__entry->sc = sc;
+	),
+
+	TP_printk("for cid %d: smq_ctx 0x%llx, ctx 0x%llx, handle 0x%x, sc 0x%x",
+		__entry->cid, __entry->smq_ctx,
+		__entry->ctx, __entry->handle, __entry->sc)
+);
+
+TRACE_EVENT(fastrpc_dma_map,
+
+	TP_PROTO(int cid, int fd, uint64_t phys, size_t size,
+		size_t len, unsigned int attr, int mflags),
+
+	TP_ARGS(cid, fd, phys, size, len, attr, mflags),
+
+	TP_STRUCT__entry(
+		__field(int, cid)
+		__field(int, fd)
+		__field(uint64_t, phys)
+		__field(size_t, size)
+		__field(size_t, len)
+		__field(unsigned int, attr)
+		__field(int, mflags)
+	),
+
+	TP_fast_assign(
+		__entry->cid = cid;
+		__entry->fd = fd;
+		__entry->phys = phys;
+		__entry->size = size;
+		__entry->len = len;
+		__entry->attr = attr;
+		__entry->mflags = mflags;
+	),
+
+	TP_printk("cid %d, fd %d, phys 0x%llx, size %zu (len %zu), attr 0x%x, flags 0x%x",
+		__entry->cid, __entry->fd, __entry->phys, __entry->size,
+		__entry->len, __entry->attr, __entry->mflags)
+);
+
+TRACE_EVENT(fastrpc_dma_unmap,
+
+	TP_PROTO(int cid, uint64_t phys, size_t size),
+
+	TP_ARGS(cid, phys, size),
+
+	TP_STRUCT__entry(
+		__field(int, cid)
+		__field(uint64_t, phys)
+		__field(size_t, size)
+	),
+
+	TP_fast_assign(
+		__entry->cid = cid;
+		__entry->phys = phys;
+		__entry->size = size;
+	),
+
+	TP_printk("cid %d, phys 0x%llx, size %zu",
+		__entry->cid, __entry->phys, __entry->size)
+);
+
+TRACE_EVENT(fastrpc_dma_alloc,
+
+	TP_PROTO(int cid, uint64_t phys, size_t size,
+		unsigned long attr, int mflags),
+
+	TP_ARGS(cid, phys, size, attr, mflags),
+
+	TP_STRUCT__entry(
+		__field(int, cid)
+		__field(uint64_t, phys)
+		__field(size_t, size)
+		__field(unsigned long, attr)
+		__field(int, mflags)
+	),
+
+	TP_fast_assign(
+		__entry->cid = cid;
+		__entry->phys = phys;
+		__entry->size = size;
+		__entry->attr = attr;
+		__entry->mflags = mflags;
+	),
+
+	TP_printk("cid %d, phys 0x%llx, size %zu, attr 0x%lx, flags 0x%x",
+		__entry->cid, __entry->phys, __entry->size,
+		__entry->attr, __entry->mflags)
+);
+
+TRACE_EVENT(fastrpc_dma_free,
+
+	TP_PROTO(int cid, uint64_t phys, size_t size),
+
+	TP_ARGS(cid, phys, size),
+
+	TP_STRUCT__entry(
+		__field(int, cid)
+		__field(uint64_t, phys)
+		__field(size_t, size)
+	),
+
+	TP_fast_assign(
+		__entry->cid = cid;
+		__entry->phys = phys;
+		__entry->size = size;
+	),
+
+	TP_printk("cid %d, phys 0x%llx, size %zu",
+		__entry->cid, __entry->phys, __entry->size)
+);
+
+TRACE_EVENT(fastrpc_context_complete,
+
+	TP_PROTO(int cid, uint64_t smq_ctx, int retval,
+		uint64_t ctx, uint32_t handle, uint32_t sc),
+
+	TP_ARGS(cid, smq_ctx, retval, ctx, handle, sc),
+
+	TP_STRUCT__entry(
+		__field(int, cid)
+		__field(uint64_t, smq_ctx)
+		__field(int, retval)
+		__field(uint64_t, ctx)
+		__field(uint32_t, handle)
+		__field(uint32_t, sc)
+	),
+
+	TP_fast_assign(
+		__entry->cid = cid;
+		__entry->smq_ctx = smq_ctx;
+		__entry->retval = retval;
+		__entry->ctx = ctx;
+		__entry->handle = handle;
+		__entry->sc = sc;
+	),
+
+	TP_printk("from cid %d: smq_ctx 0x%llx, retval 0x%x, ctx 0x%llx, handle 0x%x, sc 0x%x",
+		__entry->cid, __entry->smq_ctx, __entry->retval,
+		__entry->ctx, __entry->handle, __entry->sc)
+);
+
+TRACE_EVENT(fastrpc_context_alloc,
+
+	TP_PROTO(uint64_t smq_ctx, uint64_t ctx,
+		uint32_t handle, uint32_t sc),
+
+	TP_ARGS(smq_ctx, ctx, handle, sc),
+
+	TP_STRUCT__entry(
+		__field(uint64_t, smq_ctx)
+		__field(uint64_t, ctx)
+		__field(uint32_t, handle)
+		__field(uint32_t, sc)
+	),
+
+	TP_fast_assign(
+		__entry->smq_ctx = smq_ctx;
+		__entry->ctx = ctx;
+		__entry->handle = handle;
+		__entry->sc = sc;
+	),
+
+	TP_printk("for: smq_ctx 0x%llx, ctx 0x%llx, handle 0x%x, sc 0x%x",
+		__entry->smq_ctx, __entry->ctx, __entry->handle, __entry->sc)
+);
+
+TRACE_EVENT(fastrpc_context_free,
+
+	TP_PROTO(uint64_t smq_ctx, uint64_t ctx,
+		uint32_t handle, uint32_t sc),
+
+	TP_ARGS(smq_ctx, ctx, handle, sc),
+
+	TP_STRUCT__entry(
+		__field(uint64_t, smq_ctx)
+		__field(uint64_t, ctx)
+		__field(uint32_t, handle)
+		__field(uint32_t, sc)
+	),
+
+	TP_fast_assign(
+		__entry->smq_ctx = smq_ctx;
+		__entry->ctx = ctx;
+		__entry->handle = handle;
+		__entry->sc = sc;
+	),
+
+	TP_printk("for: smq_ctx 0x%llx, ctx 0x%llx, handle 0x%x, sc 0x%x",
+		__entry->smq_ctx, __entry->ctx, __entry->handle, __entry->sc)
+);
+
+#endif /* _TRACE_FASTRPC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
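For context, this is how a driver would typically emit one of the tracepoints defined above. A minimal sketch, assuming the fastrpc driver defines CREATE_TRACE_POINTS in exactly one compilation unit; the hook name and its arguments are hypothetical, not part of this patch:

    /* in one .c file of the fastrpc driver */
    #define CREATE_TRACE_POINTS
    #include <trace/events/fastrpc.h>

    static void example_dma_alloc_hook(int cid, dma_addr_t phys, size_t size,
                                       unsigned long attr, int mflags)
    {
        /* compiles down to a static-key branch; no-op unless enabled */
        trace_fastrpc_dma_alloc(cid, (uint64_t)phys, size, attr, mflags);
    }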
diff --git a/include/trace/events/mpm.h b/include/trace/events/mpm.h
new file mode 100644
index 0000000..5ab7e2d
--- /dev/null
+++ b/include/trace/events/mpm.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mpm
+
+#if !defined(_TRACE_MPM_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MPM_H_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(mpm_wakeup_enable_irqs,
+
+	TP_PROTO(uint32_t index, uint32_t irqs),
+
+	TP_ARGS(index, irqs),
+
+	TP_STRUCT__entry(
+		__field(uint32_t, index)
+		__field(uint32_t, irqs)
+	),
+
+	TP_fast_assign(
+		__entry->index = index;
+		__entry->irqs = irqs;
+	),
+
+	TP_printk("index:%u  wakeup_capable_irqs:0x%x",
+				__entry->index, __entry->irqs)
+);
+
+TRACE_EVENT(mpm_wakeup_pending_irqs,
+
+	TP_PROTO(uint32_t index, uint32_t irqs),
+
+	TP_ARGS(index, irqs),
+
+	TP_STRUCT__entry(
+		__field(uint32_t, index)
+		__field(uint32_t, irqs)
+	),
+
+	TP_fast_assign(
+		__entry->index = index;
+		__entry->irqs = irqs;
+	),
+
+	TP_printk("index:%u wakeup_irqs:0x%x", __entry->index, __entry->irqs)
+);
+
+TRACE_EVENT(mpm_wakeup_time,
+
+	TP_PROTO(bool from_idle, u64 wakeup, u64 current_ticks),
+
+	TP_ARGS(from_idle, wakeup, current_ticks),
+
+	TP_STRUCT__entry(
+		__field(bool, from_idle)
+		__field(u64, wakeup)
+		__field(u64, current_ticks)
+	),
+
+	TP_fast_assign(
+		__entry->from_idle = from_idle;
+		__entry->wakeup = wakeup;
+		__entry->current_ticks = current_ticks;
+	),
+
+	TP_printk("idle:%d wakeup:0x%llx current:0x%llx", __entry->from_idle,
+				__entry->wakeup, __entry->current_ticks)
+);
+
+#endif
+#define TRACE_INCLUDE_FILE mpm
+#include <trace/define_trace.h>
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 147546e..0fe169c 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -500,10 +500,10 @@ rxrpc_tx_points;
 #define E_(a, b)	{ a, b }
 
 TRACE_EVENT(rxrpc_local,
-	    TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op,
+	    TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op,
 		     int usage, const void *where),
 
-	    TP_ARGS(local, op, usage, where),
+	    TP_ARGS(local_debug_id, op, usage, where),
 
 	    TP_STRUCT__entry(
 		    __field(unsigned int,	local		)
@@ -513,7 +513,7 @@ TRACE_EVENT(rxrpc_local,
 			     ),
 
 	    TP_fast_assign(
-		    __entry->local = local->debug_id;
+		    __entry->local = local_debug_id;
 		    __entry->op = op;
 		    __entry->usage = usage;
 		    __entry->where = where;
@@ -1073,7 +1073,7 @@ TRACE_EVENT(rxrpc_recvmsg,
 			     ),
 
 	    TP_fast_assign(
-		    __entry->call = call->debug_id;
+		    __entry->call = call ? call->debug_id : 0;
 		    __entry->why = why;
 		    __entry->seq = seq;
 		    __entry->offset = offset;
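The rxrpc_local prototype change above implies matching updates at every call site (those live in net/rxrpc and are not part of this hunk). A hedged sketch of the new calling pattern, on the assumption that the point of passing the debug ID by value is to capture it while the object is known to be live (the enum value shown is one of the existing rxrpc_local_trace codes):

    /* before: trace_rxrpc_local(local, rxrpc_local_queued, usage, here); */
    unsigned int debug_id = local->debug_id;  /* read while object is live */

    trace_rxrpc_local(debug_id, rxrpc_local_queued, usage, here);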
diff --git a/include/trace/events/trace_rpm_smd.h b/include/trace/events/trace_rpm_smd.h
new file mode 100644
index 0000000..242c4fa
--- /dev/null
+++ b/include/trace/events/trace_rpm_smd.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpm_smd
+
+#if !defined(_TRACE_RPM_SMD_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RPM_SMD_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(rpm_smd_ack_recvd,
+
+	TP_PROTO(unsigned int irq, unsigned int msg_id, int errno),
+
+	TP_ARGS(irq, msg_id, errno),
+
+	TP_STRUCT__entry(
+		__field(int, irq)
+		__field(int, msg_id)
+		__field(int, errno)
+	),
+
+	TP_fast_assign(
+		__entry->irq = irq;
+		__entry->msg_id = msg_id;
+		__entry->errno = errno;
+	),
+
+	TP_printk("ctx:%s msg_id:%d errno:%08x",
+		__entry->irq ? "noslp" : "sleep",
+		__entry->msg_id,
+		__entry->errno)
+);
+
+TRACE_EVENT(rpm_smd_interrupt_notify,
+
+	TP_PROTO(char *dummy),
+
+	TP_ARGS(dummy),
+
+	TP_STRUCT__entry(
+		__field(char *, dummy)
+	),
+
+	TP_fast_assign(
+		__entry->dummy = dummy;
+	),
+
+	TP_printk("%s", __entry->dummy)
+);
+
+DECLARE_EVENT_CLASS(rpm_send_msg,
+
+	TP_PROTO(unsigned int msg_id, unsigned int rsc_type,
+		unsigned int rsc_id),
+
+	TP_ARGS(msg_id, rsc_type, rsc_id),
+
+	TP_STRUCT__entry(
+		__field(u32, msg_id)
+		__field(u32, rsc_type)
+		__field(u32, rsc_id)
+		__array(char, name, 5)
+	),
+
+	TP_fast_assign(
+		__entry->msg_id = msg_id;
+		__entry->name[4] = 0;
+		__entry->rsc_type = rsc_type;
+		__entry->rsc_id = rsc_id;
+		memcpy(__entry->name, &rsc_type, sizeof(uint32_t));
+
+	),
+
+	TP_printk("msg_id:%d, rsc_type:0x%08x(%s), rsc_id:0x%08x",
+			__entry->msg_id,
+			__entry->rsc_type, __entry->name,
+			__entry->rsc_id)
+);
+
+DEFINE_EVENT(rpm_send_msg, rpm_smd_sleep_set,
+	TP_PROTO(unsigned int msg_id, unsigned int rsc_type,
+		unsigned int rsc_id),
+	TP_ARGS(msg_id, rsc_type, rsc_id)
+);
+
+DEFINE_EVENT(rpm_send_msg, rpm_smd_send_sleep_set,
+	TP_PROTO(unsigned int msg_id, unsigned int rsc_type,
+		unsigned int rsc_id),
+	TP_ARGS(msg_id, rsc_type, rsc_id)
+);
+
+DEFINE_EVENT(rpm_send_msg, rpm_smd_send_active_set,
+	TP_PROTO(unsigned int msg_id, unsigned int rsc_type,
+		unsigned int rsc_id),
+	TP_ARGS(msg_id, rsc_type, rsc_id)
+);
+
+#endif
+#define TRACE_INCLUDE_FILE trace_rpm_smd
+#include <trace/define_trace.h>
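DECLARE_EVENT_CLASS above lets the three sleep/active-set events share one record layout and print format; each DEFINE_EVENT only contributes a new tracepoint name. Emitting one is then a single call (a sketch; the caller context is hypothetical):

    /* rsc_type packs a four-character resource code; the event class
     * memcpy()s it into entry->name so the printk is human-readable.
     */
    trace_rpm_smd_send_active_set(msg_id, rsc_type, rsc_id);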
diff --git a/include/trace/events/walt.h b/include/trace/events/walt.h
index 1cf2bf9..10fecce 100644
--- a/include/trace/events/walt.h
+++ b/include/trace/events/walt.h
@@ -83,10 +83,10 @@ __get_update_sum(struct rq *rq, enum migrate_types migrate_type,
 
 TRACE_EVENT(sched_update_pred_demand,
 
-	TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int pct,
+	TP_PROTO(struct task_struct *p, u32 runtime, int pct,
 		 unsigned int pred_demand),
 
-	TP_ARGS(rq, p, runtime, pct, pred_demand),
+	TP_ARGS(p, runtime, pct, pred_demand),
 
 	TP_STRUCT__entry(
 		__array(char,		comm, TASK_COMM_LEN)
@@ -106,7 +106,7 @@ TRACE_EVENT(sched_update_pred_demand,
 		__entry->pred_demand     = pred_demand;
 		memcpy(__entry->bucket, p->ravg.busy_buckets,
 					NUM_BUSY_BUCKETS * sizeof(u8));
-		__entry->cpu            = rq->cpu;
+		__entry->cpu            = task_cpu(p);
 	),
 
 	TP_printk("%d (%s): runtime %u pct %d cpu %d pred_demand %u (buckets: %u %u %u %u %u %u %u %u %u %u)",
@@ -491,9 +491,10 @@ TRACE_EVENT(sched_load_to_gov,
 
 	TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load,
 		int freq_aggr, u64 load, int policy,
-		int big_task_rotation),
+		int big_task_rotation,
+		unsigned int user_hint),
 	TP_ARGS(rq, aggr_grp_load, tt_load, freq_aggr, load, policy,
-		big_task_rotation),
+		big_task_rotation, user_hint),
 
 	TP_STRUCT__entry(
 		__field(int,	cpu)
@@ -509,6 +510,7 @@ TRACE_EVENT(sched_load_to_gov,
 		__field(u64,	pl)
 		__field(u64,    load)
 		__field(int,    big_task_rotation)
+		__field(unsigned int, user_hint)
 	),
 
 	TP_fast_assign(
@@ -526,13 +528,14 @@ TRACE_EVENT(sched_load_to_gov,
 					rq->walt_stats.pred_demands_sum_scaled;
 		__entry->load		= load;
 		__entry->big_task_rotation = big_task_rotation;
+		__entry->user_hint = user_hint;
 	),
 
-	TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr=%d tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d",
+	TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr=%d tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d user_hint=%u",
 		__entry->cpu, __entry->policy, __entry->ed_task_pid,
 		__entry->aggr_grp_load, __entry->freq_aggr,
 		__entry->tt_load, __entry->rq_ps, __entry->grp_rq_ps,
 		__entry->nt_ps, __entry->grp_nt_ps, __entry->pl, __entry->load,
-		__entry->big_task_rotation)
+		__entry->big_task_rotation, __entry->user_hint)
 );
 #endif
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 78ad139..8a9adc6 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -440,6 +440,7 @@ struct drm_msm_power_ctrl {
 #define DRM_EVENT_SDE_HW_RECOVERY 0X80000007
 #define DRM_EVENT_LTM_HIST 0X80000008
 #define DRM_EVENT_LTM_WB_PB 0X80000009
+#define DRM_EVENT_LTM_OFF 0X8000000A
 
 #define DRM_IOCTL_MSM_GET_PARAM        DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
 #define DRM_IOCTL_MSM_GEM_NEW          DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
diff --git a/include/uapi/linux/ipa_qmi_service_v01.h b/include/uapi/linux/ipa_qmi_service_v01.h
index a266e45..271bd96 100644
--- a/include/uapi/linux/ipa_qmi_service_v01.h
+++ b/include/uapi/linux/ipa_qmi_service_v01.h
@@ -480,6 +480,19 @@ struct ipa_indication_reg_req_msg_v01 {
 	 * message makes sense only when the  QMI_IPA_INDICATION_REGISTER_REQ
 	 * is being originated from the master driver.
 	 */
+
+	/* Optional */
+	/* BW CHANGE Indication */
+	uint8_t bw_change_ind_valid;
+	/* Must be set to true if bw_change_ind is being passed */
+	uint8_t bw_change_ind;
+	/*
+	 * If set to TRUE, this field indicates that the client wants to
+	 * receive indications for BW change information via
+	 * QMI_IPA_BW_CHANGE_INDICATION. Setting this field in the request
+	 * message makes sense only when the QMI_IPA_INDICATION_REGISTER_REQ
+	 * is being originated from the master driver.
+	 */
 };  /* Message */
 
 
@@ -2647,6 +2660,19 @@ struct ipa_remove_offload_connection_resp_msg_v01 {
 }; /* Message */
 #define IPA_REMOVE_OFFLOAD_CONNECTION_RESP_MSG_V01_MAX_MSG_LEN 7
 
+struct ipa_bw_change_ind_msg_v01 {
+	/* Optional */
+	/* Must be set to true if peak_bw_ul is being passed */
+	uint8_t peak_bw_ul_valid;
+	/* Kbps */
+	uint32_t peak_bw_ul;
+	/* Must be set to true if peak_bw_dl is being passed */
+	uint8_t peak_bw_dl_valid;
+	/* Kbps */
+	uint32_t peak_bw_dl;
+}; /* Message */
+#define IPA_BW_CHANGE_IND_MSG_V01_MAX_MSG_LEN 14
+
 /*Service Message Definition*/
 #define QMI_IPA_INDICATION_REGISTER_REQ_V01 0x0020
 #define QMI_IPA_INDICATION_REGISTER_RESP_V01 0x0020
@@ -2701,12 +2727,12 @@ struct ipa_remove_offload_connection_resp_msg_v01 {
 #define QMI_IPA_ADD_OFFLOAD_CONNECTION_RESP_V01 0x0041
 #define QMI_IPA_REMOVE_OFFLOAD_CONNECTION_REQ_V01 0x0042
 #define QMI_IPA_REMOVE_OFFLOAD_CONNECTION_RESP_V01 0x0042
-
+#define QMI_IPA_BW_CHANGE_INDICATION_V01 0x0044
 
 /* add for max length*/
 #define QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01 162
 #define QMI_IPA_INIT_MODEM_DRIVER_RESP_MAX_MSG_LEN_V01 25
-#define QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01 16
+#define QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01 20
 #define QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01 7
 #define QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01 33705
 #define QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01 783
diff --git a/include/uapi/linux/isdn/capicmd.h b/include/uapi/linux/isdn/capicmd.h
index 4941628..5ec88e7 100644
--- a/include/uapi/linux/isdn/capicmd.h
+++ b/include/uapi/linux/isdn/capicmd.h
@@ -16,6 +16,7 @@
 #define CAPI_MSG_BASELEN		8
 #define CAPI_DATA_B3_REQ_LEN		(CAPI_MSG_BASELEN+4+4+2+2+2)
 #define CAPI_DATA_B3_RESP_LEN		(CAPI_MSG_BASELEN+4+2)
+#define CAPI_DISCONNECT_B3_RESP_LEN	(CAPI_MSG_BASELEN+4)
 
 /*----- CAPI commands -----*/
 #define CAPI_ALERT		    0x01
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index 7b8c9e1..0f3cb13 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -65,7 +65,12 @@
 
 /* keyctl structures */
 struct keyctl_dh_params {
-	__s32 private;
+	union {
+#ifndef __cplusplus
+		__s32 private;
+#endif
+		__s32 priv;
+	};
 	__s32 prime;
 	__s32 base;
 };
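The anonymous union exists because "private" is a reserved word in C++: C callers keep the historical field name while C++ callers use the new priv alias, and both name the same storage. An illustrative userspace fragment (the helper function is hypothetical):

    #include <stdint.h>
    #include <linux/keyctl.h>

    static void fill_dh_params(struct keyctl_dh_params *p, int32_t priv_key,
                               int32_t prime_key, int32_t base_key)
    {
        p->priv = priv_key;   /* portable: compiles as both C and C++ */
        p->prime = prime_key;
        p->base = base_key;
    }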
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index a1953ab..3dcbefe 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -7,6 +7,7 @@
 #define _UAPI_MSM_IPA_H_
 
 #ifndef __KERNEL__
+#include <stdio.h>
 #include <stdint.h>
 #include <stddef.h>
 #include <sys/stat.h>
@@ -126,6 +127,8 @@
 #define IPA_IOCTL_FNR_COUNTER_ALLOC             74
 #define IPA_IOCTL_FNR_COUNTER_DEALLOC           75
 #define IPA_IOCTL_FNR_COUNTER_QUERY             76
+#define IPA_IOCTL_SET_FNR_COUNTER_INFO          77
+#define IPA_IOCTL_GET_NAT_IN_SRAM_INFO          78
 
 /**
  * max size of the header to be inserted
@@ -207,6 +210,7 @@
 #define IPA_FLT_L2TP_INNER_IP_TYPE	(1ul << 25)
 #define IPA_FLT_L2TP_INNER_IPV4_DST_ADDR (1ul << 26)
 #define IPA_FLT_IS_PURE_ACK		(1ul << 27)
+#define IPA_FLT_VLAN_ID			(1ul << 28)
 
 /**
  * maximal number of NAT PDNs in the PDN config table
@@ -376,9 +380,13 @@ enum ipa_client_type {
 	IPA_CLIENT_MHI_PRIME_RMNET_CONS		= 99,
 
 	IPA_CLIENT_MHI_PRIME_DPL_PROD		= 100,
+	/* RESERVED CONS                        = 101, */
+
+	IPA_CLIENT_AQC_ETHERNET_PROD		= 102,
+	IPA_CLIENT_AQC_ETHERNET_CONS		= 103,
 };
 
-#define IPA_CLIENT_MAX (IPA_CLIENT_MHI_PRIME_DPL_PROD + 1)
+#define IPA_CLIENT_MAX (IPA_CLIENT_AQC_ETHERNET_CONS + 1)
 
 #define IPA_CLIENT_WLAN2_PROD IPA_CLIENT_A5_WLAN_AMPDU_PROD
 #define IPA_CLIENT_Q6_DL_NLO_DATA_PROD IPA_CLIENT_Q6_DL_NLO_DATA_PROD
@@ -398,6 +406,8 @@ enum ipa_client_type {
 #define IPA_CLIENT_MHI_PRIME_RMNET_PROD IPA_CLIENT_MHI_PRIME_RMNET_PROD
 #define IPA_CLIENT_MHI_PRIME_RMNET_CONS IPA_CLIENT_MHI_PRIME_RMNET_CONS
 #define IPA_CLIENT_MHI_PRIME_DPL_PROD IPA_CLIENT_MHI_PRIME_DPL_PROD
+#define IPA_CLIENT_AQC_ETHERNET_PROD IPA_CLIENT_AQC_ETHERNET_PROD
+#define IPA_CLIENT_AQC_ETHERNET_CONS IPA_CLIENT_AQC_ETHERNET_CONS
 
 #define IPA_CLIENT_IS_APPS_CONS(client) \
 	((client) == IPA_CLIENT_APPS_LAN_CONS || \
@@ -510,7 +520,27 @@ enum ipa_client_type {
 	(IPA_CLIENT_IS_TEST_PROD(client) || IPA_CLIENT_IS_TEST_CONS(client))
 
 /**
+ * The following is used to describe the types of memory NAT can
+ * reside in.
+ *
+ * PLEASE KEEP THE FOLLOWING IN SYNC WITH ipa3_nat_mem_in_as_str()
+ * BELOW.
+ */
+enum ipa3_nat_mem_in {
+	IPA_NAT_MEM_IN_DDR  = 0,
+	IPA_NAT_MEM_IN_SRAM = 1,
+
+	IPA_NAT_MEM_IN_MAX
+};
+
+#define IPA_VALID_NAT_MEM_IN(t) \
+	((t) >= IPA_NAT_MEM_IN_DDR && (t) < IPA_NAT_MEM_IN_MAX)
+
+/**
  * enum ipa_ip_type - Address family: IPv4 or IPv6
+ *
+ * PLEASE KEEP THE FOLLOWING IN SYNC WITH ipa_ip_type_as_str()
+ * BELOW.
  */
 enum ipa_ip_type {
 	IPA_IP_v4,
@@ -518,6 +548,9 @@ enum ipa_ip_type {
 	IPA_IP_MAX
 };
 
+#define VALID_IPA_IP_TYPE(t) \
+	((t) >= IPA_IP_v4 && (t) < IPA_IP_MAX)
+
 /**
  * enum ipa_rule_type - Type of routing or filtering rule
  * Hashable: Rule will be located at the hashable tables
@@ -783,6 +816,7 @@ enum ipa_hw_type {
  * @u.v6.src_addr_mask: src address mask
  * @u.v6.dst_addr: dst address val
  * @u.v6.dst_addr_mask: dst address mask
+ * @vlan_id: vlan id value
  */
 struct ipa_rule_attrib {
 	uint32_t attrib_mask;
@@ -823,6 +857,7 @@ struct ipa_rule_attrib {
 			uint32_t dst_addr_mask[4];
 		} v6;
 	} u;
+	uint16_t vlan_id;
 };
 
 /*! @brief The maximum number of Mask Equal 32 Eqns */
@@ -1063,6 +1098,8 @@ enum ipa_hdr_l2_type {
  * IPA_HDR_PROC_ETHII_TO_802_3: Process Ethernet II to 802_3
  * IPA_HDR_PROC_802_3_TO_ETHII: Process 802_3 to Ethernet II
  * IPA_HDR_PROC_802_3_TO_802_3: Process 802_3 to 802_3
+ * IPA_HDR_PROC_ETHII_TO_ETHII_EX: Process Ethernet II to Ethernet II with
+ *	generic lengths of src and dst headers
  */
 enum ipa_hdr_proc_type {
 	IPA_HDR_PROC_NONE,
@@ -1071,9 +1108,10 @@ enum ipa_hdr_proc_type {
 	IPA_HDR_PROC_802_3_TO_ETHII,
 	IPA_HDR_PROC_802_3_TO_802_3,
 	IPA_HDR_PROC_L2TP_HEADER_ADD,
-	IPA_HDR_PROC_L2TP_HEADER_REMOVE
+	IPA_HDR_PROC_L2TP_HEADER_REMOVE,
+	IPA_HDR_PROC_ETHII_TO_ETHII_EX
 };
-#define IPA_HDR_PROC_MAX (IPA_HDR_PROC_L2TP_HEADER_REMOVE + 1)
+#define IPA_HDR_PROC_MAX (IPA_HDR_PROC_ETHII_TO_ETHII_EX + 1)
 
 /**
  * struct ipa_rt_rule - attributes of a routing rule
@@ -1228,6 +1266,20 @@ struct ipa_l2tp_hdr_proc_ctx_params {
 	enum ipa_client_type dst_pipe;
 };
 
+/**
+ * struct ipa_eth_II_to_eth_II_ex_procparams -
+ * @input_ethhdr_negative_offset: Offset of the ethernet hdr (in bytes)
+ *	back from the start of the input IP hdr
+ * @output_ethhdr_negative_offset: Offset of the ethernet hdr (in bytes)
+ *	back from the end of the template hdr
+ * @reserved: for future use
+ */
+struct ipa_eth_II_to_eth_II_ex_procparams {
+	uint32_t input_ethhdr_negative_offset : 8;
+	uint32_t output_ethhdr_negative_offset : 8;
+	uint32_t reserved : 16;
+};
+
 #define L2TP_USER_SPACE_SPECIFY_DST_PIPE
 
 /**
@@ -1236,6 +1288,7 @@ struct ipa_l2tp_hdr_proc_ctx_params {
  * @type: processing context type
  * @hdr_hdl: in parameter, handle to header
  * @l2tp_params: l2tp parameters
+ * @generic_params: generic proc_ctx params
  * @proc_ctx_hdl: out parameter, handle to proc_ctx, valid when status is 0
  * @status:	out parameter, status of header add operation,
  *		0 for success,
@@ -1247,6 +1300,7 @@ struct ipa_hdr_proc_ctx_add {
 	uint32_t proc_ctx_hdl;
 	int status;
 	struct ipa_l2tp_hdr_proc_ctx_params l2tp_params;
+	struct ipa_eth_II_to_eth_II_ex_procparams generic_params;
 };
 
 #define IPA_L2TP_HDR_PROC_SUPPORT
@@ -2073,9 +2127,11 @@ struct ipa_ioc_nat_ipv6ct_table_alloc {
  * @expn_table_entries: input parameter, ipv4 expansion rules table number of
  *                      entries
  * @ip_addr: input parameter, public ip address
+ * @mem_type: input parameter, type of memory the table resides in
+ * @focus_change: input parameter, whether the table is moving between sram and ddr
  */
 struct ipa_ioc_v4_nat_init {
-	uint8_t tbl_index;
+	uint8_t  tbl_index;
 	uint32_t ipv4_rules_offset;
 	uint32_t expn_rules_offset;
 
@@ -2085,6 +2141,9 @@ struct ipa_ioc_v4_nat_init {
 	uint16_t table_entries;
 	uint16_t expn_table_entries;
 	uint32_t ip_addr;
+
+	uint8_t  mem_type;
+	uint8_t  focus_change;
 };
 
 /**
@@ -2117,9 +2176,11 @@ struct ipa_ioc_v4_nat_del {
 /**
  * struct ipa_ioc_nat_ipv6ct_table_del - NAT/IPv6CT table delete parameter
  * @table_index: input parameter, index of the table
+ * @mem_type: input parameter, type of memory the table resides in
  */
 struct ipa_ioc_nat_ipv6ct_table_del {
 	uint8_t table_index;
+	uint8_t mem_type;
 };
 
 /**
@@ -2143,11 +2204,12 @@ struct ipa_ioc_nat_dma_one {
  * struct ipa_ioc_nat_dma_cmd - To hold multiple nat/ipv6ct dma commands
  * @entries: number of dma commands in use
  * @dma: data pointer to the dma commands
+ * @mem_type: input parameter, type of memory the table resides in
  */
 struct ipa_ioc_nat_dma_cmd {
 	uint8_t entries;
+	uint8_t mem_type;
 	struct ipa_ioc_nat_dma_one dma[0];
-
 };
 
 /**
@@ -2523,6 +2585,24 @@ struct ipa_odl_modem_config {
 	__u8 config_status;
 };
 
+struct ipa_ioc_fnr_index_info {
+	uint8_t hw_counter_offset;
+	uint8_t sw_counter_offset;
+};
+
+enum ipacm_hw_index_counter_type {
+	UL_HW = 0,
+	DL_HW,
+	DL_ALL,
+	UL_ALL,
+};
+
+enum ipacm_hw_index_counter_virtual_type {
+	UL_HW_CACHE = 0,
+	DL_HW_CACHE,
+	UL_WLAN_TX,
+	DL_WLAN_TX
+};
 
 /**
  *   actual IOCTLs supported by IPA driver
@@ -2773,6 +2853,14 @@ struct ipa_odl_modem_config {
 				IPA_IOCTL_FNR_COUNTER_QUERY, \
 				struct ipa_ioc_flt_rt_query)
 
+#define IPA_IOC_SET_FNR_COUNTER_INFO _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_SET_FNR_COUNTER_INFO, \
+				struct ipa_ioc_fnr_index_info)
+
+#define IPA_IOC_GET_NAT_IN_SRAM_INFO _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_NAT_IN_SRAM_INFO, \
+				struct ipa_nat_in_sram_info)
+
 /*
  * unique magic number of the Tethering bridge ioctls
  */
@@ -2862,6 +2950,21 @@ struct teth_ioc_aggr_params {
 	uint16_t lcid;
 };
 
+/**
+ * struct ipa_nat_in_sram_info - query for nat in sram particulars
+ * @sram_mem_available_for_nat: Amount of SRAM available to hold the nat table
+ * @nat_table_offset_into_mmap: Offset into the mmap'd vm where the table will be
+ * @best_nat_in_sram_size_rqst: The size to request for the mmap
+ *
+ * The last two elements above are required to deal with situations
+ * where the SRAM's physical address and size don't align with the
+ * mmap'ing's page size and boundary attributes.
+ */
+struct ipa_nat_in_sram_info {
+	uint32_t sram_mem_available_for_nat;
+	uint32_t nat_table_offset_into_mmap;
+	uint32_t best_nat_in_sram_size_rqst;
+};
 
 #define TETH_BRIDGE_IOC_SET_BRIDGE_MODE _IOW(TETH_BRIDGE_IOC_MAGIC, \
 				TETH_BRIDGE_IOCTL_SET_BRIDGE_MODE, \
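A sketch of how userspace could query the new NAT-in-SRAM particulars via the ioctl added above; the device node path and the error handling are assumptions, not part of this patch:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/msm_ipa.h>

    static int query_nat_sram(void)
    {
        struct ipa_nat_in_sram_info info = { 0 };
        int fd = open("/dev/ipa", O_RDONLY);   /* node path assumed */

        if (fd < 0)
            return -1;
        if (ioctl(fd, IPA_IOC_GET_NAT_IN_SRAM_INFO, &info) == 0)
            printf("nat sram: %u bytes, mmap offset %u, request size %u\n",
                   info.sram_mem_available_for_nat,
                   info.nat_table_offset_into_mmap,
                   info.best_nat_in_sram_size_rqst);
        close(fd);
        return 0;
    }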
diff --git a/include/uapi/linux/msm_npu.h b/include/uapi/linux/msm_npu.h
index 9309567..bd68c53 100644
--- a/include/uapi/linux/msm_npu.h
+++ b/include/uapi/linux/msm_npu.h
@@ -78,6 +78,8 @@
 #define MSM_NPU_PROP_ID_PERF_MODE_MAX (MSM_NPU_PROP_ID_START + 2)
 #define MSM_NPU_PROP_ID_DRV_VERSION (MSM_NPU_PROP_ID_START + 3)
 #define MSM_NPU_PROP_ID_HARDWARE_VERSION (MSM_NPU_PROP_ID_START + 4)
+#define MSM_NPU_PROP_ID_IPC_QUEUE_INFO (MSM_NPU_PROP_ID_START + 5)
+#define MSM_NPU_PROP_ID_DRV_FEATURE (MSM_NPU_PROP_ID_START + 6)
 
 #define MSM_NPU_FW_PROP_ID_START 0x1000
 #define MSM_NPU_PROP_ID_DCVS_MODE (MSM_NPU_FW_PROP_ID_START + 0)
@@ -86,6 +88,9 @@
 #define MSM_NPU_PROP_ID_HW_VERSION (MSM_NPU_FW_PROP_ID_START + 3)
 #define MSM_NPU_PROP_ID_FW_VERSION (MSM_NPU_FW_PROP_ID_START + 4)
 
+/* features supported by driver */
+#define MSM_NPU_FEATURE_MULTI_EXECUTE  0x1
+#define MSM_NPU_FEATURE_ASYNC_EXECUTE  0x2
 
 #define PROP_PARAM_MAX_SIZE 8
 
diff --git a/include/uapi/linux/netfilter/xt_nfacct.h b/include/uapi/linux/netfilter/xt_nfacct.h
index 5c8a4d7..b5123ab 100644
--- a/include/uapi/linux/netfilter/xt_nfacct.h
+++ b/include/uapi/linux/netfilter/xt_nfacct.h
@@ -11,4 +11,9 @@ struct xt_nfacct_match_info {
 	struct nf_acct	*nfacct;
 };
 
+struct xt_nfacct_match_info_v1 {
+	char		name[NFACCT_NAME_MAX];
+	struct nf_acct	*nfacct __attribute__((aligned(8)));
+};
+
 #endif /* _XT_NFACCT_MATCH_H */
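The aligned(8) annotation on the kernel pointer pins the v1 layout so it is identical for 32-bit and 64-bit userspace, sparing the match a compat translation layer. A compile-time check sketching that invariant (the offsets assume NFACCT_NAME_MAX == 32):

    #include <assert.h>
    #include <stddef.h>
    #include <linux/netfilter/nfnetlink_acct.h>
    #include <linux/netfilter/xt_nfacct.h>

    /* name[32] ends at offset 32, and the 8-byte alignment pads the
     * struct to the same 40 bytes whether pointers are 4 or 8 bytes.
     */
    static_assert(offsetof(struct xt_nfacct_match_info_v1, nfacct) == 32,
                  "nfacct pointer must sit at offset 32 on all ABIs");
    static_assert(sizeof(struct xt_nfacct_match_info_v1) == 40,
                  "v1 struct must be 40 bytes on all ABIs");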
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index fc9dfce..2f938db 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -52,6 +52,11 @@
 #define NL80211_MULTICAST_GROUP_NAN		"nan"
 #define NL80211_MULTICAST_GROUP_TESTMODE	"testmode"
 
+#define NL80211_EDMG_BW_CONFIG_MIN	4
+#define NL80211_EDMG_BW_CONFIG_MAX	15
+#define NL80211_EDMG_CHANNELS_MIN	1
+#define NL80211_EDMG_CHANNELS_MAX	0x3c /* 0b00111100 */
+
 /**
  * DOC: Station handling
  *
@@ -2288,6 +2293,52 @@ enum nl80211_commands {
  *	association request when used with NL80211_CMD_NEW_STATION). Can be set
  *	only if %NL80211_STA_FLAG_WME is set.
  *
+ * @NL80211_ATTR_FTM_RESPONDER: nested attribute which user-space can include
+ *      in %NL80211_CMD_START_AP or %NL80211_CMD_SET_BEACON to enable fine
+ *      timing measurement (FTM) responder functionality; the contained
+ *      parameters are described in &enum nl80211_ftm_responder_attr
+ *
+ * @NL80211_ATTR_FTM_RESPONDER_STATS: Nested attribute with FTM responder
+ *      statistics, see &enum nl80211_ftm_responder_stats.
+ *
+ * @NL80211_ATTR_TIMEOUT: Timeout for the given operation in milliseconds (u32),
+ *      if the attribute is not given no timeout is requested. Note that 0 is an
+ *      invalid value.
+ *
+ * @NL80211_ATTR_PEER_MEASUREMENTS: peer measurements request (and result)
+ *      data, uses nested attributes specified in
+ *      &enum nl80211_peer_measurement_attrs.
+ *      This is also used for capability advertisement in the wiphy information,
+ *      with the appropriate sub-attributes.
+ *
+ * @NL80211_ATTR_AIRTIME_WEIGHT: Station's weight when scheduled by the airtime
+ *      scheduler.
+ *
+ * @NL80211_ATTR_STA_TX_POWER_SETTING: Transmit power setting type (u8) for
+ *      station associated with the AP. See &enum nl80211_tx_power_setting for
+ *      possible values.
+ * @NL80211_ATTR_STA_TX_POWER: Transmit power level (s16) in dBm units. This
+ *      allows setting the Tx power for a station. If this attribute is not
+ *      included, the default per-interface tx power setting applies; the
+ *      driver should pick the lower of the per-interface and per-station
+ *      tx power.
+ *
+ * @NL80211_ATTR_SAE_PASSWORD: attribute for passing SAE password material. It
+ *      is used with %NL80211_CMD_CONNECT to provide password for offloading
+ *      SAE authentication for WPA3-Personal networks.
+ *
+ * @NL80211_ATTR_TWT_RESPONDER: Enable target wait time responder support.
+ *
+ * @NL80211_ATTR_HE_OBSS_PD: nested attribute for OBSS Packet Detection
+ *      functionality.
+ *
+ * @NL80211_ATTR_WIPHY_EDMG_CHANNELS: bitmap that indicates the 2.16 GHz
+ *      channel(s) that are allowed to be used for EDMG transmissions.
+ *      Defined by IEEE P802.11ay/D4.0 section 9.4.2.251. (u8 attribute)
+ * @NL80211_ATTR_WIPHY_EDMG_BW_CONFIG: Channel BW Configuration subfield encodes
+ *      the allowed channel bandwidth configurations. (u8 attribute)
+ *      Defined by IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13.
+ *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2729,6 +2780,27 @@ enum nl80211_attrs {
 
 	NL80211_ATTR_HE_CAPABILITY,
 
+	NL80211_ATTR_FTM_RESPONDER,
+
+	NL80211_ATTR_FTM_RESPONDER_STATS,
+
+	NL80211_ATTR_TIMEOUT,
+
+	NL80211_ATTR_PEER_MEASUREMENTS,
+
+	NL80211_ATTR_AIRTIME_WEIGHT,
+	NL80211_ATTR_STA_TX_POWER_SETTING,
+	NL80211_ATTR_STA_TX_POWER,
+
+	NL80211_ATTR_SAE_PASSWORD,
+
+	NL80211_ATTR_TWT_RESPONDER,
+
+	NL80211_ATTR_HE_OBSS_PD,
+
+	NL80211_ATTR_WIPHY_EDMG_CHANNELS,
+	NL80211_ATTR_WIPHY_EDMG_BW_CONFIG,
+
 	/* add attributes here, update the policy in nl80211.c */
 
 	__NL80211_ATTR_AFTER_LAST,
@@ -3320,6 +3392,12 @@ enum nl80211_band_iftype_attr {
  * @NL80211_BAND_ATTR_VHT_CAPA: VHT capabilities, as in the HT information IE
  * @NL80211_BAND_ATTR_IFTYPE_DATA: nested array attribute, with each entry using
  *	attributes from &enum nl80211_band_iftype_attr
+ * @NL80211_BAND_ATTR_EDMG_CHANNELS: bitmap that indicates the 2.16 GHz
+ *      channel(s) that are allowed to be used for EDMG transmissions.
+ *      Defined by IEEE P802.11ay/D4.0 section 9.4.2.251.
+ * @NL80211_BAND_ATTR_EDMG_BW_CONFIG: Channel BW Configuration subfield encodes
+ *      the allowed channel bandwidth configurations.
+ *      Defined by IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13.
  * @NL80211_BAND_ATTR_MAX: highest band attribute currently defined
  * @__NL80211_BAND_ATTR_AFTER_LAST: internal use
  */
@@ -3337,6 +3415,9 @@ enum nl80211_band_attr {
 	NL80211_BAND_ATTR_VHT_CAPA,
 	NL80211_BAND_ATTR_IFTYPE_DATA,
 
+	NL80211_BAND_ATTR_EDMG_CHANNELS,
+	NL80211_BAND_ATTR_EDMG_BW_CONFIG,
+
 	/* keep last */
 	__NL80211_BAND_ATTR_AFTER_LAST,
 	NL80211_BAND_ATTR_MAX = __NL80211_BAND_ATTR_AFTER_LAST - 1
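A small validation sketch against the new EDMG bounds; purely illustrative, since cfg80211's own attribute policy checks are authoritative:

    #include <stdbool.h>
    #include <linux/nl80211.h>

    /* channels is the 2.16 GHz channel bitmap; bw_config selects one of
     * the bandwidth configurations from IEEE P802.11ay Table 13.
     */
    static bool edmg_attrs_in_range(unsigned char channels,
                                    unsigned char bw_config)
    {
        if (channels < NL80211_EDMG_CHANNELS_MIN ||
            channels > NL80211_EDMG_CHANNELS_MAX)
            return false;
        return bw_config >= NL80211_EDMG_BW_CONFIG_MIN &&
               bw_config <= NL80211_EDMG_BW_CONFIG_MAX;
    }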
diff --git a/include/uapi/linux/spcom.h b/include/uapi/linux/spcom.h
index c5b97fd..65c9e23 100644
--- a/include/uapi/linux/spcom.h
+++ b/include/uapi/linux/spcom.h
@@ -50,9 +50,14 @@ enum spcom_cmd_id {
 	SPCOM_CMD_UNLOCK_ION_BUF = 0x554C434B, /* "ULCK" = 0x4C4F434B */
 	SPCOM_CMD_FSSR		= 0x46535352, /* "FSSR" = 0x46535352 */
 	SPCOM_CMD_CREATE_CHANNEL = 0x43524554, /* "CRET" = 0x43524554 */
+
+#define SPCOM_CMD_ENABLE_SSR \
+	SPCOM_CMD_ENABLE_SSR
+	SPCOM_CMD_ENABLE_SSR     = 0x45535352,   /* "ESSR" = 0x45535352 */
+
 #define SPCOM_CMD_RESTART_SP \
 	SPCOM_CMD_RESTART_SP
-	SPCOM_CMD_RESTART_SP    = 0x52535452, /* "RSTR" = 0x52535452 */
+	SPCOM_CMD_RESTART_SP         = 0x52535452,   /* "RSTR" = 0x52535452 */
 };
 
 /*
@@ -94,6 +99,8 @@ struct spcom_user_create_channel_command {
 struct spcom_user_restart_sp_command {
 	enum spcom_cmd_id cmd_id;
 	uint32_t arg;
+#define SPCOM_IS_UPDATED_SUPPORTED
+	uint32_t is_updated;
 } __packed;
 
 /* maximum ION buf for send-modfied-command */
diff --git a/include/uapi/linux/spss_utils.h b/include/uapi/linux/spss_utils.h
index acb7bceb..a5ff6ed 100644
--- a/include/uapi/linux/spss_utils.h
+++ b/include/uapi/linux/spss_utils.h
@@ -17,9 +17,11 @@
  */
 
 #define SPSS_IOC_MAGIC  'S'
+#define NUM_SPU_UEFI_APPS   3
 
 struct spss_ioc_set_fw_cmac {
 	uint32_t cmac[4];
+	uint32_t app_cmacs[NUM_SPU_UEFI_APPS][4];
 } __packed;
 
 #define SPSS_IOC_SET_FW_CMAC \
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 57b3338..b28995b 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -994,6 +994,14 @@ enum v4l2_mpeg_vidc_video_hevc_max_hier_coding_layer {
 #define V4L2_CID_MPEG_VIDC_CVP_FRAME_RATE \
 	(V4L2_CID_MPEG_MSM_VIDC_BASE + 126)
 
+#define V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE \
+	(V4L2_CID_MPEG_MSM_VIDC_BASE + 127)
+enum v4l2_mpeg_vidc_video_roi_type {
+	V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE_NONE = 0,
+	V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE_2BIT = 1,
+	V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE_2BYTE = 2,
+};
+
 /*  Camera class control IDs */
 
 #define V4L2_CID_CAMERA_CLASS_BASE	(V4L2_CTRL_CLASS_CAMERA | 0x900)
diff --git a/include/uapi/media/msm_cvp_private.h b/include/uapi/media/msm_cvp_private.h
index 2f58502..645ae6a 100644
--- a/include/uapi/media/msm_cvp_private.h
+++ b/include/uapi/media/msm_cvp_private.h
@@ -75,6 +75,8 @@
 
 #define CVP_KMD_HFI_FD_FRAME_CMD  (CVP_KMD_CMD_START + 16)
 
+#define CVP_KMD_UPDATE_POWER	(CVP_KMD_CMD_START + 17)
+
 #define CVP_KMD_SEND_CMD_PKT	(CVP_KMD_CMD_START + 64)
 
 #define CVP_KMD_RECEIVE_MSG_PKT	 (CVP_KMD_CMD_START + 65)
@@ -225,6 +227,24 @@ struct cvp_kmd_hfi_packet {
 #define CVP_KMD_PROP_SESSION_PRIORITY	4
 #define CVP_KMD_PROP_SESSION_SECURITY	5
 #define CVP_KMD_PROP_SESSION_DSPMASK	6
+
+#define CVP_KMD_PROP_PWR_FDU	0x10
+#define CVP_KMD_PROP_PWR_ICA	0x11
+#define CVP_KMD_PROP_PWR_OD	0x12
+#define CVP_KMD_PROP_PWR_MPU	0x13
+#define CVP_KMD_PROP_PWR_FW	0x14
+#define CVP_KMD_PROP_PWR_DDR	0x15
+#define CVP_KMD_PROP_PWR_SYSCACHE	0x16
+#define CVP_KMD_PROP_PWR_FDU_OP	0x17
+#define CVP_KMD_PROP_PWR_ICA_OP	0x18
+#define CVP_KMD_PROP_PWR_OD_OP	0x19
+#define CVP_KMD_PROP_PWR_MPU_OP	0x1A
+#define CVP_KMD_PROP_PWR_FW_OP	0x1B
+#define CVP_KMD_PROP_PWR_DDR_OP	0x1C
+#define CVP_KMD_PROP_PWR_SYSCACHE_OP	0x1D
+
+#define MAX_KMD_PROP_NUM	(CVP_KMD_PROP_PWR_SYSCACHE_OP + 1)
+
 struct cvp_kmd_sys_property {
 	unsigned int prop_type;
 	unsigned int data;
diff --git a/include/uapi/media/msm_vidc_utils.h b/include/uapi/media/msm_vidc_utils.h
index 18fdddb..c121c36 100644
--- a/include/uapi/media/msm_vidc_utils.h
+++ b/include/uapi/media/msm_vidc_utils.h
@@ -155,6 +155,14 @@ struct msm_vidc_s3d_frame_packing_payload {
 	__u32 fpa_extension_flag;
 };
 
+struct msm_vidc_roi_qp_payload {
+	__s32 upper_qp_offset;
+	__s32 lower_qp_offset;
+	__u32 b_roi_info;
+	__u32 mbi_info_size;
+	__u32 data[1];
+};
+
 #define MSM_VIDC_EXTRADATA_ROI_QP 0x00000013
 struct msm_vidc_roi_deltaqp_payload {
 	__u32 b_roi_info; /*Enable/Disable*/
@@ -362,11 +370,6 @@ enum msm_vidc_cb_event_types {
 	MSM_VIDC_BIT_DEPTH,
 	MSM_VIDC_PIC_STRUCT,
 	MSM_VIDC_COLOR_SPACE,
-	MSM_VIDC_CROP_TOP,
-	MSM_VIDC_CROP_LEFT,
-	MSM_VIDC_CROP_HEIGHT,
-	MSM_VIDC_CROP_WIDTH,
-	MSM_VIDC_PROFILE,
-	MSM_VIDC_LEVEL,
+	MSM_VIDC_FW_MIN_COUNT,
 };
 #endif
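Since data[1] in msm_vidc_roi_qp_payload uses the old-style flexible array idiom, a caller sizes the payload by subtracting the one declared element. A hedged allocation sketch (userspace; the include path is an assumption):

    #include <stdlib.h>
    #include <linux/types.h>
    #include <media/msm_vidc_utils.h>   /* include path assumed */

    static struct msm_vidc_roi_qp_payload *alloc_roi_payload(__u32 n_words)
    {
        /* data[1] is already counted inside sizeof(), hence n_words - 1 */
        size_t sz = sizeof(struct msm_vidc_roi_qp_payload) +
                    (n_words ? n_words - 1 : 0) * sizeof(__u32);

        return calloc(1, sz);
    }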
diff --git a/include/uapi/misc/wigig_sensing_uapi.h b/include/uapi/misc/wigig_sensing_uapi.h
new file mode 100644
index 0000000..4fb8221
--- /dev/null
+++ b/include/uapi/misc/wigig_sensing_uapi.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#ifndef __WIGIG_SENSING_UAPI_H__
+#define __WIGIG_SENSING_UAPI_H__
+
+#if !defined(__KERNEL__)
+#define __user
+#endif
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+enum wigig_sensing_mode {
+	WIGIG_SENSING_MODE_SEARCH             = 1,
+	WIGIG_SENSING_MODE_FACIAL_RECOGNITION = 2,
+	WIGIG_SENSING_MODE_GESTURE_DETECTION  = 3,
+	WIGIG_SENSING_MODE_STOP               = 7,
+	WIGIG_SENSING_MODE_CUSTOM             = 15
+};
+
+struct wigig_sensing_change_mode {
+	enum wigig_sensing_mode mode;
+	bool has_channel;
+	uint32_t channel;
+};
+
+enum wigig_sensing_event {
+	WIGIG_SENSING_EVENT_FW_READY,
+	WIGIG_SENSING_EVENT_RESET,
+};
+
+#define WIGIG_SENSING_IOC_MAGIC	'r'
+
+#define WIGIG_SENSING_IOCTL_SET_AUTO_RECOVERY      (0)
+#define WIGIG_SENSING_IOCTL_GET_MODE               (1)
+#define WIGIG_SENSING_IOCTL_CHANGE_MODE            (2)
+#define WIGIG_SENSING_IOCTL_CLEAR_DATA             (3)
+#define WIGIG_SENSING_IOCTL_GET_NUM_DROPPED_BURSTS (4)
+#define WIGIG_SENSING_IOCTL_GET_EVENT              (5)
+
+/**
+ * Set auto recovery, which means that the system will go back to search mode
+ *  after an error
+ */
+#define WIGIG_SENSING_IOC_SET_AUTO_RECOVERY \
+	_IO(WIGIG_SENSING_IOC_MAGIC, WIGIG_SENSING_IOCTL_SET_AUTO_RECOVERY)
+
+/**
+ * Get the current system mode of operation
+ *
+ * Returns enum wigig_sensing_mode
+ */
+#define WIGIG_SENSING_IOC_GET_MODE \
+	_IOR(WIGIG_SENSING_IOC_MAGIC, WIGIG_SENSING_IOCTL_GET_MODE, \
+	     enum wigig_sensing_mode)
+
+/**
+ * Change the mode of operation and, optionally, the channel
+ *
+ * Note: Before issuing a CHANGE_MODE operation, the application must stop
+ * reading data from the device node and clear any cached data. This comes to
+ * prevent loss of burst boundary synchronization in the application.
+ *
+ * Returns burst size
+ */
+#define WIGIG_SENSING_IOC_CHANGE_MODE \
+	_IOWR(WIGIG_SENSING_IOC_MAGIC, WIGIG_SENSING_IOCTL_CHANGE_MODE, \
+	      struct wigig_sensing_change_mode)
+
+/**
+ * Clear data buffer
+ */
+#define WIGIG_SENSING_IOC_CLEAR_DATA \
+	_IO(WIGIG_SENSING_IOC_MAGIC, WIGIG_SENSING_IOCTL_CLEAR_DATA)
+
+/**
+ * Get the number of bursts that were dropped due to data buffer overflow
+ */
+#define WIGIG_SENSING_IOC_GET_NUM_DROPPED_BURSTS \
+	_IOR(WIGIG_SENSING_IOC_MAGIC, \
+	     WIGIG_SENSING_IOCTL_GET_NUM_DROPPED_BURSTS, uint32_t)
+
+/**
+ * Get a pending sensing event, see enum wigig_sensing_event
+ */
+#define WIGIG_SENSING_IOC_GET_EVENT \
+	_IOR(WIGIG_SENSING_IOC_MAGIC, WIGIG_SENSING_IOCTL_GET_EVENT, \
+	     enum wigig_sensing_event)
+
+#endif /* __WIGIG_SENSING_UAPI_H__ */
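A hedged end-to-end userspace sketch for the new interface; the device node name is an assumption, and per the comment above, CHANGE_MODE returns the burst size on success:

    #include <fcntl.h>
    #include <stdbool.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <misc/wigig_sensing_uapi.h>

    static int start_gesture_detection(void)
    {
        struct wigig_sensing_change_mode req = {
            .mode = WIGIG_SENSING_MODE_GESTURE_DETECTION,
            .has_channel = false,
        };
        int burst_size;
        int fd = open("/dev/wigig_sensing", O_RDWR); /* node name assumed */

        if (fd < 0)
            return -1;
        burst_size = ioctl(fd, WIGIG_SENSING_IOC_CHANGE_MODE, &req);
        close(fd);
        return burst_size;
    }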
diff --git a/include/uapi/scsi/ufs/ufs.h b/include/uapi/scsi/ufs/ufs.h
index 8f906c9..22a0a7e 100644
--- a/include/uapi/scsi/ufs/ufs.h
+++ b/include/uapi/scsi/ufs/ufs.h
@@ -17,6 +17,9 @@ enum flag_idn {
 	QUERY_FLAG_IDN_BUSY_RTC                         = 0x09,
 	QUERY_FLAG_IDN_RESERVED3                        = 0x0A,
 	QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE    = 0x0B,
+	QUERY_FLAG_IDN_WB_EN                            = 0x0E,
+	QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN                 = 0x0F,
+	QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8     = 0x10,
 };
 
 /* Attribute idn for Query requests */
@@ -45,6 +48,10 @@ enum attr_idn {
 	QUERY_ATTR_IDN_PSA_STATE                = 0x15,
 	QUERY_ATTR_IDN_PSA_DATA_SIZE            = 0x16,
 	QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME	= 0x17,
+	QUERY_ATTR_IDN_WB_FLUSH_STATUS	        = 0x1C,
+	QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE       = 0x1D,
+	QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST    = 0x1E,
+	QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE        = 0x1F,
 };
 
 #define QUERY_ATTR_IDN_BOOT_LU_EN_MAX	0x02
diff --git a/init/initramfs.c b/init/initramfs.c
index edaf8a8..4143440 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -525,7 +525,7 @@ static void __init free_initrd(void)
 	unsigned long crashk_start = (unsigned long)__va(crashk_res.start);
 	unsigned long crashk_end   = (unsigned long)__va(crashk_res.end);
 #endif
-	if (do_retain_initrd)
+	if (do_retain_initrd || !initrd_start)
 		goto skip;
 
 #ifdef CONFIG_KEXEC_CORE
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 36be400..afee05e 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -621,6 +621,14 @@ static void bpf_jit_uncharge_modmem(u32 pages)
 	atomic_long_sub(pages, &bpf_jit_current);
 }
 
+#if IS_ENABLED(CONFIG_BPF_JIT) && IS_ENABLED(CONFIG_CFI_CLANG)
+bool __weak arch_bpf_jit_check_func(const struct bpf_prog *prog)
+{
+	return true;
+}
+EXPORT_SYMBOL(arch_bpf_jit_check_func);
+#endif
+
 struct bpf_binary_header *
 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 		     unsigned int alignment,
@@ -647,6 +655,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 	/* Fill space with illegal/arch-dep instructions. */
 	bpf_fill_ill_insns(hdr, size);
 
+	bpf_jit_set_header_magic(hdr);
 	hdr->pages = pages;
 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
 		     PAGE_SIZE - sizeof(*hdr));
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 118e3a8..6e544e3 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1454,19 +1454,25 @@ static int bpf_prog_load(union bpf_attr *attr)
 	if (err)
 		goto free_used_maps;
 
-	err = bpf_prog_new_fd(prog);
-	if (err < 0) {
-		/* failed to allocate fd.
-		 * bpf_prog_put() is needed because the above
-		 * bpf_prog_alloc_id() has published the prog
-		 * to the userspace and the userspace may
-		 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
-		 */
-		bpf_prog_put(prog);
-		return err;
-	}
-
+	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
+	 * effectively publicly exposed. However, retrieving via
+	 * bpf_prog_get_fd_by_id() will take another reference,
+	 * therefore it cannot be gone underneath us.
+	 *
+	 * Only for the time /after/ successful bpf_prog_new_fd()
+	 * and before returning to userspace, we might just hold
+	 * one reference and any parallel close on that fd could
+	 * rip everything out. Hence, below notifications must
+	 * happen before bpf_prog_new_fd().
+	 *
+	 * Also, any failure handling from this point onwards must
+	 * be using bpf_prog_put() given the program is exposed.
+	 */
 	bpf_prog_kallsyms_add(prog);
+
+	err = bpf_prog_new_fd(prog);
+	if (err < 0)
+		bpf_prog_put(prog);
 	return err;
 
 free_used_maps:
diff --git a/kernel/elfcore.c b/kernel/elfcore.c
index fc482c8..57fb4dc 100644
--- a/kernel/elfcore.c
+++ b/kernel/elfcore.c
@@ -3,6 +3,7 @@
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/binfmts.h>
+#include <linux/elfcore.h>
 
 Elf_Half __weak elf_core_extra_phdrs(void)
 {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6d7d708..18d8f2a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6986,7 +6986,7 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
 static int __perf_pmu_output_stop(void *info)
 {
 	struct perf_event *event = info;
-	struct pmu *pmu = event->pmu;
+	struct pmu *pmu = event->ctx->pmu;
 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 	struct remote_output ro = {
 		.rb	= event->rb,
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index d6b5618..bf3f2d3 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -426,7 +426,7 @@ static int hw_breakpoint_parse(struct perf_event *bp,
 
 int register_perf_hw_breakpoint(struct perf_event *bp)
 {
-	struct arch_hw_breakpoint hw;
+	struct arch_hw_breakpoint hw = { };
 	int err;
 
 	err = reserve_bp_slot(bp);
@@ -474,7 +474,7 @@ int
 modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
 			        bool check)
 {
-	struct arch_hw_breakpoint hw;
+	struct arch_hw_breakpoint hw = { };
 	int err;
 
 	err = hw_breakpoint_parse(bp, attr, &hw);
diff --git a/kernel/fork.c b/kernel/fork.c
index 6a5d06d..55ec95a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2670,7 +2670,7 @@ int sysctl_max_threads(struct ctl_table *table, int write,
 	struct ctl_table t;
 	int ret;
 	int threads = max_threads;
-	int min = MIN_THREADS;
+	int min = 1;
 	int max = MAX_THREADS;
 
 	t = *table;
@@ -2682,7 +2682,7 @@ int sysctl_max_threads(struct ctl_table *table, int write,
 	if (ret || !write)
 		return ret;
 
-	set_max_threads(threads);
+	max_threads = threads;
 
 	return 0;
 }
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 8e009cee..26814a1 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -294,6 +294,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
 	}
 }
 
+static void irq_sysfs_del(struct irq_desc *desc)
+{
+	/*
+	 * If irq_sysfs_init() has not yet been invoked (early boot), then
+	 * irq_kobj_base is NULL and the descriptor was never added.
+	 * kobject_del() complains about an object with no parent, so make
+	 * it conditional.
+	 */
+	if (irq_kobj_base)
+		kobject_del(&desc->kobj);
+}
+
 static int __init irq_sysfs_init(void)
 {
 	struct irq_desc *desc;
@@ -324,6 +336,7 @@ static struct kobj_type irq_kobj_type = {
 };
 
 static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
+static void irq_sysfs_del(struct irq_desc *desc) {}
 
 #endif /* CONFIG_SYSFS */
 
@@ -437,7 +450,7 @@ static void free_desc(unsigned int irq)
 	 * The sysfs entry must be serialized against a concurrent
 	 * irq_sysfs_init() as well.
 	 */
-	kobject_del(&desc->kobj);
+	irq_sysfs_del(desc);
 	delete_irq_desc(irq);
 
 	/*
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 95414ad..98c04ca5 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -36,6 +36,8 @@ static void resend_irqs(unsigned long arg)
 		irq = find_first_bit(irqs_resend, nr_irqs);
 		clear_bit(irq, irqs_resend);
 		desc = irq_to_desc(irq);
+		if (!desc)
+			continue;
 		local_irq_disable();
 		desc->handle_irq(desc);
 		local_irq_enable();
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 672ed40..181db01 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -262,8 +262,10 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
 {
 	char namebuf[KSYM_NAME_LEN];
 
-	if (is_ksym_addr(addr))
-		return !!get_symbol_pos(addr, symbolsize, offset);
+	if (is_ksym_addr(addr)) {
+		get_symbol_pos(addr, symbolsize, offset);
+		return 1;
+	}
 	return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
 	       !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
 }
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 23a83a4..f50b90d 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -301,6 +301,8 @@ static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
 {
 	struct page *pages;
 
+	if (fatal_signal_pending(current))
+		return NULL;
 	pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
 	if (pages) {
 		unsigned int count, i;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 29ff663..b8efca9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -483,6 +483,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
  */
 static void do_optimize_kprobes(void)
 {
+	lockdep_assert_held(&text_mutex);
 	/*
 	 * The optimization/unoptimization refers online_cpus via
 	 * stop_machine() and cpu-hotplug modifies online_cpus.
@@ -500,9 +501,7 @@ static void do_optimize_kprobes(void)
 	    list_empty(&optimizing_list))
 		return;
 
-	mutex_lock(&text_mutex);
 	arch_optimize_kprobes(&optimizing_list);
-	mutex_unlock(&text_mutex);
 }
 
 /*
@@ -513,6 +512,7 @@ static void do_unoptimize_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
 
+	lockdep_assert_held(&text_mutex);
 	/* See comment in do_optimize_kprobes() */
 	lockdep_assert_cpus_held();
 
@@ -520,7 +520,6 @@ static void do_unoptimize_kprobes(void)
 	if (list_empty(&unoptimizing_list))
 		return;
 
-	mutex_lock(&text_mutex);
 	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
 	/* Loop free_list for disarming */
 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
@@ -537,7 +536,6 @@ static void do_unoptimize_kprobes(void)
 		} else
 			list_del_init(&op->list);
 	}
-	mutex_unlock(&text_mutex);
 }
 
 /* Reclaim all kprobes on the free_list */
@@ -563,6 +561,7 @@ static void kprobe_optimizer(struct work_struct *work)
 {
 	mutex_lock(&kprobe_mutex);
 	cpus_read_lock();
+	mutex_lock(&text_mutex);
 	/* Lock modules while optimizing kprobes */
 	mutex_lock(&module_mutex);
 
@@ -590,6 +589,7 @@ static void kprobe_optimizer(struct work_struct *work)
 	do_free_cleaned_kprobes();
 
 	mutex_unlock(&module_mutex);
+	mutex_unlock(&text_mutex);
 	cpus_read_unlock();
 	mutex_unlock(&kprobe_mutex);
 
@@ -1505,7 +1505,8 @@ static int check_kprobe_address_safe(struct kprobe *p,
 	/* Ensure it is not in reserved area nor out of text */
 	if (!kernel_text_address((unsigned long) p->addr) ||
 	    within_kprobe_blacklist((unsigned long) p->addr) ||
-	    jump_label_text_reserved(p->addr, p->addr)) {
+	    jump_label_text_reserved(p->addr, p->addr) ||
+	    find_bug((unsigned long)p->addr)) {
 		ret = -EINVAL;
 		goto out;
 	}
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 722c27c..a1250ad 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -1027,6 +1027,7 @@ int klp_module_coming(struct module *mod)
 	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
 		patch->mod->name, obj->mod->name, obj->mod->name);
 	mod->klp_alive = false;
+	obj->mod = NULL;
 	klp_cleanup_module_patches_limited(mod, patch);
 	mutex_unlock(&klp_mutex);
 
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e810e8c..1e272f6 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3605,6 +3605,9 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
 	unsigned int depth;
 	int i;
 
+	if (unlikely(!debug_locks))
+		return 0;
+
 	depth = curr->lockdep_depth;
 	/*
 	 * This function is about (re)setting the class of a held lock,
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 5a0cf5f..82104d3 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -271,7 +271,7 @@ pv_wait_early(struct pv_node *prev, int loop)
 	if ((loop & PV_PREV_CHECK_MASK) != 0)
 		return false;
 
-	return READ_ONCE(prev->state) != vcpu_running || vcpu_is_preempted(prev->cpu);
+	return READ_ONCE(prev->state) != vcpu_running;
 }
 
 /*
diff --git a/kernel/module.c b/kernel/module.c
index 8644c18..e27ec1a 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -76,14 +76,9 @@
 
 /*
  * Modules' sections will be aligned on page boundaries
- * to ensure complete separation of code and data, but
- * only when CONFIG_STRICT_MODULE_RWX=y
+ * to ensure complete separation of code and data
  */
-#ifdef CONFIG_STRICT_MODULE_RWX
 # define debug_align(X) ALIGN(X, PAGE_SIZE)
-#else
-# define debug_align(X) (X)
-#endif
 
 /* If this is set, the section belongs in the init part of the module */
 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
@@ -1699,6 +1694,8 @@ static int add_usage_links(struct module *mod)
 	return ret;
 }
 
+static void module_remove_modinfo_attrs(struct module *mod, int end);
+
 static int module_add_modinfo_attrs(struct module *mod)
 {
 	struct module_attribute *attr;
@@ -1713,24 +1710,34 @@ static int module_add_modinfo_attrs(struct module *mod)
 		return -ENOMEM;
 
 	temp_attr = mod->modinfo_attrs;
-	for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
+	for (i = 0; (attr = modinfo_attrs[i]); i++) {
 		if (!attr->test || attr->test(mod)) {
 			memcpy(temp_attr, attr, sizeof(*temp_attr));
 			sysfs_attr_init(&temp_attr->attr);
 			error = sysfs_create_file(&mod->mkobj.kobj,
 					&temp_attr->attr);
+			if (error)
+				goto error_out;
 			++temp_attr;
 		}
 	}
+
+	return 0;
+
+error_out:
+	if (i > 0)
+		module_remove_modinfo_attrs(mod, --i);
 	return error;
 }
 
-static void module_remove_modinfo_attrs(struct module *mod)
+static void module_remove_modinfo_attrs(struct module *mod, int end)
 {
 	struct module_attribute *attr;
 	int i;
 
 	for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
+		if (end >= 0 && i > end)
+			break;
 		/* pick a field to test for end of list */
 		if (!attr->attr.name)
 			break;
@@ -1818,7 +1825,7 @@ static int mod_sysfs_setup(struct module *mod,
 	return 0;
 
 out_unreg_modinfo_attrs:
-	module_remove_modinfo_attrs(mod);
+	module_remove_modinfo_attrs(mod, -1);
 out_unreg_param:
 	module_param_sysfs_remove(mod);
 out_unreg_holders:
@@ -1854,7 +1861,7 @@ static void mod_sysfs_fini(struct module *mod)
 {
 }
 
-static void module_remove_modinfo_attrs(struct module *mod)
+static void module_remove_modinfo_attrs(struct module *mod, int end)
 {
 }
 
@@ -1870,14 +1877,14 @@ static void init_param_lock(struct module *mod)
 static void mod_sysfs_teardown(struct module *mod)
 {
 	del_usage_links(mod);
-	module_remove_modinfo_attrs(mod);
+	module_remove_modinfo_attrs(mod, -1);
 	module_param_sysfs_remove(mod);
 	kobject_put(mod->mkobj.drivers_dir);
 	kobject_put(mod->holders_dir);
 	mod_sysfs_fini(mod);
 }
 
-#ifdef CONFIG_STRICT_MODULE_RWX
+#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
 /*
  * LKM RO/NX protection: protect module's text/ro-data
  * from modification and any data from execution.
@@ -1900,6 +1907,7 @@ static void frob_text(const struct module_layout *layout,
 		   layout->text_size >> PAGE_SHIFT);
 }
 
+#ifdef CONFIG_STRICT_MODULE_RWX
 static void frob_rodata(const struct module_layout *layout,
 			int (*set_memory)(unsigned long start, int num_pages))
 {
@@ -1949,13 +1957,9 @@ void module_enable_ro(const struct module *mod, bool after_init)
 		return;
 
 	frob_text(&mod->core_layout, set_memory_ro);
-	frob_text(&mod->core_layout, set_memory_x);
 
 	frob_rodata(&mod->core_layout, set_memory_ro);
-
 	frob_text(&mod->init_layout, set_memory_ro);
-	frob_text(&mod->init_layout, set_memory_x);
-
 	frob_rodata(&mod->init_layout, set_memory_ro);
 
 	if (after_init)
@@ -2036,11 +2040,23 @@ static void disable_ro_nx(const struct module_layout *layout)
 	frob_writable_data(layout, set_memory_x);
 }
 
-#else
+#else /* !CONFIG_STRICT_MODULE_RWX */
 static void disable_ro_nx(const struct module_layout *layout) { }
 static void module_enable_nx(const struct module *mod) { }
 static void module_disable_nx(const struct module *mod) { }
-#endif
+#endif /*  CONFIG_STRICT_MODULE_RWX */
+
+static void module_enable_x(const struct module *mod)
+{
+	frob_text(&mod->core_layout, set_memory_x);
+	frob_text(&mod->init_layout, set_memory_x);
+}
+#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
+static void disable_ro_nx(const struct module_layout *layout) { }
+static void module_enable_nx(const struct module *mod) { }
+static void module_disable_nx(const struct module *mod) { }
+static void module_enable_x(const struct module *mod) { }
+#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
 
 #ifdef CONFIG_LIVEPATCH
 /*
@@ -2187,6 +2203,10 @@ static void free_module(struct module *mod)
 
 	/* Finally, free the core (containing the module structure) */
 	disable_ro_nx(&mod->core_layout);
+#ifdef CONFIG_DEBUG_MODULE_LOAD_INFO
+	pr_info("Unloaded %s: module core layout, start: 0x%pK size: 0x%x\n",
+		mod->name, mod->core_layout.base, mod->core_layout.size);
+#endif
 	module_memfree(mod->core_layout.base);
 }
 
@@ -3608,6 +3628,7 @@ static int complete_formation(struct module *mod, struct load_info *info)
 
 	module_enable_ro(mod, false);
 	module_enable_nx(mod);
+	module_enable_x(mod);
 
 	/* Mark state as coming so strong_try_module_get() ignores us,
 	 * but kallsyms etc. can see us. */
diff --git a/kernel/panic.c b/kernel/panic.c
index 7241892..9bc9c23 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -150,6 +150,7 @@ void panic(const char *fmt, ...)
 	 * after setting panic_cpu) from invoking panic() again.
 	 */
 	local_irq_disable();
+	preempt_disable_notrace();
 
 	/*
 	 * It's possible to come here directly from a panic-assertion and
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index cfd3f4c..32a55ec 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -266,13 +266,21 @@ static const struct file_operations pm_qos_debug_fops = {
 	.release        = single_release,
 };
 
-static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c,
+static inline int pm_qos_set_value_for_cpus(struct pm_qos_constraints *c,
 		struct cpumask *cpus)
 {
 	struct pm_qos_request *req = NULL;
 	int cpu;
 	s32 qos_val[NR_CPUS] = { [0 ... (NR_CPUS - 1)] = c->default_value };
 
+	/*
+	 * pm_qos_constraints can come from different classes;
+	 * update the cpumask only for the CPU_DMA_LATENCY class.
+	 */
+
+	if (c != pm_qos_array[PM_QOS_CPU_DMA_LATENCY]->constraints)
+		return -EINVAL;
+
 	plist_for_each_entry(req, &c->list, node) {
 		for_each_cpu(cpu, &req->cpus_affine) {
 			switch (c->type) {
@@ -295,6 +303,8 @@ static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c,
 			cpumask_set_cpu(cpu, cpus);
 		c->target_per_cpu[cpu] = qos_val[cpu];
 	}
+
+	return 0;
 }
 
 /**
@@ -347,7 +357,7 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
 	curr_value = pm_qos_get_value(c);
 	cpumask_clear(&cpus);
 	pm_qos_set_value(c, curr_value);
-	pm_qos_set_value_for_cpus(c, &cpus);
+	ret = pm_qos_set_value_for_cpus(c, &cpus);
 
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
@@ -358,7 +368,8 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
 	 * to update the new qos restriction for the cores
 	 */
 
-	if (!cpumask_empty(&cpus)) {
+	if (!cpumask_empty(&cpus) ||
+	    (ret && prev_value != curr_value)) {
 		ret = 1;
 		if (c->notifiers)
 			blocking_notifier_call_chain(c->notifiers,
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index add83b8..209434d 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -3216,7 +3216,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
 	/* move first record forward until length fits into the buffer */
 	seq = dumper->cur_seq;
 	idx = dumper->cur_idx;
-	while (l > size && seq < dumper->next_seq) {
+	while (l >= size && seq < dumper->next_seq) {
 		struct printk_log *msg = log_from_idx(idx);
 
 		l -= msg_print_text(msg, true, NULL, 0);
diff --git a/kernel/resource.c b/kernel/resource.c
index 30e1bc6..bce773c 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -318,24 +318,27 @@ int release_resource(struct resource *old)
 
 EXPORT_SYMBOL(release_resource);
 
-/*
- * Finds the lowest iomem resource existing within [res->start.res->end).
- * The caller must specify res->start, res->end, res->flags, and optionally
- * desc.  If found, returns 0, res is overwritten, if not found, returns -1.
- * This function walks the whole tree and not just first level children until
- * and unless first_level_children_only is true.
+/**
+ * Finds the lowest iomem resource that covers part of [start..end].  The
+ * caller must specify start, end, flags, and desc (which may be
+ * IORES_DESC_NONE).
+ *
+ * If a resource is found, returns 0 and *res is overwritten with the part
+ * of the resource that's within [start..end]; if none is found, returns
+ * -ENODEV.  Returns -EINVAL for invalid parameters.
+ *
+ * This function walks the whole tree and not just first level children
+ * unless @first_level_children_only is true.
  */
-static int find_next_iomem_res(struct resource *res, unsigned long desc,
-			       bool first_level_children_only)
+static int find_next_iomem_res(resource_size_t start, resource_size_t end,
+			       unsigned long flags, unsigned long desc,
+			       bool first_level_children_only,
+			       struct resource *res)
 {
-	resource_size_t start, end;
 	struct resource *p;
 	bool sibling_only = false;
 
 	BUG_ON(!res);
-
-	start = res->start;
-	end = res->end;
 	BUG_ON(start >= end);
 
 	if (first_level_children_only)
@@ -344,7 +347,7 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
 	read_lock(&resource_lock);
 
 	for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) {
-		if ((p->flags & res->flags) != res->flags)
+		if ((p->flags & flags) != flags)
 			continue;
 		if ((desc != IORES_DESC_NONE) && (desc != p->desc))
 			continue;
@@ -352,39 +355,38 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
 			p = NULL;
 			break;
 		}
-		if ((p->end >= start) && (p->start < end))
+		if ((p->end >= start) && (p->start <= end))
 			break;
 	}
 
+	if (p) {
+		/* copy data */
+		res->start = max(start, p->start);
+		res->end = min(end, p->end);
+		res->flags = p->flags;
+		res->desc = p->desc;
+	}
+
 	read_unlock(&resource_lock);
-	if (!p)
-		return -1;
-	/* copy data */
-	if (res->start < p->start)
-		res->start = p->start;
-	if (res->end > p->end)
-		res->end = p->end;
-	res->flags = p->flags;
-	res->desc = p->desc;
-	return 0;
+	return p ? 0 : -ENODEV;
 }
 
-static int __walk_iomem_res_desc(struct resource *res, unsigned long desc,
-				 bool first_level_children_only,
-				 void *arg,
+static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
+				 unsigned long flags, unsigned long desc,
+				 bool first_level_children_only, void *arg,
 				 int (*func)(struct resource *, void *))
 {
-	u64 orig_end = res->end;
+	struct resource res;
 	int ret = -1;
 
-	while ((res->start < res->end) &&
-	       !find_next_iomem_res(res, desc, first_level_children_only)) {
-		ret = (*func)(res, arg);
+	while (start < end &&
+	       !find_next_iomem_res(start, end, flags, desc,
+				    first_level_children_only, &res)) {
+		ret = (*func)(&res, arg);
 		if (ret)
 			break;
 
-		res->start = res->end + 1;
-		res->end = orig_end;
+		start = res.end + 1;
 	}
 
 	return ret;
@@ -407,13 +409,7 @@ static int __walk_iomem_res_desc(struct resource *res, unsigned long desc,
 int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
 		u64 end, void *arg, int (*func)(struct resource *, void *))
 {
-	struct resource res;
-
-	res.start = start;
-	res.end = end;
-	res.flags = flags;
-
-	return __walk_iomem_res_desc(&res, desc, false, arg, func);
+	return __walk_iomem_res_desc(start, end, flags, desc, false, arg, func);
 }
 EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
 
@@ -427,13 +423,9 @@ EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
 int walk_system_ram_res(u64 start, u64 end, void *arg,
 				int (*func)(struct resource *, void *))
 {
-	struct resource res;
+	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
-	res.start = start;
-	res.end = end;
-	res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
-
-	return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
+	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
 				     arg, func);
 }
 
@@ -444,13 +436,9 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
 int walk_mem_res(u64 start, u64 end, void *arg,
 		 int (*func)(struct resource *, void *))
 {
-	struct resource res;
+	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 
-	res.start = start;
-	res.end = end;
-	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-
-	return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
+	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
 				     arg, func);
 }
 
@@ -464,25 +452,25 @@ int walk_mem_res(u64 start, u64 end, void *arg,
 int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 		void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
+	resource_size_t start, end;
+	unsigned long flags;
 	struct resource res;
 	unsigned long pfn, end_pfn;
-	u64 orig_end;
 	int ret = -1;
 
-	res.start = (u64) start_pfn << PAGE_SHIFT;
-	res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
-	res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
-	orig_end = res.end;
-	while ((res.start < res.end) &&
-		(find_next_iomem_res(&res, IORES_DESC_NONE, true) >= 0)) {
+	start = (u64) start_pfn << PAGE_SHIFT;
+	end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
+	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+	while (start < end &&
+	       !find_next_iomem_res(start, end, flags, IORES_DESC_NONE,
+				    true, &res)) {
 		pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		end_pfn = (res.end + 1) >> PAGE_SHIFT;
 		if (end_pfn > pfn)
 			ret = (*func)(pfn, end_pfn - pfn, arg);
 		if (ret)
 			break;
-		res.start = res.end + 1;
-		res.end = orig_end;
+		start = res.end + 1;
 	}
 	return ret;
 }
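
    [Aside: the rewritten walkers above all share one iteration shape: find the
    next overlapping resource, clip it to [start..end], visit the clipped piece,
    then resume at res.end + 1. A minimal userspace sketch of that pattern
    follows; the interval table stands in for the iomem tree and find_next() is
    a hypothetical stand-in for find_next_iomem_res(), not the kernel function.]

    #include <stdio.h>

    struct res { unsigned long start, end; };   /* inclusive bounds, as in the patch */

    /* Hypothetical stand-in for find_next_iomem_res(): return the first
     * interval overlapping [start..end], clipped to it, or -1 if none.
     * The table is kept sorted, as the resource tree is. */
    static int find_next(const struct res *tbl, int n, unsigned long start,
                         unsigned long end, struct res *out)
    {
        for (int i = 0; i < n; i++) {
            if (tbl[i].end >= start && tbl[i].start <= end) {
                out->start = tbl[i].start > start ? tbl[i].start : start;
                out->end = tbl[i].end < end ? tbl[i].end : end;
                return 0;
            }
        }
        return -1;
    }

    int main(void)
    {
        const struct res tbl[] = { { 0x1000, 0x1fff }, { 0x3000, 0x5fff } };
        unsigned long start = 0x0, end = 0x4fff;
        struct res r;

        while (start < end && !find_next(tbl, 2, start, end, &r)) {
            printf("visit [%#lx..%#lx]\n", r.start, r.end);
            start = r.end + 1;  /* resume just past the visited piece */
        }
        return 0;
    }
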
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 9512fd7..6742017 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -19,8 +19,8 @@
 obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle.o fair.o rt.o deadline.o
 obj-y += wait.o wait_bit.o swait.o completion.o
-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o sched_avg.o
-obj-$(CONFIG_SCHED_WALT) += walt.o boost.o
+obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o
+obj-$(CONFIG_SCHED_WALT) += walt.o boost.o sched_avg.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_SCHED_DEBUG) += debug.o
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5a29adf..1099008 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2008,7 +2008,7 @@ static inline void walt_try_to_wake_up(struct task_struct *p)
 
 	rcu_read_lock();
 	grp = task_related_thread_group(p);
-	if (update_preferred_cluster(grp, p, old_load))
+	if (update_preferred_cluster(grp, p, old_load, false))
 		set_preferred_cluster(grp);
 	rcu_read_unlock();
 }
@@ -3203,7 +3203,7 @@ void scheduler_tick(void)
 
 	rcu_read_lock();
 	grp = task_related_thread_group(curr);
-	if (update_preferred_cluster(grp, curr, old_load))
+	if (update_preferred_cluster(grp, curr, old_load, true))
 		set_preferred_cluster(grp);
 	rcu_read_unlock();
 
@@ -3215,8 +3215,36 @@ void scheduler_tick(void)
 
 struct tick_work {
 	int			cpu;
+	atomic_t		state;
 	struct delayed_work	work;
 };
+/* Values for ->state, see diagram below. */
+#define TICK_SCHED_REMOTE_OFFLINE	0
+#define TICK_SCHED_REMOTE_OFFLINING	1
+#define TICK_SCHED_REMOTE_RUNNING	2
+
+/*
+ * State diagram for ->state:
+ *
+ *
+ *          TICK_SCHED_REMOTE_OFFLINE
+ *                    |   ^
+ *                    |   |
+ *                    |   | sched_tick_remote()
+ *                    |   |
+ *                    |   |
+ *                    +--TICK_SCHED_REMOTE_OFFLINING
+ *                    |   ^
+ *                    |   |
+ * sched_tick_start() |   | sched_tick_stop()
+ *                    |   |
+ *                    V   |
+ *          TICK_SCHED_REMOTE_RUNNING
+ *
+ *
+ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
+ * and sched_tick_start() are happy to leave the state in RUNNING.
+ */
 
 static struct tick_work __percpu *tick_work_cpu;
 
@@ -3229,6 +3257,7 @@ static void sched_tick_remote(struct work_struct *work)
 	struct task_struct *curr;
 	struct rq_flags rf;
 	u64 delta;
+	int os;
 
 	/*
 	 * Handle the tick only if it appears the remote CPU is running in full
@@ -3242,7 +3271,7 @@ static void sched_tick_remote(struct work_struct *work)
 
 	rq_lock_irq(rq, &rf);
 	curr = rq->curr;
-	if (is_idle_task(curr))
+	if (is_idle_task(curr) || cpu_is_offline(cpu))
 		goto out_unlock;
 
 	update_rq_clock(rq);
@@ -3262,13 +3291,18 @@ static void sched_tick_remote(struct work_struct *work)
 	/*
 	 * Run the remote tick once per second (1Hz). This arbitrary
 	 * frequency is large enough to avoid overload but short enough
-	 * to keep scheduler internal stats reasonably up to date.
+	 * to keep scheduler internal stats reasonably up to date.  But
+	 * first update state to reflect hotplug activity if required.
 	 */
-	queue_delayed_work(system_unbound_wq, dwork, HZ);
+	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
+	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
+	if (os == TICK_SCHED_REMOTE_RUNNING)
+		queue_delayed_work(system_unbound_wq, dwork, HZ);
 }
 
 static void sched_tick_start(int cpu)
 {
+	int os;
 	struct tick_work *twork;
 
 	if (housekeeping_cpu(cpu, HK_FLAG_TICK))
@@ -3277,15 +3311,20 @@ static void sched_tick_start(int cpu)
 	WARN_ON_ONCE(!tick_work_cpu);
 
 	twork = per_cpu_ptr(tick_work_cpu, cpu);
-	twork->cpu = cpu;
-	INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
-	queue_delayed_work(system_unbound_wq, &twork->work, HZ);
+	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
+	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
+	if (os == TICK_SCHED_REMOTE_OFFLINE) {
+		twork->cpu = cpu;
+		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
+		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
+	}
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 static void sched_tick_stop(int cpu)
 {
 	struct tick_work *twork;
+	int os;
 
 	if (housekeeping_cpu(cpu, HK_FLAG_TICK))
 		return;
@@ -3293,7 +3332,10 @@ static void sched_tick_stop(int cpu)
 	WARN_ON_ONCE(!tick_work_cpu);
 
 	twork = per_cpu_ptr(tick_work_cpu, cpu);
-	cancel_delayed_work_sync(&twork->work);
+	/* There cannot be competing actions, but don't rely on stop-machine. */
+	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
+	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
+	/* Don't cancel, as this would mess up the state machine. */
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
@@ -3301,7 +3343,6 @@ int __init sched_tick_offload_init(void)
 {
 	tick_work_cpu = alloc_percpu(struct tick_work);
 	BUG_ON(!tick_work_cpu);
-
 	return 0;
 }
 
@@ -3325,13 +3366,29 @@ struct preempt_store {
 };
 
 DEFINE_PER_CPU(struct preempt_store, the_ps);
+
+/*
+ * This is only called from __schedule() upon context switch.
+ *
+ * schedule() calls __schedule() with preemption disabled.
+ * If we had entered idle and are exiting idle now, reset the preemption
+ * tracking; otherwise we may think preemption was disabled the whole time
+ * when the non-idle task re-enables preemption in schedule().
+ */
+static inline void preempt_latency_reset(void)
+{
+	if (is_idle_task(this_rq()->curr))
+		this_cpu_ptr(&the_ps)->ts = 0;
+}
+
 /*
  * If the value passed in is equal to the current preempt count
  * then we just disabled preemption. Start timing the latency.
  */
 static inline void preempt_latency_start(int val)
 {
-	struct preempt_store *ps = &per_cpu(the_ps, raw_smp_processor_id());
+	int cpu = raw_smp_processor_id();
+	struct preempt_store *ps = &per_cpu(the_ps, cpu);
 
 	if (preempt_count() == val) {
 		unsigned long ip = get_lock_parent_ip();
@@ -3380,7 +3437,7 @@ static inline void preempt_latency_stop(int val)
 	if (preempt_count() == val) {
 		struct preempt_store *ps = &per_cpu(the_ps,
 				raw_smp_processor_id());
-		u64 delta = sched_clock() - ps->ts;
+		u64 delta = ps->ts ? (sched_clock() - ps->ts) : 0;
 
 		/*
 		 * Trace preempt disable stack if preemption
@@ -3419,6 +3476,7 @@ NOKPROBE_SYMBOL(preempt_count_sub);
 #else
 static inline void preempt_latency_start(int val) { }
 static inline void preempt_latency_stop(int val) { }
+static inline void preempt_latency_reset(void) { }
 #endif
 
 static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
@@ -3642,6 +3700,7 @@ static void __sched notrace __schedule(bool preempt)
 		if (!prev->on_rq)
 			prev->last_sleep_ts = wallclock;
 
+		preempt_latency_reset();
 		update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
 		update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
 		rq->nr_switches++;
@@ -6865,11 +6924,12 @@ static void sched_update_down_migrate_values(int cap_margin_levels,
 
 	if (cap_margin_levels > 1) {
 		/*
-		 * Skip first cluster as down migrate value isn't needed
+		 * Skip the last cluster as the down migrate value isn't
+		 * needed; there is no downmigration to it.
 		 */
 		for (i = 0; i < cap_margin_levels; i++)
-			if (cluster_cpus[i+1])
-				for_each_cpu(cpu, cluster_cpus[i+1])
+			if (cluster_cpus[i])
+				for_each_cpu(cpu, cluster_cpus[i])
 					sched_capacity_margin_down[cpu] =
 					sysctl_sched_capacity_margin_down[i];
 	} else {
@@ -7179,10 +7239,6 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
 #ifdef CONFIG_RT_GROUP_SCHED
 		if (!sched_rt_can_attach(css_tg(css), task))
 			return -EINVAL;
-#else
-		/* We don't support RT-tasks being in separate groups */
-		if (task->sched_class != &fair_sched_class)
-			return -EINVAL;
 #endif
 		/*
 		 * Serialize against wake_up_new_task() such that if its
@@ -7800,7 +7856,7 @@ const u32 sched_prio_to_wmult[40] = {
  */
 int set_task_boost(int boost, u64 period)
 {
-	if (boost < 0 || boost > 2)
+	if (boost < TASK_BOOST_NONE || boost >= TASK_BOOST_END)
 		return -EINVAL;
 	if (boost) {
 		current->boost = boost;
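
    [Aside: the OFFLINE/OFFLINING/RUNNING handshake above can be exercised in
    isolation. A userspace sketch, assuming C11 atomics: the kernel uses
    atomic_xchg() and atomic_fetch_add_unless(); fetch_add_unless() below is a
    hand-rolled port, and the printf calls stand in for queue_delayed_work().]

    #include <stdatomic.h>
    #include <stdio.h>

    enum { TICK_OFFLINE, TICK_OFFLINING, TICK_RUNNING }; /* 0, 1, 2 as in the patch */
    static atomic_int state = TICK_OFFLINE;

    /* Port of the kernel's atomic_fetch_add_unless(): add a unless *v == u,
     * returning the old value either way. */
    static int fetch_add_unless(atomic_int *v, int a, int u)
    {
        int c = atomic_load(v);

        while (c != u) {
            if (atomic_compare_exchange_weak(v, &c, c + a))
                break;
        }
        return c;
    }

    static void tick_start(void)
    {
        int os = atomic_exchange(&state, TICK_RUNNING);

        /* Queue fresh work only on OFFLINE -> RUNNING; on OFFLINING ->
         * RUNNING the still-pending work item keeps itself alive. */
        if (os == TICK_OFFLINE)
            printf("queue delayed work\n");
    }

    static void tick_stop(void)
    {
        atomic_exchange(&state, TICK_OFFLINING); /* work notices and winds down */
    }

    static void tick_remote(void)
    {
        /* Step OFFLINING -> OFFLINE; leave RUNNING untouched. */
        int os = fetch_add_unless(&state, -1, TICK_RUNNING);

        if (os == TICK_RUNNING)
            printf("re-queue delayed work\n");
    }

    int main(void)
    {
        tick_start();   /* OFFLINE -> RUNNING, queues */
        tick_remote();  /* still RUNNING: re-queues */
        tick_stop();    /* RUNNING -> OFFLINING */
        tick_remote();  /* OFFLINING -> OFFLINE: stops re-queueing */
        return 0;
    }
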
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 31decf0..d541d5e 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -56,6 +56,7 @@ struct sugov_policy {
 	struct task_struct	*thread;
 	bool			work_in_progress;
 
+	bool			limits_changed;
 	bool			need_freq_update;
 };
 
@@ -113,8 +114,11 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 	    !cpufreq_this_cpu_can_update(sg_policy->policy))
 		return false;
 
-	if (unlikely(sg_policy->need_freq_update))
+	if (unlikely(sg_policy->limits_changed)) {
+		sg_policy->limits_changed = false;
+		sg_policy->need_freq_update = true;
 		return true;
+	}
 
 	/* No need to recalculate next freq for min_rate_limit_us
 	 * at least. However we might still decide to further rate
@@ -135,6 +139,15 @@ static inline bool use_pelt(void)
 #endif
 }
 
+static inline bool conservative_pl(void)
+{
+#ifdef CONFIG_SCHED_WALT
+	return sysctl_sched_conservative_pl;
+#else
+	return false;
+#endif
+}
+
 static bool sugov_up_down_rate_limit(struct sugov_policy *sg_policy, u64 time,
 				     unsigned int next_freq)
 {
@@ -239,8 +252,11 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
 		return;
 
 	policy->cur = next_freq;
-	for_each_cpu(cpu, policy->cpus)
-		trace_cpu_frequency(next_freq, cpu);
+
+	if (trace_cpu_frequency_enabled()) {
+		for_each_cpu(cpu, policy->cpus)
+			trace_cpu_frequency(next_freq, cpu);
+	}
 }
 
 static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
@@ -567,6 +583,7 @@ static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
 	unsigned long nl = sg_cpu->walt_load.nl;
 	unsigned long cpu_util = sg_cpu->util;
 	bool is_hiload;
+	unsigned long pl = sg_cpu->walt_load.pl;
 
 	if (use_pelt())
 		return;
@@ -584,8 +601,11 @@ static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
 	if (is_hiload && nl >= mult_frac(cpu_util, NL_RATIO, 100))
 		*util = *max;
 
-	if (sg_policy->tunables->pl)
-		*util = max(*util, sg_cpu->walt_load.pl);
+	if (sg_policy->tunables->pl) {
+		if (conservative_pl())
+			pl = mult_frac(pl, TARGET_LOAD, 100);
+		*util = max(*util, pl);
+	}
 }
 
 /*
@@ -595,7 +615,7 @@ static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
 static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
 {
 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
-		sg_policy->need_freq_update = true;
+		sg_policy->limits_changed = true;
 }
 
 static inline unsigned long target_util(struct sugov_policy *sg_policy,
@@ -628,7 +648,9 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	if (!sugov_should_update_freq(sg_policy, time))
 		return;
 
-	busy = use_pelt() && sugov_cpu_is_busy(sg_cpu);
+	/* Limits may have changed, don't skip frequency update */
+	busy = use_pelt() && !sg_policy->need_freq_update &&
+		sugov_cpu_is_busy(sg_cpu);
 
 	sg_cpu->util = util = sugov_get_util(sg_cpu);
 	max = sg_cpu->max;
@@ -1286,6 +1308,7 @@ static int sugov_start(struct cpufreq_policy *policy)
 	sg_policy->last_freq_update_time	= 0;
 	sg_policy->next_freq			= 0;
 	sg_policy->work_in_progress		= false;
+	sg_policy->limits_changed		= false;
 	sg_policy->need_freq_update		= false;
 	sg_policy->cached_raw_freq		= 0;
 
@@ -1356,7 +1379,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
 		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
 	}
 
-	sg_policy->need_freq_update = true;
+	sg_policy->limits_changed = true;
 }
 
 static struct cpufreq_governor schedutil_gov = {
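
    [Aside: the limits_changed/need_freq_update split above is a two-flag
    handoff: callers that merely changed policy limits set limits_changed, and
    sugov_should_update_freq() consumes it, promoting it to need_freq_update so
    the later "CPU is busy" shortcut cannot swallow the update. A trimmed
    sketch with only the two flags; field names follow the patch, the struct
    is otherwise hypothetical.]

    #include <stdbool.h>
    #include <stdio.h>

    struct sugov_policy { bool limits_changed; bool need_freq_update; };

    static bool should_update_freq(struct sugov_policy *sg)
    {
        if (sg->limits_changed) {
            sg->limits_changed = false;
            sg->need_freq_update = true;  /* forces a freq re-evaluation */
            return true;
        }
        return false;  /* rate-limit checks would run here */
    }

    int main(void)
    {
        struct sugov_policy sg = { false, false };

        sg.limits_changed = true;  /* as sugov_limits() now does */
        printf("update? %d, need_freq_update=%d\n",
               should_update_freq(&sg), sg.need_freq_update);
        return 0;
    }
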
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index b89fa3a..7c7b6fc 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -530,6 +530,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
 {
 	struct rq *later_rq = NULL;
+	struct dl_bw *dl_b;
 
 	later_rq = find_lock_later_rq(p, rq);
 	if (!later_rq) {
@@ -558,6 +559,38 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
 		double_lock_balance(rq, later_rq);
 	}
 
+	if (p->dl.dl_non_contending || p->dl.dl_throttled) {
+		/*
+		 * Inactive timer is armed (or callback is running, but
+		 * waiting for us to release rq locks). In any case, when it
+		 * will fire (or continue), it will see running_bw of this
+		 * task migrated to later_rq (and correctly handle it).
+		 */
+		sub_running_bw(&p->dl, &rq->dl);
+		sub_rq_bw(&p->dl, &rq->dl);
+
+		add_rq_bw(&p->dl, &later_rq->dl);
+		add_running_bw(&p->dl, &later_rq->dl);
+	} else {
+		sub_rq_bw(&p->dl, &rq->dl);
+		add_rq_bw(&p->dl, &later_rq->dl);
+	}
+
+	/*
+	 * And we finally need to fixup root_domain(s) bandwidth accounting,
+	 * since p is still hanging out in the old (now moved to default) root
+	 * domain.
+	 */
+	dl_b = &rq->rd->dl_bw;
+	raw_spin_lock(&dl_b->lock);
+	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
+	raw_spin_unlock(&dl_b->lock);
+
+	dl_b = &later_rq->rd->dl_bw;
+	raw_spin_lock(&dl_b->lock);
+	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
+	raw_spin_unlock(&dl_b->lock);
+
 	set_task_cpu(p, later_rq->cpu);
 	double_unlock_balance(later_rq, rq);
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 75f36e5..d62b9f5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -134,11 +134,6 @@ unsigned int normalized_sysctl_sched_wakeup_granularity	= 1000000UL;
 const_debug unsigned int sysctl_sched_migration_cost	= 500000UL;
 DEFINE_PER_CPU_READ_MOSTLY(int, sched_load_boost);
 
-#ifdef CONFIG_SCHED_WALT
-__read_mostly unsigned int sysctl_sched_walt_cpu_high_irqload =
-						(10 * NSEC_PER_MSEC);
-#endif
-
 #ifdef CONFIG_SMP
 /*
  * For asym packing, by default the lower numbered CPU has higher priority.
@@ -3876,7 +3871,7 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
 }
 
 static inline bool
-bias_to_waker_cpu(struct task_struct *p, int cpu, int start_cpu)
+bias_to_this_cpu(struct task_struct *p, int cpu, int start_cpu)
 {
 	bool base_test = cpumask_test_cpu(cpu, &p->cpus_allowed) &&
 			cpu_active(cpu);
@@ -3892,8 +3887,12 @@ static inline bool task_fits_capacity(struct task_struct *p,
 {
 	unsigned int margin;
 
+	/*
+	 * Derive the upmigrate/downmigrate margin w.r.t. the
+	 * src/dst CPU.
+	 */
 	if (capacity_orig_of(task_cpu(p)) > capacity_orig_of(cpu))
-		margin = sched_capacity_margin_down[task_cpu(p)];
+		margin = sched_capacity_margin_down[cpu];
 	else
 		margin = sched_capacity_margin_up[task_cpu(p)];
 
@@ -3912,10 +3911,11 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
 	if (is_min_capacity_cpu(cpu)) {
 		if (task_boost_policy(p) == SCHED_BOOST_ON_BIG ||
 			task_boost > 0 ||
-			schedtune_task_boost(p) > 0)
+			schedtune_task_boost(p) > 0 ||
+			walt_should_kick_upmigrate(p, cpu))
 			return false;
 	} else { /* mid cap cpu */
-		if (task_boost > 1)
+		if (task_boost > TASK_BOOST_ON_MID)
 			return false;
 	}
 
@@ -3940,6 +3940,8 @@ struct find_best_target_env {
 	bool boosted;
 	int fastpath;
 	int start_cpu;
+	bool strict_max;
+	int skip_cpu;
 };
 
 static inline void adjust_cpus_for_packing(struct task_struct *p,
@@ -4640,6 +4642,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
 	if (likely(cfs_rq->runtime_remaining > 0))
 		return;
 
+	if (cfs_rq->throttled)
+		return;
 	/*
 	 * if we're unable to extend our runtime we resched so that the active
 	 * hierarchy can be throttled
@@ -4842,6 +4846,9 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
 		if (!cfs_rq_throttled(cfs_rq))
 			goto next;
 
+		/* By the above check, this should never be true */
+		SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
+
 		runtime = -cfs_rq->runtime_remaining + 1;
 		if (runtime > remaining)
 			runtime = remaining;
@@ -6803,6 +6810,12 @@ static inline bool task_skip_min_cpu(struct task_struct *p)
 	return sched_boost() != CONSERVATIVE_BOOST &&
 		get_rtg_status(p) && p->unfilter;
 }
+
+static inline bool is_many_wakeup(int sibling_count_hint)
+{
+	return sibling_count_hint >= sysctl_sched_many_wakeup_threshold;
+}
+
 #else
 static inline bool get_rtg_status(struct task_struct *p)
 {
@@ -6813,6 +6826,11 @@ static inline bool task_skip_min_cpu(struct task_struct *p)
 {
 	return false;
 }
+
+static inline bool is_many_wakeup(int sibling_count_hint)
+{
+	return false;
+}
 #endif
 
 static int get_start_cpu(struct task_struct *p)
@@ -6821,7 +6839,8 @@ static int get_start_cpu(struct task_struct *p)
 	int start_cpu = rd->min_cap_orig_cpu;
 	int task_boost = per_task_boost(p);
 	bool boosted = schedtune_task_boost(p) > 0 ||
-			task_boost_policy(p) == SCHED_BOOST_ON_BIG;
+			task_boost_policy(p) == SCHED_BOOST_ON_BIG ||
+			task_boost == TASK_BOOST_ON_MID;
 	bool task_skip_min = task_skip_min_cpu(p);
 
 	/*
@@ -6829,12 +6848,12 @@ static int get_start_cpu(struct task_struct *p)
 	 * or just mid will be -1, there never be any other combinations of -1s
 	 * beyond these
 	 */
-	if (task_skip_min || boosted || task_boost == 1) {
+	if (task_skip_min || boosted) {
 		start_cpu = rd->mid_cap_orig_cpu == -1 ?
 			rd->max_cap_orig_cpu : rd->mid_cap_orig_cpu;
 	}
 
-	if (task_boost == 2) {
+	if (task_boost > TASK_BOOST_ON_MID) {
 		start_cpu = rd->max_cap_orig_cpu;
 		return start_cpu;
 	}
@@ -6859,6 +6878,7 @@ enum fastpaths {
 	NONE = 0,
 	SYNC_WAKEUP,
 	PREV_CPU_FASTPATH,
+	MANY_WAKEUP,
 };
 
 static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
@@ -6901,6 +6921,9 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
 	if (prefer_idle && boosted)
 		target_capacity = 0;
 
+	if (fbt_env->strict_max)
+		most_spare_wake_cap = LONG_MIN;
+
 	/* Find start CPU based on boost value */
 	start_cpu = fbt_env->start_cpu;
 	/* Find SD for the start CPU */
@@ -6950,6 +6973,9 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
 			if (sched_cpu_high_irqload(i))
 				continue;
 
+			if (fbt_env->skip_cpu == i)
+				continue;
+
 			/*
 			 * p's blocked utilization is still accounted for on prev_cpu
 			 * so prev_cpu will receive a negative bias due to the double
@@ -6964,6 +6990,9 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
 				most_spare_cap_cpu = i;
 			}
 
+			if (per_task_boost(cpu_rq(i)->curr) ==
+					TASK_BOOST_STRICT_MAX)
+				continue;
 			/*
 			 * Cumulative demand may already be accounting for the
 			 * task. If so, add just the boost-utilization to
@@ -7216,7 +7245,8 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
 		 * accommodated in the higher capacity CPUs.
 		 */
 		if ((prefer_idle && best_idle_cpu != -1) ||
-		    (boosted && (best_idle_cpu != -1 || target_cpu != -1))) {
+		    (boosted && (best_idle_cpu != -1 || target_cpu != -1 ||
+		     (fbt_env->strict_max && most_spare_cap_cpu != -1)))) {
 			if (boosted) {
 				if (!next_group_higher_cap)
 					break;
@@ -7584,7 +7614,8 @@ static DEFINE_PER_CPU(cpumask_t, energy_cpus);
  * let's keep things simple by re-using the existing slow path.
  */
 
-static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
+static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
+				     int sync, int sibling_count_hint)
 {
 	unsigned long prev_energy = ULONG_MAX, best_energy = ULONG_MAX;
 	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
@@ -7599,8 +7630,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
 	int placement_boost = task_boost_policy(p);
 	u64 start_t = 0;
 	int delta = 0;
-	int boosted = (schedtune_task_boost(p) > 0) ||
-				(per_task_boost(p) > 0);
+	int task_boost = per_task_boost(p);
+	int boosted = (schedtune_task_boost(p) > 0) || (task_boost > 0);
 	int start_cpu = get_start_cpu(p);
 
 	if (start_cpu < 0)
@@ -7621,10 +7652,17 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
 		sync = 0;
 
 	if (sysctl_sched_sync_hint_enable && sync &&
-				bias_to_waker_cpu(p, cpu, start_cpu)) {
+				bias_to_this_cpu(p, cpu, start_cpu)) {
 		best_energy_cpu = cpu;
 		fbt_env.fastpath = SYNC_WAKEUP;
-		goto sync_wakeup;
+		goto done;
+	}
+
+	if (is_many_wakeup(sibling_count_hint) && prev_cpu != cpu &&
+				bias_to_this_cpu(p, prev_cpu, start_cpu)) {
+		best_energy_cpu = prev_cpu;
+		fbt_env.fastpath = MANY_WAKEUP;
+		goto done;
 	}
 
 	rcu_read_lock();
@@ -7652,6 +7690,10 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
 		fbt_env.need_idle = need_idle;
 		fbt_env.start_cpu = start_cpu;
 		fbt_env.boosted = boosted;
+		fbt_env.strict_max = is_rtg &&
+			(task_boost == TASK_BOOST_STRICT_MAX);
+		fbt_env.skip_cpu = is_many_wakeup(sibling_count_hint) ?
+				   cpu : -1;
 
 		find_best_target(NULL, candidates, p, &fbt_env);
 	} else {
@@ -7716,7 +7758,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
 	    ((prev_energy - best_energy) <= prev_energy >> 4))
 		best_energy_cpu = prev_cpu;
 
-sync_wakeup:
+done:
 
 	trace_sched_task_util(p, cpumask_bits(candidates)[0], best_energy_cpu,
 			sync, need_idle, fbt_env.fastpath, placement_boost,
@@ -7754,7 +7796,8 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 
 	if (static_branch_unlikely(&sched_energy_present)) {
 		rcu_read_lock();
-		new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync);
+		new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync,
+						    sibling_count_hint);
 		if (unlikely(new_cpu < 0))
 			new_cpu = prev_cpu;
 		rcu_read_unlock();
@@ -7768,7 +7811,8 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 			if (schedtune_prefer_idle(p) && !sched_feat(EAS_PREFER_IDLE) && !sync)
 				goto sd_loop;
 
-			new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync);
+			new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync,
+							    sibling_count_hint);
 			if (new_cpu >= 0)
 				return new_cpu;
 			new_cpu = prev_cpu;
@@ -8523,6 +8567,16 @@ static inline int migrate_degrades_locality(struct task_struct *p,
 }
 #endif
 
+static inline bool can_migrate_boosted_task(struct task_struct *p,
+			int src_cpu, int dst_cpu)
+{
+	if (per_task_boost(p) == TASK_BOOST_STRICT_MAX &&
+		task_in_related_thread_group(p) &&
+		(capacity_orig_of(dst_cpu) < capacity_orig_of(src_cpu)))
+		return false;
+	return true;
+}
+
 /*
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  */
@@ -8543,6 +8597,12 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
 		return 0;
 
+	/*
+	 * Don't allow pulling a boosted task to smaller-capacity cores.
+	 */
+	if (!can_migrate_boosted_task(p, env->src_cpu, env->dst_cpu))
+		return 0;
+
 	if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {
 		int cpu;
 
@@ -9148,10 +9208,6 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
 	unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
 	struct sched_group *sdg = sd->groups;
-	struct max_cpu_capacity *mcc;
-	unsigned long max_capacity;
-	int max_cap_cpu;
-	unsigned long flags;
 
 	capacity *= arch_scale_max_freq_capacity(sd, cpu);
 	capacity >>= SCHED_CAPACITY_SHIFT;
@@ -9159,26 +9215,6 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 	capacity = min(capacity, thermal_cap(cpu));
 	cpu_rq(cpu)->cpu_capacity_orig = capacity;
 
-	mcc = &cpu_rq(cpu)->rd->max_cpu_capacity;
-
-	raw_spin_lock_irqsave(&mcc->lock, flags);
-	max_capacity = mcc->val;
-	max_cap_cpu = mcc->cpu;
-
-	if ((max_capacity > capacity && max_cap_cpu == cpu) ||
-	    (max_capacity < capacity)) {
-		mcc->val = capacity;
-		mcc->cpu = cpu;
-#ifdef CONFIG_SCHED_DEBUG
-		raw_spin_unlock_irqrestore(&mcc->lock, flags);
-		printk_deferred(KERN_INFO "CPU%d: update max cpu_capacity %lu\n",
-				cpu, capacity);
-		goto skip_unlock;
-#endif
-	}
-	raw_spin_unlock_irqrestore(&mcc->lock, flags);
-
-skip_unlock: __attribute__ ((unused));
 	capacity = scale_rt_capacity(cpu, capacity);
 
 	if (!capacity)
@@ -9396,7 +9432,8 @@ group_similar_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
 	long diff = sg->sgc->min_capacity - ref->sgc->min_capacity;
 	long max = max(sg->sgc->min_capacity, ref->sgc->min_capacity);
 
-	return abs(diff) < max >> 3;
+	return ((abs(diff) < max >> 3) ||
+		asym_cap_siblings(group_first_cpu(sg), group_first_cpu(ref)));
 }
 
 static inline enum
@@ -9709,6 +9746,19 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 			sgs->group_type = group_classify(sg, sgs);
 		}
 
+		/*
+		 * Disallow moving tasks from asym cap sibling CPUs to other
+		 * CPUs (lower capacity) unless the asym cap sibling group has
+		 * no capacity to manage the current load.
+		 */
+		if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
+			sgs->group_no_capacity &&
+			asym_cap_sibling_group_has_capacity(env->dst_cpu,
+						env->sd->imbalance_pct)) {
+			sgs->group_no_capacity = 0;
+			sgs->group_type = group_classify(sg, sgs);
+		}
+
 		if (update_sd_pick_busiest(env, sds, sg, sgs)) {
 			sds->busiest = sg;
 			sds->busiest_stat = *sgs;
@@ -10631,7 +10681,10 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 			 * if the curr task on busiest CPU can't be
 			 * moved to this_cpu:
 			 */
-			if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
+			if (!cpumask_test_cpu(this_cpu,
+						&busiest->curr->cpus_allowed)
+				|| !can_migrate_boosted_task(busiest->curr,
+						cpu_of(busiest), this_cpu)) {
 				raw_spin_unlock_irqrestore(&busiest->lock,
 							    flags);
 				env.flags |= LBF_ALL_PINNED;
@@ -10685,9 +10738,10 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 out_balanced:
 	/*
 	 * We reach balance although we may have faced some affinity
-	 * constraints. Clear the imbalance flag if it was set.
+	 * constraints. Clear the imbalance flag only if other tasks got
+	 * a chance to move and fix the imbalance.
 	 */
-	if (sd_parent) {
+	if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
 		int *group_imbalance = &sd_parent->groups->sgc->imbalance;
 
 		if (*group_imbalance)
@@ -12039,18 +12093,18 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 void online_fair_sched_group(struct task_group *tg)
 {
 	struct sched_entity *se;
+	struct rq_flags rf;
 	struct rq *rq;
 	int i;
 
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
 		se = tg->se[i];
-
-		raw_spin_lock_irq(&rq->lock);
+		rq_lock_irq(rq, &rf);
 		update_rq_clock(rq);
 		attach_entity_cfs_rq(se);
 		sync_throttle(tg, i);
-		raw_spin_unlock_irq(&rq->lock);
+		rq_unlock_irq(rq, &rf);
 	}
 }
 
@@ -12611,7 +12665,7 @@ void check_for_migration(struct rq *rq, struct task_struct *p)
 
 		raw_spin_lock(&migration_lock);
 		rcu_read_lock();
-		new_cpu = find_energy_efficient_cpu(p, prev_cpu, 0);
+		new_cpu = find_energy_efficient_cpu(p, prev_cpu, 0, 1);
 		rcu_read_unlock();
 		if ((new_cpu != -1) && (new_cpu != prev_cpu) &&
 		    (capacity_orig_of(new_cpu) > capacity_orig_of(prev_cpu))) {
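
    [Aside: among the fair.c changes, the task_fits_capacity() fix is worth a
    worked example: when the candidate CPU has lower capacity than the task's
    current CPU, the downmigrate margin of the destination (not the source) now
    applies. A sketch of the fixed-point headroom check, assuming the usual
    capacity * 1024 > util * margin form; the margin values are made up.]

    #include <stdbool.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE 1024

    static bool task_fits(unsigned long util, unsigned long src_cap,
                          unsigned long dst_cap, unsigned long margin_up_src,
                          unsigned long margin_down_dst)
    {
        /* Moving down in capacity? Use the destination's downmigrate
         * margin, per the fix above; otherwise the source's upmigrate one. */
        unsigned long margin = (src_cap > dst_cap) ? margin_down_dst
                                                   : margin_up_src;

        return dst_cap * SCHED_CAPACITY_SCALE > util * margin;
    }

    int main(void)
    {
        /* util=300 task, big CPU (1024) -> little CPU (512), made-up margins */
        printf("fits little? %d\n", task_fits(300, 1024, 512, 1280, 1434));
        return 0;
    }
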
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index c349976..aa67df6 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -242,13 +242,14 @@ static void do_idle(void)
 		check_pgt_cache();
 		rmb();
 
+		local_irq_disable();
+
 		if (cpu_is_offline(cpu)) {
-			tick_nohz_idle_stop_tick_protected();
+			tick_nohz_idle_stop_tick();
 			cpuhp_report_idle_dead();
 			arch_cpu_idle_dead();
 		}
 
-		local_irq_disable();
 		arch_cpu_idle_enter();
 
 		/*
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index 76e0eaf..dd27e632 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -235,7 +235,7 @@ static int membarrier_register_private_expedited(int flags)
 	 * groups, which use the same mm. (CLONE_VM but not
 	 * CLONE_THREAD).
 	 */
-	if (atomic_read(&mm->membarrier_state) & state)
+	if ((atomic_read(&mm->membarrier_state) & state) == state)
 		return 0;
 	atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED, &mm->membarrier_state);
 	if (flags & MEMBARRIER_FLAG_SYNC_CORE)
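
    [Aside: the one-line membarrier fix turns an any-bit test into an all-bits
    test. With a multi-bit state (e.g. private-expedited plus sync-core),
    x & state is already non-zero once a single bit is set, which would wrongly
    short-circuit registration of the rest; (x & state) == state only succeeds
    when every requested bit is present. Illustrative constants below, not the
    real MEMBARRIER_STATE_* values.]

    #include <stdio.h>

    #define BIT_A 0x1
    #define BIT_B 0x2

    int main(void)
    {
        unsigned int mm_state = BIT_A;          /* only A registered so far */
        unsigned int wanted = BIT_A | BIT_B;    /* caller asks for both */

        printf("any-bit test:  %d\n", !!(mm_state & wanted));          /* 1: wrongly "done" */
        printf("all-bits test: %d\n", (mm_state & wanted) == wanted);  /* 0: keep going */
        return 0;
    }
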
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 63c94a6..4170afd 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -140,6 +140,7 @@
 #include <linux/file.h>
 #include <linux/poll.h>
 #include <linux/psi.h>
+#include <linux/oom.h>
 #include "sched.h"
 
 #define CREATE_TRACE_POINTS
@@ -574,8 +575,12 @@ static u64 update_triggers(struct psi_group *group, u64 now)
 		trace_psi_event(t->state, t->threshold);
 
 		/* Generate an event */
-		if (cmpxchg(&t->event, 0, 1) == 0)
+		if (cmpxchg(&t->event, 0, 1) == 0) {
+			if (!strcmp(t->comm, ULMK_MAGIC))
+				mod_timer(&t->wdog_timer, jiffies +
+					  nsecs_to_jiffies(2 * t->win.size));
 			wake_up_interruptible(&t->event_wait);
+		}
 		t->last_event_time = now;
 	}
 
@@ -587,10 +592,14 @@ static u64 update_triggers(struct psi_group *group, u64 now)
 	return now + group->poll_min_period;
 }
 
+/*
+ * Generate an emergency event for ULMK triggers, allowing more than
+ * one event per window.
+ */
 void psi_emergency_trigger(void)
 {
 	struct psi_group *group = &psi_system;
 	struct psi_trigger *t;
+	u64 now;
 
 	if (static_branch_likely(&psi_disabled))
 		return;
@@ -602,17 +611,55 @@ void psi_emergency_trigger(void)
 	if (!mutex_trylock(&group->trigger_lock))
 		return;
 
+	now = sched_clock();
 	list_for_each_entry(t, &group->triggers, node) {
+		if (strcmp(t->comm, ULMK_MAGIC))
+			continue;
 		trace_psi_event(t->state, t->threshold);
 
 		/* Generate an event */
-		if (cmpxchg(&t->event, 0, 1) == 0)
+		if (cmpxchg(&t->event, 0, 1) == 0) {
+			mod_timer(&t->wdog_timer, (unsigned long)t->win.size);
 			wake_up_interruptible(&t->event_wait);
+		}
+		t->last_event_time = now;
 	}
 	mutex_unlock(&group->trigger_lock);
 }
 
 /*
+ * Return true if any ULMK trigger is active.
+ */
+bool psi_is_trigger_active(void)
+{
+	struct psi_group *group = &psi_system;
+	struct psi_trigger *t;
+	bool trigger_active = false;
+	u64 now;
+
+	if (static_branch_likely(&psi_disabled))
+		return false;
+
+	/*
+	 * In the unlikely case that OOM was triggered while triggers
+	 * are being added or removed, report a trigger as active.
+	 */
+	if (!mutex_trylock(&group->trigger_lock))
+		return true;
+
+	now = sched_clock();
+	list_for_each_entry(t, &group->triggers, node) {
+		if (strcmp(t->comm, ULMK_MAGIC))
+			continue;
+
+		if (now <= t->last_event_time + t->win.size)
+			trigger_active = true;
+	}
+	mutex_unlock(&group->trigger_lock);
+	return trigger_active;
+}
+
+/*
  * Schedule polling if it's not already scheduled. It's safe to call even from
  * hotpath because even though kthread_queue_delayed_work takes worker->lock
  * spinlock that spinlock is never contended due to poll_scheduled atomic
@@ -1112,6 +1159,8 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
 	t->last_event_time = 0;
 	init_waitqueue_head(&t->event_wait);
 	kref_init(&t->refcount);
+	get_task_comm(t->comm, current);
+	timer_setup(&t->wdog_timer, ulmk_watchdog_fn, TIMER_DEFERRABLE);
 
 	mutex_lock(&group->trigger_lock);
 
@@ -1184,6 +1233,7 @@ static void psi_trigger_destroy(struct kref *ref)
 		}
 	}
 
+	del_timer_sync(&t->wdog_timer);
 	mutex_unlock(&group->trigger_lock);
 
 	/*
@@ -1237,8 +1287,11 @@ __poll_t psi_trigger_poll(void **trigger_ptr,
 
 	poll_wait(file, &t->event_wait, wait);
 
-	if (cmpxchg(&t->event, 1, 0) == 1)
+	if (cmpxchg(&t->event, 1, 0) == 1) {
 		ret |= EPOLLPRI;
+		if (!strcmp(t->comm, ULMK_MAGIC))
+			ulmk_watchdog_pet(&t->wdog_timer);
+	}
 
 	kref_put(&t->refcount, psi_trigger_destroy);
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b7af759..47f9add 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -991,7 +991,6 @@ struct rq {
 	struct walt_sched_stats walt_stats;
 
 	u64			window_start;
-	s64			cum_window_start;
 	unsigned long		walt_flags;
 
 	u64			cur_irqload;
@@ -2659,6 +2658,7 @@ struct related_thread_group {
 	struct rcu_head rcu;
 	u64 last_update;
 	u64 downmigrate_ts;
+	u64 start_ts;
 };
 
 extern struct sched_cluster *sched_cluster[NR_CPUS];
@@ -2680,7 +2680,7 @@ extern unsigned int  __read_mostly sched_load_granule;
 
 extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
 extern int update_preferred_cluster(struct related_thread_group *grp,
-			struct task_struct *p, u32 old_load);
+			struct task_struct *p, u32 old_load, bool from_tick);
 extern void set_preferred_cluster(struct related_thread_group *grp);
 extern void add_new_task_to_grp(struct task_struct *new);
 
@@ -2699,6 +2699,35 @@ static inline int asym_cap_siblings(int cpu1, int cpu2)
 		cpumask_test_cpu(cpu2, &asym_cap_sibling_cpus));
 }
 
+static inline bool asym_cap_sibling_group_has_capacity(int dst_cpu, int margin)
+{
+	int sib1, sib2;
+	int nr_running;
+	unsigned long total_util, total_capacity;
+
+	if (cpumask_empty(&asym_cap_sibling_cpus) ||
+			cpumask_test_cpu(dst_cpu, &asym_cap_sibling_cpus))
+		return false;
+
+	sib1 = cpumask_first(&asym_cap_sibling_cpus);
+	sib2 = cpumask_last(&asym_cap_sibling_cpus);
+
+	if (!cpu_active(sib1) || cpu_isolated(sib1) ||
+		!cpu_active(sib2) || cpu_isolated(sib2))
+		return false;
+
+	nr_running = cpu_rq(sib1)->cfs.h_nr_running +
+			cpu_rq(sib2)->cfs.h_nr_running;
+
+	if (nr_running <= 2)
+		return true;
+
+	total_capacity = capacity_of(sib1) + capacity_of(sib2);
+	total_util = cpu_util(sib1) + cpu_util(sib2);
+
+	return ((total_capacity * 100) > (total_util * margin));
+}
+
 static inline int cpu_max_possible_capacity(int cpu)
 {
 	return cpu_rq(cpu)->cluster->max_possible_capacity;
@@ -2914,7 +2943,11 @@ static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
 
 static inline bool is_min_capacity_cluster(struct sched_cluster *cluster)
 {
-	return is_min_capacity_cpu(cluster_first_cpu(cluster));
+	int cpu = cluster_first_cpu(cluster);
+
+	if (cpu >= num_possible_cpus())
+		return false;
+	return is_min_capacity_cpu(cpu);
 }
 
 #else	/* CONFIG_SCHED_WALT */
@@ -2978,6 +3011,11 @@ static inline struct sched_cluster *rq_cluster(struct rq *rq)
 
 static inline int asym_cap_siblings(int cpu1, int cpu2) { return 0; }
 
+static inline bool asym_cap_sibling_group_has_capacity(int dst_cpu, int margin)
+{
+	return false;
+}
+
 static inline void set_preferred_cluster(struct related_thread_group *grp) { }
 
 static inline bool task_in_related_thread_group(struct task_struct *p)
@@ -2995,7 +3033,7 @@ static inline u32 task_load(struct task_struct *p) { return 0; }
 static inline u32 task_pl(struct task_struct *p) { return 0; }
 
 static inline int update_preferred_cluster(struct related_thread_group *grp,
-			 struct task_struct *p, u32 old_load)
+			 struct task_struct *p, u32 old_load, bool from_tick)
 {
 	return 0;
 }
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index 39ea275..1e7bb64 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -25,10 +25,16 @@ static DEFINE_PER_CPU(u64, nr_max);
 static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
 static s64 last_get_time;
 
-unsigned int sysctl_sched_busy_hysteresis_enable_cpus;
-static DEFINE_PER_CPU(atomic64_t, last_busy_time) = ATOMIC64_INIT(0);
+unsigned int sysctl_sched_busy_hyst_enable_cpus;
+unsigned int sysctl_sched_busy_hyst;
+unsigned int sysctl_sched_coloc_busy_hyst_enable_cpus = 112;
+unsigned int sysctl_sched_coloc_busy_hyst = 39000000;
+unsigned int sysctl_sched_coloc_busy_hyst_max_ms = 5000;
+static DEFINE_PER_CPU(atomic64_t, busy_hyst_end_time) = ATOMIC64_INIT(0);
+static DEFINE_PER_CPU(u64, hyst_time);
 
 #define NR_THRESHOLD_PCT		15
+#define MAX_RTGB_TIME (sysctl_sched_coloc_busy_hyst_max_ms * NSEC_PER_MSEC)
 
 /**
  * sched_get_nr_running_avg
@@ -45,6 +51,7 @@ void sched_get_nr_running_avg(struct sched_avg_stats *stats)
 	u64 curr_time = sched_clock();
 	u64 period = curr_time - last_get_time;
 	u64 tmp_nr, tmp_misfit;
+	bool any_hyst_time = false;
 
 	if (!period)
 		return;
@@ -91,19 +98,48 @@ void sched_get_nr_running_avg(struct sched_avg_stats *stats)
 		spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
 	}
 
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(hyst_time, cpu)) {
+			any_hyst_time = true;
+			break;
+		}
+	}
+	if (any_hyst_time && get_rtgb_active_time() >= MAX_RTGB_TIME)
+		sched_update_hyst_times();
+
 	last_get_time = curr_time;
 
 }
 EXPORT_SYMBOL(sched_get_nr_running_avg);
 
+void sched_update_hyst_times(void)
+{
+	u64 std_time, rtgb_time;
+	bool rtgb_active;
+	int cpu;
+
+	rtgb_active = is_rtgb_active() && (sched_boost() != CONSERVATIVE_BOOST)
+			&& (get_rtgb_active_time() < MAX_RTGB_TIME);
+
+	for_each_possible_cpu(cpu) {
+		std_time = (BIT(cpu)
+			     & sysctl_sched_busy_hyst_enable_cpus) ?
+			     sysctl_sched_busy_hyst : 0;
+		rtgb_time = ((BIT(cpu)
+			     & sysctl_sched_coloc_busy_hyst_enable_cpus)
+			     && rtgb_active) ? sysctl_sched_coloc_busy_hyst : 0;
+		per_cpu(hyst_time, cpu) = max(std_time, rtgb_time);
+	}
+}
+
 #define BUSY_NR_RUN		3
 #define BUSY_LOAD_FACTOR	10
-static inline void update_last_busy_time(int cpu, bool dequeue,
+static inline void update_busy_hyst_end_time(int cpu, bool dequeue,
 				unsigned long prev_nr_run, u64 curr_time)
 {
 	bool nr_run_trigger = false, load_trigger = false;
 
-	if (!(BIT(cpu) & sysctl_sched_busy_hysteresis_enable_cpus))
+	if (!per_cpu(hyst_time, cpu))
 		return;
 
 	if (prev_nr_run >= BUSY_NR_RUN && per_cpu(nr, cpu) < BUSY_NR_RUN)
@@ -114,7 +150,8 @@ static inline void update_last_busy_time(int cpu, bool dequeue,
 		load_trigger = true;
 
 	if (nr_run_trigger || load_trigger)
-		atomic64_set(&per_cpu(last_busy_time, cpu), curr_time);
+		atomic64_set(&per_cpu(busy_hyst_end_time, cpu),
+				curr_time + per_cpu(hyst_time, cpu));
 }
 
 /**
@@ -145,7 +182,7 @@ void sched_update_nr_prod(int cpu, long delta, bool inc)
 	if (per_cpu(nr, cpu) > per_cpu(nr_max, cpu))
 		per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
 
-	update_last_busy_time(cpu, !inc, nr_running, curr_time);
+	update_busy_hyst_end_time(cpu, !inc, nr_running, curr_time);
 
 	per_cpu(nr_prod_sum, cpu) += nr_running * diff;
 	per_cpu(nr_big_prod_sum, cpu) += walt_big_tasks(cpu) * diff;
@@ -169,10 +206,9 @@ unsigned int sched_get_cpu_util(int cpu)
 	util = rq->cfs.avg.util_avg;
 	capacity = capacity_orig_of(cpu);
 
-#ifdef CONFIG_SCHED_WALT
 	util = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
 	util = div64_u64(util, sched_ravg_window >> SCHED_CAPACITY_SHIFT);
-#endif
+
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 
 	util = (util >= capacity) ? capacity : util;
@@ -180,7 +216,13 @@ unsigned int sched_get_cpu_util(int cpu)
 	return busy;
 }
 
-u64 sched_get_cpu_last_busy_time(int cpu)
+u64 sched_lpm_disallowed_time(int cpu)
 {
-	return atomic64_read(&per_cpu(last_busy_time, cpu));
+	u64 now = sched_clock();
+	u64 bias_end_time = atomic64_read(&per_cpu(busy_hyst_end_time, cpu));
+
+	if (now < bias_end_time)
+		return bias_end_time - now;
+
+	return 0;
 }
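
    [Aside: the sched_avg.c rework replaces a raw last-busy timestamp with a
    precomputed end time, so the idle path gets back a remaining-time budget
    rather than redoing the hysteresis math itself. A userspace sketch of
    sched_lpm_disallowed_time() with a stubbed clock; the 39 ms value is the
    patch's default colocation hysteresis.]

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t clock_ns;            /* stand-in for sched_clock() */
    static uint64_t busy_hyst_end_time;

    static void mark_busy(uint64_t hyst_ns)
    {
        busy_hyst_end_time = clock_ns + hyst_ns;  /* as update_busy_hyst_end_time() */
    }

    static uint64_t lpm_disallowed_time(void)
    {
        return clock_ns < busy_hyst_end_time ?
               busy_hyst_end_time - clock_ns : 0;
    }

    int main(void)
    {
        mark_busy(39000000ull);   /* default sched_coloc_busy_hyst: 39 ms */
        clock_ns += 10000000ull;  /* 10 ms later */
        printf("LPM blocked for another %llu ns\n",
               (unsigned long long)lpm_disallowed_time());
        return 0;
    }
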
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index df60ad6..ffdaf2f 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1976,12 +1976,12 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
 
 		sd = *per_cpu_ptr(d.sd, i);
 
-		if ((max_cpu < 0) || (cpu_rq(i)->cpu_capacity_orig >
-				cpu_rq(max_cpu)->cpu_capacity_orig))
+		if ((max_cpu < 0) || (arch_scale_cpu_capacity(NULL, i) >
+				arch_scale_cpu_capacity(NULL, max_cpu)))
 			WRITE_ONCE(d.rd->max_cap_orig_cpu, i);
 
-		if ((min_cpu < 0) || (cpu_rq(i)->cpu_capacity_orig <
-				cpu_rq(min_cpu)->cpu_capacity_orig))
+		if ((min_cpu < 0) || (arch_scale_cpu_capacity(NULL, i) <
+				arch_scale_cpu_capacity(NULL, min_cpu)))
 			WRITE_ONCE(d.rd->min_cap_orig_cpu, i);
 
 		cpu_attach_domain(sd, d.rd, i);
@@ -1992,14 +1992,26 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
 		int max_cpu = READ_ONCE(d.rd->max_cap_orig_cpu);
 		int min_cpu = READ_ONCE(d.rd->min_cap_orig_cpu);
 
-		if ((cpu_rq(i)->cpu_capacity_orig
-				!=  cpu_rq(min_cpu)->cpu_capacity_orig) &&
-				(cpu_rq(i)->cpu_capacity_orig
-				!=  cpu_rq(max_cpu)->cpu_capacity_orig)) {
+		if ((arch_scale_cpu_capacity(NULL, i)
+				!=  arch_scale_cpu_capacity(NULL, min_cpu)) &&
+				(arch_scale_cpu_capacity(NULL, i)
+				!=  arch_scale_cpu_capacity(NULL, max_cpu))) {
 			WRITE_ONCE(d.rd->mid_cap_orig_cpu, i);
 			break;
 		}
 	}
+
+	/*
+	 * The max_cpu_capacity reflects the original capacity, which does
+	 * not change dynamically, so update the max cap CPU and its
+	 * capacity here.
+	 */
+	if (d.rd->max_cap_orig_cpu != -1) {
+		d.rd->max_cpu_capacity.cpu = d.rd->max_cap_orig_cpu;
+		d.rd->max_cpu_capacity.val = arch_scale_cpu_capacity(NULL,
+						d.rd->max_cap_orig_cpu);
+	}
+
 	rcu_read_unlock();
 
 	if (has_asym)
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 2bedd91..ea9166f 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -118,7 +118,7 @@ __read_mostly unsigned int sched_ravg_hist_size = 5;
 
 static __read_mostly unsigned int sched_io_is_busy = 1;
 
-__read_mostly unsigned int sched_window_stats_policy =
+__read_mostly unsigned int sysctl_sched_window_stats_policy =
 	WINDOW_STATS_MAX_RECENT_AVG;
 
 /* Window size (in ns) */
@@ -475,6 +475,19 @@ static u32  top_task_load(struct rq *rq)
 	}
 }
 
+unsigned int sysctl_sched_user_hint;
+static unsigned long sched_user_hint_reset_time;
+static bool is_cluster_hosting_top_app(struct sched_cluster *cluster);
+
+static inline bool should_apply_suh_freq_boost(struct sched_cluster *cluster)
+{
+	if (sched_freq_aggr_en || !sysctl_sched_user_hint ||
+				  !cluster->aggr_grp_load)
+		return false;
+
+	return is_cluster_hosting_top_app(cluster);
+}
+
 static inline u64 freq_policy_load(struct rq *rq)
 {
 	unsigned int reporting_policy = sysctl_sched_freq_reporting_policy;
@@ -510,9 +523,18 @@ static inline u64 freq_policy_load(struct rq *rq)
 		break;
 	}
 
+	if (should_apply_suh_freq_boost(cluster)) {
+		if (is_suh_max())
+			load = sched_ravg_window;
+		else
+			load = div64_u64(load * sysctl_sched_user_hint,
+					 (u64)100);
+	}
+
 done:
 	trace_sched_load_to_gov(rq, aggr_grp_load, tt_load, sched_freq_aggr_en,
-				load, reporting_policy, walt_rotation_enabled);
+				load, reporting_policy, walt_rotation_enabled,
+				sysctl_sched_user_hint);
 	return load;
 }
 
@@ -543,7 +565,6 @@ __cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
 		rq->old_estimated_time = pl;
 
 		nl = div64_u64(nl * (100 + boost), walt_cpu_util_freq_divisor);
-		pl = div64_u64(pl * (100 + boost), 100);
 
 		walt_load->prev_window_util = util;
 		walt_load->nl = nl;
@@ -967,6 +988,9 @@ void set_window_start(struct rq *rq)
 unsigned int max_possible_efficiency = 1;
 unsigned int min_possible_efficiency = UINT_MAX;
 
+unsigned int sysctl_sched_conservative_pl;
+unsigned int sysctl_sched_many_wakeup_threshold = 1000;
+
 #define INC_STEP 8
 #define DEC_STEP 2
 #define CONSISTENT_THRES 16
@@ -1024,7 +1048,6 @@ static inline int busy_to_bucket(u32 normalized_rt)
 /*
  * get_pred_busy - calculate predicted demand for a task on runqueue
  *
- * @rq: runqueue of task p
  * @p: task whose prediction is being updated
  * @start: starting bucket. returned prediction should not be lower than
  *         this bucket.
@@ -1040,7 +1063,7 @@ static inline int busy_to_bucket(u32 normalized_rt)
  * time and returns the latest that falls into the bucket. If no such busy
  * time exists, it returns the medium of that bucket.
  */
-static u32 get_pred_busy(struct rq *rq, struct task_struct *p,
+static u32 get_pred_busy(struct task_struct *p,
 				int start, u32 runtime)
 {
 	int i;
@@ -1098,18 +1121,18 @@ static u32 get_pred_busy(struct rq *rq, struct task_struct *p,
 	 */
 	ret = max(runtime, ret);
 out:
-	trace_sched_update_pred_demand(rq, p, runtime,
+	trace_sched_update_pred_demand(p, runtime,
 		mult_frac((unsigned int)cur_freq_runtime, 100,
 			  sched_ravg_window), ret);
 	return ret;
 }
 
-static inline u32 calc_pred_demand(struct rq *rq, struct task_struct *p)
+static inline u32 calc_pred_demand(struct task_struct *p)
 {
 	if (p->ravg.pred_demand >= p->ravg.curr_window)
 		return p->ravg.pred_demand;
 
-	return get_pred_busy(rq, p, busy_to_bucket(p->ravg.curr_window),
+	return get_pred_busy(p, busy_to_bucket(p->ravg.curr_window),
 			     p->ravg.curr_window);
 }
 
@@ -1144,7 +1167,7 @@ void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
 			return;
 	}
 
-	new = calc_pred_demand(rq, p);
+	new = calc_pred_demand(p);
 	old = p->ravg.pred_demand;
 
 	if (old >= new)
@@ -1670,7 +1693,7 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
 }
 
 
-static inline u32 predict_and_update_buckets(struct rq *rq,
+static inline u32 predict_and_update_buckets(
 			struct task_struct *p, u32 runtime) {
 
 	int bidx;
@@ -1680,7 +1703,7 @@ static inline u32 predict_and_update_buckets(struct rq *rq,
 		return 0;
 
 	bidx = busy_to_bucket(runtime);
-	pred_demand = get_pred_busy(rq, p, bidx, runtime);
+	pred_demand = get_pred_busy(p, bidx, runtime);
 	bucket_increase(p->ravg.busy_buckets, bidx);
 
 	return pred_demand;
@@ -1767,18 +1790,18 @@ static void update_history(struct rq *rq, struct task_struct *p,
 
 	p->ravg.sum = 0;
 
-	if (sched_window_stats_policy == WINDOW_STATS_RECENT) {
+	if (sysctl_sched_window_stats_policy == WINDOW_STATS_RECENT) {
 		demand = runtime;
-	} else if (sched_window_stats_policy == WINDOW_STATS_MAX) {
+	} else if (sysctl_sched_window_stats_policy == WINDOW_STATS_MAX) {
 		demand = max;
 	} else {
 		avg = div64_u64(sum, sched_ravg_hist_size);
-		if (sched_window_stats_policy == WINDOW_STATS_AVG)
+		if (sysctl_sched_window_stats_policy == WINDOW_STATS_AVG)
 			demand = avg;
 		else
 			demand = max(avg, runtime);
 	}
-	pred_demand = predict_and_update_buckets(rq, p, runtime);
+	pred_demand = predict_and_update_buckets(p, runtime);
 	demand_scaled = scale_demand(demand);
 	pred_demand_scaled = scale_demand(pred_demand);
 
@@ -2607,6 +2630,9 @@ void update_best_cluster(struct related_thread_group *grp,
 		return;
 	}
 
+	if (is_suh_max())
+		demand = sched_group_upmigrate;
+
 	if (!grp->skip_min) {
 		if (demand >= sched_group_upmigrate) {
 			grp->skip_min = true;
@@ -2653,13 +2679,16 @@ static void _set_preferred_cluster(struct related_thread_group *grp)
 	u64 combined_demand = 0;
 	bool group_boost = false;
 	u64 wallclock;
+	bool prev_skip_min = grp->skip_min;
 
-	if (list_empty(&grp->tasks))
-		return;
+	if (list_empty(&grp->tasks)) {
+		grp->skip_min = false;
+		goto out;
+	}
 
 	if (!hmp_capable()) {
 		grp->skip_min = false;
-		return;
+		goto out;
 	}
 
 	wallclock = sched_ktime_clock();
@@ -2693,6 +2722,13 @@ static void _set_preferred_cluster(struct related_thread_group *grp)
 	grp->last_update = wallclock;
 	update_best_cluster(grp, combined_demand, group_boost);
 	trace_sched_set_preferred_cluster(grp, combined_demand);
+out:
+	if (grp->id == DEFAULT_CGROUP_COLOC_ID
+	    && grp->skip_min != prev_skip_min) {
+		if (grp->skip_min)
+			grp->start_ts = sched_clock();
+		sched_update_hyst_times();
+	}
 }
 
 void set_preferred_cluster(struct related_thread_group *grp)
@@ -2703,13 +2739,16 @@ void set_preferred_cluster(struct related_thread_group *grp)
 }
 
 int update_preferred_cluster(struct related_thread_group *grp,
-		struct task_struct *p, u32 old_load)
+		struct task_struct *p, u32 old_load, bool from_tick)
 {
 	u32 new_load = task_load(p);
 
 	if (!grp)
 		return 0;
 
+	if (unlikely(from_tick && is_suh_max()))
+		return 1;
+
 	/*
 	 * Update if task's load has changed significantly or a complete window
 	 * has passed since we last updated preference
@@ -2724,8 +2763,6 @@ int update_preferred_cluster(struct related_thread_group *grp,
 #define ADD_TASK	0
 #define REM_TASK	1
 
-#define DEFAULT_CGROUP_COLOC_ID 1
-
 static inline struct related_thread_group*
 lookup_related_thread_group(unsigned int group_id)
 {
@@ -2963,6 +3000,22 @@ int sync_cgroup_colocation(struct task_struct *p, bool insert)
 }
 #endif
 
+static bool is_cluster_hosting_top_app(struct sched_cluster *cluster)
+{
+	struct related_thread_group *grp;
+	bool grp_on_min;
+
+	grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
+
+	if (!grp)
+		return false;
+
+	grp_on_min = !grp->skip_min &&
+		     (sched_boost_policy() != SCHED_BOOST_ON_BIG);
+
+	return (is_min_capacity_cluster(cluster) == grp_on_min);
+}
+
 static unsigned long max_cap[NR_CPUS];
 static unsigned long thermal_cap_cpu[NR_CPUS];
 
@@ -3146,7 +3199,7 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
 	BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
 }
 
-static bool is_rtgb_active(void)
+bool is_rtgb_active(void)
 {
 	struct related_thread_group *grp;
 
@@ -3154,6 +3207,19 @@ static bool is_rtgb_active(void)
 	return grp && grp->skip_min;
 }
 
+u64 get_rtgb_active_time(void)
+{
+	struct related_thread_group *grp;
+	u64 now = sched_clock();
+
+	grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
+
+	if (grp && grp->skip_min && grp->start_ts)
+		return now - grp->start_ts;
+
+	return 0;
+}
+
 /*
  * Runs in hard-irq context. This should ideally run just after the latest
  * window roll-over.
@@ -3223,6 +3289,10 @@ void walt_irq_work(struct irq_work *irq_work)
 		rtgb_active = false;
 	}
 
+	if (!is_migration && sysctl_sched_user_hint && time_after(jiffies,
+					sched_user_hint_reset_time))
+		sysctl_sched_user_hint = 0;
+
 	for_each_sched_cluster(cluster) {
 		cpumask_t cluster_online_cpus;
 		unsigned int num_cpus, i = 1;
@@ -3395,7 +3465,6 @@ void walt_sched_init_rq(struct rq *rq)
 
 	rq->walt_stats.cumulative_runnable_avg_scaled = 0;
 	rq->window_start = 0;
-	rq->cum_window_start = 0;
 	rq->walt_stats.nr_big_tasks = 0;
 	rq->walt_flags = 0;
 	rq->cur_irqload = 0;
@@ -3435,3 +3504,26 @@ void walt_sched_init_rq(struct rq *rq)
 	rq->cum_window_demand_scaled = 0;
 	rq->notif_pending = false;
 }
+
+int walt_proc_user_hint_handler(struct ctl_table *table,
+				int write, void __user *buffer, size_t *lenp,
+				loff_t *ppos)
+{
+	int ret;
+	unsigned int old_value;
+	static DEFINE_MUTEX(mutex);
+
+	mutex_lock(&mutex);
+
+	sched_user_hint_reset_time = jiffies + HZ;
+	old_value = sysctl_sched_user_hint;
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (ret || !write || (old_value == sysctl_sched_user_hint))
+		goto unlock;
+
+	irq_work_queue(&walt_migration_irq_work);
+
+unlock:
+	mutex_unlock(&mutex);
+	return ret;
+}
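
    [Aside: walt_proc_user_hint_handler() above follows a common handler shape:
    serialize with a local mutex, snapshot the old value, run the generic proc
    helper, and kick follow-up work only when a write actually changed
    something. A pthread-based analogue of just that shape; set_user_hint() is
    a hypothetical setter, not the sysctl machinery.]

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int user_hint;

    static void set_user_hint(unsigned int val)
    {
        unsigned int old;

        pthread_mutex_lock(&lock);
        old = user_hint;
        user_hint = val;  /* proc_dointvec_minmax() writes here in the kernel */
        if (old != val)
            printf("changed: queue irq work\n");
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        set_user_hint(100);  /* change: kicks follow-up work */
        set_user_hint(100);  /* no change: silent */
        return 0;
    }
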
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index 152a9df..7fe6726 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -40,7 +40,6 @@ extern struct mutex cluster_lock;
 extern rwlock_t related_thread_group_lock;
 extern __read_mostly unsigned int sched_ravg_hist_size;
 extern __read_mostly unsigned int sched_freq_aggregate;
-extern __read_mostly unsigned int sched_window_stats_policy;
 extern __read_mostly unsigned int sched_group_upmigrate;
 extern __read_mostly unsigned int sched_group_downmigrate;
 
@@ -306,6 +305,26 @@ static inline void walt_enable_frequency_aggregation(bool enable)
 	sched_freq_aggr_en = enable;
 }
 
+static inline bool is_suh_max(void)
+{
+	return sysctl_sched_user_hint == sched_user_hint_max;
+}
+
+#define DEFAULT_CGROUP_COLOC_ID 1
+static inline bool walt_should_kick_upmigrate(struct task_struct *p, int cpu)
+{
+	struct related_thread_group *rtg = p->grp;
+
+	if (is_suh_max() && rtg && rtg->id == DEFAULT_CGROUP_COLOC_ID &&
+			    rtg->skip_min && p->unfilter)
+		return is_min_capacity_cpu(cpu);
+
+	return false;
+}
+
+extern bool is_rtgb_active(void);
+extern u64 get_rtgb_active_time(void);
+
 #else /* CONFIG_SCHED_WALT */
 
 static inline void walt_sched_init_rq(struct rq *rq) { }
@@ -386,6 +405,16 @@ static inline u64 sched_irqload(int cpu)
 {
 	return 0;
 }
+
+static inline bool walt_should_kick_upmigrate(struct task_struct *p, int cpu)
+{
+	return false;
+}
+
+static inline u64 get_rtgb_active_time(void)
+{
+	return 0;
+}
 #endif /* CONFIG_SCHED_WALT */
 
 #endif
diff --git a/kernel/signal.c b/kernel/signal.c
index 4011898..4a321c6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1369,6 +1369,7 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
 	rcu_read_unlock();
 
 	if (!ret && sig) {
+		check_panic_on_foreground_kill(p);
 		ret = do_send_sig_info(sig, info, p, type);
 		if (capable(CAP_KILL) && sig == SIGKILL) {
 			add_to_oom_reaper(p);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 85847b6..acdccea 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -63,6 +63,7 @@
 #include <linux/binfmts.h>
 #include <linux/sched/sysctl.h>
 #include <linux/sched/coredump.h>
+#include <linux/sched/stat.h>
 #include <linux/kexec.h>
 #include <linux/bpf.h>
 #include <linux/mount.h>
@@ -141,6 +142,11 @@ static int six_hundred_forty_kb = 640 * 1024;
 #endif
 static int two_hundred_fifty_five = 255;
 
+#ifdef CONFIG_SCHED_WALT
+const int sched_user_hint_max = 1000;
+static unsigned int ns_per_sec = NSEC_PER_SEC;
+static unsigned int one_hundred_thousand = 100000;
+#endif
 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
 static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
 
@@ -229,6 +235,10 @@ static int proc_dostring_coredump(struct ctl_table *table, int write,
 #endif
 static int proc_dopipe_max_size(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp, loff_t *ppos);
+#ifdef CONFIG_SCHED_WALT
+static int proc_douintvec_minmax_schedhyst(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp, loff_t *ppos);
+#endif
 
 #ifdef CONFIG_MAGIC_SYSRQ
 /* Note: sysrq code uses its own private copy */
@@ -345,6 +355,24 @@ static struct ctl_table kern_table[] = {
 #endif
 #ifdef CONFIG_SCHED_WALT
 	{
+		.procname	= "sched_user_hint",
+		.data           = &sysctl_sched_user_hint,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= walt_proc_user_hint_handler,
+		.extra1		= &zero,
+		.extra2		= (void *)&sched_user_hint_max,
+	},
+	{
+		.procname	= "sched_window_stats_policy",
+		.data		= &sysctl_sched_window_stats_policy,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler   = proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &four,
+	},
+	{
 		.procname       = "sched_cpu_high_irqload",
 		.data           = &sysctl_sched_cpu_high_irqload,
 		.maxlen         = sizeof(unsigned int),
@@ -378,6 +406,24 @@ static struct ctl_table kern_table[] = {
 		.extra2		= &three,
 	},
 	{
+		.procname	= "sched_conservative_pl",
+		.data		= &sysctl_sched_conservative_pl,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+	{
+		.procname	= "sched_many_wakeup_threshold",
+		.data		= &sysctl_sched_many_wakeup_threshold,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &two,
+		.extra2		= &one_thousand,
+	},
+	{
 		.procname	= "sched_walt_rotate_big_tasks",
 		.data		= &sysctl_sched_walt_rotate_big_tasks,
 		.maxlen		= sizeof(unsigned int),
@@ -429,6 +475,51 @@ static struct ctl_table kern_table[] = {
 		.extra1         = &one,
 		.extra2		= &two_hundred_fifty_five,
 	},
+	{
+		.procname	= "sched_busy_hysteresis_enable_cpus",
+		.data		= &sysctl_sched_busy_hyst_enable_cpus,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_douintvec_minmax_schedhyst,
+		.extra1		= &zero,
+		.extra2		= &two_hundred_fifty_five,
+	},
+	{
+		.procname	= "sched_busy_hyst_ns",
+		.data		= &sysctl_sched_busy_hyst,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_douintvec_minmax_schedhyst,
+		.extra1		= &zero,
+		.extra2		= &ns_per_sec,
+	},
+	{
+		.procname	= "sched_coloc_busy_hysteresis_enable_cpus",
+		.data		= &sysctl_sched_coloc_busy_hyst_enable_cpus,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_douintvec_minmax_schedhyst,
+		.extra1		= &zero,
+		.extra2		= &two_hundred_fifty_five,
+	},
+	{
+		.procname	= "sched_coloc_busy_hyst_ns",
+		.data		= &sysctl_sched_coloc_busy_hyst,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_douintvec_minmax_schedhyst,
+		.extra1		= &zero,
+		.extra2		= &ns_per_sec,
+	},
+	{
+		.procname	= "sched_coloc_busy_hyst_max_ms",
+		.data		= &sysctl_sched_coloc_busy_hyst_max_ms,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_douintvec_minmax_schedhyst,
+		.extra1		= &zero,
+		.extra2		= &one_hundred_thousand,
+	},
 #endif
 #ifdef CONFIG_SMP
 	{
@@ -445,15 +536,6 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= sched_updown_migrate_handler,
 	},
-	{
-		.procname	= "sched_busy_hysteresis_enable_cpus",
-		.data		= &sysctl_sched_busy_hysteresis_enable_cpus,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_douintvec_minmax,
-		.extra1		= &zero,
-		.extra2		= &two_hundred_fifty_five,
-	},
 #endif
 #ifdef CONFIG_SCHED_DEBUG
 	{
@@ -2979,6 +3061,19 @@ static int proc_dostring_coredump(struct ctl_table *table, int write,
 }
 #endif
 
+#ifdef CONFIG_SCHED_WALT
+static int proc_douintvec_minmax_schedhyst(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);
+
+	if (!ret && write)
+		sched_update_hyst_times();
+
+	return ret;
+}
+#endif
+
 static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write,
 				     void __user *buffer,
 				     size_t *lenp, loff_t *ppos,
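
proc_douintvec_minmax_schedhyst() above is a thin wrapper: it delegates parsing and range clamping to the stock proc_douintvec_minmax() and triggers a recomputation only after a successful write. A hedged sketch of the same shape for a hypothetical knob (my_knob and my_recompute are placeholders, not part of this patch):

static unsigned int my_knob;
static void my_recompute(void);	/* hypothetical derived-state update */

static int proc_my_knob(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);

	/* Refresh derived state only when a write actually succeeded. */
	if (!ret && write)
		my_recompute();

	return ret;
}

The matching ctl_table entry would point .proc_handler at proc_my_knob and bound the value through .extra1/.extra2, exactly as the WALT entries above do.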
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index fdeb9bc..f4255a6 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -676,7 +676,7 @@ static int alarm_timer_create(struct k_itimer *new_timer)
 	enum  alarmtimer_type type;
 
 	if (!alarmtimer_get_rtcdev())
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (!capable(CAP_WAKE_ALARM))
 		return -EPERM;
@@ -794,7 +794,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
 	int ret = 0;
 
 	if (!alarmtimer_get_rtcdev())
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (flags & ~TIMER_ABSTIME)
 		return -EINVAL;
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 76801b9..d62d7ae 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -375,7 +375,8 @@ static int posix_cpu_timer_del(struct k_itimer *timer)
 	struct sighand_struct *sighand;
 	struct task_struct *p = timer->it.cpu.task;
 
-	WARN_ON_ONCE(p == NULL);
+	if (WARN_ON_ONCE(!p))
+		return -EINVAL;
 
 	/*
 	 * Protect against sighand release/switch in exit/exec and process/
@@ -580,7 +581,8 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 	u64 old_expires, new_expires, old_incr, val;
 	int ret;
 
-	WARN_ON_ONCE(p == NULL);
+	if (WARN_ON_ONCE(!p))
+		return -EINVAL;
 
 	/*
 	 * Use the to_ktime conversion because that clamps the maximum
@@ -716,10 +718,11 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 
 static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
 {
-	u64 now;
 	struct task_struct *p = timer->it.cpu.task;
+	u64 now;
 
-	WARN_ON_ONCE(p == NULL);
+	if (WARN_ON_ONCE(!p))
+		return;
 
 	/*
 	 * Easy part: convert the reload time.
@@ -1004,12 +1007,13 @@ static void check_process_timers(struct task_struct *tsk,
  */
 static void posix_cpu_timer_rearm(struct k_itimer *timer)
 {
+	struct task_struct *p = timer->it.cpu.task;
 	struct sighand_struct *sighand;
 	unsigned long flags;
-	struct task_struct *p = timer->it.cpu.task;
 	u64 now;
 
-	WARN_ON_ONCE(p == NULL);
+	if (WARN_ON_ONCE(!p))
+		return;
 
 	/*
 	 * Fetch the current sample and update the timer's expiry time.
@@ -1206,7 +1210,9 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 	u64 now;
 	int ret;
 
-	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
+	if (WARN_ON_ONCE(clock_idx >= CPUCLOCK_SCHED))
+		return;
+
 	ret = cpu_timer_sample_group(clock_idx, tsk, &now);
 
 	if (oldval && ret != -EINVAL) {
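
The posix-cpu-timers hunks above lean on WARN_ON_ONCE() evaluating to the condition it tests, so a single statement can emit a one-time warning and still guard the dereference that follows. A minimal sketch (timer_op is illustrative):

static int timer_op(struct task_struct *p)
{
	/*
	 * Warn once about the broken caller, then fail gracefully
	 * instead of dereferencing a NULL task.
	 */
	if (WARN_ON_ONCE(!p))
		return -EINVAL;

	/* ... p is known to be non-NULL from here on ... */
	return 0;
}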
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index a59641f..a836efd3 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -44,34 +44,39 @@ static int bc_shutdown(struct clock_event_device *evt)
  */
 static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 {
-	int bc_moved;
 	/*
-	 * We try to cancel the timer first. If the callback is on
-	 * flight on some other cpu then we let it handle it. If we
-	 * were able to cancel the timer nothing can rearm it as we
-	 * own broadcast_lock.
+	 * This is called either from enter/exit idle code or from the
+	 * broadcast handler. In all cases tick_broadcast_lock is held.
 	 *
-	 * However we can also be called from the event handler of
-	 * ce_broadcast_hrtimer itself when it expires. We cannot
-	 * restart the timer because we are in the callback, but we
-	 * can set the expiry time and let the callback return
-	 * HRTIMER_RESTART.
+	 * hrtimer_cancel() cannot be called here neither from the
+	 * broadcast handler nor from the enter/exit idle code. The idle
+	 * code can run into the problem described in bc_shutdown() and the
+	 * broadcast handler cannot wait for itself to complete for obvious
+	 * reasons.
 	 *
-	 * Since we are in the idle loop at this point and because
-	 * hrtimer_{start/cancel} functions call into tracing,
-	 * calls to these functions must be bound within RCU_NONIDLE.
+	 * Each caller tries to arm the hrtimer on its own CPU, but if the
+	 * hrtimer callback function is currently running, then
+	 * hrtimer_start() cannot move it and the timer stays on the CPU on
+	 * which it is assigned at the moment.
+	 *
+	 * As this can be called from idle code, the hrtimer_start()
+	 * invocation has to be wrapped with RCU_NONIDLE() as
+	 * hrtimer_start() can call into tracing.
 	 */
-	RCU_NONIDLE({
-			bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0;
-			if (bc_moved)
-				hrtimer_start(&bctimer, expires,
-					      HRTIMER_MODE_ABS_PINNED);});
-	if (bc_moved) {
-		/* Bind the "device" to the cpu */
-		bc->bound_on = smp_processor_id();
-	} else if (bc->bound_on == smp_processor_id()) {
-		hrtimer_set_expires(&bctimer, expires);
-	}
+	RCU_NONIDLE( {
+		hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
+		/*
+		 * The core tick broadcast mode expects bc->bound_on to be set
+		 * correctly to prevent a CPU which has the broadcast hrtimer
+		 * armed from going deep idle.
+		 *
+		 * As tick_broadcast_lock is held, nothing can change the cpu
+		 * base which was just established in hrtimer_start() above. So
+		 * the below access is safe even without holding the hrtimer
+		 * base lock.
+		 */
+		bc->bound_on = bctimer.base->cpu_base->cpu;
+	} );
 	return 0;
 }
 
@@ -97,10 +102,6 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
 {
 	ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
 
-	if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
-		if (ce_broadcast_hrtimer.next_event != KTIME_MAX)
-			return HRTIMER_RESTART;
-
 	return HRTIMER_NORESTART;
 }
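
With bc_set_next() now rearming the timer directly through hrtimer_start(), bc_handler() can always return HRTIMER_NORESTART. For contrast, a generic sketch of the self-rearming convention the removed code approximated (periodic_cb is illustrative, not the broadcast handler):

static enum hrtimer_restart periodic_cb(struct hrtimer *t)
{
	/* Push the expiry forward, then ask the core to requeue us. */
	hrtimer_forward_now(t, ms_to_ktime(10));
	return HRTIMER_RESTART;
}

Returning HRTIMER_NORESTART instead leaves the timer idle until something arms it again from outside the callback, which is exactly what the broadcast code wants here.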
 
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 443edcd..c2708e1 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -823,7 +823,7 @@ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
 
 	} while (read_seqcount_retry(&tk_core.seq, seq));
 
-	return base + nsecs;
+	return ktime_add_ns(base, nsecs);
 }
 EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
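
The one-line timekeeping fix replaces raw integer arithmetic on a ktime_t with the ktime_add_ns() accessor, the sanctioned way to combine a ktime_t with a nanosecond count. The same idiom in isolation (deadline_after is illustrative):

/* Compose an absolute deadline from a base time and an offset. */
static ktime_t deadline_after(ktime_t base, u64 delay_ns)
{
	return ktime_add_ns(base, delay_ns);
}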
 
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index e0d0fd71..5bbb433 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1634,24 +1634,26 @@ void timer_clear_idle(void)
 static int collect_expired_timers(struct timer_base *base,
 				  struct hlist_head *heads)
 {
+	unsigned long now = READ_ONCE(jiffies);
+
 	/*
 	 * NOHZ optimization. After a long idle sleep we need to forward the
 	 * base to current jiffies. Avoid a loop by searching the bitfield for
 	 * the next expiring timer.
 	 */
-	if ((long)(jiffies - base->clk) > 2) {
+	if ((long)(now - base->clk) > 2) {
 		unsigned long next = __next_timer_interrupt(base);
 
 		/*
 		 * If the next timer is ahead of time forward to current
 		 * jiffies, otherwise forward to the next expiry time:
 		 */
-		if (time_after(next, jiffies)) {
+		if (time_after(next, now)) {
 			/*
 			 * The call site will increment base->clk and then
 			 * terminate the expiry loop immediately.
 			 */
-			base->clk = jiffies;
+			base->clk = now;
 			return 0;
 		}
 		base->clk = next;
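
The timer change snapshots jiffies once through READ_ONCE() so the forward check and the store to base->clk see the same instant even if a tick fires mid-function. The snapshot idiom in a minimal sketch (classify and the deadlines are illustrative):

static int classify(unsigned long deadline_a, unsigned long deadline_b)
{
	/*
	 * One snapshot: the two comparisons below cannot disagree
	 * about "now", even if jiffies advances between them.
	 */
	unsigned long now = READ_ONCE(jiffies);

	if (time_after(now, deadline_a))
		return 1;
	if (time_after(now, deadline_b))
		return 2;
	return 0;
}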
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f322cbf..b3ce6a9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3113,6 +3113,14 @@ t_probe_next(struct seq_file *m, loff_t *pos)
 		hnd = &iter->probe_entry->hlist;
 
 	hash = iter->probe->ops.func_hash->filter_hash;
+
+	/*
+	 * A probe being registered may temporarily have an empty hash
+	 * and it's at the end of the func_probes list.
+	 */
+	if (!hash || hash == EMPTY_HASH)
+		return NULL;
+
 	size = 1 << hash->size_bits;
 
  retry:
@@ -3550,21 +3558,22 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 	struct ftrace_hash *hash;
 	struct list_head *mod_head;
 	struct trace_array *tr = ops->private;
-	int ret = 0;
+	int ret = -ENOMEM;
 
 	ftrace_ops_init(ops);
 
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
+	if (tr && trace_array_get(tr) < 0)
+		return -ENODEV;
+
 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 	if (!iter)
-		return -ENOMEM;
+		goto out;
 
-	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
-		kfree(iter);
-		return -ENOMEM;
-	}
+	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
+		goto out;
 
 	iter->ops = ops;
 	iter->flags = flag;
@@ -3594,13 +3603,13 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 
 		if (!iter->hash) {
 			trace_parser_put(&iter->parser);
-			kfree(iter);
-			ret = -ENOMEM;
 			goto out_unlock;
 		}
 	} else
 		iter->hash = hash;
 
+	ret = 0;
+
 	if (file->f_mode & FMODE_READ) {
 		iter->pg = ftrace_pages_start;
 
@@ -3612,7 +3621,6 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 			/* Failed */
 			free_ftrace_hash(iter->hash);
 			trace_parser_put(&iter->parser);
-			kfree(iter);
 		}
 	} else
 		file->private_data = iter;
@@ -3620,6 +3628,13 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
  out_unlock:
 	mutex_unlock(&ops->func_hash->regex_lock);
 
+ out:
+	if (ret) {
+		kfree(iter);
+		if (tr)
+			trace_array_put(tr);
+	}
+
 	return ret;
 }
 
@@ -4308,12 +4323,21 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
 
 	mutex_unlock(&ftrace_lock);
 
+	/*
+	 * Note, there's a small window here in which the func_hash->filter_hash
+	 * may be NULL or empty. Need to be careful when reading the loop.
+	 */
 	mutex_lock(&probe->ops.func_hash->regex_lock);
 
 	orig_hash = &probe->ops.func_hash->filter_hash;
 	old_hash = *orig_hash;
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
 
+	if (!hash) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
 	ret = ftrace_match_records(hash, glob, strlen(glob));
 
 	/* Nothing found? */
@@ -5008,6 +5032,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
 	mutex_unlock(&iter->ops->func_hash->regex_lock);
 	free_ftrace_hash(iter->hash);
+	if (iter->tr)
+		trace_array_put(iter->tr);
 	kfree(iter);
 
 	return 0;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 56d2963..3f4fd25 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2950,6 +2950,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
 	if (!call_filter_check_discard(call, entry, buffer, event)) {
+		len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
+		stm_log(OST_ENTITY_TRACE_PRINTK, tbuffer, len+1);
+
 		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
 	}
@@ -4157,9 +4160,14 @@ static int show_traces_open(struct inode *inode, struct file *file)
 	if (tracing_disabled)
 		return -ENODEV;
 
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
+
 	ret = seq_open(file, &show_traces_seq_ops);
-	if (ret)
+	if (ret) {
+		trace_array_put(tr);
 		return ret;
+	}
 
 	m = file->private_data;
 	m->private = tr;
@@ -4167,6 +4175,14 @@ static int show_traces_open(struct inode *inode, struct file *file)
 	return 0;
 }
 
+static int show_traces_release(struct inode *inode, struct file *file)
+{
+	struct trace_array *tr = inode->i_private;
+
+	trace_array_put(tr);
+	return seq_release(inode, file);
+}
+
 static ssize_t
 tracing_write_stub(struct file *filp, const char __user *ubuf,
 		   size_t count, loff_t *ppos)
@@ -4197,8 +4213,8 @@ static const struct file_operations tracing_fops = {
 static const struct file_operations show_traces_fops = {
 	.open		= show_traces_open,
 	.read		= seq_read,
-	.release	= seq_release,
 	.llseek		= seq_lseek,
+	.release	= show_traces_release,
 };
 
 static ssize_t
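
The show_traces fix pairs a trace_array_get() in ->open with a trace_array_put() in a dedicated ->release, pinning the trace_array for exactly the lifetime of the open file. A hedged sketch of that pairing for a generic object (struct my_obj, my_obj_get/my_obj_put and my_seq_ops are placeholders):

struct my_obj;
static int my_obj_get(struct my_obj *obj);	/* pin; <0 if object is dying */
static void my_obj_put(struct my_obj *obj);
static const struct seq_operations my_seq_ops;

static int my_open(struct inode *inode, struct file *file)
{
	struct my_obj *obj = inode->i_private;
	int ret;

	if (my_obj_get(obj) < 0)		/* pin before exposing */
		return -ENODEV;

	ret = seq_open(file, &my_seq_ops);
	if (ret)
		my_obj_put(obj);		/* undo the pin on failure */
	return ret;
}

static int my_release(struct inode *inode, struct file *file)
{
	my_obj_put(inode->i_private);		/* drop the ->open pin */
	return seq_release(inode, file);
}

Every successful ->open gets exactly one matching ->release, which is what makes the pairing leak-free.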
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index e6945b5..f5b3bf0 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -272,9 +272,11 @@ int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
 		goto out;
 	}
 
+	mutex_lock(&event_mutex);
 	ret = perf_trace_event_init(tp_event, p_event);
 	if (ret)
 		destroy_local_trace_kprobe(tp_event);
+	mutex_unlock(&event_mutex);
 out:
 	kfree(func);
 	return ret;
@@ -282,8 +284,10 @@ int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
 
 void perf_kprobe_destroy(struct perf_event *p_event)
 {
+	mutex_lock(&event_mutex);
 	perf_trace_event_close(p_event);
 	perf_trace_event_unreg(p_event);
+	mutex_unlock(&event_mutex);
 
 	destroy_local_trace_kprobe(p_event->tp_event);
 }
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 71afb6e..32e814e 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -2526,6 +2526,8 @@ static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
 		return NULL;
 	}
 
+	alias->var_ref_idx = var_ref->var_ref_idx;
+
 	return alias;
 }
 
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 1e6db9c..8030e24d 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -150,7 +150,7 @@ void trace_hwlat_callback(bool enter)
 		if (enter)
 			nmi_ts_start = time_get();
 		else
-			nmi_total_ts = time_get() - nmi_ts_start;
+			nmi_total_ts += time_get() - nmi_ts_start;
 	}
 
 	if (enter)
@@ -256,6 +256,8 @@ static int get_sample(void)
 		/* Keep a running maximum ever recorded hardware latency */
 		if (sample > tr->max_latency)
 			tr->max_latency = sample;
+		if (outer_sample > tr->max_latency)
+			tr->max_latency = outer_sample;
 	}
 
 out:
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index d681cf3..7ae1316 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -14,6 +14,7 @@
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/ftrace.h>
+#include <linux/sched.h>
 #include <linux/sched/clock.h>
 #include <linux/sched/sysctl.h>
 
@@ -636,7 +637,8 @@ void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
 	is = &per_cpu(the_irqsoff, raw_smp_processor_id());
 	delta = sched_clock() - is->ts;
 
-	if (delta > sysctl_irqsoff_tracing_threshold_ns)
+	if (!is_idle_task(current) &&
+			delta > sysctl_irqsoff_tracing_threshold_ns)
 		trace_irqs_disable(delta, is->caddr[0], is->caddr[1],
 						is->caddr[2], is->caddr[3]);
 	lockdep_on();
diff --git a/lib/Kconfig b/lib/Kconfig
index a3928d4..7f5c74d 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -590,6 +590,15 @@
 	bool
 	select STACKTRACE
 
+config STACK_HASH_ORDER_SHIFT
+	int "stack depot hash size (12 => 4KB, 20 => 1024KB)"
+	range 12 20
+	default 20
+	depends on STACKDEPOT
+	help
+	  Select the hash size as a power of 2 for the stackdepot hash table.
+	  Choose a lower value to reduce the memory impact.
+
 config SBITMAP
 	bool
 
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6c164cd..488ae5a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -155,6 +155,18 @@
 	  actual pointer values, ignoring the kptr_restrict setting.
 	  Not to be enabled on production builds.
 
+config DEBUG_MODULE_LOAD_INFO
+        bool "Use prints for module info under a debug flag"
+	help
+	  If you say Y here, the resulting kernel image will include
+	  debug prints which were kept under DEBUG_MODULE_LOAD_INFO.
+	  These can be used by developers to debug loadable modules in
+	  the kernel.
+	  Say Y here only if you plan to debug the kernel.
+	  Not to be enabled on production builds.
+
+	  If unsure, say N.
+
 endmenu # "printk and dmesg options"
 
 menu "Compile-time checks and compiler options"
@@ -590,7 +602,7 @@
 	int "Maximum kmemleak early log entries"
 	depends on DEBUG_KMEMLEAK
 	range 200 40000
-	default 400
+	default 16000
 	help
 	  Kmemleak must track all the memory allocations to avoid
 	  reporting false positives. Since memory may be allocated or
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
index feea48f..9050275 100644
--- a/lib/logic_pio.c
+++ b/lib/logic_pio.c
@@ -35,7 +35,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
 	struct logic_pio_hwaddr *range;
 	resource_size_t start;
 	resource_size_t end;
-	resource_size_t mmio_sz = 0;
+	resource_size_t mmio_end = 0;
 	resource_size_t iio_sz = MMIO_UPPER_LIMIT;
 	int ret = 0;
 
@@ -46,7 +46,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
 	end = new_range->hw_start + new_range->size;
 
 	mutex_lock(&io_range_mutex);
-	list_for_each_entry_rcu(range, &io_range_list, list) {
+	list_for_each_entry(range, &io_range_list, list) {
 		if (range->fwnode == new_range->fwnode) {
 			/* range already there */
 			goto end_register;
@@ -56,7 +56,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
 			/* for MMIO ranges we need to check for overlap */
 			if (start >= range->hw_start + range->size ||
 			    end < range->hw_start) {
-				mmio_sz += range->size;
+				mmio_end = range->io_start + range->size;
 			} else {
 				ret = -EFAULT;
 				goto end_register;
@@ -69,16 +69,16 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
 
 	/* range not registered yet, check for available space */
 	if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
-		if (mmio_sz + new_range->size - 1 > MMIO_UPPER_LIMIT) {
+		if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) {
 			/* if it's too big check if 64K space can be reserved */
-			if (mmio_sz + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
+			if (mmio_end + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
 				ret = -E2BIG;
 				goto end_register;
 			}
 			new_range->size = SZ_64K;
 			pr_warn("Requested IO range too big, new size set to 64K\n");
 		}
-		new_range->io_start = mmio_sz;
+		new_range->io_start = mmio_end;
 	} else if (new_range->flags == LOGIC_PIO_INDIRECT) {
 		if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
 			ret = -E2BIG;
@@ -99,6 +99,20 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
 }
 
 /**
+ * logic_pio_unregister_range - unregister a logical PIO range for a host
+ * @range: pointer to the IO range which has been already registered.
+ *
+ * Unregister a previously-registered IO range node.
+ */
+void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
+{
+	mutex_lock(&io_range_mutex);
+	list_del_rcu(&range->list);
+	mutex_unlock(&io_range_mutex);
+	synchronize_rcu();
+}
+
+/**
  * find_io_range_by_fwnode - find logical PIO range for given FW node
  * @fwnode: FW node handle associated with logical PIO range
  *
@@ -108,26 +122,38 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
  */
 struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
 {
-	struct logic_pio_hwaddr *range;
+	struct logic_pio_hwaddr *range, *found_range = NULL;
 
+	rcu_read_lock();
 	list_for_each_entry_rcu(range, &io_range_list, list) {
-		if (range->fwnode == fwnode)
-			return range;
+		if (range->fwnode == fwnode) {
+			found_range = range;
+			break;
+		}
 	}
-	return NULL;
+	rcu_read_unlock();
+
+	return found_range;
 }
 
 /* Return a registered range given an input PIO token */
 static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
 {
-	struct logic_pio_hwaddr *range;
+	struct logic_pio_hwaddr *range, *found_range = NULL;
 
+	rcu_read_lock();
 	list_for_each_entry_rcu(range, &io_range_list, list) {
-		if (in_range(pio, range->io_start, range->size))
-			return range;
+		if (in_range(pio, range->io_start, range->size)) {
+			found_range = range;
+			break;
+		}
 	}
-	pr_err("PIO entry token %lx invalid\n", pio);
-	return NULL;
+	rcu_read_unlock();
+
+	if (!found_range)
+		pr_err("PIO entry token 0x%lx invalid\n", pio);
+
+	return found_range;
 }
 
 /**
@@ -180,14 +206,23 @@ unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
 {
 	struct logic_pio_hwaddr *range;
 
+	rcu_read_lock();
 	list_for_each_entry_rcu(range, &io_range_list, list) {
 		if (range->flags != LOGIC_PIO_CPU_MMIO)
 			continue;
-		if (in_range(addr, range->hw_start, range->size))
-			return addr - range->hw_start + range->io_start;
+		if (in_range(addr, range->hw_start, range->size)) {
+			unsigned long cpuaddr;
+
+			cpuaddr = addr - range->hw_start + range->io_start;
+
+			rcu_read_unlock();
+			return cpuaddr;
+		}
 	}
-	pr_err("addr %llx not registered in io_range_list\n",
-	       (unsigned long long) addr);
+	rcu_read_unlock();
+
+	pr_err("addr %pa not registered in io_range_list\n", &addr);
+
 	return ~0UL;
 }
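
All three logic_pio lookup paths now follow the canonical RCU read-side shape: take rcu_read_lock() around the whole list walk, copy out the result, then unlock; the writer side pairs list_del_rcu() with synchronize_rcu(). A condensed sketch (struct my_range and its fields are illustrative):

struct my_range {
	struct list_head list;
	unsigned long key;
};

static struct my_range *lookup(struct list_head *head, unsigned long key)
{
	struct my_range *r, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(r, head, list) {
		if (r->key == key) {
			found = r;
			break;
		}
	}
	rcu_read_unlock();

	/*
	 * Returning 'found' after the unlock is only safe when objects
	 * are not freed while lookups may still hold them, which is why
	 * the unregister path above calls synchronize_rcu() before any
	 * caller can free the range.
	 */
	return found;
}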
 
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index e513459..241f963 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -146,8 +146,7 @@ static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
 	return stack;
 }
 
-#define STACK_HASH_ORDER 20
-#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER)
+#define STACK_HASH_SIZE (1L << CONFIG_STACK_HASH_ORDER_SHIFT)
 #define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
 #define STACK_HASH_SEED 0x9747b28c
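
Because STACK_HASH_ORDER_SHIFT is a Kconfig int, it surfaces in C as an ordinary CONFIG_ macro, so the table size remains a compile-time constant. A small sketch of the technique with an assumed symbol (CONFIG_MY_TABLE_ORDER is illustrative):

/*
 * A Kconfig "int" symbol becomes a preprocessor constant, so it can
 * size static storage at build time.
 */
#define MY_TABLE_SIZE	(1UL << CONFIG_MY_TABLE_ORDER)

struct entry;
static struct entry *my_table[MY_TABLE_SIZE];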
 
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 5939549..9135c29 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -93,9 +93,9 @@
  *       goto errout;
  *   }
  *
- *   pos = textsearch_find_continuous(conf, \&state, example, strlen(example));
+ *   pos = textsearch_find_continuous(conf, &state, example, strlen(example));
  *   if (pos != UINT_MAX)
- *       panic("Oh my god, dancing chickens at \%d\n", pos);
+ *       panic("Oh my god, dancing chickens at %d\n", pos);
  *
  *   textsearch_destroy(conf);
  */
diff --git a/mm/compaction.c b/mm/compaction.c
index f7eec61..3ac625b 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -270,14 +270,15 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
 
 	/* Ensure the start of the pageblock or zone is online and valid */
 	block_pfn = pageblock_start_pfn(pfn);
-	block_page = pfn_to_online_page(max(block_pfn, zone->zone_start_pfn));
+	block_pfn = max(block_pfn, zone->zone_start_pfn);
+	block_page = pfn_to_online_page(block_pfn);
 	if (block_page) {
 		page = block_page;
 		pfn = block_pfn;
 	}
 
 	/* Ensure the end of the pageblock or zone is online and valid */
-	block_pfn += pageblock_nr_pages;
+	block_pfn = pageblock_end_pfn(pfn) - 1;
 	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
 	end_page = pfn_to_online_page(block_pfn);
 	if (!end_page)
@@ -303,7 +304,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
 
 		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
 		pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
-	} while (page < end_page);
+	} while (page <= end_page);
 
 	return false;
 }
@@ -842,13 +843,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 		/*
 		 * Periodically drop the lock (if held) regardless of its
-		 * contention, to give chance to IRQs. Abort async compaction
-		 * if contended.
+		 * contention, to give chance to IRQs. Abort completely if
+		 * a fatal signal is pending.
 		 */
 		if (!(low_pfn % SWAP_CLUSTER_MAX)
 		    && compact_unlock_should_abort(zone_lru_lock(zone), flags,
-								&locked, cc))
-			break;
+								&locked, cc)) {
+			low_pfn = 0;
+			goto fatal_pending;
+		}
 
 		if (!pfn_valid_within(low_pfn))
 			goto isolate_fail;
@@ -1060,6 +1063,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
 						nr_scanned, nr_isolated);
 
+fatal_pending:
 	cc->total_migrate_scanned += nr_scanned;
 	if (nr_isolated)
 		count_compact_events(COMPACTISOLATED, nr_isolated);
@@ -1230,7 +1234,7 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long
 
 	/* Pageblock boundaries */
 	start_pfn = pageblock_start_pfn(pfn);
-	end_pfn = min(start_pfn + pageblock_nr_pages, zone_end_pfn(cc->zone));
+	end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)) - 1;
 
 	/* Scan before */
 	if (start_pfn != pfn) {
@@ -1241,7 +1245,7 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long
 
 	/* Scan after */
 	start_pfn = pfn + nr_isolated;
-	if (start_pfn != end_pfn)
+	if (start_pfn < end_pfn)
 		isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
 
 	/* Skip this pageblock in the future as it's full or nearly full */
@@ -2075,6 +2079,17 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
 	const bool sync = cc->mode != MIGRATE_ASYNC;
 	bool update_cached;
 
+	/*
+	 * These counters track activities during zone compaction.  Initialize
+	 * them before compacting a new zone.
+	 */
+	cc->total_migrate_scanned = 0;
+	cc->total_free_scanned = 0;
+	cc->nr_migratepages = 0;
+	cc->nr_freepages = 0;
+	INIT_LIST_HEAD(&cc->freepages);
+	INIT_LIST_HEAD(&cc->migratepages);
+
 	cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
 	ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
 							cc->classzone_idx);
@@ -2278,10 +2293,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
 {
 	enum compact_result ret;
 	struct compact_control cc = {
-		.nr_freepages = 0,
-		.nr_migratepages = 0,
-		.total_migrate_scanned = 0,
-		.total_free_scanned = 0,
 		.order = order,
 		.search_order = order,
 		.gfp_mask = gfp_mask,
@@ -2302,8 +2313,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
 
 	if (capture)
 		current->capture_control = &capc;
-	INIT_LIST_HEAD(&cc.freepages);
-	INIT_LIST_HEAD(&cc.migratepages);
 
 	ret = compact_zone(&cc, &capc);
 
@@ -2405,8 +2414,6 @@ static void compact_node(int nid)
 	struct zone *zone;
 	struct compact_control cc = {
 		.order = -1,
-		.total_migrate_scanned = 0,
-		.total_free_scanned = 0,
 		.mode = MIGRATE_SYNC,
 		.ignore_skip_hint = true,
 		.whole_zone = true,
@@ -2420,11 +2427,7 @@ static void compact_node(int nid)
 		if (!populated_zone(zone))
 			continue;
 
-		cc.nr_freepages = 0;
-		cc.nr_migratepages = 0;
 		cc.zone = zone;
-		INIT_LIST_HEAD(&cc.freepages);
-		INIT_LIST_HEAD(&cc.migratepages);
 
 		compact_zone(&cc, NULL);
 
@@ -2534,8 +2537,6 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 	struct compact_control cc = {
 		.order = pgdat->kcompactd_max_order,
 		.search_order = pgdat->kcompactd_max_order,
-		.total_migrate_scanned = 0,
-		.total_free_scanned = 0,
 		.classzone_idx = pgdat->kcompactd_classzone_idx,
 		.mode = MIGRATE_SYNC_LIGHT,
 		.ignore_skip_hint = false,
@@ -2559,16 +2560,10 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 							COMPACT_CONTINUE)
 			continue;
 
-		cc.nr_freepages = 0;
-		cc.nr_migratepages = 0;
-		cc.total_migrate_scanned = 0;
-		cc.total_free_scanned = 0;
-		cc.zone = zone;
-		INIT_LIST_HEAD(&cc.freepages);
-		INIT_LIST_HEAD(&cc.migratepages);
-
 		if (kthread_should_stop())
 			return;
+
+		cc.zone = zone;
 		status = compact_zone(&cc, NULL);
 
 		if (status == COMPACT_SUCCESS) {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a21b2ca..8dbf67f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -33,6 +33,7 @@
 #include <linux/page_idle.h>
 #include <linux/shmem_fs.h>
 #include <linux/oom.h>
+#include <linux/page_owner.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -2478,6 +2479,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	}
 
 	ClearPageCompound(head);
+
+	split_page_owner(head, HPAGE_PMD_ORDER);
+
 	/* See comment in __split_huge_page_tail() */
 	if (PageAnon(head)) {
 		/* Additional pin to radix tree of swap cache */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1710826..84de70b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1073,11 +1073,10 @@ static bool pfn_range_valid_gigantic(struct zone *z,
 	struct page *page;
 
 	for (i = start_pfn; i < end_pfn; i++) {
-		if (!pfn_valid(i))
+		page = pfn_to_online_page(i);
+		if (!page)
 			return false;
 
-		page = pfn_to_page(i);
-
 		if (page_zone(page) != z)
 			return false;
 
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index aa0338c..1589165 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -126,7 +126,7 @@
 /* GFP bitmask for kmemleak internal allocations */
 #define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
 				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
-				 __GFP_NOWARN | __GFP_NOFAIL)
+				 __GFP_NOWARN)
 
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7e7cc0c..65da189 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1037,26 +1037,45 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
 		css_put(&prev->css);
 }
 
-static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
+					struct mem_cgroup *dead_memcg)
 {
-	struct mem_cgroup *memcg = dead_memcg;
 	struct mem_cgroup_reclaim_iter *iter;
 	struct mem_cgroup_per_node *mz;
 	int nid;
 	int i;
 
-	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
-		for_each_node(nid) {
-			mz = mem_cgroup_nodeinfo(memcg, nid);
-			for (i = 0; i <= DEF_PRIORITY; i++) {
-				iter = &mz->iter[i];
-				cmpxchg(&iter->position,
-					dead_memcg, NULL);
-			}
+	for_each_node(nid) {
+		mz = mem_cgroup_nodeinfo(from, nid);
+		for (i = 0; i <= DEF_PRIORITY; i++) {
+			iter = &mz->iter[i];
+			cmpxchg(&iter->position,
+				dead_memcg, NULL);
 		}
 	}
 }
 
+static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+{
+	struct mem_cgroup *memcg = dead_memcg;
+	struct mem_cgroup *last;
+
+	do {
+		__invalidate_reclaim_iterators(memcg, dead_memcg);
+		last = memcg;
+	} while ((memcg = parent_mem_cgroup(memcg)));
+
+	/*
+	 * When cgroup1 non-hierarchy mode is used,
+	 * parent_mem_cgroup() does not walk all the way up to the
+	 * cgroup root (root_mem_cgroup). So we have to handle
+	 * dead_memcg from cgroup root separately.
+	 */
+	if (last != root_mem_cgroup)
+		__invalidate_reclaim_iterators(root_mem_cgroup,
+						dead_memcg);
+}
+
 /**
  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
  * @memcg: hierarchy root
@@ -2618,6 +2637,16 @@ int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
 
 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
 	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
+
+		/*
+		 * Enforce __GFP_NOFAIL allocation because callers are not
+		 * prepared to see failures and likely do not have any failure
+		 * handling code.
+		 */
+		if (gfp & __GFP_NOFAIL) {
+			page_counter_charge(&memcg->kmem, nr_pages);
+			return 0;
+		}
 		cancel_charge(memcg, nr_pages);
 		return -ENOMEM;
 	}
diff --git a/mm/memfd.c b/mm/memfd.c
index a9ece5f..908379a 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -34,11 +34,12 @@ static void memfd_tag_pins(struct address_space *mapping)
 	void __rcu **slot;
 	pgoff_t start;
 	struct page *page;
+	unsigned int tagged = 0;
 
 	lru_add_drain();
 	start = 0;
-	rcu_read_lock();
 
+	xa_lock_irq(&mapping->i_pages);
 	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
 		page = radix_tree_deref_slot(slot);
 		if (!page || radix_tree_exception(page)) {
@@ -47,18 +48,19 @@ static void memfd_tag_pins(struct address_space *mapping)
 				continue;
 			}
 		} else if (page_count(page) - page_mapcount(page) > 1) {
-			xa_lock_irq(&mapping->i_pages);
 			radix_tree_tag_set(&mapping->i_pages, iter.index,
 					   MEMFD_TAG_PINNED);
-			xa_unlock_irq(&mapping->i_pages);
 		}
 
-		if (need_resched()) {
-			slot = radix_tree_iter_resume(slot, &iter);
-			cond_resched_rcu();
-		}
+		if (++tagged % 1024)
+			continue;
+
+		slot = radix_tree_iter_resume(slot, &iter);
+		xa_unlock_irq(&mapping->i_pages);
+		cond_resched();
+		xa_lock_irq(&mapping->i_pages);
 	}
-	rcu_read_unlock();
+	xa_unlock_irq(&mapping->i_pages);
 }
 
 /*
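
The memfd rework replaces per-item lock juggling with batching: hold the xarray lock for runs of 1024 slots, then drop it, reschedule, and retake it. The batching idiom in isolation, hedged (struct my_ctx, have_work and do_one are placeholders):

struct my_ctx {
	spinlock_t lock;
	/* ... pending work state ... */
};
static bool have_work(struct my_ctx *ctx);
static void do_one(struct my_ctx *ctx);

static void process_all(struct my_ctx *ctx)
{
	unsigned int done = 0;

	spin_lock_irq(&ctx->lock);
	while (have_work(ctx)) {
		do_one(ctx);
		if (++done % 1024)
			continue;	/* stay locked within a batch */

		/* Batch boundary: give IRQs and the scheduler a turn. */
		spin_unlock_irq(&ctx->lock);
		cond_resched();
		spin_lock_irq(&ctx->lock);
	}
	spin_unlock_irq(&ctx->lock);
}

Testing the counter modulo 1024 bounds the lock hold time without paying the unlock/lock cost on every item.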
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 3b2e4a2..4bbc07b 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -202,7 +202,6 @@ struct to_kill {
 	struct task_struct *tsk;
 	unsigned long addr;
 	short size_shift;
-	char addr_valid;
 };
 
 /*
@@ -327,22 +326,27 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
 		}
 	}
 	tk->addr = page_address_in_vma(p, vma);
-	tk->addr_valid = 1;
 	if (is_zone_device_page(p))
 		tk->size_shift = dev_pagemap_mapping_shift(p, vma);
 	else
 		tk->size_shift = compound_order(compound_head(p)) + PAGE_SHIFT;
 
 	/*
-	 * In theory we don't have to kill when the page was
-	 * munmaped. But it could be also a mremap. Since that's
-	 * likely very rare kill anyways just out of paranoia, but use
-	 * a SIGKILL because the error is not contained anymore.
+	 * Send SIGKILL if "tk->addr == -EFAULT". Also, since
+	 * "tk->size_shift" is always non-zero for !is_zone_device_page(),
+	 * "tk->size_shift == 0" effectively checks for no mapping on
+	 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
+	 * to a process' address space, it's possible not all N VMAs
+	 * contain mappings for the page, but at least one VMA does.
+	 * Only deliver SIGBUS with payload derived from the VMA that
+	 * has a mapping for the page.
 	 */
-	if (tk->addr == -EFAULT || tk->size_shift == 0) {
+	if (tk->addr == -EFAULT) {
 		pr_info("Memory failure: Unable to find user space address %lx in %s\n",
 			page_to_pfn(p), tsk->comm);
-		tk->addr_valid = 0;
+	} else if (tk->size_shift == 0) {
+		kfree(tk);
+		return;
 	}
 	get_task_struct(tsk);
 	tk->tsk = tsk;
@@ -369,7 +373,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
 			 * make sure the process doesn't catch the
 			 * signal and then access the memory. Just kill it.
 			 */
-			if (fail || tk->addr_valid == 0) {
+			if (fail || tk->addr == -EFAULT) {
 				pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
 				       pfn, tk->tsk->comm, tk->tsk->pid);
 				do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
@@ -1258,17 +1262,19 @@ int memory_failure(unsigned long pfn, int flags)
 	if (!sysctl_memory_failure_recovery)
 		panic("Memory failure on page %lx", pfn);
 
-	if (!pfn_valid(pfn)) {
+	p = pfn_to_online_page(pfn);
+	if (!p) {
+		if (pfn_valid(pfn)) {
+			pgmap = get_dev_pagemap(pfn, NULL);
+			if (pgmap)
+				return memory_failure_dev_pagemap(pfn, flags,
+								  pgmap);
+		}
 		pr_err("Memory failure: %#lx: memory outside kernel control\n",
 			pfn);
 		return -ENXIO;
 	}
 
-	pgmap = get_dev_pagemap(pfn, NULL);
-	if (pgmap)
-		return memory_failure_dev_pagemap(pfn, flags, pgmap);
-
-	p = pfn_to_page(pfn);
 	if (PageHuge(p))
 		return memory_failure_hugetlb(pfn, flags);
 	if (TestSetPageHWPoison(p)) {
diff --git a/mm/memory.c b/mm/memory.c
index fc9c36b..b619cc2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3078,6 +3078,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	struct page *page = NULL, *swapcache;
 	struct mem_cgroup *memcg;
 	swp_entry_t entry;
+	struct swap_info_struct *si;
+	bool skip_swapcache = false;
 	pte_t pte;
 	int locked;
 	int exclusive = 0;
@@ -3119,15 +3121,24 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 
 
 	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
+
+	/*
+	 * lookup_swap_cache below can fail and before the SWP_SYNCHRONOUS_IO
+	 * check is made, another process can populate the swapcache, delete
+	 * the swap entry and decrement the swap count. So decide on taking
+	 * the SWP_SYNCHRONOUS_IO path before the lookup. In the event of the
+	 * race described, the victim process will find a swap_count > 1
+	 * and can then take the readahead path instead of SWP_SYNCHRONOUS_IO.
+	 */
+	si = swp_swap_info(entry);
+	if (si->flags & SWP_SYNCHRONOUS_IO && __swap_count(si, entry) == 1)
+		skip_swapcache = true;
+
 	page = lookup_swap_cache(entry, vma, vmf->address);
 	swapcache = page;
 
 	if (!page) {
-		struct swap_info_struct *si = swp_swap_info(entry);
-
-		if (si->flags & SWP_SYNCHRONOUS_IO &&
-				__swap_count(si, entry) == 1) {
-			/* skip swapcache */
+		if (skip_swapcache) {
 			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
 							vmf->address);
 			if (page) {
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index c32bcba..85aa15b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -709,6 +709,7 @@ static void  __free_pages_memory(unsigned long start,
 			> bootloader_memory_limit)
 			order--;
 
+		kernel_map_pages(page, 1 << order, 1);
 		__free_pages_hotplug(page, order);
 		onlined_pages += (1UL << order);
 	}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d78b843..4b81d09 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -406,7 +406,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
 	},
 };
 
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags);
 
 struct queue_pages {
@@ -432,11 +432,14 @@ static inline bool queue_pages_required(struct page *page,
 }
 
 /*
- * queue_pages_pmd() has three possible return values:
- * 1 - pages are placed on the right node or queued successfully.
- * 0 - THP was split.
- * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing
- *        page was already on a node that does not follow the policy.
+ * queue_pages_pmd() has four possible return values:
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ *     specified.
+ * 2 - THP was split.
+ * -EIO - the entry is a migration entry, or only MPOL_MF_STRICT was
+ *        specified and an existing page was already on a node that does
+ *        not follow the policy.
  */
 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 				unsigned long end, struct mm_walk *walk)
@@ -454,23 +457,20 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 	if (is_huge_zero_page(page)) {
 		spin_unlock(ptl);
 		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
+		ret = 2;
 		goto out;
 	}
-	if (!queue_pages_required(page, qp)) {
-		ret = 1;
+	if (!queue_pages_required(page, qp))
 		goto unlock;
-	}
 
-	ret = 1;
 	flags = qp->flags;
 	/* go to thp migration */
 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-		if (!vma_migratable(walk->vma)) {
-			ret = -EIO;
+		if (!vma_migratable(walk->vma) ||
+		    migrate_page_add(page, qp->pagelist, flags)) {
+			ret = 1;
 			goto unlock;
 		}
-
-		migrate_page_add(page, qp->pagelist, flags);
 	} else
 		ret = -EIO;
 unlock:
@@ -482,6 +482,13 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 /*
  * Scan through pages checking if pages follow certain conditions,
  * and move them to the pagelist if they do.
+ *
+ * queue_pages_pte_range() has three possible return values:
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ *     specified.
+ * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
+ *        on a node that does not follow the policy.
  */
 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 			unsigned long end, struct mm_walk *walk)
@@ -491,17 +498,17 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	struct queue_pages *qp = walk->private;
 	unsigned long flags = qp->flags;
 	int ret;
+	bool has_unmovable = false;
 	pte_t *pte;
 	spinlock_t *ptl;
 
 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (ptl) {
 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
-		if (ret > 0)
-			return 0;
-		else if (ret < 0)
+		if (ret != 2)
 			return ret;
 	}
+	/* THP was split, fall through to pte walk */
 
 	if (pmd_trans_unstable(pmd))
 		return 0;
@@ -522,14 +529,28 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 		if (!queue_pages_required(page, qp))
 			continue;
 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-			if (!vma_migratable(vma))
+			/* MPOL_MF_STRICT must be specified if we get here */
+			if (!vma_migratable(vma)) {
+				has_unmovable = true;
 				break;
-			migrate_page_add(page, qp->pagelist, flags);
+			}
+
+			/*
+			 * Do not abort immediately since there may be
+			 * temporarily off-LRU pages in the range.  Still
+			 * need to migrate the other LRU pages.
+			 */
+			if (migrate_page_add(page, qp->pagelist, flags))
+				has_unmovable = true;
 		} else
 			break;
 	}
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
+
+	if (has_unmovable)
+		return 1;
+
 	return addr != end ? -EIO : 0;
 }
 
@@ -644,7 +665,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
  *
  * If pages found in a given range are on a set of nodes (determined by
  * @nodes and @flags,) it's isolated and queued to the pagelist which is
- * passed via @private.)
+ * passed via @private.
+ *
+ * queue_pages_range() has three possible return values:
+ * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ *     specified.
+ * 0 - queue pages successfully or no misplaced page.
+ * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified.
  */
 static int
 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
@@ -939,7 +966,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 /*
  * page migration, thp tail pages can be passed.
  */
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags)
 {
 	struct page *head = compound_head(page);
@@ -952,8 +979,19 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 			mod_node_page_state(page_pgdat(head),
 				NR_ISOLATED_ANON + page_is_file_cache(head),
 				hpage_nr_pages(head));
+		} else if (flags & MPOL_MF_STRICT) {
+			/*
+			 * A non-movable page may reach here.  Also, there may
+			 * be temporarily off-LRU pages or non-LRU movable
+			 * pages.  Treat them as unmovable pages since they
+			 * can't be isolated, so they can't be moved at the
+			 * moment.  Return -EIO for this case too.
+			 */
+			return -EIO;
 		}
 	}
+
+	return 0;
 }
 
 /* page allocation callback for NUMA node migration */
@@ -1156,9 +1194,10 @@ static struct page *new_page(struct page *page, unsigned long start)
 }
 #else
 
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags)
 {
+	return -EIO;
 }
 
 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
@@ -1181,6 +1220,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 	struct mempolicy *new;
 	unsigned long end;
 	int err;
+	int ret;
 	LIST_HEAD(pagelist);
 
 	if (flags & ~(unsigned long)MPOL_MF_VALID)
@@ -1242,10 +1282,15 @@ static long do_mbind(unsigned long start, unsigned long len,
 	if (err)
 		goto mpol_out;
 
-	err = queue_pages_range(mm, start, end, nmask,
+	ret = queue_pages_range(mm, start, end, nmask,
 			  flags | MPOL_MF_INVERT, &pagelist);
-	if (!err)
-		err = mbind_range(mm, start, end, new);
+
+	if (ret < 0) {
+		err = -EIO;
+		goto up_out;
+	}
+
+	err = mbind_range(mm, start, end, new);
 
 	if (!err) {
 		int nr_failed = 0;
@@ -1258,13 +1303,14 @@ static long do_mbind(unsigned long start, unsigned long len,
 				putback_movable_pages(&pagelist);
 		}
 
-		if (nr_failed && (flags & MPOL_MF_STRICT))
+		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
 			err = -EIO;
 	} else
 		putback_movable_pages(&pagelist);
 
+up_out:
 	up_write(&mm->mmap_sem);
- mpol_out:
+mpol_out:
 	mpol_put(new);
 	return err;
 }
diff --git a/mm/migrate.c b/mm/migrate.c
index 1c679e0..c00733e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2332,16 +2332,13 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
  */
 static void migrate_vma_collect(struct migrate_vma *migrate)
 {
-	struct mm_walk mm_walk;
-
-	mm_walk.pmd_entry = migrate_vma_collect_pmd;
-	mm_walk.pte_entry = NULL;
-	mm_walk.pte_hole = migrate_vma_collect_hole;
-	mm_walk.hugetlb_entry = NULL;
-	mm_walk.test_walk = NULL;
-	mm_walk.vma = migrate->vma;
-	mm_walk.mm = migrate->vma->vm_mm;
-	mm_walk.private = migrate;
+	struct mm_walk mm_walk = {
+		.pmd_entry = migrate_vma_collect_pmd,
+		.pte_hole = migrate_vma_collect_hole,
+		.vma = migrate->vma,
+		.mm = migrate->vma->vm_mm,
+		.private = migrate,
+	};
 
 	mmu_notifier_invalidate_range_start(mm_walk.mm,
 					    migrate->start,
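
The migrate_vma_collect() cleanup relies on C designated initializers: members not named in the initializer (pte_entry, hugetlb_entry, test_walk) are implicitly zeroed, which makes the old explicit NULL stores redundant. In sketch form (my_pmd_cb and mm stand in for real values):

struct mm_walk walk = {
	.pmd_entry = my_pmd_cb,	/* only the hooks in use are named */
	.mm = mm,
	/* everything else is zero-initialized by the compiler */
};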
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 4272af2..117a8e0 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -52,14 +52,15 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/oom.h>
 
-#define ULMK_MAGIC "lmkd"
-
 int sysctl_panic_on_oom =
 IS_ENABLED(CONFIG_DEBUG_PANIC_ON_OOM) ? 2 : 0;
 int sysctl_oom_kill_allocating_task;
 int sysctl_oom_dump_tasks = 1;
 int sysctl_reap_mem_on_sigkill = 1;
 
+static int panic_on_adj_zero;
+module_param(panic_on_adj_zero, int, 0644);
+
 /*
  * Serializes oom killer invocations (out_of_memory()) from all contexts to
  * prevent from over eager oom killing (e.g. when the oom killer is invoked
@@ -76,42 +77,122 @@ DEFINE_MUTEX(oom_lock);
  */
 
 #ifdef CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER
+
+/* The maximum amount of time to loop in should_ulmk_retry() */
+#define ULMK_TIMEOUT (20 * HZ)
+
+#define ULMK_DBG_POLICY_TRIGGER (BIT(0))
+#define ULMK_DBG_POLICY_WDOG (BIT(1))
+#define ULMK_DBG_POLICY_POSITIVE_ADJ (BIT(2))
+#define ULMK_DBG_POLICY_ALL (BIT(3) - 1)
+static unsigned int ulmk_dbg_policy;
+module_param(ulmk_dbg_policy, uint, 0644);
+
+static atomic64_t ulmk_wdog_expired = ATOMIC64_INIT(0);
 static atomic64_t ulmk_kill_jiffies = ATOMIC64_INIT(INITIAL_JIFFIES);
 static unsigned long psi_emergency_jiffies = INITIAL_JIFFIES;
+/* Prevents contention on the mutex_trylock in psi_emergency_jiffies */
 static DEFINE_MUTEX(ulmk_retry_lock);
 
-
-/*
- * psi_emergency_jiffies represents the last ULMK emergency event.
- * Give ULMK a 2 second window to handle this event.
- * If ULMK has made some progress since then, send another.
- * Repeat as necessary.
- */
-bool should_ulmk_retry(void)
+static bool ulmk_kill_possible(void)
 {
-	unsigned long now, last_kill;
+	struct task_struct *tsk;
 	bool ret = false;
 
-	mutex_lock(&ulmk_retry_lock);
+	rcu_read_lock();
+	for_each_process(tsk) {
+		if (tsk->flags & PF_KTHREAD)
+			continue;
+
+		task_lock(tsk);
+		if (tsk->signal->oom_score_adj >= 0) {
+			ret = true;
+			task_unlock(tsk);
+			break;
+		}
+		task_unlock(tsk);
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
+/*
+ * If CONFIG_DEBUG_PANIC_ON_OOM is enabled, attempt to determine *why*
+ * we are in this state.
+ * 1) No events were sent by PSI to userspace
+ * 2) PSI sent an event to userspace, but userspace was not able to
+ * receive the event. Possible causes of this include waiting for a
+ * mutex which is held by a process in direct reclaim, or the userspace
+ * component having crashed.
+ * 3) Userspace received the event, but decided not to kill anything.
+ */
+bool should_ulmk_retry(gfp_t gfp_mask)
+{
+	unsigned long now, last_kill;
+	bool ret = true;
+	bool wdog_expired, trigger_active;
+
+	struct oom_control oc = {
+		.zonelist = node_zonelist(first_memory_node, gfp_mask),
+		.nodemask = NULL,
+		.memcg = NULL,
+		.gfp_mask = gfp_mask,
+		.order = 0,
+		/* Also causes check_panic_on_oom not to panic */
+		.only_positive_adj = true,
+	};
+
+	if (!sysctl_panic_on_oom)
+		return false;
+
+	if (gfp_mask & __GFP_RETRY_MAYFAIL)
+		return false;
+
+	/* Someone else is already checking. */
+	if (!mutex_trylock(&ulmk_retry_lock))
+		return true;
+
 	now = jiffies;
 	last_kill = atomic64_read(&ulmk_kill_jiffies);
-	if (time_before(now, psi_emergency_jiffies + 2 * HZ)) {
-		ret = true;
-		goto out;
-	}
+	wdog_expired = atomic64_read(&ulmk_wdog_expired);
+	trigger_active = psi_is_trigger_active();
 
-	if (time_after_eq(last_kill, psi_emergency_jiffies)) {
+	if (time_after(last_kill, psi_emergency_jiffies)) {
 		psi_emergency_jiffies = now;
+		ret = true;
+	} else if (time_after(now, psi_emergency_jiffies + ULMK_TIMEOUT)) {
+		ret = false;
+	} else if (!trigger_active) {
+		BUG_ON(ulmk_dbg_policy & ULMK_DBG_POLICY_TRIGGER);
 		psi_emergency_trigger();
 		ret = true;
-		goto out;
+	} else if (wdog_expired) {
+		mutex_lock(&oom_lock);
+		ret = out_of_memory(&oc);
+		mutex_unlock(&oom_lock);
+		BUG_ON(!ret && ulmk_dbg_policy & ULMK_DBG_POLICY_POSITIVE_ADJ);
+	} else if (!ulmk_kill_possible()) {
+		BUG_ON(ulmk_dbg_policy & ULMK_DBG_POLICY_POSITIVE_ADJ);
+		ret = false;
 	}
 
-out:
 	mutex_unlock(&ulmk_retry_lock);
 	return ret;
 }
 
+void ulmk_watchdog_fn(struct timer_list *t)
+{
+	atomic64_set(&ulmk_wdog_expired, 1);
+	BUG_ON(ulmk_dbg_policy & ULMK_DBG_POLICY_WDOG);
+}
+
+void ulmk_watchdog_pet(struct timer_list *t)
+{
+	del_timer_sync(t);
+	atomic64_set(&ulmk_wdog_expired, 0);
+}
+
 void ulmk_update_last_kill(void)
 {
 	atomic64_set(&ulmk_kill_jiffies, jiffies);
@@ -256,7 +337,8 @@ static bool is_dump_unreclaim_slabs(void)
  * task consuming the most memory to avoid subsequent oom failures.
  */
 unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
-			  const nodemask_t *nodemask, unsigned long totalpages)
+			  const nodemask_t *nodemask, unsigned long totalpages,
+			  bool only_positive_adj)
 {
 	long points;
 	long adj;
@@ -275,6 +357,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
 	 */
 	adj = (long)p->signal->oom_score_adj;
 	if (adj == OOM_SCORE_ADJ_MIN ||
+			(only_positive_adj && adj < 0) ||
 			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
 			in_vfork(p)) {
 		task_unlock(p);
@@ -396,7 +479,8 @@ static int oom_evaluate_task(struct task_struct *task, void *arg)
 		goto select;
 	}
 
-	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
+	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages,
+				oc->only_positive_adj);
 	if (!points || points < oc->chosen_points)
 		goto next;
 
@@ -932,11 +1016,12 @@ static void __oom_kill_process(struct task_struct *victim)
 	 */
 	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, PIDTYPE_TGID);
 	mark_oom_victim(victim);
-	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
+	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB oom_score_adj=%hd\n",
 		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
 		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
 		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
-		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
+		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)),
+		victim->signal->oom_score_adj);
 	task_unlock(victim);
 
 	/*
@@ -994,7 +1079,8 @@ static int oom_kill_memcg_member(struct task_struct *task, void *unused)
 	return 0;
 }
 
-static void oom_kill_process(struct oom_control *oc, const char *message)
+static void oom_kill_process(struct oom_control *oc, const char *message,
+				bool quiet)
 {
 	struct task_struct *p = oc->chosen;
 	unsigned int points = oc->chosen_points;
@@ -1021,7 +1107,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
 	}
 	task_unlock(p);
 
-	if (__ratelimit(&oom_rs))
+	if (!quiet && __ratelimit(&oom_rs))
 		dump_header(oc, p);
 
 	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
@@ -1051,7 +1137,8 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
 			 * oom_badness() returns 0 if the thread is unkillable
 			 */
 			child_points = oom_badness(child,
-				oc->memcg, oc->nodemask, oc->totalpages);
+				oc->memcg, oc->nodemask, oc->totalpages,
+				oc->only_positive_adj);
 			if (child_points > victim_points) {
 				put_task_struct(victim);
 				victim = child;
@@ -1070,6 +1157,12 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
 	 */
 	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);
 
+	/*
+	 * If ->only_positive_adj = true in oom context,
+	 * consider them as kill from ulmk.
+	 */
+	if (oc->only_positive_adj)
+		ulmk_update_last_kill();
 	__oom_kill_process(victim);
 
 	/*
@@ -1100,7 +1193,7 @@ static void check_panic_on_oom(struct oom_control *oc,
 			return;
 	}
 	/* Do not panic for oom kills triggered by sysrq */
-	if (is_sysrq_oom(oc))
+	if (is_sysrq_oom(oc) || oc->only_positive_adj)
 		return;
 	dump_header(oc, NULL);
 	panic("Out of memory: %s panic_on_oom is enabled\n",
@@ -1166,9 +1259,10 @@ bool out_of_memory(struct oom_control *oc)
 	 * The OOM killer does not compensate for IO-less reclaim.
 	 * pagefault_out_of_memory lost its gfp context so we have to
 	 * make sure exclude 0 mask - all other users should have at least
-	 * ___GFP_DIRECT_RECLAIM to get here.
+	 * ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has to
+	 * invoke the OOM killer even if it is a GFP_NOFS allocation.
 	 */
-	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
+	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
 		return true;
 
 	/*
@@ -1185,7 +1279,8 @@ bool out_of_memory(struct oom_control *oc)
 	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
 		get_task_struct(current);
 		oc->chosen = current;
-		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
+		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)",
+				 false);
 		return true;
 	}
 
@@ -1199,12 +1294,14 @@ bool out_of_memory(struct oom_control *oc)
 		 * system level, we cannot survive this and will enter
 		 * an endless loop in the allocator. Bail out now.
 		 */
-		if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
+		if (!is_sysrq_oom(oc) && !is_memcg_oom(oc) &&
+		    !oc->only_positive_adj)
 			panic("System is deadlocked on memory\n");
 	}
 	if (oc->chosen && oc->chosen != (void *)-1UL)
 		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
-				 "Memory cgroup out of memory");
+				 "Memory cgroup out of memory",
+			IS_ENABLED(CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER));
 	return !!oc->chosen;
 }
 
@@ -1256,13 +1353,26 @@ void add_to_oom_reaper(struct task_struct *p)
 
 	task_unlock(p);
 
-	if (strcmp(current->comm, ULMK_MAGIC) && __ratelimit(&reaper_rs)
+	if (!strcmp(current->comm, ULMK_MAGIC) && __ratelimit(&reaper_rs)
 			&& p->signal->oom_score_adj == 0) {
 		show_mem(SHOW_MEM_FILTER_NODES, NULL);
 		show_mem_call_notifiers();
-		if (sysctl_oom_dump_tasks)
-			dump_tasks(NULL, NULL);
 	}
 
 	put_task_struct(p);
 }
+
+/*
+ * Should be called prior to sending SIGKILL, to guarantee that the
+ * to-be-killed process is still untouched.
+ */
+void check_panic_on_foreground_kill(struct task_struct *p)
+{
+	if (unlikely(!strcmp(current->comm, ULMK_MAGIC)
+			&& p->signal->oom_score_adj == 0
+			&& panic_on_adj_zero)) {
+		show_mem(SHOW_MEM_FILTER_NODES, NULL);
+		show_mem_call_notifiers();
+		panic("Attempt to kill foreground task: %s", p->comm);
+	}
+}
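For orientation, a minimal sketch of how a kill path might use the new hook; the caller below is hypothetical and not part of this patch, only check_panic_on_foreground_kill() is:

	/* Hypothetical ULMK kill path (illustrative only). */
	static int ulmk_kill_task(struct task_struct *p)
	{
		get_task_struct(p);
		/* Must run before SIGKILL so the victim is still untouched. */
		check_panic_on_foreground_kill(p);
		send_sig(SIGKILL, p, 0);
		put_task_struct(p);
		return 0;
	}
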
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 91f812d..fe70c43 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4582,7 +4582,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 				&compaction_retries))
 		goto retry;
 
-	if (order <= PAGE_ALLOC_COSTLY_ORDER && should_ulmk_retry())
+	if (order <= PAGE_ALLOC_COSTLY_ORDER && should_ulmk_retry(gfp_mask))
 		goto retry;
 
 	/* Deal with possible cpuset update races before we start OOM killing */
@@ -7644,11 +7644,11 @@ static void __setup_per_zone_wmarks(void)
 			    mult_frac(zone->managed_pages,
 				      watermark_scale_factor, 10000));
 
+		zone->watermark_boost = 0;
 		zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) +
 					low + min;
 		zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) +
 					low + min * 2;
-		zone->watermark_boost = 0;
 
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
@@ -8473,7 +8473,6 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 		zone->free_area[order].nr_free--;
 		for (i = 0; i < (1 << order); i++)
 			SetPageReserved((page+i));
-		post_alloc_hook(page, order, GFP_NOWAIT);
 		pfn += (1 << order);
 	}
 	spin_unlock_irqrestore(&zone->lock, flags);
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 97ac8c7..a4a603e3 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -10,6 +10,8 @@
 #include <linux/migrate.h>
 #include <linux/stackdepot.h>
 #include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/sched/clock.h>
 
 #include "internal.h"
 
@@ -24,6 +26,9 @@ struct page_owner {
 	short last_migrate_reason;
 	gfp_t gfp_mask;
 	depot_stack_handle_t handle;
+	int pid;
+	u64 ts_nsec;
+	u64 free_ts_nsec;
 };
 
 static bool page_owner_disabled =
@@ -115,12 +120,15 @@ void __reset_page_owner(struct page *page, unsigned int order)
 {
 	int i;
 	struct page_ext *page_ext;
+	u64 free_ts_nsec = local_clock();
 
 	for (i = 0; i < (1 << order); i++) {
 		page_ext = lookup_page_ext(page + i);
 		if (unlikely(!page_ext))
 			continue;
+		get_page_owner(page_ext)->free_ts_nsec = free_ts_nsec;
 		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
+		__set_bit(PAGE_EXT_PG_FREE, &page_ext->flags);
 	}
 }
 
@@ -183,8 +191,12 @@ static inline void __set_page_owner_handle(struct page_ext *page_ext,
 	page_owner->order = order;
 	page_owner->gfp_mask = gfp_mask;
 	page_owner->last_migrate_reason = -1;
+	page_owner->pid = current->pid;
+	page_owner->ts_nsec = local_clock();
+	page_owner->free_ts_nsec = 0;
 
 	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
+	__clear_bit(PAGE_EXT_PG_FREE, &page_ext->flags);
 }
 
 noinline void __set_page_owner(struct page *page, unsigned int order,
@@ -192,12 +204,24 @@ noinline void __set_page_owner(struct page *page, unsigned int order,
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
 	depot_stack_handle_t handle;
+	int i;
 
 	if (unlikely(!page_ext))
 		return;
 
 	handle = save_stack(gfp_mask);
 	__set_page_owner_handle(page_ext, handle, order, gfp_mask);
+
+	/* set page owner for tail pages if any */
+	for (i = 1; i < (1 << order); i++) {
+		page_ext = lookup_page_ext(page + i);
+
+		if (unlikely(!page_ext))
+			continue;
+
+		/* mark tail pages as order 0 individual pages */
+		__set_page_owner_handle(page_ext, handle, 0, gfp_mask);
+	}
 }
 
 void __set_page_owner_migrate_reason(struct page *page, int reason)
@@ -243,6 +267,9 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
 	new_page_owner->last_migrate_reason =
 		old_page_owner->last_migrate_reason;
 	new_page_owner->handle = old_page_owner->handle;
+	new_page_owner->pid = old_page_owner->pid;
+	new_page_owner->ts_nsec = old_page_owner->ts_nsec;
+	new_page_owner->free_ts_nsec = old_page_owner->free_ts_nsec;
 
 	/*
 	 * We don't clear the bit on the oldpage as it's going to be freed
@@ -277,7 +304,8 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 	 * not matter as the mixed block count will still be correct
 	 */
 	for (; pfn < end_pfn; ) {
-		if (!pfn_valid(pfn)) {
+		page = pfn_to_online_page(pfn);
+		if (!page) {
 			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
 			continue;
 		}
@@ -285,13 +313,13 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
 		block_end_pfn = min(block_end_pfn, end_pfn);
 
-		page = pfn_to_page(pfn);
 		pageblock_mt = get_pageblock_migratetype(page);
 
 		for (; pfn < block_end_pfn; pfn++) {
 			if (!pfn_valid_within(pfn))
 				continue;
 
+			/* The pageblock is online, no need to recheck. */
 			page = pfn_to_page(pfn);
 
 			if (page_zone(page) != zone)
@@ -360,9 +388,10 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
 		return -ENOMEM;
 
 	ret = snprintf(kbuf, count,
-			"Page allocated via order %u, mask %#x(%pGg)\n",
+			"Page allocated via order %u, mask %#x(%pGg), pid %d, ts %llu ns\n",
 			page_owner->order, page_owner->gfp_mask,
-			&page_owner->gfp_mask);
+			&page_owner->gfp_mask, page_owner->pid,
+			page_owner->ts_nsec);
 
 	if (ret >= count)
 		goto err;
@@ -445,8 +474,9 @@ void __dump_page_owner(struct page *page)
 	}
 
 	depot_fetch_stack(handle, &trace);
-	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
-		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
+	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, ts %llu ns\n",
+		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
+		 page_owner->pid, page_owner->ts_nsec);
 	print_stack_trace(&trace, 0);
 
 	if (page_owner->last_migrate_reason != -1)
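With the pid and timestamp recorded, each record read from the page_owner debugfs file gains two fields. The line below is illustrative only (all values invented) and simply mirrors the snprintf format added above:

	Page allocated via order 0, mask 0x100cca(GFP_HIGHUSER_MOVABLE), pid 1234, ts 5432100000 ns
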
diff --git a/mm/rmap.c b/mm/rmap.c
index a77f9b2..94e2488 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1466,7 +1466,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			/*
 			 * No need to invalidate here it will synchronize on
 			 * against the special swap migration pte.
+			 *
+			 * The assignment to subpage above was computed from a
+			 * swap PTE which results in an invalid pointer.
+			 * Since only PAGE_SIZE pages can currently be
+			 * migrated, just set it to page. This will need to be
+			 * changed when hugepage migrations to device private
+			 * memory are supported.
 			 */
+			subpage = page;
 			goto discard;
 		}
 
diff --git a/mm/slub.c b/mm/slub.c
index 755d155..9e5a7e6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4811,7 +4811,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		}
 	}
 
-	get_online_mems();
+	/*
+	 * We cannot take "mem_hotplug_lock" here with "kernfs_mutex"
+	 * already held, as that would conflict with an existing lock order:
+	 *
+	 * mem_hotplug_lock->slab_mutex->kernfs_mutex
+	 *
+	 * We don't really need mem_hotplug_lock (to hold off
+	 * slab_mem_going_offline_callback) here because slab's memory hot
+	 * unplug code doesn't destroy the kmem_cache->node[] data.
+	 */
+
 #ifdef CONFIG_SLUB_DEBUG
 	if (flags & SO_ALL) {
 		struct kmem_cache_node *n;
@@ -4852,7 +4862,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			x += sprintf(buf + x, " N%d=%lu",
 					node, nodes[node]);
 #endif
-	put_online_mems();
 	kfree(nodes);
 	return x + sprintf(buf + x, "\n");
 }
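A simplified picture of the AB-BA inversion the comment above describes (illustrative sketch, not taken from this patch):

	CPU0 (memory hotplug)              CPU1 (this sysfs read)
	  lock(mem_hotplug_lock)             lock(kernfs_mutex)
	  lock(slab_mutex)                   get_online_mems()
	  lock(kernfs_mutex)  <-- waits        <-- waits on mem_hotplug_lock
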
diff --git a/mm/usercopy.c b/mm/usercopy.c
index ac85aeb..45a6eac 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -15,6 +15,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/sched/task.h>
@@ -237,7 +238,12 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 	if (!virt_addr_valid(ptr))
 		return;
 
-	page = virt_to_head_page(ptr);
+	/*
+	 * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
+	 * highmem page or fallback to virt_to_page(). The following
+	 * is effectively a highmem-aware virt_to_head_page().
+	 */
+	page = compound_head(kmap_to_page((void *)ptr));
 
 	if (PageSlab(page)) {
 		/* Check slab allocator for flags and size. */
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d515d13..7bc3a22 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -918,7 +918,7 @@ adjust_va_to_fit_type(struct vmap_area *va,
 	unsigned long nva_start_addr, unsigned long size,
 	enum fit_type type)
 {
-	struct vmap_area *lva;
+	struct vmap_area *lva = NULL;
 
 	if (type == FL_FIT_TYPE) {
 		/*
@@ -977,7 +977,7 @@ adjust_va_to_fit_type(struct vmap_area *va,
 	if (type != FL_FIT_TYPE) {
 		augment_tree_propagate_from(va);
 
-		if (type == NE_FIT_TYPE)
+		if (lva)	/* type == NE_FIT_TYPE */
 			insert_vmap_area_augment(lva, &va->rb_node,
 				&free_vmap_area_root, &free_vmap_area_list);
 	}
@@ -3149,9 +3149,19 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 			goto overflow;
 
 		/*
+		 * If the required width exceeds the current VA block, move
+		 * base downwards and then recheck.
+		 */
+		if (base + end > va->va_end) {
+			base = pvm_determine_end_from_reverse(&va, align) - end;
+			term_area = area;
+			continue;
+		}
+
+		/*
 		 * If this VA does not fit, move base downwards and recheck.
 		 */
-		if (base + start < va->va_start || base + end > va->va_end) {
+		if (base + start < va->va_start) {
 			va = node_to_va(rb_prev(&va->rb_node));
 			base = pvm_determine_end_from_reverse(&va, align) - end;
 			term_area = area;
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 45dcb4f..cf75fca 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -463,6 +463,9 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
  * "hierarchy" or "local").
  *
  * To be used as memcg event method.
+ *
+ * Return: 0 on success, -ENOMEM on memory failure or -EINVAL if @args could
+ * not be parsed.
  */
 int vmpressure_register_event(struct mem_cgroup *memcg,
 			      struct eventfd_ctx *eventfd, const char *args)
@@ -470,7 +473,7 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
 	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
 	struct vmpressure_event *ev;
 	enum vmpressure_modes mode = VMPRESSURE_NO_PASSTHROUGH;
-	enum vmpressure_levels level = -1;
+	enum vmpressure_levels level;
 	char *spec, *spec_orig;
 	char *token;
 	int ret = 0;
@@ -483,20 +486,18 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
 
 	/* Find required level */
 	token = strsep(&spec, ",");
-	level = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
-	if (level < 0) {
-		ret = level;
+	ret = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
+	if (ret < 0)
 		goto out;
-	}
+	level = ret;
 
 	/* Find optional mode */
 	token = strsep(&spec, ",");
 	if (token) {
-		mode = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
-		if (mode < 0) {
-			ret = mode;
+		ret = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
+		if (ret < 0)
 			goto out;
-		}
+		mode = ret;
 	}
 
 	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
@@ -512,6 +513,7 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
 	mutex_lock(&vmpr->events_lock);
 	list_add(&ev->node, &vmpr->events);
 	mutex_unlock(&vmpr->events_lock);
+	ret = 0;
 out:
 	kfree(spec_orig);
 	return ret;
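For reference, the userspace side of this interface under cgroup v1: create an eventfd, open memory.pressure_level, and write "<event_fd> <pressure_fd> <level>[,<mode>]" to cgroup.event_control. A minimal sketch (paths assume a v1 memory controller mount; error handling omitted):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/eventfd.h>
	#include <unistd.h>

	int main(void)
	{
		int efd = eventfd(0, 0);
		int pfd = open("/sys/fs/cgroup/memory/memory.pressure_level",
			       O_RDONLY);
		int cfd = open("/sys/fs/cgroup/memory/cgroup.event_control",
			       O_WRONLY);
		char req[64];
		uint64_t hits;

		int n = snprintf(req, sizeof(req), "%d %d low,hierarchy",
				 efd, pfd);
		write(cfd, req, n);		/* registers the event */
		read(efd, &hits, sizeof(hits));	/* blocks until pressure fires */
		printf("low-pressure notifications: %llu\n",
		       (unsigned long long)hits);
		return 0;
	}
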
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3d12b19..0ec269f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -87,9 +87,6 @@ struct scan_control {
 	/* Can pages be swapped as part of reclaim? */
 	unsigned int may_swap:1;
 
-	/* e.g. boosted watermark reclaim leaves slabs alone */
-	unsigned int may_shrinkslab:1;
-
 	/*
 	 * Cgroups are not reclaimed below their configured memory.low,
 	 * unless we threaten to OOM. If any cgroups are skipped due to
@@ -2807,10 +2804,8 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 			shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
 			node_lru_pages += lru_pages;
 
-			if (sc->may_shrinkslab) {
-				shrink_slab(sc->gfp_mask, pgdat->node_id,
-				    memcg, sc->priority);
-			}
+			shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
+					sc->priority);
 
 			/* Record the group's reclaim efficiency */
 			vmpressure(sc->gfp_mask, memcg, false,
@@ -3295,7 +3290,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.may_writepage = !laptop_mode,
 		.may_unmap = 1,
 		.may_swap = 1,
-		.may_shrinkslab = 1,
 	};
 
 	/*
@@ -3340,7 +3334,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
 		.may_unmap = 1,
 		.reclaim_idx = MAX_NR_ZONES - 1,
 		.may_swap = !noswap,
-		.may_shrinkslab = 1,
 	};
 	unsigned long lru_pages;
 
@@ -3387,7 +3380,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 		.may_writepage = !laptop_mode,
 		.may_unmap = 1,
 		.may_swap = may_swap,
-		.may_shrinkslab = 1,
 	};
 
 	/*
@@ -3699,7 +3691,6 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		 */
 		sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
 		sc.may_swap = !nr_boost_reclaim;
-		sc.may_shrinkslab = !nr_boost_reclaim;
 
 		/*
 		 * Do some background aging of the anon list, to give
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 2cc2ec7..fd1d172 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1162,7 +1162,7 @@ const char * const vmstat_text[] = {
 	"nr_vmscan_immediate_reclaim",
 	"nr_dirtied",
 	"nr_written",
-	"", /* nr_indirectly_reclaimable */
+	"nr_indirectly_reclaimable",
 	"nr_unreclaimable_pages",
 
 	/* enum writeback_stat_item counters */
@@ -1568,9 +1568,9 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 		   "\n        present  %lu"
 		   "\n        managed  %lu",
 		   zone_page_state(zone, NR_FREE_PAGES),
-		   min_wmark_pages(zone),
-		   low_wmark_pages(zone),
-		   high_wmark_pages(zone),
+		   min_wmark_pages(zone) - zone->watermark_boost,
+		   low_wmark_pages(zone) - zone->watermark_boost,
+		   high_wmark_pages(zone) - zone->watermark_boost,
 		   zone->spanned_pages,
 		   zone->present_pages,
 		   zone->managed_pages);
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 6d97f19..0ae2f5f 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -53,6 +53,7 @@
 #include <linux/zpool.h>
 #include <linux/mount.h>
 #include <linux/migrate.h>
+#include <linux/wait.h>
 #include <linux/pagemap.h>
 #include <linux/fs.h>
 
@@ -267,6 +268,10 @@ struct zs_pool {
 #ifdef CONFIG_COMPACTION
 	struct inode *inode;
 	struct work_struct free_work;
+	/* A wait queue for when migration races with async_free_zspage() */
+	struct wait_queue_head migration_wait;
+	atomic_long_t isolated_pages;
+	bool destroying;
 #endif
 };
 
@@ -1882,6 +1887,31 @@ static void dec_zspage_isolation(struct zspage *zspage)
 	zspage->isolated--;
 }
 
+static void putback_zspage_deferred(struct zs_pool *pool,
+				    struct size_class *class,
+				    struct zspage *zspage)
+{
+	enum fullness_group fg;
+
+	fg = putback_zspage(class, zspage);
+	if (fg == ZS_EMPTY)
+		schedule_work(&pool->free_work);
+}
+
+static inline void zs_pool_dec_isolated(struct zs_pool *pool)
+{
+	VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
+	atomic_long_dec(&pool->isolated_pages);
+	/*
+	 * There's no possibility of racing, since wait_for_isolated_drain()
+	 * checks the isolated count under &class->lock after enqueuing
+	 * on migration_wait.
+	 */
+	if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
+		wake_up_all(&pool->migration_wait);
+}
+
 static void replace_sub_page(struct size_class *class, struct zspage *zspage,
 				struct page *newpage, struct page *oldpage)
 {
@@ -1951,6 +1981,7 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
 	 */
 	if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
 		get_zspage_mapping(zspage, &class_idx, &fullness);
+		atomic_long_inc(&pool->isolated_pages);
 		remove_zspage(class, zspage, fullness);
 	}
 
@@ -2050,8 +2081,16 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 	 * Page migration is done so let's putback isolated zspage to
 	 * the list if @page is final isolated subpage in the zspage.
 	 */
-	if (!is_zspage_isolated(zspage))
-		putback_zspage(class, zspage);
+	if (!is_zspage_isolated(zspage)) {
+		/*
+		 * We cannot race with zs_destroy_pool() here because we wait
+		 * for isolation to hit zero before we start destroying.
+		 * Also, we ensure that everyone can see pool->destroying before
+		 * we start waiting.
+		 */
+		putback_zspage_deferred(pool, class, zspage);
+		zs_pool_dec_isolated(pool);
+	}
 
 	reset_page(page);
 	put_page(page);
@@ -2097,13 +2136,12 @@ static void zs_page_putback(struct page *page)
 	spin_lock(&class->lock);
 	dec_zspage_isolation(zspage);
 	if (!is_zspage_isolated(zspage)) {
-		fg = putback_zspage(class, zspage);
 		/*
 		 * Due to page_lock, we cannot free zspage immediately
 		 * so let's defer.
 		 */
-		if (fg == ZS_EMPTY)
-			schedule_work(&pool->free_work);
+		putback_zspage_deferred(pool, class, zspage);
+		zs_pool_dec_isolated(pool);
 	}
 	spin_unlock(&class->lock);
 }
@@ -2127,8 +2165,36 @@ static int zs_register_migration(struct zs_pool *pool)
 	return 0;
 }
 
+static bool pool_isolated_are_drained(struct zs_pool *pool)
+{
+	return atomic_long_read(&pool->isolated_pages) == 0;
+}
+
+/* Wait until no pages in this pool remain isolated for migration. */
+static void wait_for_isolated_drain(struct zs_pool *pool)
+{
+	/*
+	 * We're in the process of destroying the pool, so there are no
+	 * active allocations. zs_page_isolate() fails for completely free
+	 * zspages, so we need only wait for the zs_pool's isolated
+	 * count to hit zero.
+	 */
+	wait_event(pool->migration_wait,
+		   pool_isolated_are_drained(pool));
+}
+
 static void zs_unregister_migration(struct zs_pool *pool)
 {
+	pool->destroying = true;
+	/*
+	 * We need a memory barrier here to ensure global visibility of
+	 * pool->destroying. Thus pool->isolated pages will either be 0 in which
+	 * case we don't care, or it will be > 0 and pool->destroying will
+	 * ensure that we wake up once isolation hits 0.
+	 */
+	smp_mb();
+	wait_for_isolated_drain(pool); /* This can block */
 	flush_work(&pool->free_work);
 	iput(pool->inode);
 }
@@ -2366,6 +2432,10 @@ struct zs_pool *zs_create_pool(const char *name)
 	if (!pool->name)
 		goto err;
 
+#ifdef CONFIG_COMPACTION
+	init_waitqueue_head(&pool->migration_wait);
+#endif
+
 	if (create_cache(pool))
 		goto err;
 
diff --git a/net/9p/client.c b/net/9p/client.c
index b615aae..d62f83f 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -296,6 +296,7 @@ p9_tag_alloc(struct p9_client *c, int8_t type, unsigned int max_size)
 
 	p9pdu_reset(&req->tc);
 	p9pdu_reset(&req->rc);
+	req->t_err = 0;
 	req->status = REQ_STATUS_ALLOC;
 	init_waitqueue_head(&req->wq);
 	INIT_LIST_HEAD(&req->req_list);
diff --git a/net/Kconfig b/net/Kconfig
index f46a913..f1704f5 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -293,7 +293,6 @@
 	bool "enable BPF Just In Time compiler"
 	depends on HAVE_CBPF_JIT || HAVE_EBPF_JIT
 	depends on MODULES
-	depends on !CFI
 	---help---
 	  Berkeley Packet Filter filtering capabilities are normally handled
 	  by an interpreter. This option allows kernel to generate a native
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 795fbc6..9abb18f 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1028,6 +1028,11 @@ static int atalk_create(struct net *net, struct socket *sock, int protocol,
 	 */
 	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
 		goto out;
+
+	rc = -EPERM;
+	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
+		goto out;
+
 	rc = -ENOMEM;
 	sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto, kern);
 	if (!sk)
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 5d01edf..44ec492 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -858,6 +858,8 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
 		break;
 
 	case SOCK_RAW:
+		if (!capable(CAP_NET_RAW))
+			return -EPERM;
 		break;
 	default:
 		return -ESOCKTNOSUPPORT;
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 0b7b36f..36f2441 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -463,17 +463,23 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
  * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached
  * @buff_pos: current position in the skb
  * @packet_len: total length of the skb
- * @tvlv_len: tvlv length of the previously considered OGM
+ * @ogm_packet: potential OGM in buffer
  *
  * Return: true if there is enough space for another OGM, false otherwise.
  */
-static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
-				      __be16 tvlv_len)
+static bool
+batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
+			  const struct batadv_ogm_packet *ogm_packet)
 {
 	int next_buff_pos = 0;
 
-	next_buff_pos += buff_pos + BATADV_OGM_HLEN;
-	next_buff_pos += ntohs(tvlv_len);
+	/* check if there is enough space for the header */
+	next_buff_pos += buff_pos + sizeof(*ogm_packet);
+	if (next_buff_pos > packet_len)
+		return false;
+
+	/* check if there is enough space for the optional TVLV */
+	next_buff_pos += ntohs(ogm_packet->tvlv_len);
 
 	return (next_buff_pos <= packet_len) &&
 	       (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
@@ -501,7 +507,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
 
 	/* adjust all flags and log packets */
 	while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
-					 batadv_ogm_packet->tvlv_len)) {
+					 batadv_ogm_packet)) {
 		/* we might have aggregated direct link packets with an
 		 * ordinary base packet
 		 */
@@ -1852,7 +1858,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
 
 	/* unpack the aggregated packets and process them one by one */
 	while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
-					 ogm_packet->tvlv_len)) {
+					 ogm_packet)) {
 		batadv_iv_ogm_process(skb, ogm_offset, if_incoming);
 
 		ogm_offset += BATADV_OGM_HLEN;
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 2948b41..d241ccc 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -643,17 +643,23 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
  * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated
  * @buff_pos: current position in the skb
  * @packet_len: total length of the skb
- * @tvlv_len: tvlv length of the previously considered OGM
+ * @ogm2_packet: potential OGM2 in buffer
  *
  * Return: true if there is enough space for another OGM, false otherwise.
  */
-static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
-				     __be16 tvlv_len)
+static bool
+batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
+			 const struct batadv_ogm2_packet *ogm2_packet)
 {
 	int next_buff_pos = 0;
 
-	next_buff_pos += buff_pos + BATADV_OGM2_HLEN;
-	next_buff_pos += ntohs(tvlv_len);
+	/* check if there is enough space for the header */
+	next_buff_pos += buff_pos + sizeof(*ogm2_packet);
+	if (next_buff_pos > packet_len)
+		return false;
+
+	/* check if there is enough space for the optional TVLV */
+	next_buff_pos += ntohs(ogm2_packet->tvlv_len);
 
 	return (next_buff_pos <= packet_len) &&
 	       (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
@@ -830,7 +836,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
 	ogm_packet = (struct batadv_ogm2_packet *)skb->data;
 
 	while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
-					ogm_packet->tvlv_len)) {
+					ogm_packet)) {
 		batadv_v_ogm_process(skb, ogm_offset, if_incoming);
 
 		ogm_offset += BATADV_OGM2_HLEN;
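Both OGM parsers now apply the same defensive pattern: prove that the fixed-size header lies within the buffer before reading any length field out of it. In generic form (a sketch; the struct and field names are made up):

	struct frame_hdr {
		__be16 payload_len;
		/* ... */
	};

	static bool frame_fits(int off, int buf_len,
			       const struct frame_hdr *hdr)
	{
		int next = off + sizeof(*hdr);

		if (next > buf_len)	/* header itself would overrun */
			return false;

		/* Only now is hdr->payload_len safe to dereference. */
		next += ntohs(hdr->payload_len);
		return next <= buf_len;
	}
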
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index 0d9459b..c328209 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -118,7 +118,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
 {
 	struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);
 
-	return attr ? nla_get_u32(attr) : 0;
+	return (attr && nla_len(attr) == sizeof(u32)) ? nla_get_u32(attr) : 0;
 }
 
 /**
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 0adcddb..3e7badb 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -5545,11 +5545,6 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
 		return send_conn_param_neg_reply(hdev, handle,
 						 HCI_ERROR_UNKNOWN_CONN_ID);
 
-	if (min < hcon->le_conn_min_interval ||
-	    max > hcon->le_conn_max_interval)
-		return send_conn_param_neg_reply(hdev, handle,
-						 HCI_ERROR_INVALID_LL_PARAMS);
-
 	if (hci_check_conn_params(min, max, latency, timeout))
 		return send_conn_param_neg_reply(hdev, handle,
 						 HCI_ERROR_INVALID_LL_PARAMS);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a54dadf..260ef54 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -5287,14 +5287,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
 
 	memset(&rsp, 0, sizeof(rsp));
 
-	if (min < hcon->le_conn_min_interval ||
-	    max > hcon->le_conn_max_interval) {
-		BT_DBG("requested connection interval exceeds current bounds.");
-		err = -EINVAL;
-	} else {
-		err = hci_check_conn_params(min, max, latency, to_multiplier);
-	}
-
+	err = hci_check_conn_params(min, max, latency, to_multiplier);
 	if (err)
 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
 	else
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 6d9f48b..5519881 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -419,7 +419,7 @@ static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
 	struct nlmsghdr *nlh;
 	struct nlattr *nest;
 
-	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
 	if (!nlh)
 		return -EMSGSIZE;
 
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 0bb4d71..7d249af 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -225,7 +225,7 @@ unsigned int ebt_do_table(struct sk_buff *skb,
 			return NF_DROP;
 		}
 
-		ADD_COUNTER(*(counter_base + i), 1, skb->len);
+		ADD_COUNTER(*(counter_base + i), skb->len, 1);
 
 		/* these should only watch: not modify, nor tell us
 		 * what to do with the packet
@@ -963,8 +963,8 @@ static void get_counters(const struct ebt_counter *oldcounters,
 			continue;
 		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
 		for (i = 0; i < nentries; i++)
-			ADD_COUNTER(counters[i], counter_base[i].pcnt,
-				    counter_base[i].bcnt);
+			ADD_COUNTER(counters[i], counter_base[i].bcnt,
+				    counter_base[i].pcnt);
 	}
 }
 
@@ -1289,7 +1289,7 @@ static int do_update_counters(struct net *net, const char *name,
 
 	/* we add to the counters of the first cpu */
 	for (i = 0; i < num_counters; i++)
-		ADD_COUNTER(t->private->counters[i], tmp[i].pcnt, tmp[i].bcnt);
+		ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt);
 
 	write_unlock_bh(&t->lock);
 	ret = 0;
@@ -1779,20 +1779,28 @@ static int compat_calc_entry(const struct ebt_entry *e,
 	return 0;
 }
 
+static int ebt_compat_init_offsets(unsigned int number)
+{
+	if (number > INT_MAX)
+		return -EINVAL;
+
+	/* also count the base chain policies */
+	number += NF_BR_NUMHOOKS;
+
+	return xt_compat_init_offsets(NFPROTO_BRIDGE, number);
+}
 
 static int compat_table_info(const struct ebt_table_info *info,
 			     struct compat_ebt_replace *newinfo)
 {
 	unsigned int size = info->entries_size;
 	const void *entries = info->entries;
+	int ret;
 
 	newinfo->entries_size = size;
-	if (info->nentries) {
-		int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
-						 info->nentries);
-		if (ret)
-			return ret;
-	}
+	ret = ebt_compat_init_offsets(info->nentries);
+	if (ret)
+		return ret;
 
 	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
 							entries, newinfo);
@@ -2241,11 +2249,9 @@ static int compat_do_replace(struct net *net, void __user *user,
 
 	xt_compat_lock(NFPROTO_BRIDGE);
 
-	if (tmp.nentries) {
-		ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
-		if (ret < 0)
-			goto out_unlock;
-	}
+	ret = ebt_compat_init_offsets(tmp.nentries);
+	if (ret < 0)
+		goto out_unlock;
 
 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
 	if (ret < 0)
@@ -2268,8 +2274,10 @@ static int compat_do_replace(struct net *net, void __user *user,
 	state.buf_kern_len = size64;
 
 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
-	if (WARN_ON(ret < 0))
+	if (WARN_ON(ret < 0)) {
+		vfree(entries_tmp);
 		goto out_unlock;
+	}
 
 	vfree(entries_tmp);
 	tmp.entries_size = size64;
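The byte/packet swaps above bring the callers in line with the shared x_tables macro, which takes bytes before packets. For reference, its shape (as found in include/linux/netfilter/x_tables.h of this era; verify against your tree):

	/* ADD_COUNTER(counter, bytes, packets): bcnt first, pcnt second. */
	#define ADD_COUNTER(c, b, p) do {	\
		(c).bcnt += (b);		\
		(c).pcnt += (p);		\
	} while (0)
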
diff --git a/net/can/gw.c b/net/can/gw.c
index 53859346..bd21614 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -1046,32 +1046,50 @@ static __init int cgw_module_init(void)
 	pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n",
 		max_hops);
 
-	register_pernet_subsys(&cangw_pernet_ops);
+	ret = register_pernet_subsys(&cangw_pernet_ops);
+	if (ret)
+		return ret;
+
+	ret = -ENOMEM;
 	cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
 				      0, 0, NULL);
-
 	if (!cgw_cache)
-		return -ENOMEM;
+		goto out_cache_create;
 
 	/* set notifier */
 	notifier.notifier_call = cgw_notifier;
-	register_netdevice_notifier(&notifier);
+	ret = register_netdevice_notifier(&notifier);
+	if (ret)
+		goto out_register_notifier;
 
 	ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
 				   NULL, cgw_dump_jobs, 0);
-	if (ret) {
-		unregister_netdevice_notifier(&notifier);
-		kmem_cache_destroy(cgw_cache);
-		return -ENOBUFS;
-	}
+	if (ret)
+		goto out_rtnl_register1;
 
-	/* Only the first call to rtnl_register_module can fail */
-	rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
-			     cgw_create_job, NULL, 0);
-	rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
-			     cgw_remove_job, NULL, 0);
+	ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
+				   cgw_create_job, NULL, 0);
+	if (ret)
+		goto out_rtnl_register2;
+	ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
+				   cgw_remove_job, NULL, 0);
+	if (ret)
+		goto out_rtnl_register3;
 
 	return 0;
+
+out_rtnl_register3:
+	rtnl_unregister(PF_CAN, RTM_NEWROUTE);
+out_rtnl_register2:
+	rtnl_unregister(PF_CAN, RTM_GETROUTE);
+out_rtnl_register1:
+	unregister_netdevice_notifier(&notifier);
+out_register_notifier:
+	kmem_cache_destroy(cgw_cache);
+out_cache_create:
+	unregister_pernet_subsys(&cangw_pernet_ops);
+
+	return ret;
 }
 
 static __exit void cgw_module_exit(void)
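The rewritten init path follows the standard kernel unwind idiom: each successfully registered resource gets a label, and a failure jumps to the label that tears down everything registered so far, in reverse order. Schematically (a generic sketch, not this driver's symbols):

	err = register_a();
	if (err)
		return err;

	err = register_b();
	if (err)
		goto out_a;

	err = register_c();
	if (err)
		goto out_b;

	return 0;

	out_b:
		unregister_b();
	out_a:
		unregister_a();
		return err;
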
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 60934bd..76c41a8 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1423,7 +1423,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
 	struct ceph_osds up, acting;
 	bool force_resend = false;
 	bool unpaused = false;
-	bool legacy_change;
+	bool legacy_change = false;
 	bool split = false;
 	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
 	bool recovery_deletes = ceph_osdmap_flag(osdc,
@@ -1511,15 +1511,14 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
 		t->osd = acting.primary;
 	}
 
-	if (unpaused || legacy_change || force_resend ||
-	    (split && con && CEPH_HAVE_FEATURE(con->peer_features,
-					       RESEND_ON_SPLIT)))
+	if (unpaused || legacy_change || force_resend || split)
 		ct_res = CALC_TARGET_NEED_RESEND;
 	else
 		ct_res = CALC_TARGET_NO_ACTION;
 
 out:
-	dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
+	dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
+	     legacy_change, force_resend, split, ct_res, t->osd);
 	return ct_res;
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index e369e1b..f7928f9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -8592,6 +8592,8 @@ int register_netdevice(struct net_device *dev)
 	ret = notifier_to_errno(ret);
 	if (ret) {
 		rollback_registered(dev);
+		rcu_barrier();
+
 		dev->reg_state = NETREG_UNREGISTERED;
 	}
 	/*
diff --git a/net/core/filter.c b/net/core/filter.c
index c996380..e6fa885 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -7234,13 +7234,13 @@ sk_reuseport_is_valid_access(int off, int size,
 		return size == size_default;
 
 	/* Fields that allow narrowing */
-	case offsetof(struct sk_reuseport_md, eth_protocol):
+	case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
 		if (size < FIELD_SIZEOF(struct sk_buff, protocol))
 			return false;
 		/* fall through */
-	case offsetof(struct sk_reuseport_md, ip_protocol):
-	case offsetof(struct sk_reuseport_md, bind_inany):
-	case offsetof(struct sk_reuseport_md, len):
+	case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
+	case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
+	case bpf_ctx_range(struct sk_reuseport_md, len):
 		bpf_ctx_record_field_size(info, size_default);
 		return bpf_ctx_narrow_access_ok(off, size, size_default);
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 3ae8998..a581cf1 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -122,7 +122,7 @@ static void queue_process(struct work_struct *work)
 		txq = netdev_get_tx_queue(dev, q_index);
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (netif_xmit_frozen_or_stopped(txq) ||
-		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
+		    !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
 			skb_queue_head(&npinfo->txq, skb);
 			HARD_TX_UNLOCK(dev, txq);
 			local_irq_restore(flags);
@@ -335,7 +335,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 
 				HARD_TX_UNLOCK(dev, txq);
 
-				if (status == NETDEV_TX_OK)
+				if (dev_xmit_complete(status))
 					break;
 
 			}
@@ -352,7 +352,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 
 	}
 
-	if (status != NETDEV_TX_OK) {
+	if (!dev_xmit_complete(status)) {
 		skb_queue_tail(&npinfo->txq, skb);
 		schedule_delayed_work(&npinfo->tx_work,0);
 	}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9b9f696..0629ca8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3530,6 +3530,25 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 	int pos;
 	int dummy;
 
+	if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
+	    (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
+		/* gso_size is untrusted, and we have a frag_list with a linear
+		 * non-head_frag head.
+		 *
+		 * (We assume checking the first list_skb member suffices;
+		 * i.e. if any of the list_skb members has a non-head_frag
+		 * head, then the first one does too.)
+		 *
+		 * If head_skb's headlen does not fit requested gso_size, it
+		 * means that the frag_list members do NOT terminate on exact
+		 * gso_size boundaries. Hence we cannot perform skb_frag_t page
+		 * sharing. Therefore we must fallback to copying the frag_list
+		 * skbs; we do so by disabling SG.
+		 */
+		if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
+			features &= ~NETIF_F_SG;
+	}
+
 	__skb_push(head_skb, doffset);
 	proto = skb_network_protocol(head_skb, &dummy);
 	if (unlikely(!proto))
diff --git a/net/core/sock.c b/net/core/sock.c
index 9c32e8e..f881eea 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1563,8 +1563,6 @@ static void __sk_destruct(struct rcu_head *head)
 		sk_filter_uncharge(sk, filter);
 		RCU_INIT_POINTER(sk->sk_filter, NULL);
 	}
-	if (rcu_access_pointer(sk->sk_reuseport_cb))
-		reuseport_detach_sock(sk);
 
 	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
 
@@ -1587,7 +1585,14 @@ static void __sk_destruct(struct rcu_head *head)
 
 void sk_destruct(struct sock *sk)
 {
-	if (sock_flag(sk, SOCK_RCU_FREE))
+	bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
+
+	if (rcu_access_pointer(sk->sk_reuseport_cb)) {
+		reuseport_detach_sock(sk);
+		use_call_rcu = true;
+	}
+
+	if (use_call_rcu)
 		call_rcu(&sk->sk_rcu, __sk_destruct);
 	else
 		__sk_destruct(&sk->sk_rcu);
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index ba5cba5..fd38cf1 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -292,8 +292,19 @@ struct sock *reuseport_select_sock(struct sock *sk,
 
 select_by_hash:
 		/* no bpf or invalid bpf result: fall back to hash usage */
-		if (!sk2)
-			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
+		if (!sk2) {
+			int i, j;
+
+			i = j = reciprocal_scale(hash, socks);
+			while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
+				i++;
+				if (i >= reuse->num_socks)
+					i = 0;
+				if (i == j)
+					goto out;
+			}
+			sk2 = reuse->socks[i];
+		}
 	}
 
 out:
diff --git a/net/core/stream.c b/net/core/stream.c
index 7d329fb..7f5eaa9 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -120,7 +120,6 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
 	int err = 0;
 	long vm_wait = 0;
 	long current_timeo = *timeo_p;
-	bool noblock = (*timeo_p ? false : true);
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
 	if (sk_stream_memory_free(sk))
@@ -133,11 +132,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
 
 		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
 			goto do_error;
-		if (!*timeo_p) {
-			if (noblock)
-				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-			goto do_nonblock;
-		}
+		if (!*timeo_p)
+			goto do_eagain;
 		if (signal_pending(current))
 			goto do_interrupted;
 		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -169,7 +165,13 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
 do_error:
 	err = -EPIPE;
 	goto out;
-do_nonblock:
+do_eagain:
+	/* Make sure that whenever EAGAIN is returned, EPOLLOUT event can
+	 * be generated later.
+	 * When TCP receives ACK packets that make room, tcp_check_space()
+	 * only calls tcp_new_space() if SOCK_NOSPACE is set.
+	 */
+	set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 	err = -EAGAIN;
 	goto out;
 do_interrupted:
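The SOCK_NOSPACE bit is what lets a later ACK turn into an EPOLLOUT wakeup; without it, an edge-triggered poller that got EAGAIN could hang forever. The userspace half of that contract looks roughly like this (sketch; setup of epfd omitted):

	ssize_t n = send(fd, buf, len, MSG_DONTWAIT);
	if (n < 0 && errno == EAGAIN) {
		struct epoll_event ev = {
			.events = EPOLLOUT | EPOLLET,
			.data.fd = fd,
		};

		epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev);
		/* With this fix, the EPOLLOUT edge is guaranteed to arrive
		 * once TCP frees write-queue space. */
		epoll_wait(epfd, &ev, 1, -1);
	}
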
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 142b294..b0b9413 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -127,6 +127,9 @@ static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
 {
 	int port;
 
+	if (!ds->ops->port_mdb_add)
+		return;
+
 	for_each_set_bit(port, bitmap, ds->num_ports)
 		ds->ops->port_mdb_add(ds, port, mdb);
 }
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index bc6b912..8981974 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -1018,6 +1018,9 @@ static int ieee802154_create(struct net *net, struct socket *sock,
 
 	switch (sock->type) {
 	case SOCK_RAW:
+		rc = -EPERM;
+		if (!capable(CAP_NET_RAW))
+			goto out;
 		proto = &ieee802154_raw_prot;
 		ops = &ieee802154_raw_ops;
 		break;
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index f915abf..80107a6 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -19,6 +19,7 @@
 #include <net/sock.h>
 #include <net/route.h>
 #include <net/tcp_states.h>
+#include <net/sock_reuseport.h>
 
 int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
@@ -73,6 +74,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
 	}
 	inet->inet_daddr = fl4->daddr;
 	inet->inet_dport = usin->sin_port;
+	reuseport_has_conns(sk, true);
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
 	inet->inet_id = jiffies;
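reuseport_has_conns(sk, true) marks the whole group as containing connected sockets, so later lookups (see the udp4_lib_lookup2() change below) skip the group hash and score each socket instead. A userspace arrangement that creates exactly this situation (sketch):

	#include <netinet/in.h>
	#include <sys/socket.h>

	int main(void)
	{
		struct sockaddr_in addr = {
			.sin_family = AF_INET,
			.sin_port = htons(5000),
			.sin_addr.s_addr = htonl(INADDR_ANY),
		};
		struct sockaddr_in peer = {
			.sin_family = AF_INET,
			.sin_port = htons(6000),
			.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
		};
		int a = socket(AF_INET, SOCK_DGRAM, 0);
		int b = socket(AF_INET, SOCK_DGRAM, 0);
		int one = 1;

		setsockopt(a, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
		setsockopt(b, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
		bind(a, (struct sockaddr *)&addr, sizeof(addr));
		bind(b, (struct sockaddr *)&addr, sizeof(addr));

		/* Connecting one member marks the whole group; unconnected
		 * traffic must no longer be hashed onto socket b. */
		connect(b, (struct sockaddr *)&peer, sizeof(peer));
		return 0;
	}
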
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index ad75c46..0167e23 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -587,7 +587,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
 
 	if (!rt)
 		goto out;
-	net = dev_net(rt->dst.dev);
+
+	if (rt->dst.dev)
+		net = dev_net(rt->dst.dev);
+	else if (skb_in->dev)
+		net = dev_net(skb_in->dev);
+	else
+		goto out;
 
 	/*
 	 *	Find the original header. It is expected to be valid, of course.
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 3c73483..0b87558 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1531,6 +1531,7 @@ static void erspan_setup(struct net_device *dev)
 	struct ip_tunnel *t = netdev_priv(dev);
 
 	ether_setup(dev);
+	dev->max_mtu = 0;
 	dev->netdev_ops = &erspan_netdev_ops;
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c
index c200065..6367ecd 100644
--- a/net/ipv4/raw_diag.c
+++ b/net/ipv4/raw_diag.c
@@ -23,9 +23,6 @@ raw_get_hashinfo(const struct inet_diag_req_v2 *r)
 		return &raw_v6_hashinfo;
 #endif
 	} else {
-		pr_warn_once("Unexpected inet family %d\n",
-			     r->sdiag_family);
-		WARN_ON_ONCE(1);
 		return ERR_PTR(-EINVAL);
 	}
 }
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 232581c..69127f60 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -908,16 +908,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 	if (peer->rate_tokens == 0 ||
 	    time_after(jiffies,
 		       (peer->rate_last +
-			(ip_rt_redirect_load << peer->rate_tokens)))) {
+			(ip_rt_redirect_load << peer->n_redirects)))) {
 		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
 
 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
 		peer->rate_last = jiffies;
-		++peer->rate_tokens;
 		++peer->n_redirects;
 #ifdef CONFIG_IP_ROUTE_VERBOSE
 		if (log_martians &&
-		    peer->rate_tokens == ip_rt_redirect_number)
+		    peer->n_redirects == ip_rt_redirect_number)
 			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
 					     &ip_hdr(skb)->saddr, inet_iif(skb),
 					     &ip_hdr(skb)->daddr, &gw);
@@ -1477,7 +1476,7 @@ static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
 	prev = cmpxchg(p, orig, rt);
 	if (prev == orig) {
 		if (orig) {
-			dst_dev_put(&orig->dst);
+			rt_add_uncached_list(orig);
 			dst_release(&orig->dst);
 		}
 	} else {
@@ -2382,14 +2381,17 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
 	int orig_oif = fl4->flowi4_oif;
 	unsigned int flags = 0;
 	struct rtable *rth;
-	int err = -ENETUNREACH;
+	int err;
 
 	if (fl4->saddr) {
-		rth = ERR_PTR(-EINVAL);
 		if (ipv4_is_multicast(fl4->saddr) ||
 		    ipv4_is_lbcast(fl4->saddr) ||
-		    ipv4_is_zeronet(fl4->saddr))
+		    ipv4_is_zeronet(fl4->saddr)) {
+			rth = ERR_PTR(-EINVAL);
 			goto out;
+		}
+
+		rth = ERR_PTR(-ENETUNREACH);
 
 		/* I removed check for oif == dev_out->oif here.
 		   It was wrong for two reasons:
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3ed0789..0c29bdd 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -934,6 +934,22 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
 	return mss_now;
 }
 
+/* In some cases, both sendpage() and sendmsg() could have added
+ * an skb to the write queue, but failed adding payload on it.
+ * We need to remove it to consume less memory, but more
+ * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
+ * users.
+ */
+static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
+{
+	if (skb && !skb->len) {
+		tcp_unlink_write_queue(skb, sk);
+		if (tcp_write_queue_empty(sk))
+			tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
+		sk_wmem_free_skb(sk, skb);
+	}
+}
+
 ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
 			 size_t size, int flags)
 {
@@ -1056,6 +1072,7 @@ ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
 	return copied;
 
 do_error:
+	tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk));
 	if (copied)
 		goto out;
 out_err:
@@ -1409,17 +1426,11 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 	sock_zerocopy_put(uarg);
 	return copied + copied_syn;
 
-do_fault:
-	if (!skb->len) {
-		tcp_unlink_write_queue(skb, sk);
-		/* It is the one place in all of TCP, except connection
-		 * reset, where we can be unlinking the send_head.
-		 */
-		tcp_check_send_head(sk, skb);
-		sk_wmem_free_skb(sk, skb);
-	}
-
 do_error:
+	skb = tcp_write_queue_tail(sk);
+do_fault:
+	tcp_remove_empty_skb(sk, skb);
+
 	if (copied + copied_syn)
 		goto out;
 out_err:
@@ -2523,6 +2534,9 @@ void tcp_write_queue_purge(struct sock *sk)
 	INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
 	sk_mem_reclaim(sk);
 	tcp_clear_all_retrans_hints(tcp_sk(sk));
+	tcp_sk(sk)->highest_sack = NULL;
+	tcp_sk(sk)->sacked_out = 0;
+	tcp_sk(sk)->wqp_called = 1;
 	tcp_sk(sk)->packets_out = 0;
 	inet_csk(sk)->icsk_backoff = 0;
 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 5de1d76..57e8dad 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -260,7 +260,7 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
 
 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
 {
-	tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+	tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
 }
 
 static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8454512..53f910b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2029,7 +2029,7 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
 		if (len <= skb->len)
 			break;
 
-		if (unlikely(TCP_SKB_CB(skb)->eor))
+		if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
 			return false;
 
 		len -= skb->len;
@@ -2145,6 +2145,7 @@ static int tcp_mtu_probe(struct sock *sk)
 			 * we need to propagate it to the new skb.
 			 */
 			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
+			tcp_skb_collapse_tstamp(nskb, skb);
 			tcp_unlink_write_queue(skb, sk);
 			sk_wmem_free_skb(sk, skb);
 		} else {
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 17335a3..9d775b8 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -219,7 +219,7 @@ static int tcp_write_timeout(struct sock *sk)
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct net *net = sock_net(sk);
-	bool expired, do_reset;
+	bool expired = false, do_reset;
 	int retry_until;
 
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
@@ -251,9 +251,10 @@ static int tcp_write_timeout(struct sock *sk)
 			if (tcp_out_of_resources(sk, do_reset))
 				return 1;
 		}
+	}
+	if (!expired)
 		expired = retransmits_timed_out(sk, retry_until,
 						icsk->icsk_user_timeout);
-	}
 	tcp_fastopen_active_detect_blackhole(sk, expired);
 
 	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1a880bc2..22b290e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -449,12 +449,13 @@ static struct sock *udp4_lib_lookup2(struct net *net,
 		score = compute_score(sk, net, saddr, sport,
 				      daddr, hnum, dif, sdif, exact_dif);
 		if (score > badness) {
-			if (sk->sk_reuseport) {
+			if (sk->sk_reuseport &&
+			    sk->sk_state != TCP_ESTABLISHED) {
 				hash = udp_ehashfn(net, daddr, hnum,
 						   saddr, sport);
 				result = reuseport_select_sock(sk, hash, skb,
 							sizeof(struct udphdr));
-				if (result)
+				if (result && !reuseport_has_conns(sk, false))
 					return result;
 			}
 			badness = score;
@@ -780,6 +781,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
 	int is_udplite = IS_UDPLITE(sk);
 	int offset = skb_transport_offset(skb);
 	int len = skb->len - offset;
+	int datalen = len - sizeof(*uh);
 	__wsum csum = 0;
 
 	/*
@@ -813,10 +815,12 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
 			return -EIO;
 		}
 
-		skb_shinfo(skb)->gso_size = cork->gso_size;
-		skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
-		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(uh),
-							 cork->gso_size);
+		if (datalen > cork->gso_size) {
+			skb_shinfo(skb)->gso_size = cork->gso_size;
+			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
+								 cork->gso_size);
+		}
 		goto csum_partial;
 	}
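The datalen > cork->gso_size guard matters for the UDP_SEGMENT API: a sender may set a segment size and then write a payload that fits in a single segment, and such an skb must not carry GSO state. Illustrative use of the (real) socket option, with invented sizes; fd is assumed to be already connected:

	#include <linux/udp.h>		/* UDP_SEGMENT */
	#include <netinet/in.h>		/* IPPROTO_UDP */
	#include <sys/socket.h>

	int gso_size = 1400;

	/* Segment each write into 1400-byte datagrams on the wire. */
	setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size));

	/* 4200 bytes -> three 1400-byte datagrams (GSO path). */
	send(fd, big_buf, 4200, 0);

	/* 700 bytes <= gso_size -> one plain datagram; with this fix the
	 * skb is not marked SKB_GSO_UDP_L4. */
	send(fd, small_buf, 700, 0);
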
 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6bd1ba10..9638c5e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -997,7 +997,8 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
 	int err = 0;
 
 	if (addr_type == IPV6_ADDR_ANY ||
-	    addr_type & IPV6_ADDR_MULTICAST ||
+	    (addr_type & IPV6_ADDR_MULTICAST &&
+	     !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) ||
 	    (!(idev->dev->flags & IFF_LOOPBACK) &&
 	     addr_type & IPV6_ADDR_LOOPBACK))
 		return ERR_PTR(-EADDRNOTAVAIL);
@@ -2252,8 +2253,17 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
 		return addrconf_ifid_ieee1394(eui, dev);
 	case ARPHRD_TUNNEL6:
 	case ARPHRD_IP6GRE:
-	case ARPHRD_RAWIP:
 		return addrconf_ifid_ip6tnl(eui, dev);
+	case ARPHRD_RAWIP: {
+		struct in6_addr lladdr;
+
+		if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
+			get_random_bytes(eui, 8);
+		else
+			memcpy(eui, lladdr.s6_addr + 8, 8);
+
+		return 0;
+	}
 	}
 	return -1;
 }
@@ -5700,13 +5710,20 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 	switch (event) {
 	case RTM_NEWADDR:
 		/*
-		 * If the address was optimistic
-		 * we inserted the route at the start of
-		 * our DAD process, so we don't need
-		 * to do it again
+		 * If the address was optimistic we inserted the route at the
+		 * start of our DAD process, so we don't need to do it again.
+		 * If the device was taken down in the middle of the DAD
+		 * cycle there is a race where we could get here without a
+		 * host route, so nothing to insert. That will be fixed when
+		 * the device is brought up.
 		 */
-		if (!rcu_access_pointer(ifp->rt->fib6_node))
+		if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
 			ip6_ins_rt(net, ifp->rt);
+		} else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
+			pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
+				&ifp->addr, ifp->idev->dev->name);
+		}
+
 		if (ifp->idev->cnf.forwarding)
 			addrconf_join_anycast(ifp);
 		if (!ipv6_addr_any(&ifp->peer_addr))
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index aea3cb4..90fee80 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -31,6 +31,7 @@
 #include <net/ip6_route.h>
 #include <net/tcp_states.h>
 #include <net/dsfield.h>
+#include <net/sock_reuseport.h>
 
 #include <linux/errqueue.h>
 #include <linux/uaccess.h>
@@ -258,6 +259,7 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
 		goto out;
 	}
 
+	reuseport_has_conns(sk, true);
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
 out:
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index a53ef07..a23516e 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -988,7 +988,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 		if (unlikely(!tun_info ||
 			     !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 			     ip_tunnel_info_af(tun_info) != AF_INET6))
-			return -EINVAL;
+			goto tx_err;
 
 		key = &tun_info->key;
 		memset(&fl6, 0, sizeof(fl6));
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 67275f1..a925d82 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -80,8 +80,10 @@ static void ip6_sublist_rcv_finish(struct list_head *head)
 {
 	struct sk_buff *skb, *next;
 
-	list_for_each_entry_safe(skb, next, head, list)
+	list_for_each_entry_safe(skb, next, head, list) {
+		skb_list_del_init(skb);
 		dst_input(skb);
+	}
 }
 
 static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
@@ -220,6 +222,16 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
 	if (ipv6_addr_is_multicast(&hdr->saddr))
 		goto err;
 
+	/* While RFC4291 is not explicit about v4mapped addresses
+	 * in IPv6 headers, it seems clear the Linux dual-stack
+	 * model cannot deal properly with these.
+	 * Security models could be fooled by ::ffff:127.0.0.1 for example.
+	 *
+	 * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
+	 */
+	if (ipv6_addr_v4mapped(&hdr->saddr))
+		goto err;
+
 	skb->transport_header = skb->network_header + sizeof(*hdr);
 	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
 
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index dbab62e3..2d80e91 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -791,14 +791,15 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
 	if (pmc) {
 		im->idev = pmc->idev;
 		if (im->mca_sfmode == MCAST_INCLUDE) {
-			im->mca_tomb = pmc->mca_tomb;
-			im->mca_sources = pmc->mca_sources;
+			swap(im->mca_tomb, pmc->mca_tomb);
+			swap(im->mca_sources, pmc->mca_sources);
 			for (psf = im->mca_sources; psf; psf = psf->sf_next)
 				psf->sf_crcount = idev->mc_qrv;
 		} else {
 			im->mca_crcount = idev->mc_qrv;
 		}
 		in6_dev_put(pmc->idev);
+		ip6_mc_clear_src(pmc);
 		kfree(pmc);
 	}
 	spin_unlock_bh(&im->mca_lock);
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 4c04bcc..5c9be85 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -228,7 +228,7 @@ static int __net_init ping_v6_proc_init_net(struct net *net)
 	return 0;
 }
 
-static void __net_init ping_v6_proc_exit_net(struct net *net)
+static void __net_exit ping_v6_proc_exit_net(struct net *net)
 {
 	remove_proc_entry("icmp6", net->proc_net);
 }
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a792a80..b3e3c05 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3109,7 +3109,7 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
 	rt->fib6_metric = cfg->fc_metric;
 	rt->fib6_nh.nh_weight = 1;
 
-	rt->fib6_type = cfg->fc_type;
+	rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
 
 	/* We cannot add true routes via loopback here,
 	   they would result in kernel looping; promote them to reject routes
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index a8d9e02..00a2313 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -177,13 +177,14 @@ static struct sock *udp6_lib_lookup2(struct net *net,
 		score = compute_score(sk, net, saddr, sport,
 				      daddr, hnum, dif, sdif, exact_dif);
 		if (score > badness) {
-			if (sk->sk_reuseport) {
+			if (sk->sk_reuseport &&
+			    sk->sk_state != TCP_ESTABLISHED) {
 				hash = udp6_ehashfn(net, daddr, hnum,
 						    saddr, sport);
 
 				result = reuseport_select_sock(sk, hash, skb,
 							sizeof(struct udphdr));
-				if (result)
+				if (result && !reuseport_has_conns(sk, false))
 					return result;
 			}
 			result = sk;
@@ -1071,6 +1072,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
 	__wsum csum = 0;
 	int offset = skb_transport_offset(skb);
 	int len = skb->len - offset;
+	int datalen = len - sizeof(*uh);
 
 	/*
 	 * Create a UDP header
@@ -1103,8 +1105,12 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
 			return -EIO;
 		}
 
-		skb_shinfo(skb)->gso_size = cork->gso_size;
-		skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+		if (datalen > cork->gso_size) {
+			skb_shinfo(skb)->gso_size = cork->gso_size;
+			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
+								 cork->gso_size);
+		}
 		goto csum_partial;
 	}
 
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index b919db0..8cc0e5a 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -382,7 +382,7 @@ static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
 	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
 	struct bpf_prog *prog = psock->bpf_prog;
 
-	return (*prog->bpf_func)(skb, prog->insnsi);
+	return BPF_PROG_RUN(prog, skb);
 }
 
 static int kcm_read_sock_done(struct strparser *strp, int err)
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 40c5102..a48e83b 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1471,6 +1471,11 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
 	if (is_multicast_ether_addr(mac))
 		return -EINVAL;
 
+	if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) &&
+	    sdata->vif.type == NL80211_IFTYPE_STATION &&
+	    !sdata->u.mgd.associated)
+		return -EINVAL;
+
 	sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
 	if (!sta)
 		return -ENOMEM;
@@ -1478,10 +1483,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
 	if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
 		sta->sta.tdls = true;
 
-	if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
-	    !sdata->u.mgd.associated)
-		return -EINVAL;
-
 	err = sta_apply_parameters(local, sta, params);
 	if (err) {
 		sta_info_free(local, sta);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index d37d4ac..316250a 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -490,9 +490,14 @@ static ssize_t ieee80211_if_fmt_aqm(
 	const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
 {
 	struct ieee80211_local *local = sdata->local;
-	struct txq_info *txqi = to_txq_info(sdata->vif.txq);
+	struct txq_info *txqi;
 	int len;
 
+	if (!sdata->vif.txq)
+		return 0;
+
+	txqi = to_txq_info(sdata->vif.txq);
+
 	spin_lock_bh(&local->fq.lock);
 	rcu_read_lock();
 
@@ -659,7 +664,9 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
 	DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
 	DEBUGFS_ADD(hw_queues);
 
-	if (sdata->local->ops->wake_tx_queue)
+	if (sdata->local->ops->wake_tx_queue &&
+	    sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+	    sdata->vif.type != NL80211_IFTYPE_NAN)
 		DEBUGFS_ADD(aqm);
 }
 
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b5c0624..26e8fa7 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -160,10 +160,10 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 	memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
 	ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
 
+	memset(chandef, 0, sizeof(struct cfg80211_chan_def));
 	chandef->chan = channel;
 	chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
 	chandef->center_freq1 = channel->center_freq;
-	chandef->center_freq2 = 0;
 
 	if (!ht_oper || !sta_ht_cap.ht_supported) {
 		ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
@@ -2554,7 +2554,8 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
 
 	rcu_read_lock();
 	ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID);
-	if (WARN_ON_ONCE(ssid == NULL))
+	if (WARN_ONCE(!ssid || ssid[1] > IEEE80211_MAX_SSID_LEN,
+		      "invalid SSID element (len=%d)", ssid ? ssid[1] : -1))
 		ssid_len = 0;
 	else
 		ssid_len = ssid[1];
@@ -5039,7 +5040,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 
 	rcu_read_lock();
 	ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
-	if (!ssidie) {
+	if (!ssidie || ssidie[1] > sizeof(assoc_data->ssid)) {
 		rcu_read_unlock();
 		kfree(assoc_data);
 		return -EINVAL;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 7523d99..b12f23c 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2372,11 +2372,13 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
 		      skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) &&
 		     sdata->control_port_over_nl80211)) {
 		struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-		bool noencrypt = status->flag & RX_FLAG_DECRYPTED;
+		bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);
 
 		cfg80211_rx_control_port(dev, skb, noencrypt);
 		dev_kfree_skb(skb);
 	} else {
+		memset(skb->cb, 0, sizeof(skb->cb));
+
 		/* deliver to local stack */
 		if (rx->napi)
 			napi_gro_receive(rx->napi, skb);
@@ -2470,8 +2472,6 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
 
 	if (skb) {
 		skb->protocol = eth_type_trans(skb, dev);
-		memset(skb->cb, 0, sizeof(skb->cb));
-
 		ieee80211_deliver_skb_to_local_stack(skb, rx);
 	}
 
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 534a604..9afd3fc 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -264,6 +264,8 @@ static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info)
 	/* IEEE80211_RADIOTAP_RATE rate */
 	if (info->status.rates[0].idx >= 0 &&
 	    !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
+					     RATE_INFO_FLAGS_DMG |
+					     RATE_INFO_FLAGS_EDMG |
 					     IEEE80211_TX_RC_VHT_MCS)))
 		len += 2;
 
@@ -315,6 +317,8 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
 	/* IEEE80211_RADIOTAP_RATE */
 	if (info->status.rates[0].idx >= 0 &&
 	    !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
+					     RATE_INFO_FLAGS_DMG |
+					     RATE_INFO_FLAGS_EDMG |
 					     IEEE80211_TX_RC_VHT_MCS))) {
 		u16 rate;
 
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index c596385..f101a64 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -3527,9 +3527,7 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
 	}
 
 	/* Always allow software iftypes */
-	if (local->hw.wiphy->software_iftypes & BIT(iftype) ||
-	    (iftype == NL80211_IFTYPE_AP_VLAN &&
-	     local->hw.wiphy->flags & WIPHY_FLAG_4ADDR_AP)) {
+	if (cfg80211_iftype_allowed(local->hw.wiphy, iftype, 0, 1)) {
 		if (radar_detect)
 			return -EINVAL;
 		return 0;
@@ -3564,7 +3562,8 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
 
 		if (sdata_iter == sdata ||
 		    !ieee80211_sdata_running(sdata_iter) ||
-		    local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
+		    cfg80211_iftype_allowed(local->hw.wiphy,
+					    wdev_iter->iftype, 0, 1))
 			continue;
 
 		params.iftype_num[wdev_iter->iftype]++;
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 13ade57..4f01321 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -230,7 +230,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
 
 	e.id = ip_to_id(map, ip);
 
-	if (opt->flags & IPSET_DIM_ONE_SRC)
+	if (opt->flags & IPSET_DIM_TWO_SRC)
 		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
 	else
 		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 1577f2f..e2538c57 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1157,7 +1157,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
 		return -ENOENT;
 
 	write_lock_bh(&ip_set_ref_lock);
-	if (set->ref != 0) {
+	if (set->ref != 0 || set->ref_netlink != 0) {
 		ret = -IPSET_ERR_REFERENCED;
 		goto out;
 	}
diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
index fd87de3..16ec822 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
@@ -95,15 +95,11 @@ hash_ipmac4_kadt(struct ip_set *set, const struct sk_buff *skb,
 	struct hash_ipmac4_elem e = { .ip = 0, { .foo[0] = 0, .foo[1] = 0 } };
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
-	 /* MAC can be src only */
-	if (!(opt->flags & IPSET_DIM_TWO_SRC))
-		return 0;
-
 	if (skb_mac_header(skb) < skb->head ||
 	    (skb_mac_header(skb) + ETH_HLEN) > skb->data)
 		return -EINVAL;
 
-	if (opt->flags & IPSET_DIM_ONE_SRC)
+	if (opt->flags & IPSET_DIM_TWO_SRC)
 		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
 	else
 		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 27eff89..c6073d1 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -431,13 +431,12 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
  * table location, we assume id gets exposed to userspace.
  *
  * Following nf_conn items do not change throughout lifetime
- * of the nf_conn after it has been committed to main hash table:
+ * of the nf_conn:
  *
  * 1. nf_conn address
- * 2. nf_conn->ext address
- * 3. nf_conn->master address (normally NULL)
- * 4. tuple
- * 5. the associated net namespace
+ * 2. nf_conn->master address (normally NULL)
+ * 3. the associated net namespace
+ * 4. the original direction tuple
  */
 u32 nf_ct_get_id(const struct nf_conn *ct)
 {
@@ -447,9 +446,10 @@ u32 nf_ct_get_id(const struct nf_conn *ct)
 	net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
 
 	a = (unsigned long)ct;
-	b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
-	c = (unsigned long)ct->ext;
-	d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
+	b = (unsigned long)ct->master;
+	c = (unsigned long)nf_ct_net(ct);
+	d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				   sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
 				   &ct_id_seed);
 #ifdef CONFIG_64BIT
 	return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index a11c304..efc14c7 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -323,7 +323,7 @@ static int find_pattern(const char *data, size_t dlen,
 		i++;
 	}
 
-	pr_debug("Skipped up to `%c'!\n", skip);
+	pr_debug("Skipped up to 0x%hhx delimiter!\n", skip);
 
 	*numoff = i;
 	*numlen = getnum(data + i, dlen - i, cmd, term, numoff);
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 5df7486..8ade405 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -203,7 +203,7 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 		return err;
 	}
 
-	flow->timeout = (u32)jiffies;
+	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(flow_offload_add);
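Storing jiffies + NF_FLOW_TIMEOUT makes flow->timeout a deadline rather than a creation stamp, so a fresh entry is not instantly treated as expired. Any comparison that later consumes it must be wraparound-safe on a free-running 32-bit counter; a sketch in the spirit of the kernel's time_after():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "has the deadline passed" test on a free-running u32
 * tick counter, as the kernel's time_after() family does. */
static bool expired(uint32_t now, uint32_t deadline)
{
	return (int32_t)(now - deadline) >= 0;
}

int main(void)
{
	uint32_t timeout_ticks = 30 * 100;		/* e.g. 30s at HZ=100 */
	uint32_t now = 0xfffffff0u;			/* close to wrap */
	uint32_t deadline = now + timeout_ticks;	/* wraps; still fine */

	printf("%d\n", expired(now, deadline));			/* 0: live */
	printf("%d\n", expired(now + timeout_ticks + 1, deadline));	/* 1 */
	return 0;
}
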
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 29ff59d..24fddf0 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -121,9 +121,14 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
 		return;
 
 	list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
-		if (trans->msg_type == NFT_MSG_NEWSET &&
-		    nft_trans_set(trans) == set) {
-			set->bound = true;
+		switch (trans->msg_type) {
+		case NFT_MSG_NEWSET:
+			if (nft_trans_set(trans) == set)
+				nft_trans_set_bound(trans) = true;
+			break;
+		case NFT_MSG_NEWSETELEM:
+			if (nft_trans_elem_set(trans) == set)
+				nft_trans_elem_set_bound(trans) = true;
 			break;
 		}
 	}
@@ -3424,8 +3429,11 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 			      NFT_SET_OBJECT))
 			return -EINVAL;
 		/* Only one of these operations is supported */
-		if ((flags & (NFT_SET_MAP | NFT_SET_EVAL | NFT_SET_OBJECT)) ==
-			     (NFT_SET_MAP | NFT_SET_EVAL | NFT_SET_OBJECT))
+		if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) ==
+			     (NFT_SET_MAP | NFT_SET_OBJECT))
+			return -EOPNOTSUPP;
+		if ((flags & (NFT_SET_EVAL | NFT_SET_OBJECT)) ==
+			     (NFT_SET_EVAL | NFT_SET_OBJECT))
 			return -EOPNOTSUPP;
 	}
 
@@ -6656,7 +6664,7 @@ static int __nf_tables_abort(struct net *net)
 			break;
 		case NFT_MSG_NEWSET:
 			trans->ctx.table->use--;
-			if (nft_trans_set(trans)->bound) {
+			if (nft_trans_set_bound(trans)) {
 				nft_trans_destroy(trans);
 				break;
 			}
@@ -6668,7 +6676,7 @@ static int __nf_tables_abort(struct net *net)
 			nft_trans_destroy(trans);
 			break;
 		case NFT_MSG_NEWSETELEM:
-			if (nft_trans_elem_set(trans)->bound) {
+			if (nft_trans_elem_set_bound(trans)) {
 				nft_trans_destroy(trans);
 				break;
 			}
diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
index af1497a..69d6173 100644
--- a/net/netfilter/nft_connlimit.c
+++ b/net/netfilter/nft_connlimit.c
@@ -218,8 +218,13 @@ static void nft_connlimit_destroy_clone(const struct nft_ctx *ctx,
 static bool nft_connlimit_gc(struct net *net, const struct nft_expr *expr)
 {
 	struct nft_connlimit *priv = nft_expr_priv(expr);
+	bool ret;
 
-	return nf_conncount_gc_list(net, &priv->list);
+	local_bh_disable();
+	ret = nf_conncount_gc_list(net, &priv->list);
+	local_bh_enable();
+
+	return ret;
 }
 
 static struct nft_expr_type nft_connlimit_type;
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index 6e0c260..1ef8cb7 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -71,11 +71,11 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
 {
 	struct nft_flow_offload *priv = nft_expr_priv(expr);
 	struct nf_flowtable *flowtable = &priv->flowtable->data;
+	struct tcphdr _tcph, *tcph = NULL;
 	enum ip_conntrack_info ctinfo;
 	struct nf_flow_route route;
 	struct flow_offload *flow;
 	enum ip_conntrack_dir dir;
-	bool is_tcp = false;
 	struct nf_conn *ct;
 	int ret;
 
@@ -88,7 +88,10 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
 
 	switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
 	case IPPROTO_TCP:
-		is_tcp = true;
+		tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff,
+					  sizeof(_tcph), &_tcph);
+		if (unlikely(!tcph || tcph->fin || tcph->rst))
+			goto out;
 		break;
 	case IPPROTO_UDP:
 		break;
@@ -115,7 +118,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
 	if (!flow)
 		goto err_flow_alloc;
 
-	if (is_tcp) {
+	if (tcph) {
 		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
 		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
 	}
@@ -146,6 +149,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
 	return nft_chain_validate_hooks(ctx->chain, hook_mask);
 }
 
+static const struct nla_policy nft_flow_offload_policy[NFTA_FLOW_MAX + 1] = {
+	[NFTA_FLOW_TABLE_NAME]	= { .type = NLA_STRING,
+				    .len = NFT_NAME_MAXLEN - 1 },
+};
+
 static int nft_flow_offload_init(const struct nft_ctx *ctx,
 				 const struct nft_expr *expr,
 				 const struct nlattr * const tb[])
@@ -204,6 +212,7 @@ static const struct nft_expr_ops nft_flow_offload_ops = {
 static struct nft_expr_type nft_flow_offload_type __read_mostly = {
 	.name		= "flow_offload",
 	.ops		= &nft_flow_offload_ops,
+	.policy		= nft_flow_offload_policy,
 	.maxattr	= NFTA_FLOW_MAX,
 	.owner		= THIS_MODULE,
 };
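The eval path above now copies the TCP header out with skb_header_pointer() instead of casting into the packet, and refuses to offload flows already carrying FIN or RST. A userspace analogue of the copy-then-inspect step (struct and names hypothetical):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Analogue of skb_header_pointer(): copy len bytes at off into the
 * caller's buffer only if they lie fully inside the packet. */
static const void *header_pointer(const uint8_t *pkt, size_t pktlen,
				  size_t off, size_t len, void *buf)
{
	if (off + len < off || off + len > pktlen)
		return NULL;	/* truncated packet (or offset overflow) */
	memcpy(buf, pkt + off, len);
	return buf;
}

struct tcphdr_min { uint8_t flags; };	/* toy: just a flag byte */

int main(void)
{
	uint8_t pkt[] = { 0x02 };	/* one in-bounds header byte */
	struct tcphdr_min th;

	if (!header_pointer(pkt, sizeof(pkt), 0, sizeof(th), &th))
		return 1;
	printf("flags=0x%02x\n", th.flags);
	return 0;
}
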
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 161c345..55754d9 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -76,9 +76,6 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
 	if (IS_ERR(set))
 		return PTR_ERR(set);
 
-	if (set->flags & NFT_SET_EVAL)
-		return -EOPNOTSUPP;
-
 	priv->sreg = nft_parse_register(tb[NFTA_LOOKUP_SREG]);
 	err = nft_validate_register_load(priv->sreg, set->klen);
 	if (err < 0)
diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
index d7f3776..637ce3e 100644
--- a/net/netfilter/nft_socket.c
+++ b/net/netfilter/nft_socket.c
@@ -47,9 +47,6 @@ static void nft_socket_eval(const struct nft_expr *expr,
 		return;
 	}
 
-	/* So that subsequent socket matching not to require other lookups. */
-	skb->sk = sk;
-
 	switch(priv->key) {
 	case NFT_SOCKET_TRANSPARENT:
 		nft_reg_store8(dest, inet_sk_transparent(sk));
@@ -66,6 +63,9 @@ static void nft_socket_eval(const struct nft_expr *expr,
 		WARN_ON(1);
 		regs->verdict.code = NFT_BREAK;
 	}
+
+	if (sk != skb->sk)
+		sock_gen_put(sk);
 }
 
 static const struct nla_policy nft_socket_policy[NFTA_SOCKET_MAX + 1] = {
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
index 6b56f41..3241fee 100644
--- a/net/netfilter/xt_nfacct.c
+++ b/net/netfilter/xt_nfacct.c
@@ -57,25 +57,39 @@ nfacct_mt_destroy(const struct xt_mtdtor_param *par)
 	nfnl_acct_put(info->nfacct);
 }
 
-static struct xt_match nfacct_mt_reg __read_mostly = {
-	.name       = "nfacct",
-	.family     = NFPROTO_UNSPEC,
-	.checkentry = nfacct_mt_checkentry,
-	.match      = nfacct_mt,
-	.destroy    = nfacct_mt_destroy,
-	.matchsize  = sizeof(struct xt_nfacct_match_info),
-	.usersize   = offsetof(struct xt_nfacct_match_info, nfacct),
-	.me         = THIS_MODULE,
+static struct xt_match nfacct_mt_reg[] __read_mostly = {
+	{
+		.name       = "nfacct",
+		.revision   = 0,
+		.family     = NFPROTO_UNSPEC,
+		.checkentry = nfacct_mt_checkentry,
+		.match      = nfacct_mt,
+		.destroy    = nfacct_mt_destroy,
+		.matchsize  = sizeof(struct xt_nfacct_match_info),
+		.usersize   = offsetof(struct xt_nfacct_match_info, nfacct),
+		.me         = THIS_MODULE,
+	},
+	{
+		.name       = "nfacct",
+		.revision   = 1,
+		.family     = NFPROTO_UNSPEC,
+		.checkentry = nfacct_mt_checkentry,
+		.match      = nfacct_mt,
+		.destroy    = nfacct_mt_destroy,
+		.matchsize  = sizeof(struct xt_nfacct_match_info_v1),
+		.usersize   = offsetof(struct xt_nfacct_match_info_v1, nfacct),
+		.me         = THIS_MODULE,
+	},
 };
 
 static int __init nfacct_mt_init(void)
 {
-	return xt_register_match(&nfacct_mt_reg);
+	return xt_register_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
 }
 
 static void __exit nfacct_mt_exit(void)
 {
-	xt_unregister_match(&nfacct_mt_reg);
+	xt_unregister_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
 }
 
 module_init(nfacct_mt_init);
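With both ABI revisions in one array, registration and teardown stay symmetric via ARRAY_SIZE(). A generic sketch of the revision-table pattern (the sizes below are made-up placeholders, not the real xt_nfacct_match_info layouts):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct match {
	const char *name;
	int revision;
	unsigned int matchsize;
};

/* Both revisions of the same match live in one table and are registered
 * and unregistered together, as the patch does with
 * xt_register_matches()/xt_unregister_matches(). */
static const struct match mt_reg[] = {
	{ .name = "nfacct", .revision = 0, .matchsize = 40 },	/* placeholder */
	{ .name = "nfacct", .revision = 1, .matchsize = 48 },	/* placeholder */
};

int main(void)
{
	for (unsigned int i = 0; i < ARRAY_SIZE(mt_reg); i++)
		printf("%s rev%d size=%u\n", mt_reg[i].name,
		       mt_reg[i].revision, mt_reg[i].matchsize);
	return 0;
}
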
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index 05f00fb..cd15ea7 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -104,11 +104,9 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
 	if (info->bitmask & (XT_PHYSDEV_OP_OUT | XT_PHYSDEV_OP_ISOUT) &&
 	    (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
 	     info->invert & XT_PHYSDEV_OP_BRIDGED) &&
-	    par->hook_mask & ((1 << NF_INET_LOCAL_OUT) |
-	    (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) {
+	    par->hook_mask & (1 << NF_INET_LOCAL_OUT)) {
 		pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n");
-		if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
-			return -EINVAL;
+		return -EINVAL;
 	}
 
 	if (!brnf_probed) {
diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c
index 24b7742..c427244 100644
--- a/net/netfilter/xt_quota2.c
+++ b/net/netfilter/xt_quota2.c
@@ -296,8 +296,8 @@ static void quota_mt2_destroy(const struct xt_mtdtor_param *par)
 	}
 
 	list_del(&e->list);
-	remove_proc_entry(e->name, proc_xt_quota);
 	spin_unlock_bh(&counter_list_lock);
+	remove_proc_entry(e->name, proc_xt_quota);
 	kfree(e);
 }
 
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index ae29627..e0a2cb8 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -119,9 +119,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
 	llcp_sock->service_name = kmemdup(llcp_addr.service_name,
 					  llcp_sock->service_name_len,
 					  GFP_KERNEL);
-
+	if (!llcp_sock->service_name) {
+		ret = -ENOMEM;
+		goto put_dev;
+	}
 	llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
 	if (llcp_sock->ssap == LLCP_SAP_MAX) {
+		kfree(llcp_sock->service_name);
+		llcp_sock->service_name = NULL;
 		ret = -EADDRINUSE;
 		goto put_dev;
 	}
@@ -1011,10 +1016,13 @@ static int llcp_sock_create(struct net *net, struct socket *sock,
 	    sock->type != SOCK_RAW)
 		return -ESOCKTNOSUPPORT;
 
-	if (sock->type == SOCK_RAW)
+	if (sock->type == SOCK_RAW) {
+		if (!capable(CAP_NET_RAW))
+			return -EPERM;
 		sock->ops = &llcp_rawsock_ops;
-	else
+	} else {
 		sock->ops = &llcp_sock_ops;
+	}
 
 	sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC, kern);
 	if (sk == NULL)
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 9f2875e..b366226 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -981,7 +981,8 @@ static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info)
 	int rc;
 	u32 idx;
 
-	if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+	if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+	    !info->attrs[NFC_ATTR_TARGET_INDEX])
 		return -EINVAL;
 
 	idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
@@ -1029,7 +1030,8 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info)
 	struct sk_buff *msg = NULL;
 	u32 idx;
 
-	if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+	if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+	    !info->attrs[NFC_ATTR_FIRMWARE_NAME])
 		return -EINVAL;
 
 	idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 0f5ce77..8e396c7 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -2239,7 +2239,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
 	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
 	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
 	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
-	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
+	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
 	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
 	[OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
 	[OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 33e982b..7e25a6a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2616,6 +2616,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 
 	mutex_lock(&po->pg_vec_lock);
 
+	/* packet_sendmsg() check on tx_ring.pg_vec was lockless,
+	 * we need to confirm it under protection of pg_vec_lock.
+	 */
+	if (unlikely(!po->tx_ring.pg_vec)) {
+		err = -EBUSY;
+		goto out;
+	}
 	if (likely(saddr == NULL)) {
 		dev	= packet_cached_dev_get(po);
 		proto	= po->num;
diff --git a/net/psample/psample.c b/net/psample/psample.c
index 64f9562..4cea3532 100644
--- a/net/psample/psample.c
+++ b/net/psample/psample.c
@@ -156,7 +156,7 @@ static void psample_group_destroy(struct psample_group *group)
 {
 	psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
 	list_del(&group->list);
-	kfree(group);
+	kfree_rcu(group, rcu);
 }
 
 static struct psample_group *
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 2c9d7de..5a81dfb4 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -202,14 +202,14 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
 static void qrtr_log_tx_msg(struct qrtr_node *node, struct qrtr_hdr_v1 *hdr,
 			    struct sk_buff *skb)
 {
-	const struct qrtr_ctrl_pkt *pkt;
+	struct qrtr_ctrl_pkt pkt = {0,};
 	u64 pl_buf = 0;
 
-	if (!hdr || !skb || !skb->data)
+	if (!hdr || !skb)
 		return;
 
 	if (hdr->type == QRTR_TYPE_DATA) {
-		pl_buf = *(u64 *)(skb->data + QRTR_HDR_MAX_SIZE);
+		skb_copy_bits(skb, QRTR_HDR_MAX_SIZE, &pl_buf, sizeof(pl_buf));
 		QRTR_INFO(node->ilc,
 			  "TX DATA: Len:0x%x CF:0x%x src[0x%x:0x%x] dst[0x%x:0x%x] [%08x %08x] [%s]\n",
 			  hdr->size, hdr->confirm_rx,
@@ -218,21 +218,21 @@ static void qrtr_log_tx_msg(struct qrtr_node *node, struct qrtr_hdr_v1 *hdr,
 			  (unsigned int)pl_buf, (unsigned int)(pl_buf >> 32),
 			  current->comm);
 	} else {
-		pkt = (struct qrtr_ctrl_pkt *)(skb->data + QRTR_HDR_MAX_SIZE);
+		skb_copy_bits(skb, QRTR_HDR_MAX_SIZE, &pkt, sizeof(pkt));
 		if (hdr->type == QRTR_TYPE_NEW_SERVER ||
 		    hdr->type == QRTR_TYPE_DEL_SERVER)
 			QRTR_INFO(node->ilc,
 				  "TX CTRL: cmd:0x%x SVC[0x%x:0x%x] addr[0x%x:0x%x]\n",
-				  hdr->type, le32_to_cpu(pkt->server.service),
-				  le32_to_cpu(pkt->server.instance),
-				  le32_to_cpu(pkt->server.node),
-				  le32_to_cpu(pkt->server.port));
+				  hdr->type, le32_to_cpu(pkt.server.service),
+				  le32_to_cpu(pkt.server.instance),
+				  le32_to_cpu(pkt.server.node),
+				  le32_to_cpu(pkt.server.port));
 		else if (hdr->type == QRTR_TYPE_DEL_CLIENT ||
 			 hdr->type == QRTR_TYPE_RESUME_TX)
 			QRTR_INFO(node->ilc,
 				  "TX CTRL: cmd:0x%x addr[0x%x:0x%x]\n",
-				  hdr->type, le32_to_cpu(pkt->client.node),
-				  le32_to_cpu(pkt->client.port));
+				  hdr->type, le32_to_cpu(pkt.client.node),
+				  le32_to_cpu(pkt.client.port));
 		else if (hdr->type == QRTR_TYPE_HELLO ||
 			 hdr->type == QRTR_TYPE_BYE)
 			QRTR_INFO(node->ilc,
@@ -241,13 +241,13 @@ static void qrtr_log_tx_msg(struct qrtr_node *node, struct qrtr_hdr_v1 *hdr,
 		else if (hdr->type == QRTR_TYPE_DEL_PROC)
 			QRTR_INFO(node->ilc,
 				  "TX CTRL: cmd:0x%x node[0x%x]\n",
-				  hdr->type, pkt->proc.node);
+				  hdr->type, pkt.proc.node);
 	}
 }
 
 static void qrtr_log_rx_msg(struct qrtr_node *node, struct sk_buff *skb)
 {
-	const struct qrtr_ctrl_pkt *pkt;
+	struct qrtr_ctrl_pkt pkt = {0,};
 	struct qrtr_cb *cb;
 	u64 pl_buf = 0;
 
@@ -257,28 +257,28 @@ static void qrtr_log_rx_msg(struct qrtr_node *node, struct sk_buff *skb)
 	cb = (struct qrtr_cb *)skb->cb;
 
 	if (cb->type == QRTR_TYPE_DATA) {
-		pl_buf = *(u64 *)(skb->data);
+		skb_copy_bits(skb, 0, &pl_buf, sizeof(pl_buf));
 		QRTR_INFO(node->ilc,
 			  "RX DATA: Len:0x%x CF:0x%x src[0x%x:0x%x] dst[0x%x:0x%x] [%08x %08x]\n",
 			  skb->len, cb->confirm_rx, cb->src_node, cb->src_port,
 			  cb->dst_node, cb->dst_port,
 			  (unsigned int)pl_buf, (unsigned int)(pl_buf >> 32));
 	} else {
-		pkt = (struct qrtr_ctrl_pkt *)(skb->data);
+		skb_copy_bits(skb, 0, &pkt, sizeof(pkt));
 		if (cb->type == QRTR_TYPE_NEW_SERVER ||
 		    cb->type == QRTR_TYPE_DEL_SERVER)
 			QRTR_INFO(node->ilc,
 				  "RX CTRL: cmd:0x%x SVC[0x%x:0x%x] addr[0x%x:0x%x]\n",
-				  cb->type, le32_to_cpu(pkt->server.service),
-				  le32_to_cpu(pkt->server.instance),
-				  le32_to_cpu(pkt->server.node),
-				  le32_to_cpu(pkt->server.port));
+				  cb->type, le32_to_cpu(pkt.server.service),
+				  le32_to_cpu(pkt.server.instance),
+				  le32_to_cpu(pkt.server.node),
+				  le32_to_cpu(pkt.server.port));
 		else if (cb->type == QRTR_TYPE_DEL_CLIENT ||
 			 cb->type == QRTR_TYPE_RESUME_TX)
 			QRTR_INFO(node->ilc,
 				  "RX CTRL: cmd:0x%x addr[0x%x:0x%x]\n",
-				  cb->type, le32_to_cpu(pkt->client.node),
-				  le32_to_cpu(pkt->client.port));
+				  cb->type, le32_to_cpu(pkt.client.node),
+				  le32_to_cpu(pkt.client.port));
 		else if (cb->type == QRTR_TYPE_HELLO ||
 			 cb->type == QRTR_TYPE_BYE)
 			QRTR_INFO(node->ilc,
@@ -384,27 +384,29 @@ static void qrtr_tx_resume(struct qrtr_node *node, struct sk_buff *skb)
 {
 	struct qrtr_tx_flow_waiter *waiter;
 	struct qrtr_tx_flow_waiter *temp;
-	struct qrtr_ctrl_pkt *pkt;
+	struct qrtr_ctrl_pkt pkt = {0,};
 	struct qrtr_tx_flow *flow;
 	struct sockaddr_qrtr src;
 	struct qrtr_sock *ipc;
 	struct sk_buff *skbn;
 	unsigned long key;
 
-	pkt = (struct qrtr_ctrl_pkt *)skb->data;
-	if (le32_to_cpu(pkt->cmd) != QRTR_TYPE_RESUME_TX)
+	skb_copy_bits(skb, 0, &pkt, sizeof(pkt));
+	if (le32_to_cpu(pkt.cmd) != QRTR_TYPE_RESUME_TX)
 		return;
 
 	src.sq_family = AF_QIPCRTR;
-	src.sq_node = le32_to_cpu(pkt->client.node);
-	src.sq_port = le32_to_cpu(pkt->client.port);
+	src.sq_node = le32_to_cpu(pkt.client.node);
+	src.sq_port = le32_to_cpu(pkt.client.port);
 	key = (u64)src.sq_node << 32 | src.sq_port;
 
-	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
-	if (!flow)
-		return;
-
 	mutex_lock(&node->qrtr_tx_lock);
+	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
+	if (!flow) {
+		mutex_unlock(&node->qrtr_tx_lock);
+		return;
+	}
+
 	atomic_set(&flow->pending, 0);
 	wake_up_interruptible_all(&node->resume_tx);
 
@@ -545,8 +547,13 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
 	hdr->size = cpu_to_le32(len);
 	hdr->confirm_rx = !!confirm_rx;
 
-	skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
 	qrtr_log_tx_msg(node, hdr, skb);
+	rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
+	if (rc) {
+		pr_err("%s: failed to pad size %lu to %lu rc:%d\n", __func__,
+		       len, ALIGN(len, 4) + sizeof(*hdr), rc);
+		return rc;
+	}
 
 	mutex_lock(&node->ep_lock);
 	if (node->ep)
@@ -609,11 +616,18 @@ static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
 	if (tnode)
 		return;
 
+	/* If the node is not in qrtr_all_epts, it is being released and should
+	 * not be inserted into the lookup table.
+	 */
 	down_write(&qrtr_node_lock);
-	radix_tree_insert(&qrtr_nodes, nid, node);
-
-	if (node->nid == QRTR_EP_NID_AUTO)
-		node->nid = nid;
+	list_for_each_entry(tnode, &qrtr_all_epts, item) {
+		if (tnode == node) {
+			radix_tree_insert(&qrtr_nodes, nid, node);
+			if (node->nid == QRTR_EP_NID_AUTO)
+				node->nid = nid;
+			break;
+		}
+	}
 	up_write(&qrtr_node_lock);
 
 	snprintf(name, sizeof(name), "qrtr_%d", nid);
@@ -686,24 +700,18 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 	struct sk_buff *skb;
 	struct qrtr_cb *cb;
 	unsigned int size;
-	int err = -ENOMEM;
-	int frag = false;
+	int errcode;
 	unsigned int ver;
 	size_t hdrlen;
 
 	if (len & 3)
 		return -EINVAL;
 
-	skb = netdev_alloc_skb(NULL, len);
-	if (!skb) {
-		skb = alloc_skb_with_frags(0, len, 0, &err, GFP_ATOMIC);
-		if (!skb) {
-			pr_err("%s memory allocation failed\n", __func__);
-			return -ENOMEM;
-		}
-		frag = true;
-	}
+	skb = alloc_skb_with_frags(sizeof(*v1), len, 0, &errcode, GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
 
+	skb_reserve(skb, sizeof(*v1));
 	cb = (struct qrtr_cb *)skb->cb;
 
 	/* Version field in v1 is little endian, so this works for both cases */
@@ -758,13 +766,9 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 
 	pm_wakeup_ws_event(node->ws, 0, true);
 
-	if (frag) {
-		skb->data_len = size;
-		skb->len = size;
-		skb_store_bits(skb, 0, data + hdrlen, size);
-	} else {
-		skb_put_data(skb, data + hdrlen, size);
-	}
+	skb->data_len = size;
+	skb->len = size;
+	skb_store_bits(skb, 0, data + hdrlen, size);
 	qrtr_log_rx_msg(node, skb);
 
 	skb_queue_tail(&node->rx_queue, skb);
@@ -806,6 +810,24 @@ static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt)
 static struct qrtr_sock *qrtr_port_lookup(int port);
 static void qrtr_port_put(struct qrtr_sock *ipc);
 
+/* Prepare skb for forwarding by allocating enough linear memory to align and
+ * add the header since qrtr transports do not support fragmented skbs
+ */
+static void qrtr_skb_align_linearize(struct sk_buff *skb)
+{
+	int nhead = ALIGN(skb->len, 4) + sizeof(struct qrtr_hdr_v1);
+	int rc;
+
+	if (!skb_is_nonlinear(skb))
+		return;
+
+	rc = pskb_expand_head(skb, nhead, 0, GFP_KERNEL);
+	skb_condense(skb);
+	if (rc)
+		pr_err("%s: failed:%d to allocate linear skb size:%d\n",
+		       __func__, rc, nhead);
+}
+
 static bool qrtr_must_forward(struct qrtr_node *src,
 			      struct qrtr_node *dst, u32 type)
 {
@@ -836,6 +858,7 @@ static void qrtr_fwd_ctrl_pkt(struct sk_buff *skb)
 	struct qrtr_node *src;
 	struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb;
 
+	qrtr_skb_align_linearize(skb);
 	src = qrtr_node_lookup(cb->src_node);
 	down_read(&qrtr_node_lock);
 	list_for_each_entry(node, &qrtr_all_epts, item) {
@@ -870,6 +893,7 @@ static void qrtr_fwd_pkt(struct sk_buff *skb, struct qrtr_cb *cb)
 	struct sockaddr_qrtr to = {AF_QIPCRTR, cb->dst_node, cb->dst_port};
 	struct qrtr_node *node;
 
+	qrtr_skb_align_linearize(skb);
 	node = qrtr_node_lookup(cb->dst_node);
 	if (!node)
 		return;
@@ -885,7 +909,7 @@ static void qrtr_node_rx_work(struct kthread_work *work)
 {
 	struct qrtr_node *node = container_of(work, struct qrtr_node,
 					      read_data);
-	struct qrtr_ctrl_pkt *pkt;
+	struct qrtr_ctrl_pkt pkt = {0,};
 	struct sk_buff *skb;
 
 	while ((skb = skb_dequeue(&node->rx_queue)) != NULL) {
@@ -899,9 +923,9 @@ static void qrtr_node_rx_work(struct kthread_work *work)
 			qrtr_fwd_ctrl_pkt(skb);
 
 		if (cb->type == QRTR_TYPE_NEW_SERVER &&
-		    skb->len == sizeof(*pkt)) {
-			pkt = (void *)skb->data;
-			qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
+		    skb->len == sizeof(pkt)) {
+			skb_copy_bits(skb, 0, &pkt, sizeof(pkt));
+			qrtr_node_assign(node, le32_to_cpu(pkt.server.node));
 		}
 
 		if (cb->type == QRTR_TYPE_RESUME_TX) {
@@ -1385,7 +1409,7 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 			  unsigned int);
 	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
 	struct sock *sk = sock->sk;
-	struct qrtr_ctrl_pkt *pkt;
+	struct qrtr_ctrl_pkt pkt;
 	struct qrtr_node *node;
 	struct qrtr_node *srv_node;
 	struct sk_buff *skb;
@@ -1478,8 +1502,8 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 		ipc->state = QRTR_STATE_MULTI;
 
 		/* drop new server cmds that are not forwardable to dst node */
-		pkt = (struct qrtr_ctrl_pkt *)skb->data;
-		srv_node = qrtr_node_lookup(pkt->server.node);
+		skb_copy_bits(skb, 0, &pkt, sizeof(pkt));
+		srv_node = qrtr_node_lookup(le32_to_cpu(pkt.server.node));
 		if (!qrtr_must_forward(srv_node, node, type)) {
 			rc = 0;
 			kfree_skb(skb);
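Because qrtr skbs may now be built with paged fragments, every direct cast of skb->data in the file is replaced with skb_copy_bits() into a stack copy. A userspace analogue of that gather-copy (the fragment representation is hypothetical):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct frag { const uint8_t *data; size_t len; };

/* Analogue of skb_copy_bits(): linearize len bytes starting at off from
 * a fragmented buffer into dst; returns -1 if the range is out of bounds. */
static int copy_bits(const struct frag *frags, size_t nfrags,
		     size_t off, void *dst, size_t len)
{
	uint8_t *out = dst;

	for (size_t i = 0; i < nfrags && len; i++) {
		if (off >= frags[i].len) {
			off -= frags[i].len;
			continue;
		}
		size_t n = frags[i].len - off;

		if (n > len)
			n = len;
		memcpy(out, frags[i].data + off, n);
		out += n;
		len -= n;
		off = 0;
	}
	return len ? -1 : 0;
}

int main(void)
{
	uint8_t a[] = { 1, 2 }, b[] = { 3, 4, 5 };
	struct frag frags[] = { { a, sizeof(a) }, { b, sizeof(b) } };
	uint32_t word = 0;

	if (copy_bits(frags, 2, 1, &word, sizeof(word)))
		return 1;
	printf("0x%08x\n", word);	/* bytes 2,3,4,5 in native order */
	return 0;
}
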
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 0f4398e..93e3365 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -239,34 +239,33 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		goto out;
 	}
 
+	/* The transport can be set using SO_RDS_TRANSPORT option before the
+	 * socket is bound.
+	 */
+	if (rs->rs_transport) {
+		trans = rs->rs_transport;
+		if (!trans->laddr_check ||
+		    trans->laddr_check(sock_net(sock->sk),
+				       binding_addr, scope_id) != 0) {
+			ret = -ENOPROTOOPT;
+			goto out;
+		}
+	} else {
+		trans = rds_trans_get_preferred(sock_net(sock->sk),
+						binding_addr, scope_id);
+		if (!trans) {
+			ret = -EADDRNOTAVAIL;
+			pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n",
+					    __func__, binding_addr);
+			goto out;
+		}
+		rs->rs_transport = trans;
+	}
+
 	sock_set_flag(sk, SOCK_RCU_FREE);
 	ret = rds_add_bound(rs, binding_addr, &port, scope_id);
 	if (ret)
-		goto out;
-
-	if (rs->rs_transport) { /* previously bound */
-		trans = rs->rs_transport;
-		if (trans->laddr_check(sock_net(sock->sk),
-				       binding_addr, scope_id) != 0) {
-			ret = -ENOPROTOOPT;
-			rds_remove_bound(rs);
-		} else {
-			ret = 0;
-		}
-		goto out;
-	}
-	trans = rds_trans_get_preferred(sock_net(sock->sk), binding_addr,
-					scope_id);
-	if (!trans) {
-		ret = -EADDRNOTAVAIL;
-		rds_remove_bound(rs);
-		pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n",
-				    __func__, binding_addr);
-		goto out;
-	}
-
-	rs->rs_transport = trans;
-	ret = 0;
+		rs->rs_transport = NULL;
 
 out:
 	release_sock(sk);
diff --git a/net/rds/ib.c b/net/rds/ib.c
index eba75c1..ba33790 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -143,6 +143,9 @@ static void rds_ib_add_one(struct ib_device *device)
 	refcount_set(&rds_ibdev->refcount, 1);
 	INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
 
+	INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
+	INIT_LIST_HEAD(&rds_ibdev->conn_list);
+
 	rds_ibdev->max_wrs = device->attrs.max_qp_wr;
 	rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE);
 
@@ -203,9 +206,6 @@ static void rds_ib_add_one(struct ib_device *device)
 		device->name,
 		rds_ibdev->use_fastreg ? "FRMR" : "FMR");
 
-	INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
-	INIT_LIST_HEAD(&rds_ibdev->conn_list);
-
 	down_write(&rds_ib_devices_lock);
 	list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
 	up_write(&rds_ib_devices_lock);
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 504cd6b..c0b9455 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -803,6 +803,7 @@ void rds6_inc_info_copy(struct rds_incoming *inc,
 
 	minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence);
 	minfo6.len = be32_to_cpu(inc->i_hdr.h_len);
+	minfo6.tos = 0;
 
 	if (flip) {
 		minfo6.laddr = *daddr;
@@ -816,6 +817,8 @@ void rds6_inc_info_copy(struct rds_incoming *inc,
 		minfo6.fport = inc->i_hdr.h_dport;
 	}
 
+	minfo6.flags = 0;
+
 	rds_info_copy(iter, &minfo6, sizeof(minfo6));
 }
 #endif
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index d76e5e5..7319d3c 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -195,7 +195,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
 
 service_in_use:
 	write_unlock(&local->services_lock);
-	rxrpc_put_local(local);
+	rxrpc_unuse_local(local);
 	ret = -EADDRINUSE;
 error_unlock:
 	release_sock(&rx->sk);
@@ -908,7 +908,7 @@ static int rxrpc_release_sock(struct sock *sk)
 	rxrpc_queue_work(&rxnet->service_conn_reaper);
 	rxrpc_queue_work(&rxnet->client_conn_reaper);
 
-	rxrpc_put_local(rx->local);
+	rxrpc_unuse_local(rx->local);
 	rx->local = NULL;
 	key_put(rx->key);
 	rx->key = NULL;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 7444d8b..8d72e94 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -258,7 +258,8 @@ struct rxrpc_security {
  */
 struct rxrpc_local {
 	struct rcu_head		rcu;
-	atomic_t		usage;
+	atomic_t		active_users;	/* Number of users of the local endpoint */
+	atomic_t		usage;		/* Number of references to the structure */
 	struct rxrpc_net	*rxnet;		/* The network ns in which this resides */
 	struct list_head	link;
 	struct socket		*socket;	/* my UDP socket */
@@ -998,6 +999,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc
 struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
 struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
 void rxrpc_put_local(struct rxrpc_local *);
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
+void rxrpc_unuse_local(struct rxrpc_local *);
 void rxrpc_queue_local(struct rxrpc_local *);
 void rxrpc_destroy_all_locals(struct rxrpc_net *);
 
@@ -1057,6 +1060,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *);
 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
 void rxrpc_put_peer(struct rxrpc_peer *);
+void rxrpc_put_peer_locked(struct rxrpc_peer *);
 
 /*
  * proc.c
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index d591f54..7965600 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -1106,8 +1106,12 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
 {
 	_enter("%p,%p", local, skb);
 
-	skb_queue_tail(&local->event_queue, skb);
-	rxrpc_queue_local(local);
+	if (rxrpc_get_local_maybe(local)) {
+		skb_queue_tail(&local->event_queue, skb);
+		rxrpc_queue_local(local);
+	} else {
+		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+	}
 }
 
 /*
@@ -1117,8 +1121,12 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
 {
 	CHECK_SLAB_OKAY(&local->usage);
 
-	skb_queue_tail(&local->reject_queue, skb);
-	rxrpc_queue_local(local);
+	if (rxrpc_get_local_maybe(local)) {
+		skb_queue_tail(&local->reject_queue, skb);
+		rxrpc_queue_local(local);
+	} else {
+		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+	}
 }
 
 /*
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 10317db..c752ad4 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -83,6 +83,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
 	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
 	if (local) {
 		atomic_set(&local->usage, 1);
+		atomic_set(&local->active_users, 1);
 		local->rxnet = rxnet;
 		INIT_LIST_HEAD(&local->link);
 		INIT_WORK(&local->processor, rxrpc_local_processor);
@@ -96,7 +97,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
 		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
 		memcpy(&local->srx, srx, sizeof(*srx));
 		local->srx.srx_service = 0;
-		trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
+		trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
 	}
 
 	_leave(" = %p", local);
@@ -270,11 +271,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 		 * bind the transport socket may still fail if we're attempting
 		 * to use a local address that the dying object is still using.
 		 */
-		if (!rxrpc_get_local_maybe(local)) {
-			cursor = cursor->next;
-			list_del_init(&local->link);
+		if (!rxrpc_use_local(local))
 			break;
-		}
 
 		age = "old";
 		goto found;
@@ -288,7 +286,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 	if (ret < 0)
 		goto sock_error;
 
-	list_add_tail(&local->link, cursor);
+	if (cursor != &rxnet->local_endpoints)
+		list_replace_init(cursor, &local->link);
+	else
+		list_add_tail(&local->link, cursor);
 	age = "new";
 
 found:
@@ -324,7 +325,7 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
 	int n;
 
 	n = atomic_inc_return(&local->usage);
-	trace_rxrpc_local(local, rxrpc_local_got, n, here);
+	trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
 	return local;
 }
 
@@ -338,7 +339,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
 	if (local) {
 		int n = atomic_fetch_add_unless(&local->usage, 1, 0);
 		if (n > 0)
-			trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
+			trace_rxrpc_local(local->debug_id, rxrpc_local_got,
+					  n + 1, here);
 		else
 			local = NULL;
 	}
@@ -346,24 +348,18 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
 }
 
 /*
- * Queue a local endpoint.
+ * Queue a local endpoint and pass the caller's reference to the work item.
  */
 void rxrpc_queue_local(struct rxrpc_local *local)
 {
 	const void *here = __builtin_return_address(0);
+	unsigned int debug_id = local->debug_id;
+	int n = atomic_read(&local->usage);
 
 	if (rxrpc_queue_work(&local->processor))
-		trace_rxrpc_local(local, rxrpc_local_queued,
-				  atomic_read(&local->usage), here);
-}
-
-/*
- * A local endpoint reached its end of life.
- */
-static void __rxrpc_put_local(struct rxrpc_local *local)
-{
-	_enter("%d", local->debug_id);
-	rxrpc_queue_work(&local->processor);
+		trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
+	else
+		rxrpc_put_local(local);
 }
 
 /*
@@ -376,10 +372,47 @@ void rxrpc_put_local(struct rxrpc_local *local)
 
 	if (local) {
 		n = atomic_dec_return(&local->usage);
-		trace_rxrpc_local(local, rxrpc_local_put, n, here);
+		trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
 
 		if (n == 0)
-			__rxrpc_put_local(local);
+			call_rcu(&local->rcu, rxrpc_local_rcu);
+	}
+}
+
+/*
+ * Start using a local endpoint.
+ */
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
+{
+	unsigned int au;
+
+	local = rxrpc_get_local_maybe(local);
+	if (!local)
+		return NULL;
+
+	au = atomic_fetch_add_unless(&local->active_users, 1, 0);
+	if (au == 0) {
+		rxrpc_put_local(local);
+		return NULL;
+	}
+
+	return local;
+}
+
+/*
+ * Cease using a local endpoint.  Once the number of active users reaches 0, we
+ * start the closure of the transport in the work processor.
+ */
+void rxrpc_unuse_local(struct rxrpc_local *local)
+{
+	unsigned int au;
+
+	if (local) {
+		au = atomic_dec_return(&local->active_users);
+		if (au == 0)
+			rxrpc_queue_local(local);
+		else
+			rxrpc_put_local(local);
 	}
 }
 
@@ -397,16 +430,6 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
 
 	_enter("%d", local->debug_id);
 
-	/* We can get a race between an incoming call packet queueing the
-	 * processor again and the work processor starting the destruction
-	 * process which will shut down the UDP socket.
-	 */
-	if (local->dead) {
-		_leave(" [already dead]");
-		return;
-	}
-	local->dead = true;
-
 	mutex_lock(&rxnet->local_mutex);
 	list_del_init(&local->link);
 	mutex_unlock(&rxnet->local_mutex);
@@ -426,13 +449,11 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
 	 */
 	rxrpc_purge_queue(&local->reject_queue);
 	rxrpc_purge_queue(&local->event_queue);
-
-	_debug("rcu local %d", local->debug_id);
-	call_rcu(&local->rcu, rxrpc_local_rcu);
 }
 
 /*
- * Process events on an endpoint
+ * Process events on an endpoint.  The work item carries a ref which
+ * we must release.
  */
 static void rxrpc_local_processor(struct work_struct *work)
 {
@@ -440,13 +461,15 @@ static void rxrpc_local_processor(struct work_struct *work)
 		container_of(work, struct rxrpc_local, processor);
 	bool again;
 
-	trace_rxrpc_local(local, rxrpc_local_processing,
+	trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
 			  atomic_read(&local->usage), NULL);
 
 	do {
 		again = false;
-		if (atomic_read(&local->usage) == 0)
-			return rxrpc_local_destroyer(local);
+		if (atomic_read(&local->active_users) == 0) {
+			rxrpc_local_destroyer(local);
+			break;
+		}
 
 		if (!skb_queue_empty(&local->reject_queue)) {
 			rxrpc_reject_packets(local);
@@ -458,6 +481,8 @@ static void rxrpc_local_processor(struct work_struct *work)
 			again = true;
 		}
 	} while (again);
+
+	rxrpc_put_local(local);
 }
 
 /*
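The local_object.c rework above splits two counts: usage (memory references; the object is RCU-freed when it reaches zero) and active_users (open-transport users; teardown is queued when it reaches zero). A compressed sketch of the two-counter pattern using C11 atomics, not rxrpc's actual locking:

#include <stdatomic.h>
#include <stdio.h>

struct endpoint {
	atomic_int usage;		/* memory references */
	atomic_int active_users;	/* users keeping the socket open */
};

/* Take a memory ref; a real implementation also needs a *_maybe variant
 * that refuses to resurrect a zero count, as rxrpc has. */
static void ep_get(struct endpoint *ep) { atomic_fetch_add(&ep->usage, 1); }

static void ep_put(struct endpoint *ep)
{
	if (atomic_fetch_sub(&ep->usage, 1) == 1)
		printf("last ref gone: free the structure (via RCU in rxrpc)\n");
}

static void ep_unuse(struct endpoint *ep)
{
	if (atomic_fetch_sub(&ep->active_users, 1) == 1)
		printf("last user gone: shut the transport down\n");
	ep_put(ep);	/* each user also held a memory ref */
}

int main(void)
{
	struct endpoint ep = { 1, 1 };	/* one ref, one user, as at alloc */

	ep_get(&ep);			/* e.g. a queued work item's ref */
	ep_put(&ep);
	ep_unuse(&ep);			/* -> teardown, then the final free */
	return 0;
}
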
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index bd2fa3b..dc7fdaf 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -375,7 +375,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
 		spin_lock_bh(&rxnet->peer_hash_lock);
 		list_add_tail(&peer->keepalive_link,
 			      &rxnet->peer_keepalive[slot & mask]);
-		rxrpc_put_peer(peer);
+		rxrpc_put_peer_locked(peer);
 	}
 
 	spin_unlock_bh(&rxnet->peer_hash_lock);
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 5691b7d..71547e8 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -441,6 +441,24 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
 }
 
 /*
+ * Drop a ref on a peer record where the caller already holds the
+ * peer_hash_lock.
+ */
+void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
+{
+	const void *here = __builtin_return_address(0);
+	int n;
+
+	n = atomic_dec_return(&peer->usage);
+	trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
+	if (n == 0) {
+		hash_del_rcu(&peer->hash_link);
+		list_del_init(&peer->keepalive_link);
+		kfree_rcu(peer, rcu);
+	}
+}
+
+/*
  * Make sure all peer records have been discarded.
  */
 void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index be01f9c..5d6ab4f 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -230,6 +230,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
 			rxrpc_set_call_completion(call,
 						  RXRPC_CALL_LOCAL_ERROR,
 						  0, ret);
+			rxrpc_notify_socket(call);
 			goto out;
 		}
 		_debug("need instant resend %d", ret);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 7c4a4b8..f2c4bfc 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1307,11 +1307,16 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
 			  struct netlink_ext_ack *extack)
 {
 	size_t attr_size = 0;
-	int ret = 0;
+	int loop, ret;
 	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
 
-	ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, actions,
-			      &attr_size, true, extack);
+	for (loop = 0; loop < 10; loop++) {
+		ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
+				      actions, &attr_size, true, extack);
+		if (ret != -EAGAIN)
+			break;
+	}
+
 	if (ret < 0)
 		return ret;
 	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
@@ -1361,11 +1366,8 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
 		 */
 		if (n->nlmsg_flags & NLM_F_REPLACE)
 			ovr = 1;
-replay:
 		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
 				     extack);
-		if (ret == -EAGAIN)
-			goto replay;
 		break;
 	case RTM_DELACTION:
 		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
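The act_api.c change bounds the replay: instead of goto-looping on -EAGAIN indefinitely, tcf_action_add() retries tcf_action_init() at most ten times, so a persistently failing module load cannot livelock the netlink handler. A minimal sketch of the bounded-retry shape (try_op() is a stand-in for the real call):

#include <stdio.h>

#define EAGAIN 11

/* Hypothetical transient operation: fails twice, then succeeds. */
static int try_op(void)
{
	static int calls;

	return ++calls < 3 ? -EAGAIN : 0;
}

int main(void)
{
	int ret = -EAGAIN;

	/* Bounded replay, as tcf_action_add() now does: retry transient
	 * -EAGAIN a fixed number of times instead of looping forever. */
	for (int loop = 0; loop < 10; loop++) {
		ret = try_op();
		if (ret != -EAGAIN)
			break;
	}
	printf("ret=%d\n", ret);	/* 0 after two retries */
	return 0;
}
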
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 20fae5c..800846d 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -413,7 +413,7 @@ static __net_init int bpf_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, bpf_net_id);
 
-	return tc_action_net_init(tn, &act_bpf_ops);
+	return tc_action_net_init(net, tn, &act_bpf_ops);
 }
 
 static void __net_exit bpf_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 6054367..538dedd 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -215,7 +215,7 @@ static __net_init int connmark_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, connmark_net_id);
 
-	return tc_action_net_init(tn, &act_connmark_ops);
+	return tc_action_net_init(net, tn, &act_connmark_ops);
 }
 
 static void __net_exit connmark_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 4043719..1e26944 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -678,7 +678,7 @@ static __net_init int csum_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, csum_net_id);
 
-	return tc_action_net_init(tn, &act_csum_ops);
+	return tc_action_net_init(net, tn, &act_csum_ops);
 }
 
 static void __net_exit csum_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 72d3347..dfef962 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -263,7 +263,7 @@ static __net_init int gact_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, gact_net_id);
 
-	return tc_action_net_init(tn, &act_gact_ops);
+	return tc_action_net_init(net, tn, &act_gact_ops);
 }
 
 static void __net_exit gact_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 24047e0..bac353b 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -887,7 +887,7 @@ static __net_init int ife_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, ife_net_id);
 
-	return tc_action_net_init(tn, &act_ife_ops);
+	return tc_action_net_init(net, tn, &act_ife_ops);
 }
 
 static void __net_exit ife_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 334f3a05..01d3669 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -65,12 +65,13 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
 	return 0;
 }
 
-static void ipt_destroy_target(struct xt_entry_target *t)
+static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
 {
 	struct xt_tgdtor_param par = {
 		.target   = t->u.kernel.target,
 		.targinfo = t->data,
 		.family   = NFPROTO_IPV4,
+		.net      = net,
 	};
 	if (par.target->destroy != NULL)
 		par.target->destroy(&par);
@@ -82,7 +83,7 @@ static void tcf_ipt_release(struct tc_action *a)
 	struct tcf_ipt *ipt = to_ipt(a);
 
 	if (ipt->tcfi_t) {
-		ipt_destroy_target(ipt->tcfi_t);
+		ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net);
 		kfree(ipt->tcfi_t);
 	}
 	kfree(ipt->tcfi_tname);
@@ -182,7 +183,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
 
 	spin_lock_bh(&ipt->tcf_lock);
 	if (ret != ACT_P_CREATED) {
-		ipt_destroy_target(ipt->tcfi_t);
+		ipt_destroy_target(ipt->tcfi_t, net);
 		kfree(ipt->tcfi_tname);
 		kfree(ipt->tcfi_t);
 	}
@@ -353,7 +354,7 @@ static __net_init int ipt_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, ipt_net_id);
 
-	return tc_action_net_init(tn, &act_ipt_ops);
+	return tc_action_net_init(net, tn, &act_ipt_ops);
 }
 
 static void __net_exit ipt_exit_net(struct list_head *net_list)
@@ -403,7 +404,7 @@ static __net_init int xt_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, xt_net_id);
 
-	return tc_action_net_init(tn, &act_xt_ops);
+	return tc_action_net_init(net, tn, &act_xt_ops);
 }
 
 static void __net_exit xt_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 548614b..399e3be 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -419,7 +419,7 @@ static __net_init int mirred_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, mirred_net_id);
 
-	return tc_action_net_init(tn, &act_mirred_ops);
+	return tc_action_net_init(net, tn, &act_mirred_ops);
 }
 
 static void __net_exit mirred_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 6198289..d1b47a1 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -317,7 +317,7 @@ static __net_init int nat_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, nat_net_id);
 
-	return tc_action_net_init(tn, &act_nat_ops);
+	return tc_action_net_init(net, tn, &act_nat_ops);
 }
 
 static void __net_exit nat_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 82d258b..33c0cc5 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -488,7 +488,7 @@ static __net_init int pedit_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, pedit_net_id);
 
-	return tc_action_net_init(tn, &act_pedit_ops);
+	return tc_action_net_init(net, tn, &act_pedit_ops);
 }
 
 static void __net_exit pedit_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 997c34d..4db2595 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -342,7 +342,7 @@ static __net_init int police_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, police_net_id);
 
-	return tc_action_net_init(tn, &act_police_ops);
+	return tc_action_net_init(net, tn, &act_police_ops);
 }
 
 static void __net_exit police_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index ac37654..ea0738c 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -99,7 +99,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 	s->tcf_action = parm->action;
 	s->rate = rate;
 	s->psample_group_num = psample_group_num;
-	RCU_INIT_POINTER(s->psample_group, psample_group);
+	rcu_swap_protected(s->psample_group, psample_group,
+			   lockdep_is_held(&s->tcf_lock));
 
 	if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
 		s->truncate = true;
@@ -107,6 +108,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 	}
 	spin_unlock_bh(&s->tcf_lock);
 
+	if (psample_group)
+		psample_group_put(psample_group);
 	if (ret == ACT_P_CREATED)
 		tcf_idr_insert(tn, *a);
 	return ret;
@@ -131,6 +134,7 @@ static bool tcf_sample_dev_ok_push(struct net_device *dev)
 	case ARPHRD_TUNNEL6:
 	case ARPHRD_SIT:
 	case ARPHRD_IPGRE:
+	case ARPHRD_IP6GRE:
 	case ARPHRD_VOID:
 	case ARPHRD_NONE:
 		return false;
@@ -255,7 +259,7 @@ static __net_init int sample_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, sample_net_id);
 
-	return tc_action_net_init(tn, &act_sample_ops);
+	return tc_action_net_init(net, tn, &act_sample_ops);
 }
 
 static void __net_exit sample_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 658efae..b418ef6 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -215,7 +215,7 @@ static __net_init int simp_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, simp_net_id);
 
-	return tc_action_net_init(tn, &act_simp_ops);
+	return tc_action_net_init(net, tn, &act_simp_ops);
 }
 
 static void __net_exit simp_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 7709710..a80179c 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -316,7 +316,7 @@ static __net_init int skbedit_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, skbedit_net_id);
 
-	return tc_action_net_init(tn, &act_skbedit_ops);
+	return tc_action_net_init(net, tn, &act_skbedit_ops);
 }
 
 static void __net_exit skbedit_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 3038493..21d1952 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -277,7 +277,7 @@ static __net_init int skbmod_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, skbmod_net_id);
 
-	return tc_action_net_init(tn, &act_skbmod_ops);
+	return tc_action_net_init(net, tn, &act_skbmod_ops);
 }
 
 static void __net_exit skbmod_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 66bfe57..43309ff 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -579,7 +579,7 @@ static __net_init int tunnel_key_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
 
-	return tc_action_net_init(tn, &act_tunnel_key_ops);
+	return tc_action_net_init(net, tn, &act_tunnel_key_ops);
 }
 
 static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index da993edd..41528b9 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -324,7 +324,7 @@ static __net_init int vlan_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, vlan_net_id);
 
-	return tc_action_net_init(tn, &act_vlan_ops);
+	return tc_action_net_init(net, tn, &act_vlan_ops);
 }
 
 static void __net_exit vlan_exit_net(struct list_head *net_list)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 4159bcb..e217ebc 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -2038,8 +2038,10 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
 void tcf_exts_destroy(struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
-	tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
-	kfree(exts->actions);
+	if (exts->actions) {
+		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
+		kfree(exts->actions);
+	}
 	exts->nr_actions = 0;
 #endif
 }
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index be7cd14..84fdc48 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1308,7 +1308,8 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
 }
 
 const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
-	[TCA_KIND]		= { .type = NLA_STRING },
+	[TCA_KIND]		= { .type = NLA_NUL_STRING,
+				    .len = IFNAMSIZ - 1 },
 	[TCA_RATE]		= { .type = NLA_BINARY,
 				    .len = sizeof(struct tc_estimator) },
 	[TCA_STAB]		= { .type = NLA_NESTED },
@@ -1831,6 +1832,8 @@ static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
 	cl = cops->find(q, portid);
 	if (!cl)
 		return;
+	if (!cops->tcf_block)
+		return;
 	block = cops->tcf_block(q, cl, NULL);
 	if (!block)
 		return;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index f42025d..ebc3c8c 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1132,6 +1132,32 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
 	[TCA_CBQ_POLICE]	= { .len = sizeof(struct tc_cbq_police) },
 };
 
+static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1],
+			 struct nlattr *opt,
+			 struct netlink_ext_ack *extack)
+{
+	int err;
+
+	if (!opt) {
+		NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
+		return -EINVAL;
+	}
+
+	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
+	if (err < 0)
+		return err;
+
+	if (tb[TCA_CBQ_WRROPT]) {
+		const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);
+
+		if (wrr->priority > TC_CBQ_MAXPRIO) {
+			NL_SET_ERR_MSG(extack, "priority is bigger than TC_CBQ_MAXPRIO");
+			err = -EINVAL;
+		}
+	}
+	return err;
+}
+
 static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
 		    struct netlink_ext_ack *extack)
 {
@@ -1144,12 +1170,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
 	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
 	q->delay_timer.function = cbq_undelay;
 
-	if (!opt) {
-		NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
-		return -EINVAL;
-	}
-
-	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
+	err = cbq_opt_parse(tb, opt, extack);
 	if (err < 0)
 		return err;
 
@@ -1466,12 +1487,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 	struct cbq_class *parent;
 	struct qdisc_rate_table *rtab = NULL;
 
-	if (!opt) {
-		NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing");
-		return -EINVAL;
-	}
-
-	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
+	err = cbq_opt_parse(tb, opt, extack);
 	if (err < 0)
 		return err;
 
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 049714c..84c948c 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -357,6 +357,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
 		goto errout;
 
 	err = -EINVAL;
+	if (!tb[TCA_DSMARK_INDICES])
+		goto errout;
 	indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
 
 	if (hweight32(indices) != 1)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 77b289d..30e32df 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -49,6 +49,8 @@ EXPORT_SYMBOL(default_qdisc_ops);
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
+#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
+
 static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
 {
 	const struct netdev_queue *txq = q->dev_queue;
@@ -74,7 +76,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
 				q->q.qlen--;
 			}
 		} else {
-			skb = NULL;
+			skb = SKB_XOFF_MAGIC;
 		}
 	}
 
@@ -272,8 +274,11 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 		return skb;
 
 	skb = qdisc_dequeue_skb_bad_txq(q);
-	if (unlikely(skb))
+	if (unlikely(skb)) {
+		if (skb == SKB_XOFF_MAGIC)
+			return NULL;
 		goto bulk;
+	}
 	skb = q->dequeue(q);
 	if (skb) {
 bulk:
@@ -942,9 +947,13 @@ void qdisc_free(struct Qdisc *qdisc)
 
 void qdisc_destroy(struct Qdisc *qdisc)
 {
-	const struct Qdisc_ops  *ops = qdisc->ops;
+	const struct Qdisc_ops *ops;
 	struct sk_buff *skb, *tmp;
 
+	if (!qdisc)
+		return;
+	ops = qdisc->ops;
+
 	if (qdisc->flags & TCQ_F_BUILTIN ||
 	    !refcount_dec_and_test(&qdisc->refcnt))
 		return;
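Note: SKB_XOFF_MAGIC lets __skb_dequeue_bad_txq() report three states rather than two: a recovered skb, NULL for "bad-txq empty", and the sentinel for "txq still stopped". A minimal sketch of the idiom (kernel C; it mirrors the dequeue_skb() hunk above):

	#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)	/* never a valid address */

	skb = qdisc_dequeue_skb_bad_txq(q);
	if (unlikely(skb)) {
		if (skb == SKB_XOFF_MAGIC)
			return NULL;	/* stopped: retry later, keep ordering */
		goto bulk;		/* real skb recovered from the bad txq */
	}

The value 1UL works as a sentinel because no slab-allocated sk_buff can ever live at that address.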
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index c3a8388..a80fe8a 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -529,7 +529,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
 		new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);
 
 	non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
-	if (non_hh_quantum > INT_MAX)
+	if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX)
 		return -EINVAL;
 
 	sch_tree_lock(sch);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 4dfe10b9..86350fe 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -749,7 +749,7 @@ static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
 	struct disttable *d;
 	int i;
 
-	if (n > NETEM_DIST_MAX)
+	if (!n || n > NETEM_DIST_MAX)
 		return -EINVAL;
 
 	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index d97b2b4..6d36f74 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1350,7 +1350,7 @@ static int __net_init sctp_ctrlsock_init(struct net *net)
 	return status;
 }
 
-static void __net_init sctp_ctrlsock_exit(struct net *net)
+static void __net_exit sctp_ctrlsock_exit(struct net *net)
 {
 	/* Free the control endpoint.  */
 	inet_ctl_sock_destroy(net->sctp.ctl_sock);
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 3131b41..de8a82b 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -561,8 +561,8 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
 	 */
 	if (net->sctp.pf_enable &&
 	   (transport->state == SCTP_ACTIVE) &&
-	   (asoc->pf_retrans < transport->pathmaxrxt) &&
-	   (transport->error_count > asoc->pf_retrans)) {
+	   (transport->error_count < transport->pathmaxrxt) &&
+	   (transport->error_count > transport->pf_retrans)) {
 
 		sctp_assoc_control_transport(asoc, transport,
 					     SCTP_TRANSPORT_PF,
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9f5b4e5..227b050 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -8957,7 +8957,7 @@ struct proto sctp_prot = {
 	.backlog_rcv =	sctp_backlog_rcv,
 	.hash        =	sctp_hash,
 	.unhash      =	sctp_unhash,
-	.get_port    =	sctp_get_port,
+	.no_autobind =	true,
 	.obj_size    =  sizeof(struct sctp_sock),
 	.useroffset  =  offsetof(struct sctp_sock, subscribe),
 	.usersize    =  offsetof(struct sctp_sock, initmsg) -
@@ -8999,7 +8999,7 @@ struct proto sctpv6_prot = {
 	.backlog_rcv	= sctp_backlog_rcv,
 	.hash		= sctp_hash,
 	.unhash		= sctp_unhash,
-	.get_port	= sctp_get_port,
+	.no_autobind	= true,
 	.obj_size	= sizeof(struct sctp6_sock),
 	.useroffset	= offsetof(struct sctp6_sock, sctp.subscribe),
 	.usersize	= offsetof(struct sctp6_sock, sctp.initmsg) -
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 0da5793..87061a4b 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -416,6 +416,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
 		nstr_list[i] = htons(str_list[i]);
 
 	if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
+		kfree(nstr_list);
 		retval = -EAGAIN;
 		goto out;
 	}
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index d8366ed5..28361ae 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -75,13 +75,11 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 	struct smc_connection *conn = &smc->conn;
 	struct sock *sk = &smc->sk;
-	bool noblock;
 	long timeo;
 	int rc = 0;
 
 	/* similar to sk_stream_wait_memory */
 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
-	noblock = timeo ? false : true;
 	add_wait_queue(sk_sleep(sk), &wait);
 	while (1) {
 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -96,8 +94,8 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
 			break;
 		}
 		if (!timeo) {
-			if (noblock)
-				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+			/* ensure EPOLLOUT is subsequently generated */
+			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 			rc = -EAGAIN;
 			break;
 		}
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index b88d48d..0f1eaed 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -75,6 +75,7 @@ void tipc_set_node_addr(struct net *net, u32 addr)
 		tipc_set_node_id(net, node_id);
 	}
 	tn->trial_addr = addr;
+	tn->addr_trial_end = jiffies;
 	pr_info("32-bit node address hash set to %x\n", addr);
 }
 
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 836727e..6344aca 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -161,6 +161,7 @@ struct tipc_link {
 	struct {
 		u16 len;
 		u16 limit;
+		struct sk_buff *target_bskb;
 	} backlog[5];
 	u16 snd_nxt;
 	u16 last_retransm;
@@ -846,6 +847,7 @@ static void link_prepare_wakeup(struct tipc_link *l)
 void tipc_link_reset(struct tipc_link *l)
 {
 	struct sk_buff_head list;
+	u32 imp;
 
 	__skb_queue_head_init(&list);
 
@@ -864,11 +866,10 @@ void tipc_link_reset(struct tipc_link *l)
 	__skb_queue_purge(&l->transmq);
 	__skb_queue_purge(&l->deferdq);
 	__skb_queue_purge(&l->backlogq);
-	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
-	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
-	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
-	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
-	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
+	for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
+		l->backlog[imp].len = 0;
+		l->backlog[imp].target_bskb = NULL;
+	}
 	kfree_skb(l->reasm_buf);
 	kfree_skb(l->failover_reasm_skb);
 	l->reasm_buf = NULL;
@@ -909,7 +910,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
 	struct sk_buff_head *transmq = &l->transmq;
 	struct sk_buff_head *backlogq = &l->backlogq;
-	struct sk_buff *skb, *_skb, *bskb;
+	struct sk_buff *skb, *_skb, **tskb;
 	int pkt_cnt = skb_queue_len(list);
 	int rc = 0;
 
@@ -955,19 +956,21 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
 			seqno++;
 			continue;
 		}
-		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
+		tskb = &l->backlog[imp].target_bskb;
+		if (tipc_msg_bundle(*tskb, hdr, mtu)) {
 			kfree_skb(__skb_dequeue(list));
 			l->stats.sent_bundled++;
 			continue;
 		}
-		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
+		if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) {
 			kfree_skb(__skb_dequeue(list));
-			__skb_queue_tail(backlogq, bskb);
-			l->backlog[msg_importance(buf_msg(bskb))].len++;
+			__skb_queue_tail(backlogq, *tskb);
+			l->backlog[imp].len++;
 			l->stats.sent_bundled++;
 			l->stats.sent_bundles++;
 			continue;
 		}
+		l->backlog[imp].target_bskb = NULL;
 		l->backlog[imp].len += skb_queue_len(list);
 		skb_queue_splice_tail_init(list, backlogq);
 	}
@@ -983,6 +986,7 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
 	u16 seqno = l->snd_nxt;
 	u16 ack = l->rcv_nxt - 1;
 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
+	u32 imp;
 
 	while (skb_queue_len(&l->transmq) < l->window) {
 		skb = skb_peek(&l->backlogq);
@@ -993,7 +997,10 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
 			break;
 		__skb_dequeue(&l->backlogq);
 		hdr = buf_msg(skb);
-		l->backlog[msg_importance(hdr)].len--;
+		imp = msg_importance(hdr);
+		l->backlog[imp].len--;
+		if (unlikely(skb == l->backlog[imp].target_bskb))
+			l->backlog[imp].target_bskb = NULL;
 		__skb_queue_tail(&l->transmq, skb);
 		__skb_queue_tail(xmitq, _skb);
 		TIPC_SKB_CB(skb)->ackers = l->ackers;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index b618910..cbccf17 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -484,10 +484,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb,  struct tipc_msg *msg,
 	bmsg = buf_msg(_skb);
 	tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
 		      INT_H_SIZE, dnode);
-	if (msg_isdata(msg))
-		msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
-	else
-		msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
+	msg_set_importance(bmsg, msg_importance(msg));
 	msg_set_seqno(bmsg, msg_seqno(msg));
 	msg_set_ack(bmsg, msg_ack(msg));
 	msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 3cfeb9d..e0a3dd4 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -221,7 +221,8 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
 		       publ->key);
 	}
 
-	kfree_rcu(p, rcu);
+	if (p)
+		kfree_rcu(p, rcu);
 }
 
 /**
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 4c0ac79..3288bdf 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -301,6 +301,8 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
 #else
 	{
 #endif
+		if (sk->sk_write_space == tls_write_space)
+			sk->sk_write_space = ctx->sk_write_space;
 		tls_ctx_free(ctx);
 		ctx = NULL;
 	}
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 6848a81..bbb2da7 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -354,7 +354,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
-	int ret = 0;
+	int ret;
 	int required_size;
 	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 	bool eor = !(msg->msg_flags & MSG_MORE);
@@ -370,7 +370,8 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
 	lock_sock(sk);
 
-	if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
+	ret = tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo);
+	if (ret)
 		goto send_end;
 
 	if (unlikely(msg->msg_controllen)) {
@@ -505,7 +506,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
-	int ret = 0;
+	int ret;
 	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 	bool eor;
 	size_t orig_size = size;
@@ -525,7 +526,8 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
 
 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
-	if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
+	ret = tls_complete_pending_work(sk, tls_ctx, flags, &timeo);
+	if (ret)
 		goto sendpage_end;
 
 	/* Call the sk_stream functions to manage the sndbuf mem. */
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 2e30bf1..2a4613b 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -641,7 +641,7 @@ struct sock *__vsock_create(struct net *net,
 }
 EXPORT_SYMBOL_GPL(__vsock_create);
 
-static void __vsock_release(struct sock *sk)
+static void __vsock_release(struct sock *sk, int level)
 {
 	if (sk) {
 		struct sk_buff *skb;
@@ -651,9 +651,17 @@ static void __vsock_release(struct sock *sk)
 		vsk = vsock_sk(sk);
 		pending = NULL;	/* Compiler warning. */
 
+		/* The release call is supposed to use lock_sock_nested()
+		 * rather than lock_sock(), if a sock lock should be acquired.
+		 */
 		transport->release(vsk);
 
-		lock_sock(sk);
+		/* When "level" is SINGLE_DEPTH_NESTING, use the nested
+		 * version to avoid the warning "possible recursive locking
+		 * detected". When "level" is 0, lock_sock_nested(sk, level)
+		 * is the same as lock_sock(sk).
+		 */
+		lock_sock_nested(sk, level);
 		sock_orphan(sk);
 		sk->sk_shutdown = SHUTDOWN_MASK;
 
@@ -662,7 +670,7 @@ static void __vsock_release(struct sock *sk)
 
 		/* Clean up any sockets that never were accepted. */
 		while ((pending = vsock_dequeue_accept(sk)) != NULL) {
-			__vsock_release(pending);
+			__vsock_release(pending, SINGLE_DEPTH_NESTING);
 			sock_put(pending);
 		}
 
@@ -711,7 +719,7 @@ EXPORT_SYMBOL_GPL(vsock_stream_has_space);
 
 static int vsock_release(struct socket *sock)
 {
-	__vsock_release(sock->sk);
+	__vsock_release(sock->sk, 0);
 	sock->sk = NULL;
 	sock->state = SS_FREE;
 
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index 9c7da81..70350dc 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -320,6 +320,11 @@ static void hvs_close_connection(struct vmbus_channel *chan)
 	lock_sock(sk);
 	hvs_do_close_lock_held(vsock_sk(sk), true);
 	release_sock(sk);
+
+	/* Release the refcnt for the channel that's opened in
+	 * hvs_open_connection().
+	 */
+	sock_put(sk);
 }
 
 static void hvs_open_connection(struct vmbus_channel *chan)
@@ -388,6 +393,9 @@ static void hvs_open_connection(struct vmbus_channel *chan)
 	}
 
 	set_per_channel_state(chan, conn_from_host ? new : sk);
+
+	/* This reference will be dropped by hvs_close_connection(). */
+	sock_hold(conn_from_host ? new : sk);
 	vmbus_set_chn_rescind_callback(chan, hvs_close_connection);
 
 	/* Set the pending send size to max packet size to always get
@@ -530,7 +538,7 @@ static void hvs_release(struct vsock_sock *vsk)
 	struct sock *sk = sk_vsock(vsk);
 	bool remove_sock;
 
-	lock_sock(sk);
+	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 	remove_sock = hvs_close_lock_held(vsk);
 	release_sock(sk);
 	if (remove_sock)
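Note: in the vsock hunks above, lock_sock_nested(sk, 0) behaves exactly like lock_sock(sk), while SINGLE_DEPTH_NESTING (1, from include/linux/lockdep.h) marks the inner acquisition as a separate lockdep subclass. An illustrative sketch of the shape being annotated (variable names are not from the patch):

	lock_sock(parent);				/* subclass 0 */
	while ((child = vsock_dequeue_accept(parent)) != NULL) {
		/* The parent's sk_lock is held; tell lockdep the child's
		 * lock is one level deeper, not a recursive acquisition. */
		lock_sock_nested(child, SINGLE_DEPTH_NESTING);
		release_sock(child);
		sock_put(child);
	}
	release_sock(parent);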
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index e30f537..3c199f7 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -791,7 +791,7 @@ void virtio_transport_release(struct vsock_sock *vsk)
 	struct sock *sk = &vsk->sk;
 	bool remove_sock = true;
 
-	lock_sock(sk);
+	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 	if (sk->sk_type == SOCK_STREAM)
 		remove_sock = virtio_transport_close(vsk);
 
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 2db713d..5d5333a 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -13,6 +13,11 @@
 #include "core.h"
 #include "rdev-ops.h"
 
+static bool cfg80211_valid_60g_freq(u32 freq)
+{
+	return freq >= 58320 && freq <= 70200;
+}
+
 void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
 			     struct ieee80211_channel *chan,
 			     enum nl80211_channel_type chan_type)
@@ -22,6 +27,8 @@ void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
 
 	chandef->chan = chan;
 	chandef->center_freq2 = 0;
+	chandef->edmg.bw_config = 0;
+	chandef->edmg.channels = 0;
 
 	switch (chan_type) {
 	case NL80211_CHAN_NO_HT:
@@ -46,6 +53,91 @@ void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
 }
 EXPORT_SYMBOL(cfg80211_chandef_create);
 
+static bool cfg80211_edmg_chandef_valid(const struct cfg80211_chan_def *chandef)
+{
+	int max_contiguous = 0;
+	int num_of_enabled = 0;
+	int contiguous = 0;
+	int i;
+
+	if (!chandef->edmg.channels || !chandef->edmg.bw_config)
+		return false;
+
+	if (!cfg80211_valid_60g_freq(chandef->chan->center_freq))
+		return false;
+
+	for (i = 0; i < 6; i++) {
+		if (chandef->edmg.channels & BIT(i)) {
+			contiguous++;
+			num_of_enabled++;
+		} else {
+			contiguous = 0;
+		}
+
+		max_contiguous = max(contiguous, max_contiguous);
+	}
+	/* basic verification of edmg configuration according to
+	 * IEEE P802.11ay/D4.0 section 9.4.2.251
+	 */
+	/* check bw_config against contiguous edmg channels */
+	switch (chandef->edmg.bw_config) {
+	case IEEE80211_EDMG_BW_CONFIG_4:
+	case IEEE80211_EDMG_BW_CONFIG_8:
+	case IEEE80211_EDMG_BW_CONFIG_12:
+		if (max_contiguous < 1)
+			return false;
+		break;
+	case IEEE80211_EDMG_BW_CONFIG_5:
+	case IEEE80211_EDMG_BW_CONFIG_9:
+	case IEEE80211_EDMG_BW_CONFIG_13:
+		if (max_contiguous < 2)
+			return false;
+		break;
+	case IEEE80211_EDMG_BW_CONFIG_6:
+	case IEEE80211_EDMG_BW_CONFIG_10:
+	case IEEE80211_EDMG_BW_CONFIG_14:
+		if (max_contiguous < 3)
+			return false;
+		break;
+	case IEEE80211_EDMG_BW_CONFIG_7:
+	case IEEE80211_EDMG_BW_CONFIG_11:
+	case IEEE80211_EDMG_BW_CONFIG_15:
+		if (max_contiguous < 4)
+			return false;
+		break;
+
+	default:
+		return false;
+	}
+
+	/* check bw_config against aggregated (non contiguous) edmg channels */
+	switch (chandef->edmg.bw_config) {
+	case IEEE80211_EDMG_BW_CONFIG_4:
+	case IEEE80211_EDMG_BW_CONFIG_5:
+	case IEEE80211_EDMG_BW_CONFIG_6:
+	case IEEE80211_EDMG_BW_CONFIG_7:
+		break;
+	case IEEE80211_EDMG_BW_CONFIG_8:
+	case IEEE80211_EDMG_BW_CONFIG_9:
+	case IEEE80211_EDMG_BW_CONFIG_10:
+	case IEEE80211_EDMG_BW_CONFIG_11:
+		if (num_of_enabled < 2)
+			return false;
+		break;
+	case IEEE80211_EDMG_BW_CONFIG_12:
+	case IEEE80211_EDMG_BW_CONFIG_13:
+	case IEEE80211_EDMG_BW_CONFIG_14:
+	case IEEE80211_EDMG_BW_CONFIG_15:
+		if (num_of_enabled < 4 || max_contiguous < 2)
+			return false;
+		break;
+	default:
+		return false;
+	}
+
+	return true;
+}
+
 bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
 {
 	u32 control_freq;
@@ -111,6 +203,10 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
 		return false;
 	}
 
+	if (cfg80211_chandef_is_edmg(chandef) &&
+	    !cfg80211_edmg_chandef_valid(chandef))
+		return false;
+
 	return true;
 }
 EXPORT_SYMBOL(cfg80211_chandef_valid);
@@ -720,12 +816,66 @@ static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
 	return true;
 }
 
+/* check if the operating channels are valid and supported */
+static bool cfg80211_edmg_usable(struct wiphy *wiphy, u8 edmg_channels,
+				 enum ieee80211_edmg_bw_config edmg_bw_config,
+				 int primary_channel,
+				 struct ieee80211_edmg *edmg_cap)
+{
+	struct ieee80211_channel *chan;
+	int i, freq;
+	int channels_counter = 0;
+
+	if (!edmg_channels && !edmg_bw_config)
+		return true;
+
+	if ((!edmg_channels && edmg_bw_config) ||
+	    (edmg_channels && !edmg_bw_config))
+		return false;
+
+	if (!(edmg_channels & BIT(primary_channel - 1)))
+		return false;
+
+	/* 60GHz channels 1..6 */
+	for (i = 0; i < 6; i++) {
+		if (!(edmg_channels & BIT(i)))
+			continue;
+
+		if (!(edmg_cap->channels & BIT(i)))
+			return false;
+
+		channels_counter++;
+
+		freq = ieee80211_channel_to_frequency(i + 1,
+						      NL80211_BAND_60GHZ);
+		chan = ieee80211_get_channel(wiphy, freq);
+		if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
+			return false;
+	}
+
+	/* IEEE802.11 allows max 4 channels */
+	if (channels_counter > 4)
+		return false;
+
+	/* check bw_config is a subset of what driver supports
+	 * (see IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13)
+	 */
+	if ((edmg_bw_config % 4) > (edmg_cap->bw_config % 4))
+		return false;
+
+	if (edmg_bw_config > edmg_cap->bw_config)
+		return false;
+
+	return true;
+}
+
 bool cfg80211_chandef_usable(struct wiphy *wiphy,
 			     const struct cfg80211_chan_def *chandef,
 			     u32 prohibited_flags)
 {
 	struct ieee80211_sta_ht_cap *ht_cap;
 	struct ieee80211_sta_vht_cap *vht_cap;
+	struct ieee80211_edmg *edmg_cap;
 	u32 width, control_freq, cap;
 
 	if (WARN_ON(!cfg80211_chandef_valid(chandef)))
@@ -733,6 +883,15 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
 
 	ht_cap = &wiphy->bands[chandef->chan->band]->ht_cap;
 	vht_cap = &wiphy->bands[chandef->chan->band]->vht_cap;
+	edmg_cap = &wiphy->bands[chandef->chan->band]->edmg_cap;
+
+	if (edmg_cap->channels &&
+	    !cfg80211_edmg_usable(wiphy,
+				  chandef->edmg.channels,
+				  chandef->edmg.bw_config,
+				  chandef->chan->hw_value,
+				  edmg_cap))
+		return false;
 
 	control_freq = chandef->chan->center_freq;
 
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 2a46ec3..6866078 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1335,10 +1335,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
 		}
 		break;
 	case NETDEV_PRE_UP:
-		if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)) &&
-		    !(wdev->iftype == NL80211_IFTYPE_AP_VLAN &&
-		      rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP &&
-		      wdev->use_4addr))
+		if (!cfg80211_iftype_allowed(wdev->wiphy, wdev->iftype,
+					     wdev->use_4addr, 0))
 			return notifier_from_errno(-EOPNOTSUPP);
 
 		if (rfkill_blocked(rdev->rfkill))
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 646d50c..e7e7ffc 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -200,6 +200,38 @@ cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
 	return __cfg80211_rdev_from_attrs(netns, info->attrs);
 }
 
+static int validate_beacon_head(const struct nlattr *attr,
+				struct netlink_ext_ack *extack)
+{
+	const u8 *data = nla_data(attr);
+	unsigned int len = nla_len(attr);
+	const struct element *elem;
+	const struct ieee80211_mgmt *mgmt = (void *)data;
+	unsigned int fixedlen = offsetof(struct ieee80211_mgmt,
+					 u.beacon.variable);
+
+	if (len < fixedlen)
+		goto err;
+
+	if (ieee80211_hdrlen(mgmt->frame_control) !=
+	    offsetof(struct ieee80211_mgmt, u.beacon))
+		goto err;
+
+	data += fixedlen;
+	len -= fixedlen;
+
+	for_each_element(elem, data, len) {
+		/* nothing */
+	}
+
+	if (for_each_element_completed(elem, data, len))
+		return 0;
+
+err:
+	NL_SET_ERR_MSG_ATTR(extack, attr, "malformed beacon head");
+	return -EINVAL;
+}
+
 /* policy for the attributes */
 static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
 	[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
@@ -209,6 +241,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
 
 	[NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 },
 	[NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 },
+	[NL80211_ATTR_WIPHY_EDMG_CHANNELS] = { .type = NLA_U8 },
+	[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG] = { .type = NLA_U8 },
 	[NL80211_ATTR_CHANNEL_WIDTH] = { .type = NLA_U32 },
 	[NL80211_ATTR_CENTER_FREQ1] = { .type = NLA_U32 },
 	[NL80211_ATTR_CENTER_FREQ2] = { .type = NLA_U32 },
@@ -1410,6 +1444,14 @@ static int nl80211_send_band_rateinfo(struct sk_buff *msg,
 		nla_nest_end(msg, nl_iftype_data);
 	}
 
+	/* add EDMG info */
+	if (sband->edmg_cap.channels &&
+	    (nla_put_u8(msg, NL80211_BAND_ATTR_EDMG_CHANNELS,
+		       sband->edmg_cap.channels) ||
+	    nla_put_u8(msg, NL80211_BAND_ATTR_EDMG_BW_CONFIG,
+		       sband->edmg_cap.bw_config)))
+		return -ENOBUFS;
+
 	/* add bitrates */
 	nl_rates = nla_nest_start(msg, NL80211_BAND_ATTR_RATES);
 	if (!nl_rates)
@@ -2300,6 +2343,8 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
 
 	control_freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
 
+	memset(chandef, 0, sizeof(*chandef));
+
 	chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq);
 	chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
 	chandef->center_freq1 = control_freq;
@@ -2348,6 +2393,18 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
 					info->attrs[NL80211_ATTR_CENTER_FREQ2]);
 	}
 
+	if (info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]) {
+		chandef->edmg.channels =
+		      nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]);
+
+		if (info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG])
+			chandef->edmg.bw_config =
+		     nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG]);
+	} else {
+		chandef->edmg.bw_config = 0;
+		chandef->edmg.channels = 0;
+	}
+
 	if (!cfg80211_chandef_valid(chandef))
 		return -EINVAL;
 
@@ -2820,7 +2877,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
 
 	if (rdev->ops->get_channel) {
 		int ret;
-		struct cfg80211_chan_def chandef;
+		struct cfg80211_chan_def chandef = {};
 
 		ret = rdev_get_channel(rdev, wdev, &chandef);
 		if (ret == 0) {
@@ -3211,9 +3268,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
 			return err;
 	}
 
-	if (!(rdev->wiphy.interface_modes & (1 << type)) &&
-	    !(type == NL80211_IFTYPE_AP_VLAN && params.use_4addr &&
-	      rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP))
+	if (!cfg80211_iftype_allowed(&rdev->wiphy, type, params.use_4addr, 0))
 		return -EOPNOTSUPP;
 
 	err = nl80211_parse_mon_options(rdev, type, info, &params);
@@ -4017,6 +4072,12 @@ static int nl80211_parse_beacon(struct nlattr *attrs[],
 	memset(bcn, 0, sizeof(*bcn));
 
 	if (attrs[NL80211_ATTR_BEACON_HEAD]) {
+		int ret = validate_beacon_head(attrs[NL80211_ATTR_BEACON_HEAD],
+					       NULL);
+
+		if (ret)
+			return ret;
+
 		bcn->head = nla_data(attrs[NL80211_ATTR_BEACON_HEAD]);
 		bcn->head_len = nla_len(attrs[NL80211_ATTR_BEACON_HEAD]);
 		if (!bcn->head_len)
@@ -5811,6 +5872,9 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
 	if (!rdev->ops->del_mpath)
 		return -EOPNOTSUPP;
 
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+		return -EOPNOTSUPP;
+
 	return rdev_del_mpath(rdev, dev, dst);
 }
 
@@ -9407,6 +9471,15 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
 			return -EINVAL;
 	}
 
+	if (info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]) {
+		connect.edmg.channels =
+		      nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]);
+
+		if (info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG])
+			connect.edmg.bw_config =
+		     nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG]);
+	}
+
 	if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) {
 		connkeys = nl80211_parse_connkeys(rdev, info, NULL);
 		if (IS_ERR(connkeys))
@@ -10281,9 +10354,11 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
 	hyst = wdev->cqm_config->rssi_hyst;
 	n = wdev->cqm_config->n_rssi_thresholds;
 
-	for (i = 0; i < n; i++)
+	for (i = 0; i < n; i++) {
+		i = array_index_nospec(i, n);
 		if (last < wdev->cqm_config->rssi_thresholds[i])
 			break;
+	}
 
 	low_index = i - 1;
 	if (low_index >= 0) {
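Note: the cqm loop above clamps i with array_index_nospec() (include/linux/nospec.h) before rssi_thresholds[i] is loaded, so a mispredicted bounds check cannot leak data through a speculative out-of-bounds read. The general pattern, as a hypothetical helper (names are illustrative, not from the patch):

	#include <linux/nospec.h>

	static s32 read_threshold(const s32 *tbl, unsigned long idx,
				  unsigned long len)
	{
		if (idx >= len)
			return -EINVAL;
		/* Returns idx when idx < len and 0 otherwise, computed
		 * without a branch the CPU could mispredict. */
		idx = array_index_nospec(idx, len);
		return tbl[idx];
	}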
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index f432178..42bd15c 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2137,7 +2137,7 @@ static void reg_call_notifier(struct wiphy *wiphy,
 
 static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
 {
-	struct cfg80211_chan_def chandef;
+	struct cfg80211_chan_def chandef = {};
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 	enum nl80211_iftype iftype;
 
@@ -2824,7 +2824,7 @@ static void reg_process_pending_hints(void)
 
 	/* When last_request->processed becomes true this will be rescheduled */
 	if (lr && !lr->processed) {
-		reg_process_hint(lr);
+		pr_debug("Pending regulatory request, waiting for it to be processed...\n");
 		return;
 	}
 
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 83a1781..8798e1c 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -930,6 +930,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
 		}
 
 		cfg80211_process_rdev_events(rdev);
+		cfg80211_mlme_purge_registrations(dev->ieee80211_ptr);
 	}
 
 	err = rdev_change_virtual_intf(rdev, dev, ntype, params);
@@ -1009,7 +1010,7 @@ static u32 cfg80211_calculate_bitrate_ht(struct rate_info *rate)
 	return (bitrate + 50000) / 100000;
 }
 
-static u32 cfg80211_calculate_bitrate_60g(struct rate_info *rate)
+static u32 cfg80211_calculate_bitrate_dmg(struct rate_info *rate)
 {
 	static const u32 __mcs2bitrate[] = {
 		/* control PHY */
@@ -1056,6 +1057,40 @@ static u32 cfg80211_calculate_bitrate_60g(struct rate_info *rate)
 	return __mcs2bitrate[rate->mcs];
 }
 
+static u32 cfg80211_calculate_bitrate_edmg(struct rate_info *rate)
+{
+	static const u32 __mcs2bitrate[] = {
+		/* control PHY */
+		[0] =   275,
+		/* SC PHY */
+		[1] =  3850,
+		[2] =  7700,
+		[3] =  9625,
+		[4] = 11550,
+		[5] = 12512, /* 1251.25 mbps */
+		[6] = 13475,
+		[7] = 15400,
+		[8] = 19250,
+		[9] = 23100,
+		[10] = 25025,
+		[11] = 26950,
+		[12] = 30800,
+		[13] = 38500,
+		[14] = 46200,
+		[15] = 50050,
+		[16] = 53900,
+		[17] = 57750,
+		[18] = 69300,
+		[19] = 75075,
+		[20] = 80850,
+	};
+
+	if (WARN_ON_ONCE(rate->mcs >= ARRAY_SIZE(__mcs2bitrate)))
+		return 0;
+
+	return __mcs2bitrate[rate->mcs] * rate->n_bonded_ch;
+}
+
 static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate)
 {
 	static const u32 base[4][10] = {
@@ -1226,8 +1261,10 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate)
 {
 	if (rate->flags & RATE_INFO_FLAGS_MCS)
 		return cfg80211_calculate_bitrate_ht(rate);
-	if (rate->flags & RATE_INFO_FLAGS_60G)
-		return cfg80211_calculate_bitrate_60g(rate);
+	if (rate->flags & RATE_INFO_FLAGS_DMG)
+		return cfg80211_calculate_bitrate_dmg(rate);
+	if (rate->flags & RATE_INFO_FLAGS_EDMG)
+		return cfg80211_calculate_bitrate_edmg(rate);
 	if (rate->flags & RATE_INFO_FLAGS_VHT_MCS)
 		return cfg80211_calculate_bitrate_vht(rate);
 	if (rate->flags & RATE_INFO_FLAGS_HE_MCS)
@@ -1670,7 +1707,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
 	for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
 		num_interfaces += params->iftype_num[iftype];
 		if (params->iftype_num[iftype] > 0 &&
-		    !(wiphy->software_iftypes & BIT(iftype)))
+		    !cfg80211_iftype_allowed(wiphy, iftype, 0, 1))
 			used_iftypes |= BIT(iftype);
 	}
 
@@ -1692,7 +1729,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
 			return -ENOMEM;
 
 		for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
-			if (wiphy->software_iftypes & BIT(iftype))
+			if (cfg80211_iftype_allowed(wiphy, iftype, 0, 1))
 				continue;
 			for (j = 0; j < c->n_limits; j++) {
 				all_iftypes |= limits[j].types;
@@ -1895,3 +1932,25 @@ EXPORT_SYMBOL(rfc1042_header);
 const unsigned char bridge_tunnel_header[] __aligned(2) =
 	{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
 EXPORT_SYMBOL(bridge_tunnel_header);
+
+bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
+			     bool is_4addr, u8 check_swif)
+{
+	bool is_vlan = iftype == NL80211_IFTYPE_AP_VLAN;
+
+	switch (check_swif) {
+	case 0:
+		if (is_vlan && is_4addr)
+			return wiphy->flags & WIPHY_FLAG_4ADDR_AP;
+		return wiphy->interface_modes & BIT(iftype);
+	case 1:
+		if (!(wiphy->software_iftypes & BIT(iftype)) && is_vlan)
+			return wiphy->flags & WIPHY_FLAG_4ADDR_AP;
+		return wiphy->software_iftypes & BIT(iftype);
+	default:
+		break;
+	}
+
+	return false;
+}
+EXPORT_SYMBOL(cfg80211_iftype_allowed);
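Note: the two check_swif modes of cfg80211_iftype_allowed() correspond to the two open-coded tests this series replaces: 0 checks wiphy->interface_modes (with the 4-addr AP_VLAN exception) and 1 checks wiphy->software_iftypes. A caller therefore looks like the NETDEV_PRE_UP hunk earlier in this patch:

	if (!cfg80211_iftype_allowed(wdev->wiphy, wdev->iftype,
				     wdev->use_4addr, 0))
		return notifier_from_errno(-EOPNOTSUPP);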
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 06943d9..4f0cfb8 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -800,7 +800,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
-	struct cfg80211_chan_def chandef;
+	struct cfg80211_chan_def chandef = {};
 	int ret;
 
 	switch (wdev->iftype) {
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index c67d7a82..73fd0ea 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -202,6 +202,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
 			       struct iw_point *data, char *ssid)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	int ret = 0;
 
 	/* call only for station! */
 	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
@@ -219,7 +220,10 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
 		if (ie) {
 			data->flags = 1;
 			data->length = ie[1];
-			memcpy(ssid, ie + 2, data->length);
+			if (data->length > IW_ESSID_MAX_SIZE)
+				ret = -EINVAL;
+			else
+				memcpy(ssid, ie + 2, data->length);
 		}
 		rcu_read_unlock();
 	} else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) {
@@ -229,7 +233,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
 	}
 	wdev_unlock(wdev);
 
-	return 0;
+	return ret;
 }
 
 int cfg80211_mgd_wext_siwap(struct net_device *dev,
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index a3b037f..8cab91c 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -322,7 +322,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
 	if (!umem->pages) {
 		err = -ENOMEM;
-		goto out_account;
+		goto out_pin;
 	}
 
 	for (i = 0; i < umem->npgs; i++)
@@ -330,6 +330,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 
 	return 0;
 
+out_pin:
+	xdp_umem_unpin_pages(umem);
 out_account:
 	xdp_umem_unaccount_pages(umem);
 	return err;
diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
index dad5583..3b2861f 100644
--- a/scripts/Kconfig.include
+++ b/scripts/Kconfig.include
@@ -20,7 +20,7 @@
 
 # $(cc-option,<flag>)
 # Return y if the compiler supports <flag>, n otherwise
-cc-option = $(success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null)
+cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /dev/null)
 
 # $(ld-option,<flag>)
 # Return y if the linker supports <flag>, n otherwise
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 1771a31..15dd58a 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -75,7 +75,7 @@
  $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,)       \
  $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile)   \
  $(if $(KBUILD_EXTMOD),-I $(modulesymfile))      \
- $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \
+ $(if $(KBUILD_EXTMOD),$(addprefix -e ,$(KBUILD_EXTRA_SYMBOLS))) \
  $(if $(KBUILD_EXTMOD),-o $(modulesymfile))      \
  $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S)      \
  $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E)  \
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index a5fe929..f49524e 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2817,7 +2817,7 @@
 # Check for line lengths > 75 in commit log, warn once
 		if ($in_commit_log && !$commit_log_long_line &&
 		    length($line) > 75 &&
-		    !($line =~ /^\s*[a-zA-Z0-9_\/\.\-]+\s+\|\s+\d+/ ||
+		    !($line =~ /^\s*[a-zA-Z0-9_\/\.\-\,]+\s+\|\s+\d+/ ||
 					# file delta changes
 		      $line =~ /^\s*(?:[\w\.\-]+\/)++[\w\.\-]+:/ ||
 					# filename then :
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index c4a9ddb..5aa75a0a 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -78,7 +78,7 @@
 	fi
 
 	# Strip out the base of the path
-	code=${code//^$basepath/""}
+	code=${code#$basepath/}
 
 	# In the case of inlines, move everything to same line
 	code=${code//$'\n'/' '}
diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c
index 6d5bbd31..bd29e4e7 100644
--- a/scripts/gcc-plugins/randomize_layout_plugin.c
+++ b/scripts/gcc-plugins/randomize_layout_plugin.c
@@ -443,13 +443,13 @@ static int is_pure_ops_struct(const_tree node)
 		if (node == fieldtype)
 			continue;
 
-		if (!is_fptr(fieldtype))
-			return 0;
-
-		if (code != RECORD_TYPE && code != UNION_TYPE)
+		if (code == RECORD_TYPE || code == UNION_TYPE) {
+			if (!is_pure_ops_struct(fieldtype))
+				return 0;
 			continue;
+		}
 
-		if (!is_pure_ops_struct(fieldtype))
+		if (!is_fptr(fieldtype))
 			return 0;
 	}
 
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 63fa0cb..ad55449 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -422,5 +422,9 @@
 if [ ! -z ${RTIC_MPGEN+x} ]; then
 	${RTIC_MPGEN} --objcopy="${OBJCOPY}" --objdump="${OBJDUMP}" \
 		--binpath="" --vmlinux="vmlinux" --config=${KCONFIG_CONFIG} \
-		--cc="${CC} ${KBUILD_AFLAGS}" --dts=rtic_mp.dts
+		--cc="${CC} ${KBUILD_AFLAGS}" --dts=rtic_mp.dts \
+		|| echo "RTIC MP DTS generation has failed"
+	# The echo above prints an error message if the RTIC MP DTS
+	# generation command fails, and ensures that an RTIC MP failure
+	# does not cause the kernel build to fail.
 fi
diff --git a/scripts/namespace.pl b/scripts/namespace.pl
index 6135574..1da7bca 100755
--- a/scripts/namespace.pl
+++ b/scripts/namespace.pl
@@ -65,13 +65,14 @@
 use warnings;
 use strict;
 use File::Find;
+use File::Spec;
 
 my $nm = ($ENV{'NM'} || "nm") . " -p";
 my $objdump = ($ENV{'OBJDUMP'} || "objdump") . " -s -j .comment";
-my $srctree = "";
-my $objtree = "";
-$srctree = "$ENV{'srctree'}/" if (exists($ENV{'srctree'}));
-$objtree = "$ENV{'objtree'}/" if (exists($ENV{'objtree'}));
+my $srctree = File::Spec->curdir();
+my $objtree = File::Spec->curdir();
+$srctree = File::Spec->rel2abs($ENV{'srctree'}) if (exists($ENV{'srctree'}));
+$objtree = File::Spec->rel2abs($ENV{'objtree'}) if (exists($ENV{'objtree'}));
 
 if ($#ARGV != -1) {
 	print STDERR "usage: $0 takes no parameters\n";
@@ -231,9 +232,9 @@
 	}
 	($source = $basename) =~ s/\.o$//;
 	if (-e "$source.c" || -e "$source.S") {
-		$source = "$objtree$File::Find::dir/$source";
+		$source = File::Spec->catfile($objtree, $File::Find::dir, $source)
 	} else {
-		$source = "$srctree$File::Find::dir/$source";
+		$source = File::Spec->catfile($srctree, $File::Find::dir, $source)
 	}
 	if (! -e "$source.c" && ! -e "$source.S") {
 		# No obvious source, exclude the object if it is conglomerate
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 088ea2a..612f737 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -223,16 +223,21 @@ static void *kvmemdup(const void *src, size_t len)
 static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
 {
 	size_t size = 0;
+	void *pos = e->pos;
 
 	if (!inbounds(e, sizeof(u16)))
-		return 0;
+		goto fail;
 	size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
 	e->pos += sizeof(__le16);
 	if (!inbounds(e, size))
-		return 0;
+		goto fail;
 	*chunk = e->pos;
 	e->pos += size;
 	return size;
+
+fail:
+	e->pos = pos;
+	return 0;
 }
 
 /* unpack control byte */
@@ -294,49 +299,66 @@ static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
 
 static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
 {
+	void *pos = e->pos;
+
 	if (unpack_nameX(e, AA_U32, name)) {
 		if (!inbounds(e, sizeof(u32)))
-			return 0;
+			goto fail;
 		if (data)
 			*data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
 		e->pos += sizeof(u32);
 		return 1;
 	}
+
+fail:
+	e->pos = pos;
 	return 0;
 }
 
 static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
 {
+	void *pos = e->pos;
+
 	if (unpack_nameX(e, AA_U64, name)) {
 		if (!inbounds(e, sizeof(u64)))
-			return 0;
+			goto fail;
 		if (data)
 			*data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
 		e->pos += sizeof(u64);
 		return 1;
 	}
+
+fail:
+	e->pos = pos;
 	return 0;
 }
 
 static size_t unpack_array(struct aa_ext *e, const char *name)
 {
+	void *pos = e->pos;
+
 	if (unpack_nameX(e, AA_ARRAY, name)) {
 		int size;
 		if (!inbounds(e, sizeof(u16)))
-			return 0;
+			goto fail;
 		size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
 		e->pos += sizeof(u16);
 		return size;
 	}
+
+fail:
+	e->pos = pos;
 	return 0;
 }
 
 static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
 {
+	void *pos = e->pos;
+
 	if (unpack_nameX(e, AA_BLOB, name)) {
 		u32 size;
 		if (!inbounds(e, sizeof(u32)))
-			return 0;
+			goto fail;
 		size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
 		e->pos += sizeof(u32);
 		if (inbounds(e, (size_t) size)) {
@@ -345,6 +367,9 @@ static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
 			return size;
 		}
 	}
+
+fail:
+	e->pos = pos;
 	return 0;
 }
 
@@ -361,9 +386,10 @@ static int unpack_str(struct aa_ext *e, const char **string, const char *name)
 			if (src_str[size - 1] != 0)
 				goto fail;
 			*string = src_str;
+
+			return size;
 		}
 	}
-	return size;
 
 fail:
 	e->pos = pos;
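Note: every AppArmor unpack fix above follows the same save/rewind discipline, so a failed alternative leaves the input stream untouched for the next attempt. The skeleton, with try_parse() as a hypothetical stand-in for the per-type body:

	void *pos = e->pos;		/* save the cursor before trying */

	if (!try_parse(e))		/* hypothetical: any partial parse */
		goto fail;
	return 1;			/* success: cursor sits past the field */

fail:
	e->pos = pos;			/* rewind: nothing was consumed */
	return 0;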
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index d9e7728..f63b4bd 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -271,8 +271,16 @@ static int ima_calc_file_hash_atfm(struct file *file,
 		rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
 		rc = integrity_kernel_read(file, offset, rbuf[active],
 					   rbuf_len);
-		if (rc != rbuf_len)
+		if (rc != rbuf_len) {
+			if (rc >= 0)
+				rc = -EINVAL;
+			/*
+			 * Forward current rc, do not overwrite with return value
+			 * from ahash_wait()
+			 */
+			ahash_wait(ahash_rc, &wait);
 			goto out3;
+		}
 
 		if (rbuf[1] && offset) {
 			/* Using two buffers, and it is not the first
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 5e51579..1d34b2a 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -71,6 +71,9 @@ static void request_key_auth_describe(const struct key *key,
 {
 	struct request_key_auth *rka = get_request_key_auth(key);
 
+	if (!rka)
+		return;
+
 	seq_puts(m, "key:");
 	seq_puts(m, key->description);
 	if (key_is_positive(key))
@@ -88,6 +91,9 @@ static long request_key_auth_read(const struct key *key,
 	size_t datalen;
 	long ret;
 
+	if (!rka)
+		return -EKEYREVOKED;
+
 	datalen = rka->callout_len;
 	ret = datalen;
 
diff --git a/security/security.c b/security/security.c
index 81cebf2..70e38fb 100644
--- a/security/security.c
+++ b/security/security.c
@@ -607,7 +607,7 @@ int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
 		return 0;
 	return call_int_hook(path_chown, 0, path, uid, gid);
 }
-EXPORT_SYMBOL(security_path_chown);
+EXPORT_SYMBOL_GPL(security_path_chown);
 
 int security_path_chroot(const struct path *path)
 {
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 109ab51..b1a9ac9 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -100,7 +100,7 @@
 #include "audit.h"
 #include "avc_ss.h"
 
-struct selinux_state selinux_state;
+struct selinux_state selinux_state __rticdata;
 
 /* SECMARK reference count */
 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 9a4c0ad..c071c356 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -469,7 +469,7 @@ char *smk_parse_smack(const char *string, int len)
 	if (i == 0 || i >= SMK_LONGLABEL)
 		return ERR_PTR(-EINVAL);
 
-	smack = kzalloc(i + 1, GFP_KERNEL);
+	smack = kzalloc(i + 1, GFP_NOFS);
 	if (smack == NULL)
 		return ERR_PTR(-ENOMEM);
 
@@ -504,7 +504,7 @@ int smk_netlbl_mls(int level, char *catset, struct netlbl_lsm_secattr *sap,
 			if ((m & *cp) == 0)
 				continue;
 			rc = netlbl_catmap_setbit(&sap->attr.mls.cat,
-						  cat, GFP_KERNEL);
+						  cat, GFP_NOFS);
 			if (rc < 0) {
 				netlbl_catmap_free(sap->attr.mls.cat);
 				return rc;
@@ -540,7 +540,7 @@ struct smack_known *smk_import_entry(const char *string, int len)
 	if (skp != NULL)
 		goto freeout;
 
-	skp = kzalloc(sizeof(*skp), GFP_KERNEL);
+	skp = kzalloc(sizeof(*skp), GFP_NOFS);
 	if (skp == NULL) {
 		skp = ERR_PTR(-ENOMEM);
 		goto freeout;
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 017c47e..221de4c 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -270,7 +270,7 @@ static struct smack_known *smk_fetch(const char *name, struct inode *ip,
 	if (!(ip->i_opflags & IOP_XATTR))
 		return ERR_PTR(-EOPNOTSUPP);
 
-	buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL);
+	buffer = kzalloc(SMK_LONGLABEL, GFP_NOFS);
 	if (buffer == NULL)
 		return ERR_PTR(-ENOMEM);
 
@@ -947,7 +947,8 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
 
 		if (rc != 0)
 			return rc;
-	} else if (bprm->unsafe)
+	}
+	if (bprm->unsafe & ~LSM_UNSAFE_PTRACE)
 		return -EPERM;
 
 	bsp->smk_task = isp->smk_task;
@@ -4005,6 +4006,8 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
 			skp = smack_ipv6host_label(&sadd);
 		if (skp == NULL)
 			skp = smack_net_ambient;
+		if (skb == NULL)
+			break;
 #ifdef CONFIG_AUDIT
 		smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
 		ad.a.u.net->family = family;
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index bbf91a5..bd3d68e0 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1818,8 +1818,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
 	if (cptr->type == USER_CLIENT) {
 		info->input_pool = cptr->data.user.fifo_pool_size;
 		info->input_free = info->input_pool;
-		if (cptr->data.user.fifo)
-			info->input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
+		info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
 	} else {
 		info->input_pool = 0;
 		info->input_free = 0;
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 72c0302..6a24732 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -280,3 +280,20 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
 
 	return 0;
 }
+
+/* get the number of unused cells safely */
+int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
+{
+	unsigned long flags;
+	int cells;
+
+	if (!f)
+		return 0;
+
+	snd_use_lock_use(&f->use_lock);
+	spin_lock_irqsave(&f->lock, flags);
+	cells = snd_seq_unused_cells(f->pool);
+	spin_unlock_irqrestore(&f->lock, flags);
+	snd_use_lock_free(&f->use_lock);
+	return cells;
+}
diff --git a/sound/core/seq/seq_fifo.h b/sound/core/seq/seq_fifo.h
index 062c446..5d38a0d 100644
--- a/sound/core/seq/seq_fifo.h
+++ b/sound/core/seq/seq_fifo.h
@@ -68,5 +68,7 @@ int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file, poll_table
 /* resize pool in fifo */
 int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize);
 
+/* get the number of unused cells safely */
+int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f);
 
 #endif
diff --git a/sound/firewire/dice/dice-alesis.c b/sound/firewire/dice/dice-alesis.c
index 218292b..f5b3252 100644
--- a/sound/firewire/dice/dice-alesis.c
+++ b/sound/firewire/dice/dice-alesis.c
@@ -15,7 +15,7 @@ alesis_io14_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
 
 static const unsigned int
 alesis_io26_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
-	{10, 10, 8},	/* Tx0 = Analog + S/PDIF. */
+	{10, 10, 4},	/* Tx0 = Analog + S/PDIF. */
 	{16, 8, 0},	/* Tx1 = ADAT1 + ADAT2. */
 };
 
diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
index 743015e..e240fdf 100644
--- a/sound/firewire/motu/motu.c
+++ b/sound/firewire/motu/motu.c
@@ -255,6 +255,17 @@ static const struct snd_motu_spec motu_audio_express = {
 	.analog_out_ports = 4,
 };
 
+static const struct snd_motu_spec motu_4pre = {
+	.name = "4pre",
+	.protocol = &snd_motu_protocol_v3,
+	.flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
+		 SND_MOTU_SPEC_TX_MICINST_CHUNK |
+		 SND_MOTU_SPEC_TX_RETURN_CHUNK |
+		 SND_MOTU_SPEC_RX_SEPARETED_MAIN,
+	.analog_in_ports = 2,
+	.analog_out_ports = 2,
+};
+
 #define SND_MOTU_DEV_ENTRY(model, data)			\
 {							\
 	.match_flags	= IEEE1394_MATCH_VENDOR_ID |	\
@@ -272,6 +283,7 @@ static const struct ieee1394_device_id motu_id_table[] = {
 	SND_MOTU_DEV_ENTRY(0x000015, &motu_828mk3),	/* FireWire only. */
 	SND_MOTU_DEV_ENTRY(0x000035, &motu_828mk3),	/* Hybrid. */
 	SND_MOTU_DEV_ENTRY(0x000033, &motu_audio_express),
+	SND_MOTU_DEV_ENTRY(0x000045, &motu_4pre),
 	{ }
 };
 MODULE_DEVICE_TABLE(ieee1394, motu_id_table);
diff --git a/sound/firewire/tascam/tascam-pcm.c b/sound/firewire/tascam/tascam-pcm.c
index e4cc899..9e58633 100644
--- a/sound/firewire/tascam/tascam-pcm.c
+++ b/sound/firewire/tascam/tascam-pcm.c
@@ -57,6 +57,9 @@ static int pcm_open(struct snd_pcm_substream *substream)
 		goto err_locked;
 
 	err = snd_tscm_stream_get_clock(tscm, &clock);
+	if (err < 0)
+		goto err_locked;
+
 	if (clock != SND_TSCM_CLOCK_INTERNAL ||
 	    amdtp_stream_pcm_running(&tscm->rx_stream) ||
 	    amdtp_stream_pcm_running(&tscm->tx_stream)) {
diff --git a/sound/firewire/tascam/tascam-stream.c b/sound/firewire/tascam/tascam-stream.c
index f1657a4..a1308f1 100644
--- a/sound/firewire/tascam/tascam-stream.c
+++ b/sound/firewire/tascam/tascam-stream.c
@@ -9,20 +9,37 @@
 #include <linux/delay.h>
 #include "tascam.h"
 
+#define CLOCK_STATUS_MASK      0xffff0000
+#define CLOCK_CONFIG_MASK      0x0000ffff
+
 #define CALLBACK_TIMEOUT 500
 
 static int get_clock(struct snd_tscm *tscm, u32 *data)
 {
+	int trial = 0;
 	__be32 reg;
 	int err;
 
-	err = snd_fw_transaction(tscm->unit, TCODE_READ_QUADLET_REQUEST,
-				 TSCM_ADDR_BASE + TSCM_OFFSET_CLOCK_STATUS,
-				 &reg, sizeof(reg), 0);
-	if (err >= 0)
-		*data = be32_to_cpu(reg);
+	while (trial++ < 5) {
+		err = snd_fw_transaction(tscm->unit, TCODE_READ_QUADLET_REQUEST,
+				TSCM_ADDR_BASE + TSCM_OFFSET_CLOCK_STATUS,
+				&reg, sizeof(reg), 0);
+		if (err < 0)
+			return err;
 
-	return err;
+		*data = be32_to_cpu(reg);
+		if (*data & CLOCK_STATUS_MASK)
+			break;
+
+		// In intermediate state after changing clock status.
+		msleep(50);
+	}
+
+	// Still in the intermediate state.
+	if (trial >= 5)
+		return -EAGAIN;
+
+	return 0;
 }
 
 static int set_clock(struct snd_tscm *tscm, unsigned int rate,
@@ -35,7 +52,7 @@ static int set_clock(struct snd_tscm *tscm, unsigned int rate,
 	err = get_clock(tscm, &data);
 	if (err < 0)
 		return err;
-	data &= 0x0000ffff;
+	data &= CLOCK_CONFIG_MASK;
 
 	if (rate > 0) {
 		data &= 0x000000ff;
@@ -80,17 +97,14 @@ static int set_clock(struct snd_tscm *tscm, unsigned int rate,
 
 int snd_tscm_stream_get_rate(struct snd_tscm *tscm, unsigned int *rate)
 {
-	u32 data = 0x0;
-	unsigned int trials = 0;
+	u32 data;
 	int err;
 
-	while (data == 0x0 || trials++ < 5) {
-		err = get_clock(tscm, &data);
-		if (err < 0)
-			return err;
+	err = get_clock(tscm, &data);
+	if (err < 0)
+		return err;
 
-		data = (data & 0xff000000) >> 24;
-	}
+	data = (data & 0xff000000) >> 24;
 
 	/* Check base rate. */
 	if ((data & 0x0f) == 0x01)
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 74244d8..e858b6f 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -443,6 +443,8 @@ static void azx_int_disable(struct hdac_bus *bus)
 	list_for_each_entry(azx_dev, &bus->stream_list, list)
 		snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0);
 
+	synchronize_irq(bus->irq);
+
 	/* disable SIE for all streams */
 	snd_hdac_chip_writeb(bus, INTCTL, 0);
 
diff --git a/sound/i2c/other/ak4xxx-adda.c b/sound/i2c/other/ak4xxx-adda.c
index 7f2761a..971197c 100644
--- a/sound/i2c/other/ak4xxx-adda.c
+++ b/sound/i2c/other/ak4xxx-adda.c
@@ -789,11 +789,12 @@ static int build_adc_controls(struct snd_akm4xxx *ak)
 				return err;
 
 			memset(&knew, 0, sizeof(knew));
-			knew.name = ak->adc_info[mixer_ch].selector_name;
-			if (!knew.name) {
+			if (!ak->adc_info ||
+				!ak->adc_info[mixer_ch].selector_name) {
 				knew.name = "Capture Channel";
 				knew.index = mixer_ch + ak->idx_offset * 2;
-			}
+			} else
+				knew.name = ak->adc_info[mixer_ch].selector_name;
 
 			knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
 			knew.info = ak4xxx_capture_source_info;
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index b9a6b66..d8ba3a6 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -828,6 +828,8 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
 	while (id >= 0) {
 		const struct hda_fixup *fix = codec->fixup_list + id;
 
+		if (++depth > 10)
+			break;
 		if (fix->chained_before)
 			apply_fixup(codec, fix->chain_id, action, depth + 1);
 
@@ -867,8 +869,6 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
 		}
 		if (!fix->chained || fix->chained_before)
 			break;
-		if (++depth > 10)
-			break;
 		id = fix->chain_id;
 	}
 }
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index a623377..82b0dc9 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2947,15 +2947,19 @@ static int hda_codec_runtime_resume(struct device *dev)
 #ifdef CONFIG_PM_SLEEP
 static int hda_codec_force_resume(struct device *dev)
 {
+	struct hda_codec *codec = dev_to_hda_codec(dev);
+	bool forced_resume = !codec->relaxed_resume;
 	int ret;
 
 	/* The get/put pair below enforces the runtime resume even if the
 	 * device hasn't been used at suspend time.  This trick is needed to
 	 * update the jack state change during the sleep.
 	 */
-	pm_runtime_get_noresume(dev);
+	if (forced_resume)
+		pm_runtime_get_noresume(dev);
 	ret = pm_runtime_force_resume(dev);
-	pm_runtime_put(dev);
+	if (forced_resume)
+		pm_runtime_put(dev);
 	return ret;
 }
 
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index acacc19..2003403 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -261,6 +261,8 @@ struct hda_codec {
 	unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */
 	unsigned int force_pin_prefix:1; /* Add location prefix */
 	unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
+	unsigned int relaxed_resume:1;	/* don't resume forcibly for jack */
+
 #ifdef CONFIG_PM
 	unsigned long power_on_acct;
 	unsigned long power_off_acct;
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index a41c1be..8fcb421 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -877,10 +877,13 @@ static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
 	 */
 	if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
 		hbus->response_reset = 1;
+		dev_err(chip->card->dev,
+			"No response from codec, resetting bus: last cmd=0x%08x\n",
+			bus->last_cmd[addr]);
 		return -EAGAIN; /* give a chance to retry */
 	}
 
-	dev_err(chip->card->dev,
+	dev_WARN(chip->card->dev,
 		"azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
 		bus->last_cmd[addr]);
 	chip->single_cmd = 1;
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 579984e..2609161 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -5991,7 +5991,8 @@ int snd_hda_gen_init(struct hda_codec *codec)
 	if (spec->init_hook)
 		spec->init_hook(codec);
 
-	snd_hda_apply_verbs(codec);
+	if (!spec->skip_verbs)
+		snd_hda_apply_verbs(codec);
 
 	init_multi_out(codec);
 	init_extra_out(codec);
@@ -6033,6 +6034,24 @@ void snd_hda_gen_free(struct hda_codec *codec)
 }
 EXPORT_SYMBOL_GPL(snd_hda_gen_free);
 
+/**
+ * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting
+ * @codec: the HDA codec
+ *
+ * This can be put as patch_ops reboot_notify function.
+ */
+void snd_hda_gen_reboot_notify(struct hda_codec *codec)
+{
+	/* Make the codec enter D3 to avoid spurious noises from the internal
+	 * speaker during (and after) reboot
+	 */
+	snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
+	snd_hda_codec_write(codec, codec->core.afg, 0,
+			    AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+	msleep(10);
+}
+EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify);
+
 #ifdef CONFIG_PM
 /**
  * snd_hda_gen_check_power_status - check the loopback power save state
@@ -6060,6 +6079,7 @@ static const struct hda_codec_ops generic_patch_ops = {
 	.init = snd_hda_gen_init,
 	.free = snd_hda_gen_free,
 	.unsol_event = snd_hda_jack_unsol_event,
+	.reboot_notify = snd_hda_gen_reboot_notify,
 #ifdef CONFIG_PM
 	.check_power_status = snd_hda_gen_check_power_status,
 #endif
@@ -6082,7 +6102,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec)
 
 	err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);
 	if (err < 0)
-		return err;
+		goto error;
 
 	err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
 	if (err < 0)
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 1012366..8933c0f 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -247,6 +247,7 @@ struct hda_gen_spec {
 	unsigned int indep_hp_enabled:1; /* independent HP enabled */
 	unsigned int have_aamix_ctl:1;
 	unsigned int hp_mic_jack_modes:1;
+	unsigned int skip_verbs:1; /* don't apply verbs at snd_hda_gen_init() */
 
 	/* additional mute flags (only effective with auto_mute_via_amp=1) */
 	u64 mute_bits;
@@ -336,6 +337,7 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
 				  struct auto_pin_cfg *cfg);
 int snd_hda_gen_build_controls(struct hda_codec *codec);
 int snd_hda_gen_build_pcms(struct hda_codec *codec);
+void snd_hda_gen_reboot_notify(struct hda_codec *codec);
 
 /* standard jack event callbacks */
 void snd_hda_gen_hp_automute(struct hda_codec *codec,
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 81cea34..bfc4508 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -329,13 +329,11 @@ enum {
 
 #define AZX_DCAPS_INTEL_SKYLAKE \
 	(AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
+	 AZX_DCAPS_SYNC_WRITE |\
 	 AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT |\
 	 AZX_DCAPS_I915_POWERWELL)
 
-#define AZX_DCAPS_INTEL_BROXTON \
-	(AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
-	 AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT |\
-	 AZX_DCAPS_I915_POWERWELL)
+#define AZX_DCAPS_INTEL_BROXTON			AZX_DCAPS_INTEL_SKYLAKE
 
 /* quirks for ATI SB / AMD Hudson */
 #define AZX_DCAPS_PRESET_ATI_SB \
@@ -1457,9 +1455,9 @@ static int azx_free(struct azx *chip)
 	}
 
 	if (bus->chip_init) {
+		azx_stop_chip(chip);
 		azx_clear_irq_pending(chip);
 		azx_stop_all_streams(chip);
-		azx_stop_chip(chip);
 	}
 
 	if (bus->irq >= 0)
@@ -2655,14 +2653,16 @@ static const struct pci_device_id azx_ids[] = {
 	/* AMD, X370 & co */
 	{ PCI_DEVICE(0x1022, 0x1457),
 	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
+	/* AMD, X570 & co */
+	{ PCI_DEVICE(0x1022, 0x1487),
+	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
 	/* AMD Stoney */
 	{ PCI_DEVICE(0x1022, 0x157a),
 	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
 			 AZX_DCAPS_PM_RUNTIME },
 	/* AMD Raven */
 	{ PCI_DEVICE(0x1022, 0x15e3),
-	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
-			 AZX_DCAPS_PM_RUNTIME },
+	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
 	/* ATI HDMI */
 	{ PCI_DEVICE(0x1002, 0x0002),
 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index fd476fb..677dcc0 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -370,6 +370,7 @@ static const struct hda_fixup ad1986a_fixups[] = {
 
 static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x30af, "HP B2800", AD1986A_FIXUP_LAPTOP_IMIC),
+	SND_PCI_QUIRK(0x1043, 0x1153, "ASUS M9V", AD1986A_FIXUP_LAPTOP_IMIC),
 	SND_PCI_QUIRK(0x1043, 0x1443, "ASUS Z99He", AD1986A_FIXUP_EAPD),
 	SND_PCI_QUIRK(0x1043, 0x1447, "ASUS A8JN", AD1986A_FIXUP_EAPD),
 	SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index b70fbfa..ae8fde4 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -176,23 +176,10 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
 {
 	struct conexant_spec *spec = codec->spec;
 
-	switch (codec->core.vendor_id) {
-	case 0x14f12008: /* CX8200 */
-	case 0x14f150f2: /* CX20722 */
-	case 0x14f150f4: /* CX20724 */
-		break;
-	default:
-		return;
-	}
-
 	/* Turn the problematic codec into D3 to avoid spurious noises
 	   from the internal speaker during (and after) reboot */
 	cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
-
-	snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
-	snd_hda_codec_write(codec, codec->core.afg, 0,
-			    AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-	msleep(10);
+	snd_hda_gen_reboot_notify(codec);
 }
 
 static void cx_auto_free(struct hda_codec *codec)
@@ -637,18 +624,20 @@ static void cxt_fixup_hp_gate_mic_jack(struct hda_codec *codec,
 
 /* update LED status via GPIO */
 static void cxt_update_gpio_led(struct hda_codec *codec, unsigned int mask,
-				bool enabled)
+				bool led_on)
 {
 	struct conexant_spec *spec = codec->spec;
 	unsigned int oldval = spec->gpio_led;
 
 	if (spec->mute_led_polarity)
-		enabled = !enabled;
+		led_on = !led_on;
 
-	if (enabled)
-		spec->gpio_led &= ~mask;
-	else
+	if (led_on)
 		spec->gpio_led |= mask;
+	else
+		spec->gpio_led &= ~mask;
+	codec_dbg(codec, "mask:%d led_on:%d gpio_led:%d\n",
+			mask, led_on, spec->gpio_led);
 	if (spec->gpio_led != oldval)
 		snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
 				    spec->gpio_led);
@@ -659,8 +648,8 @@ static void cxt_fixup_gpio_mute_hook(void *private_data, int enabled)
 {
 	struct hda_codec *codec = private_data;
 	struct conexant_spec *spec = codec->spec;
-
-	cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, enabled);
+	/* muted -> LED on */
+	cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, !enabled);
 }
 
 /* turn on/off mic-mute LED via GPIO per capture hook */
@@ -682,7 +671,6 @@ static void cxt_fixup_mute_led_gpio(struct hda_codec *codec,
 		{ 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x03 },
 		{}
 	};
-	codec_info(codec, "action: %d gpio_led: %d\n", action, spec->gpio_led);
 
 	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
 		spec->gen.vmaster_mute.hook = cxt_fixup_gpio_mute_hook;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 35931a1..c827a2a 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2293,8 +2293,10 @@ static void generic_hdmi_free(struct hda_codec *codec)
 	struct hdmi_spec *spec = codec->spec;
 	int pin_idx, pcm_idx;
 
-	if (codec_has_acomp(codec))
+	if (codec_has_acomp(codec)) {
 		snd_hdac_acomp_register_notifier(&codec->bus->core, NULL);
+		codec->relaxed_resume = 0;
+	}
 
 	for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
 		struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
@@ -2550,6 +2552,8 @@ static void register_i915_notifier(struct hda_codec *codec)
 	spec->drm_audio_ops.pin_eld_notify = intel_pin_eld_notify;
 	snd_hdac_acomp_register_notifier(&codec->bus->core,
 					&spec->drm_audio_ops);
+	/* no need for forcible resume for jack check thanks to notifier */
+	codec->relaxed_resume = 1;
 }
 
 /* setup_stream ops override for HSW+ */
@@ -2579,6 +2583,8 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec,
 /* precondition and allocation for Intel codecs */
 static int alloc_intel_hdmi(struct hda_codec *codec)
 {
+	int err;
+
 	/* requires i915 binding */
 	if (!codec->bus->core.audio_component) {
 		codec_info(codec, "No i915 binding for Intel HDMI/DP codec\n");
@@ -2587,7 +2593,12 @@ static int alloc_intel_hdmi(struct hda_codec *codec)
 		return -ENODEV;
 	}
 
-	return alloc_generic_hdmi(codec);
+	err = alloc_generic_hdmi(codec);
+	if (err < 0)
+		return err;
+	/* no need to handle unsol events */
+	codec->patch_ops.unsol_event = NULL;
+	return 0;
 }
 
 /* parse and post-process for Intel codecs */
@@ -3253,6 +3264,8 @@ static int patch_nvhdmi(struct hda_codec *codec)
 		nvhdmi_chmap_cea_alloc_validate_get_type;
 	spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
 
+	codec->link_down_at_suspend = 1;
+
 	return 0;
 }
 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index dc19896..dd46354 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -405,6 +405,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
 	case 0x10ec0700:
 	case 0x10ec0701:
 	case 0x10ec0703:
+	case 0x10ec0711:
 		alc_update_coef_idx(codec, 0x10, 1<<15, 0);
 		break;
 	case 0x10ec0662:
@@ -836,9 +837,11 @@ static int alc_init(struct hda_codec *codec)
 	if (spec->init_hook)
 		spec->init_hook(codec);
 
+	spec->gen.skip_verbs = 1; /* applied below */
 	snd_hda_gen_init(codec);
 	alc_fix_pll(codec);
 	alc_auto_init_amp(codec, spec->init_amp);
+	snd_hda_apply_verbs(codec); /* apply verbs here after own init */
 
 	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
 
@@ -868,15 +871,6 @@ static void alc_reboot_notify(struct hda_codec *codec)
 		alc_shutup(codec);
 }
 
-/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
-static void alc_d3_at_reboot(struct hda_codec *codec)
-{
-	snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
-	snd_hda_codec_write(codec, codec->core.afg, 0,
-			    AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-	msleep(10);
-}
-
 #define alc_free	snd_hda_gen_free
 
 #ifdef CONFIG_PM
@@ -1064,6 +1058,9 @@ static const struct snd_pci_quirk beep_white_list[] = {
 	SND_PCI_QUIRK(0x1043, 0x834a, "EeePC", 1),
 	SND_PCI_QUIRK(0x1458, 0xa002, "GA-MA790X", 1),
 	SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1),
+	/* blacklist -- no beep available */
+	SND_PCI_QUIRK(0x17aa, 0x309e, "Lenovo ThinkCentre M73", 0),
+	SND_PCI_QUIRK(0x17aa, 0x30a3, "Lenovo ThinkCentre M93", 0),
 	{}
 };
 
@@ -5111,7 +5108,7 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
 	struct alc_spec *spec = codec->spec;
 
 	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
-		spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
+		spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */
 		spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
 		codec->power_save_node = 0; /* avoid click noises */
 		snd_hda_apply_pincfgs(codec, pincfgs);
@@ -5680,8 +5677,11 @@ enum {
 	ALC225_FIXUP_WYSE_AUTO_MUTE,
 	ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
 	ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
+	ALC256_FIXUP_ASUS_HEADSET_MIC,
 	ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
 	ALC299_FIXUP_PREDATOR_SPK,
+	ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
+	ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6694,6 +6694,15 @@ static const struct hda_fixup alc269_fixups[] = {
 		.chained = true,
 		.chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
 	},
+	[ALC256_FIXUP_ASUS_HEADSET_MIC] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x19, 0x03a11020 }, /* headset mic with jack detect */
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+	},
 	[ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
@@ -6710,6 +6719,26 @@ static const struct hda_fixup alc269_fixups[] = {
 			{ }
 		}
 	},
+	[ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x14, 0x411111f0 }, /* disable confusing internal speaker */
+			{ 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+	},
+	[ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x19, 0x04a11040 },
+			{ 0x21, 0x04211020 },
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+	},
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6851,6 +6880,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+	SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+	SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -6867,6 +6898,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
 	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
 	SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+	SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
+	SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
 	SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
@@ -6944,6 +6977,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
 	SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
 	SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+	SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
 	SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -6969,6 +7003,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
 	SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MBXP", ALC256_FIXUP_HUAWEI_MBXP_PINS),
 	SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
+	SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
 
 #if 0
 	/* Below is a quirk table taken from the old code.
@@ -7133,6 +7168,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
 	{.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
 	{.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
 	{.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
+	{.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
 	{}
 };
 #define ALC225_STANDARD_PINS \
@@ -7728,6 +7764,7 @@ static int patch_alc269(struct hda_codec *codec)
 	case 0x10ec0700:
 	case 0x10ec0701:
 	case 0x10ec0703:
+	case 0x10ec0711:
 		spec->codec_variant = ALC269_TYPE_ALC700;
 		spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
 		alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
@@ -8806,6 +8843,7 @@ static int patch_alc680(struct hda_codec *codec)
 static const struct hda_device_id snd_hda_id_realtek[] = {
 	HDA_CODEC_ENTRY(0x10ec0215, "ALC215", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
+	HDA_CODEC_ENTRY(0x10ec0222, "ALC222", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
@@ -8858,6 +8896,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
 	HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
+	HDA_CODEC_ENTRY(0x10ec0711, "ALC711", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc662),
 	HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
 	HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
index e97d12d..9ebe77c 100644
--- a/sound/soc/codecs/es8316.c
+++ b/sound/soc/codecs/es8316.c
@@ -46,7 +46,10 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(adc_vol_tlv, -9600, 50, 1);
 static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_max_gain_tlv, -650, 150, 0);
 static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_min_gain_tlv, -1200, 150, 0);
 static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_target_tlv, -1650, 150, 0);
-static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(hpmixer_gain_tlv, -1200, 150, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(hpmixer_gain_tlv,
+	0, 4, TLV_DB_SCALE_ITEM(-1200, 150, 0),
+	8, 11, TLV_DB_SCALE_ITEM(-450, 150, 0),
+);
 
 static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(adc_pga_gain_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(-350, 0, 0),
@@ -84,7 +87,7 @@ static const struct snd_kcontrol_new es8316_snd_controls[] = {
 	SOC_DOUBLE_TLV("Headphone Playback Volume", ES8316_CPHP_ICAL_VOL,
 		       4, 0, 3, 1, hpout_vol_tlv),
 	SOC_DOUBLE_TLV("Headphone Mixer Volume", ES8316_HPMIX_VOL,
-		       0, 4, 7, 0, hpmixer_gain_tlv),
+		       0, 4, 11, 0, hpmixer_gain_tlv),
 
 	SOC_ENUM("Playback Polarity", dacpol),
 	SOC_DOUBLE_R_TLV("DAC Playback Volume", ES8316_DAC_VOLL,
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 60764f6..64a52d4 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -31,6 +31,13 @@
 #define SGTL5000_DAP_REG_OFFSET	0x0100
 #define SGTL5000_MAX_REG_OFFSET	0x013A
 
+/* Delay for the VAG ramp up */
+#define SGTL5000_VAG_POWERUP_DELAY 500 /* ms */
+/* Delay for the VAG ramp down */
+#define SGTL5000_VAG_POWERDOWN_DELAY 500 /* ms */
+
+#define SGTL5000_OUTPUTS_MUTE (SGTL5000_HP_MUTE | SGTL5000_LINE_OUT_MUTE)
+
 /* default value of sgtl5000 registers */
 static const struct reg_default sgtl5000_reg_defaults[] = {
 	{ SGTL5000_CHIP_DIG_POWER,		0x0000 },
@@ -116,6 +123,13 @@ enum  {
 	I2S_LRCLK_STRENGTH_HIGH,
 };
 
+enum {
+	HP_POWER_EVENT,
+	DAC_POWER_EVENT,
+	ADC_POWER_EVENT,
+	LAST_POWER_EVENT = ADC_POWER_EVENT
+};
+
 /* sgtl5000 private structure in codec */
 struct sgtl5000_priv {
 	int sysclk;	/* sysclk rate */
@@ -129,8 +143,109 @@ struct sgtl5000_priv {
 	u8 micbias_resistor;
 	u8 micbias_voltage;
 	u8 lrclk_strength;
+	u16 mute_state[LAST_POWER_EVENT + 1];
 };
 
+static inline int hp_sel_input(struct snd_soc_component *component)
+{
+	return (snd_soc_component_read32(component, SGTL5000_CHIP_ANA_CTRL) &
+		SGTL5000_HP_SEL_MASK) >> SGTL5000_HP_SEL_SHIFT;
+}
+
+static inline u16 mute_output(struct snd_soc_component *component,
+			      u16 mute_mask)
+{
+	u16 mute_reg = snd_soc_component_read32(component,
+					      SGTL5000_CHIP_ANA_CTRL);
+
+	snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
+			    mute_mask, mute_mask);
+	return mute_reg;
+}
+
+static inline void restore_output(struct snd_soc_component *component,
+				  u16 mute_mask, u16 mute_reg)
+{
+	snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
+		mute_mask, mute_reg);
+}
+
+static void vag_power_on(struct snd_soc_component *component, u32 source)
+{
+	if (snd_soc_component_read32(component, SGTL5000_CHIP_ANA_POWER) &
+	    SGTL5000_VAG_POWERUP)
+		return;
+
+	snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
+			    SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
+
+	/* When VAG is powering on to get the local loop from Line-In,
+	 * a sleep is required to avoid a loud pop.
+	 */
+	if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN &&
+	    source == HP_POWER_EVENT)
+		msleep(SGTL5000_VAG_POWERUP_DELAY);
+}
+
+static int vag_power_consumers(struct snd_soc_component *component,
+			       u16 ana_pwr_reg, u32 source)
+{
+	int consumers = 0;
+
+	/* count dac/adc consumers unconditionally */
+	if (ana_pwr_reg & SGTL5000_DAC_POWERUP)
+		consumers++;
+	if (ana_pwr_reg & SGTL5000_ADC_POWERUP)
+		consumers++;
+
+	/*
+	 * If the event comes from HP and Line-In is selected,
+	 * current action is 'DAC to be powered down'.
+	 * As HP_POWERUP is not set when HP muxed to line-in,
+	 * we need to keep VAG power ON.
+	 */
+	if (source == HP_POWER_EVENT) {
+		if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN)
+			consumers++;
+	} else {
+		if (ana_pwr_reg & SGTL5000_HP_POWERUP)
+			consumers++;
+	}
+
+	return consumers;
+}
+
+static void vag_power_off(struct snd_soc_component *component, u32 source)
+{
+	u16 ana_pwr = snd_soc_component_read32(component,
+					     SGTL5000_CHIP_ANA_POWER);
+
+	if (!(ana_pwr & SGTL5000_VAG_POWERUP))
+		return;
+
+	/*
+	 * This function is called when one of the VAG power consumers is
+	 * disappearing. Thus, if there is more than one consumer at the
+	 * moment, at least one of them will definitely stay after the end
+	 * of the current event.
+	 * Don't clear VAG_POWERUP if 2 or more consumers of VAG present:
+	 * - LINE_IN (for HP events) / HP (for DAC/ADC events)
+	 * - DAC
+	 * - ADC
+	 * (the current consumer is disappearing right now)
+	 */
+	if (vag_power_consumers(component, ana_pwr, source) >= 2)
+		return;
+
+	snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
+		SGTL5000_VAG_POWERUP, 0);
+	/* In the power-down case, we need to wait 400-1000 ms
+	 * for VAG to fully ramp down.
+	 * The longer we wait, the smaller the pop.
+	 */
+	msleep(SGTL5000_VAG_POWERDOWN_DELAY);
+}
+
 /*
  * mic_bias power on/off share the same register bits with
  * output impedance of mic bias, when power on mic bias, we
@@ -162,36 +277,46 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
 	return 0;
 }
 
-/*
- * As manual described, ADC/DAC only works when VAG powerup,
- * So enabled VAG before ADC/DAC up.
- * In power down case, we need wait 400ms when vag fully ramped down.
- */
-static int power_vag_event(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
+static int vag_and_mute_control(struct snd_soc_component *component,
+				 int event, int event_source)
 {
-	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
-	const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;
+	static const u16 mute_mask[] = {
+		/*
+		 * Mask for HP_POWER_EVENT.
+		 * Muxing of the headphones has to be wrapped with
+		 * mute/unmute of the headphones only.
+		 */
+		SGTL5000_HP_MUTE,
+		/*
+		 * Masks for DAC_POWER_EVENT/ADC_POWER_EVENT.
+		 * Muxing of the DAC or ADC block has to be wrapped with
+		 * mute/unmute of both headphones and line-out.
+		 */
+		SGTL5000_OUTPUTS_MUTE,
+		SGTL5000_OUTPUTS_MUTE
+	};
+
+	struct sgtl5000_priv *sgtl5000 =
+		snd_soc_component_get_drvdata(component);
 
 	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
-		snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
-			SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
-		msleep(400);
+	case SND_SOC_DAPM_PRE_PMU:
+		sgtl5000->mute_state[event_source] =
+			mute_output(component, mute_mask[event_source]);
 		break;
-
+	case SND_SOC_DAPM_POST_PMU:
+		vag_power_on(component, event_source);
+		restore_output(component, mute_mask[event_source],
+			       sgtl5000->mute_state[event_source]);
+		break;
 	case SND_SOC_DAPM_PRE_PMD:
-		/*
-		 * Don't clear VAG_POWERUP, when both DAC and ADC are
-		 * operational to prevent inadvertently starving the
-		 * other one of them.
-		 */
-		if ((snd_soc_component_read32(component, SGTL5000_CHIP_ANA_POWER) &
-				mask) != mask) {
-			snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
-				SGTL5000_VAG_POWERUP, 0);
-			msleep(400);
-		}
+		sgtl5000->mute_state[event_source] =
+			mute_output(component, mute_mask[event_source]);
+		vag_power_off(component, event_source);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		restore_output(component, mute_mask[event_source],
+			       sgtl5000->mute_state[event_source]);
 		break;
 	default:
 		break;
@@ -200,6 +325,41 @@ static int power_vag_event(struct snd_soc_dapm_widget *w,
 	return 0;
 }
 
+/*
+ * Mute the headphone when powering it up/down.
+ * Control VAG power on the HP power path.
+ */
+static int headphone_pga_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *component =
+		snd_soc_dapm_to_component(w->dapm);
+
+	return vag_and_mute_control(component, event, HP_POWER_EVENT);
+}
+
+/* As the manual describes, powering the ADC/DAC up/down requires
+ * the outputs to be muted to avoid pops.
+ * Control VAG power on the ADC/DAC power path.
+ */
+static int adc_updown_depop(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *component =
+		snd_soc_dapm_to_component(w->dapm);
+
+	return vag_and_mute_control(component, event, ADC_POWER_EVENT);
+}
+
+static int dac_updown_depop(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *component =
+		snd_soc_dapm_to_component(w->dapm);
+
+	return vag_and_mute_control(component, event, DAC_POWER_EVENT);
+}
+
 /* input sources for ADC */
 static const char *adc_mux_text[] = {
 	"MIC_IN", "LINE_IN"
@@ -272,7 +432,10 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
 			    mic_bias_event,
 			    SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 
-	SND_SOC_DAPM_PGA("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0),
+	SND_SOC_DAPM_PGA_E("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0,
+			   headphone_pga_event,
+			   SND_SOC_DAPM_PRE_POST_PMU |
+			   SND_SOC_DAPM_PRE_POST_PMD),
 	SND_SOC_DAPM_PGA("LO", SGTL5000_CHIP_ANA_POWER, 0, 0, NULL, 0),
 
 	SND_SOC_DAPM_MUX("Capture Mux", SND_SOC_NOPM, 0, 0, &adc_mux),
@@ -293,11 +456,12 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
 				0, SGTL5000_CHIP_DIG_POWER,
 				1, 0),
 
-	SND_SOC_DAPM_ADC("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0),
-	SND_SOC_DAPM_DAC("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0),
-
-	SND_SOC_DAPM_PRE("VAG_POWER_PRE", power_vag_event),
-	SND_SOC_DAPM_POST("VAG_POWER_POST", power_vag_event),
+	SND_SOC_DAPM_ADC_E("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0,
+			   adc_updown_depop, SND_SOC_DAPM_PRE_POST_PMU |
+			   SND_SOC_DAPM_PRE_POST_PMD),
+	SND_SOC_DAPM_DAC_E("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0,
+			   dac_updown_depop, SND_SOC_DAPM_PRE_POST_PMU |
+			   SND_SOC_DAPM_PRE_POST_PMD),
 };
 
 /* routes for sgtl5000 */
@@ -1165,12 +1329,17 @@ static int sgtl5000_set_power_regs(struct snd_soc_component *component)
 					SGTL5000_INT_OSC_EN);
 		/* Enable VDDC charge pump */
 		ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP;
-	} else if (vddio >= 3100 && vdda >= 3100) {
+	} else {
 		ana_pwr &= ~SGTL5000_VDDC_CHRGPMP_POWERUP;
-		/* VDDC use VDDIO rail */
-		lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
-		lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
-			    SGTL5000_VDDC_MAN_ASSN_SHIFT;
+		/*
+		 * If vddio == vdda, the charge pump source should be
+		 * assigned manually to VDDIO.
+		 */
+		if (vddio == vdda) {
+			lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
+			lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
+				    SGTL5000_VDDC_MAN_ASSN_SHIFT;
+		}
 	}
 
 	snd_soc_component_write(component, SGTL5000_CHIP_LINREG_CTRL, lreg_ctrl);
@@ -1280,6 +1449,7 @@ static int sgtl5000_probe(struct snd_soc_component *component)
 	int ret;
 	u16 reg;
 	struct sgtl5000_priv *sgtl5000 = snd_soc_component_get_drvdata(component);
+	unsigned int zcd_mask = SGTL5000_HP_ZCD_EN | SGTL5000_ADC_ZCD_EN;
 
 	/* power up sgtl5000 */
 	ret = sgtl5000_set_power_regs(component);
@@ -1305,9 +1475,8 @@ static int sgtl5000_probe(struct snd_soc_component *component)
 	reg = ((sgtl5000->lrclk_strength) << SGTL5000_PAD_I2S_LRCLK_SHIFT | 0x5f);
 	snd_soc_component_write(component, SGTL5000_CHIP_PAD_STRENGTH, reg);
 
-	snd_soc_component_write(component, SGTL5000_CHIP_ANA_CTRL,
-			SGTL5000_HP_ZCD_EN |
-			SGTL5000_ADC_ZCD_EN);
+	snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
+		zcd_mask, zcd_mask);
 
 	snd_soc_component_update_bits(component, SGTL5000_CHIP_MIC_CTRL,
 			SGTL5000_BIAS_R_MASK,
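
The vag_power_off()/vag_power_consumers() pair above keeps VAG powered while
at least one other consumer (HP or the Line-In mux, DAC, ADC) remains after
the current event. A compact userspace sketch of the gating rule, with
illustrative bit values rather than the real SGTL5000 register layout:

#include <stdio.h>
#include <stdbool.h>

#define DAC_POWERUP	(1 << 0)
#define ADC_POWERUP	(1 << 1)
#define HP_POWERUP	(1 << 2)

static int vag_consumers(unsigned int ana_pwr)
{
	int n = 0;

	if (ana_pwr & DAC_POWERUP)
		n++;
	if (ana_pwr & ADC_POWERUP)
		n++;
	if (ana_pwr & HP_POWERUP)
		n++;
	return n;
}

static bool keep_vag_on(unsigned int ana_pwr)
{
	/* >= 2 consumers: someone else definitely stays after this event */
	return vag_consumers(ana_pwr) >= 2;
}

int main(void)
{
	printf("DAC+HP up -> keep VAG: %d\n",
	       keep_vag_on(DAC_POWERUP | HP_POWERUP));	/* 1 */
	printf("DAC only  -> keep VAG: %d\n",
	       keep_vag_on(DAC_POWERUP));		/* 0 */
	return 0;
}
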
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index bf92d36..3c75dcf 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -1441,7 +1441,8 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
 	aic31xx->gpio_reset = devm_gpiod_get_optional(aic31xx->dev, "reset",
 						      GPIOD_OUT_LOW);
 	if (IS_ERR(aic31xx->gpio_reset)) {
-		dev_err(aic31xx->dev, "not able to acquire gpio\n");
+		if (PTR_ERR(aic31xx->gpio_reset) != -EPROBE_DEFER)
+			dev_err(aic31xx->dev, "not able to acquire gpio\n");
 		return PTR_ERR(aic31xx->gpio_reset);
 	}
 
@@ -1452,7 +1453,9 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
 				      ARRAY_SIZE(aic31xx->supplies),
 				      aic31xx->supplies);
 	if (ret) {
-		dev_err(aic31xx->dev, "Failed to request supplies: %d\n", ret);
+		if (ret != -EPROBE_DEFER)
+			dev_err(aic31xx->dev,
+				"Failed to request supplies: %d\n", ret);
 		return ret;
 	}
 
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 160b276..6a8c279 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1150,6 +1150,28 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
 	return ret;
 }
 
+static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params,
+					    struct snd_pcm_hw_rule *rule)
+{
+	struct davinci_mcasp_ruledata *rd = rule->private;
+	struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+	struct snd_mask nfmt;
+	int i, slot_width;
+
+	snd_mask_none(&nfmt);
+	slot_width = rd->mcasp->slot_width;
+
+	for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
+		if (snd_mask_test(fmt, i) &&
+		    snd_pcm_format_width(i) <= slot_width)
+			snd_mask_set(&nfmt, i);
+	}
+
+	return snd_mask_refine(fmt, &nfmt);
+}
+
 static const unsigned int davinci_mcasp_dai_rates[] = {
 	8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
 	88200, 96000, 176400, 192000,
@@ -1257,7 +1279,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
 	struct davinci_mcasp_ruledata *ruledata =
 					&mcasp->ruledata[substream->stream];
 	u32 max_channels = 0;
-	int i, dir;
+	int i, dir, ret;
 	int tdm_slots = mcasp->tdm_slots;
 
 	/* Do not allow more then one stream per direction */
@@ -1286,6 +1308,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
 			max_channels++;
 	}
 	ruledata->serializers = max_channels;
+	ruledata->mcasp = mcasp;
 	max_channels *= tdm_slots;
 	/*
 	 * If the already active stream has less channels than the calculated
@@ -1311,20 +1334,22 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
 				   0, SNDRV_PCM_HW_PARAM_CHANNELS,
 				   &mcasp->chconstr[substream->stream]);
 
-	if (mcasp->slot_width)
-		snd_pcm_hw_constraint_minmax(substream->runtime,
-					     SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
-					     8, mcasp->slot_width);
+	if (mcasp->slot_width) {
+		/* Only allow formats that require <= slot_width bits on the bus */
+		ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+					  SNDRV_PCM_HW_PARAM_FORMAT,
+					  davinci_mcasp_hw_rule_slot_width,
+					  ruledata,
+					  SNDRV_PCM_HW_PARAM_FORMAT, -1);
+		if (ret)
+			return ret;
+	}
 
 	/*
 	 * If we rely on implicit BCLK divider setting we should
 	 * set constraints based on what we can provide.
 	 */
 	if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
-		int ret;
-
-		ruledata->mcasp = mcasp;
-
 		ret = snd_pcm_hw_rule_add(substream->runtime, 0,
 					  SNDRV_PCM_HW_PARAM_RATE,
 					  davinci_mcasp_hw_rule_rate,
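
The new hw rule above refines the format mask so that only sample formats
whose width fits the configured TDM slot width survive. A toy illustration of
the same filtering, with a made-up format table instead of the ALSA ones:

#include <stdio.h>

struct fmt {
	const char *name;
	int width;	/* bits occupied on the bus */
};

int main(void)
{
	static const struct fmt fmts[] = {
		{ "S16_LE", 16 }, { "S24_LE", 24 }, { "S32_LE", 32 },
	};
	int slot_width = 24;
	unsigned int i;

	for (i = 0; i < sizeof(fmts) / sizeof(fmts[0]); i++)
		if (fmts[i].width <= slot_width)
			printf("%s allowed\n", fmts[i].name);
	return 0;
}
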
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 0a64822..d83be26 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -799,15 +799,6 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
 	u32 wl = SSI_SxCCR_WL(sample_size);
 	int ret;
 
-	/*
-	 * SSI is properly configured if it is enabled and running in
-	 * the synchronous mode; Note that AC97 mode is an exception
-	 * that should set separate configurations for STCCR and SRCCR
-	 * despite running in the synchronous mode.
-	 */
-	if (ssi->streams && ssi->synchronous)
-		return 0;
-
 	if (fsl_ssi_is_i2s_master(ssi)) {
 		ret = fsl_ssi_set_bclk(substream, dai, hw_params);
 		if (ret)
@@ -823,6 +814,15 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
 		}
 	}
 
+	/*
+	 * SSI is properly configured if it is enabled and running in
+	 * the synchronous mode; Note that AC97 mode is an exception
+	 * that should set separate configurations for STCCR and SRCCR
+	 * despite running in the synchronous mode.
+	 */
+	if (ssi->streams && ssi->synchronous)
+		return 0;
+
 	if (!fsl_ssi_is_ac97(ssi)) {
 		/*
 		 * Keep the ssi->i2s_net intact while having a local variable
@@ -1439,8 +1439,10 @@ static int fsl_ssi_probe_from_dt(struct fsl_ssi *ssi)
 	 * different name to register the device.
 	 */
 	if (!ssi->card_name[0] && of_get_property(np, "codec-handle", NULL)) {
-		sprop = of_get_property(of_find_node_by_path("/"),
-					"compatible", NULL);
+		struct device_node *root = of_find_node_by_path("/");
+
+		sprop = of_get_property(root, "compatible", NULL);
+		of_node_put(root);
 		/* Strip "fsl," in the compatible name if applicable */
 		p = strrchr(sprop, ',');
 		if (p)
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index 08a5152..e762001 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -42,6 +42,7 @@ struct cht_mc_private {
 	struct clk *mclk;
 	struct snd_soc_jack jack;
 	bool ts3a227e_present;
+	int quirks;
 };
 
 static int platform_clock_control(struct snd_soc_dapm_widget *w,
@@ -53,6 +54,10 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
 	struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
 	int ret;
 
+	/* See the comment in snd_cht_mc_probe() */
+	if (ctx->quirks & QUIRK_PMC_PLT_CLK_0)
+		return 0;
+
 	codec_dai = snd_soc_card_get_codec_dai(card, CHT_CODEC_DAI);
 	if (!codec_dai) {
 		dev_err(card->dev, "Codec dai not found; Unable to set platform clock\n");
@@ -222,6 +227,10 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
 			"jack detection gpios not added, error %d\n", ret);
 	}
 
+	/* See the comment in snd_cht_mc_probe() */
+	if (ctx->quirks & QUIRK_PMC_PLT_CLK_0)
+		return 0;
+
 	/*
 	 * The firmware might enable the clock at
 	 * boot (this information may or may not
@@ -420,16 +429,15 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
 	int ret_val = 0;
 	struct cht_mc_private *drv;
 	const char *mclk_name;
-	int quirks = 0;
-
-	dmi_id = dmi_first_match(cht_max98090_quirk_table);
-	if (dmi_id)
-		quirks = (unsigned long)dmi_id->driver_data;
 
 	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
 	if (!drv)
 		return -ENOMEM;
 
+	dmi_id = dmi_first_match(cht_max98090_quirk_table);
+	if (dmi_id)
+		drv->quirks = (unsigned long)dmi_id->driver_data;
+
 	drv->ts3a227e_present = acpi_dev_found("104C227E");
 	if (!drv->ts3a227e_present) {
 		/* no need probe TI jack detection chip */
@@ -446,7 +454,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
 	snd_soc_card_cht.dev = &pdev->dev;
 	snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
 
-	if (quirks & QUIRK_PMC_PLT_CLK_0)
+	if (drv->quirks & QUIRK_PMC_PLT_CLK_0)
 		mclk_name = "pmc_plt_clk_0";
 	else
 		mclk_name = "pmc_plt_clk_3";
@@ -459,6 +467,21 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
 		return PTR_ERR(drv->mclk);
 	}
 
+	/*
+	 * Boards which have the MAX98090's clk connected to clk_0 do not seem
+	 * to like it if we muck with the clock. If we disable the clock when
+	 * it is unused we get "max98090 i2c-193C9890:00: PLL unlocked" errors
+	 * and the PLL never seems to lock again.
+	 * So for these boards we enable it here once and leave it at that.
+	 */
+	if (drv->quirks & QUIRK_PMC_PLT_CLK_0) {
+		ret_val = clk_prepare_enable(drv->mclk);
+		if (ret_val < 0) {
+			dev_err(&pdev->dev, "MCLK enable error: %d\n", ret_val);
+			return ret_val;
+		}
+	}
+
 	ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cht);
 	if (ret_val) {
 		dev_err(&pdev->dev,
@@ -469,11 +492,23 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
 	return ret_val;
 }
 
+static int snd_cht_mc_remove(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = platform_get_drvdata(pdev);
+	struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+	if (ctx->quirks & QUIRK_PMC_PLT_CLK_0)
+		clk_disable_unprepare(ctx->mclk);
+
+	return 0;
+}
+
 static struct platform_driver snd_cht_mc_driver = {
 	.driver = {
 		.name = "cht-bsw-max98090",
 	},
 	.probe = snd_cht_mc_probe,
+	.remove = snd_cht_mc_remove,
 };
 
 module_platform_driver(snd_cht_mc_driver)
diff --git a/sound/soc/intel/common/sst-ipc.c b/sound/soc/intel/common/sst-ipc.c
index dcff138..771734fd 100644
--- a/sound/soc/intel/common/sst-ipc.c
+++ b/sound/soc/intel/common/sst-ipc.c
@@ -231,6 +231,8 @@ struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
 
 	if (ipc->ops.reply_msg_match != NULL)
 		header = ipc->ops.reply_msg_match(header, &mask);
+	else
+		mask = (u64)-1;
 
 	if (list_empty(&ipc->rx_list)) {
 		dev_err(ipc->dev, "error: rx list empty but received 0x%llx\n",
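
Without a driver-supplied reply_msg_match() the mask used to be left
uninitialized; defaulting it to all ones makes the reply lookup compare the
full header. A userspace sketch of the matching idea (the header & mask
comparison is assumed from context, not shown in the hunk):

#include <stdint.h>
#include <stdio.h>

struct ipc_message {
	uint64_t header;
};

static const struct ipc_message *reply_find_msg(const struct ipc_message *rx,
						int n, uint64_t header,
						uint64_t (*match)(uint64_t, uint64_t *))
{
	uint64_t mask;
	int i;

	if (match)
		header = match(header, &mask);
	else
		mask = (uint64_t)-1;	/* compare the whole header */

	for (i = 0; i < n; i++)
		if ((rx[i].header & mask) == header)
			return &rx[i];
	return NULL;
}

int main(void)
{
	struct ipc_message rx[] = { { 0x1001 }, { 0x2002 } };
	const struct ipc_message *m = reply_find_msg(rx, 2, 0x2002, NULL);

	printf("matched: %s\n", m ? "yes" : "no");
	return 0;
}
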
diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c
index 5d7ac2e..faf1cba 100644
--- a/sound/soc/intel/skylake/skl-debug.c
+++ b/sound/soc/intel/skylake/skl-debug.c
@@ -196,7 +196,7 @@ static ssize_t fw_softreg_read(struct file *file, char __user *user_buf,
 	memset(d->fw_read_buff, 0, FW_REG_BUF);
 
 	if (w0_stat_sz > 0)
-		__iowrite32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
+		__ioread32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
 
 	for (offset = 0; offset < FW_REG_SIZE; offset += 16) {
 		ret += snprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
index 01a050c..3cef2ebf 100644
--- a/sound/soc/intel/skylake/skl-nhlt.c
+++ b/sound/soc/intel/skylake/skl-nhlt.c
@@ -231,7 +231,7 @@ int skl_nhlt_update_topology_bin(struct skl *skl)
 	struct hdac_bus *bus = skl_to_bus(skl);
 	struct device *dev = bus->dev;
 
-	dev_dbg(dev, "oem_id %.6s, oem_table_id %8s oem_revision %d\n",
+	dev_dbg(dev, "oem_id %.6s, oem_table_id %.8s oem_revision %d\n",
 		nhlt->header.oem_id, nhlt->header.oem_table_id,
 		nhlt->header.oem_revision);
 
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 60d43d5..11399f8 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -329,7 +329,6 @@ static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
 		val |= I2S_CHN_4;
 		break;
 	case 2:
-	case 1:
 		val |= I2S_CHN_2;
 		break;
 	default:
@@ -462,7 +461,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
 	},
 	.capture = {
 		.stream_name = "Capture",
-		.channels_min = 1,
+		.channels_min = 2,
 		.channels_max = 2,
 		.rates = SNDRV_PCM_RATE_8000_192000,
 		.formats = (SNDRV_PCM_FMTBIT_S8 |
@@ -662,7 +661,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
 	}
 
 	if (!of_property_read_u32(node, "rockchip,capture-channels", &val)) {
-		if (val >= 1 && val <= 8)
+		if (val >= 2 && val <= 8)
 			soc_dai->capture.channels_max = val;
 	}
 
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 051f964..549a137 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -30,6 +30,7 @@ struct rsnd_adg {
 	struct clk *clkout[CLKOUTMAX];
 	struct clk_onecell_data onecell;
 	struct rsnd_mod mod;
+	int clk_rate[CLKMAX];
 	u32 flags;
 	u32 ckr;
 	u32 rbga;
@@ -113,9 +114,9 @@ static void __rsnd_adg_get_timesel_ratio(struct rsnd_priv *priv,
 	unsigned int val, en;
 	unsigned int min, diff;
 	unsigned int sel_rate[] = {
-		clk_get_rate(adg->clk[CLKA]),	/* 0000: CLKA */
-		clk_get_rate(adg->clk[CLKB]),	/* 0001: CLKB */
-		clk_get_rate(adg->clk[CLKC]),	/* 0010: CLKC */
+		adg->clk_rate[CLKA],	/* 0000: CLKA */
+		adg->clk_rate[CLKB],	/* 0001: CLKB */
+		adg->clk_rate[CLKC],	/* 0010: CLKC */
 		adg->rbga_rate_for_441khz,	/* 0011: RBGA */
 		adg->rbgb_rate_for_48khz,	/* 0100: RBGB */
 	};
@@ -331,7 +332,7 @@ int rsnd_adg_clk_query(struct rsnd_priv *priv, unsigned int rate)
 	 * AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC/AUDIO_CLKI.
 	 */
 	for_each_rsnd_clk(clk, adg, i) {
-		if (rate == clk_get_rate(clk))
+		if (rate == adg->clk_rate[i])
 			return sel_table[i];
 	}
 
@@ -398,10 +399,18 @@ void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
 
 	for_each_rsnd_clk(clk, adg, i) {
 		ret = 0;
-		if (enable)
+		if (enable) {
 			ret = clk_prepare_enable(clk);
-		else
+
+			/*
+			 * We shouldn't call clk_get_rate() in atomic
+			 * context; cache the rate here, while
+			 * rsnd_adg_clk_enable() is being called.
+			 */
+			adg->clk_rate[i] = clk_get_rate(adg->clk[i]);
+		} else {
 			clk_disable_unprepare(clk);
+		}
 
 		if (ret < 0)
 			dev_warn(dev, "can't use clk %d\n", i);
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index d23c2bb..15a3182 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -674,6 +674,7 @@ static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 	}
 
 	/* set format */
+	rdai->bit_clk_inv = 0;
 	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
 	case SND_SOC_DAIFMT_I2S:
 		rdai->sys_delay = 0;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 91cc574..b9ad15f 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1147,8 +1147,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
 		list_add_tail(&widget->work_list, list);
 
 	if (custom_stop_condition && custom_stop_condition(widget, dir)) {
-		widget->endpoints[dir] = 1;
-		return widget->endpoints[dir];
+		list = NULL;
+		custom_stop_condition = NULL;
 	}
 
 	if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
@@ -1185,8 +1185,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
  *
  * Optionally, can be supplied with a function acting as a stopping condition.
  * This function takes the dapm widget currently being examined and the walk
- * direction as an arguments, it should return true if the walk should be
- * stopped and false otherwise.
+ * direction as arguments; it should return true if widgets from that point
+ * in the graph onwards should not be added to the widget list.
  */
 static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
 	struct list_head *list,
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index 30e791a..232df04 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -313,6 +313,12 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
 
 		if (!dmaengine_pcm_can_report_residue(dev, pcm->chan[i]))
 			pcm->flags |= SND_DMAENGINE_PCM_FLAG_NO_RESIDUE;
+
+		if (rtd->pcm->streams[i].pcm->name[0] == '\0') {
+			strncpy(rtd->pcm->streams[i].pcm->name,
+				rtd->pcm->streams[i].pcm->id,
+				sizeof(rtd->pcm->streams[i].pcm->name));
+		}
 	}
 
 	return 0;
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index fa6e582..dc0c00a 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -2669,8 +2669,7 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
 
 		if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
-		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND) &&
-		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
+		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
 			continue;
 
 		dev_dbg(be->dev, "ASoC: prepare BE %s\n",
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
index 6173dd86..18cf840 100644
--- a/sound/soc/sunxi/sun4i-i2s.c
+++ b/sound/soc/sunxi/sun4i-i2s.c
@@ -223,10 +223,11 @@ static const struct sun4i_i2s_clk_div sun4i_i2s_mclk_div[] = {
 };
 
 static int sun4i_i2s_get_bclk_div(struct sun4i_i2s *i2s,
-				  unsigned int oversample_rate,
+				  unsigned long parent_rate,
+				  unsigned int sampling_rate,
 				  unsigned int word_size)
 {
-	int div = oversample_rate / word_size / 2;
+	int div = parent_rate / sampling_rate / word_size / 2;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(sun4i_i2s_bclk_div); i++) {
@@ -316,8 +317,8 @@ static int sun4i_i2s_set_clk_rate(struct snd_soc_dai *dai,
 		return -EINVAL;
 	}
 
-	bclk_div = sun4i_i2s_get_bclk_div(i2s, oversample_rate,
-					  word_size);
+	bclk_div = sun4i_i2s_get_bclk_div(i2s, i2s->mclk_freq,
+					  rate, word_size);
 	if (bclk_div < 0) {
 		dev_err(dai->dev, "Unsupported BCLK divider: %d\n", bclk_div);
 		return -EINVAL;
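
The corrected divider derivation: BCLK must carry 2 * word_size *
sampling_rate bits per second, so the divider from the parent clock is
parent_rate / rate / word_size / 2. A quick check with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned long parent_rate = 24576000;	/* e.g. an MCLK of 24.576 MHz */
	unsigned int rate = 48000, word_size = 32;

	/* 24576000 / 48000 / 32 / 2 = 8 */
	printf("bclk div = %lu\n", parent_rate / rate / word_size / 2);
	return 0;
}
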
diff --git a/sound/soc/uniphier/aio-cpu.c b/sound/soc/uniphier/aio-cpu.c
index ee90e6c..2ae582a 100644
--- a/sound/soc/uniphier/aio-cpu.c
+++ b/sound/soc/uniphier/aio-cpu.c
@@ -424,8 +424,11 @@ int uniphier_aio_dai_suspend(struct snd_soc_dai *dai)
 {
 	struct uniphier_aio *aio = uniphier_priv(dai);
 
-	reset_control_assert(aio->chip->rst);
-	clk_disable_unprepare(aio->chip->clk);
+	aio->chip->num_wup_aios--;
+	if (!aio->chip->num_wup_aios) {
+		reset_control_assert(aio->chip->rst);
+		clk_disable_unprepare(aio->chip->clk);
+	}
 
 	return 0;
 }
@@ -439,13 +442,15 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
 	if (!aio->chip->active)
 		return 0;
 
-	ret = clk_prepare_enable(aio->chip->clk);
-	if (ret)
-		return ret;
+	if (!aio->chip->num_wup_aios) {
+		ret = clk_prepare_enable(aio->chip->clk);
+		if (ret)
+			return ret;
 
-	ret = reset_control_deassert(aio->chip->rst);
-	if (ret)
-		goto err_out_clock;
+		ret = reset_control_deassert(aio->chip->rst);
+		if (ret)
+			goto err_out_clock;
+	}
 
 	aio_iecout_set_enable(aio->chip, true);
 	aio_chip_init(aio->chip);
@@ -458,7 +463,7 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
 
 		ret = aio_init(sub);
 		if (ret)
-			goto err_out_clock;
+			goto err_out_reset;
 
 		if (!sub->setting)
 			continue;
@@ -466,11 +471,16 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
 		aio_port_reset(sub);
 		aio_src_reset(sub);
 	}
+	aio->chip->num_wup_aios++;
 
 	return 0;
 
+err_out_reset:
+	if (!aio->chip->num_wup_aios)
+		reset_control_assert(aio->chip->rst);
 err_out_clock:
-	clk_disable_unprepare(aio->chip->clk);
+	if (!aio->chip->num_wup_aios)
+		clk_disable_unprepare(aio->chip->clk);
 
 	return ret;
 }
@@ -619,6 +629,7 @@ int uniphier_aio_probe(struct platform_device *pdev)
 		return PTR_ERR(chip->rst);
 
 	chip->num_aios = chip->chip_spec->num_dais;
+	chip->num_wup_aios = chip->num_aios;
 	chip->aios = devm_kcalloc(dev,
 				  chip->num_aios, sizeof(struct uniphier_aio),
 				  GFP_KERNEL);
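
num_wup_aios effectively turns suspend/resume into reference counting: the
shared clock and reset line are released only by the last DAI to suspend and
reacquired only by the first DAI to resume. A sketch of the pattern (names
illustrative, error handling omitted):

#include <stdio.h>

static int num_wup_aios = 2;	/* both DAIs awake after probe */

static void dai_suspend(void)
{
	if (--num_wup_aios == 0)
		printf("last DAI down: assert reset, disable clock\n");
}

static void dai_resume(void)
{
	if (num_wup_aios++ == 0)
		printf("first DAI up: enable clock, deassert reset\n");
}

int main(void)
{
	dai_suspend();
	dai_suspend();	/* shared resources released here */
	dai_resume();	/* ... and reacquired here */
	dai_resume();
	return 0;
}
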
diff --git a/sound/soc/uniphier/aio.h b/sound/soc/uniphier/aio.h
index ca6ccba..a7ff7e5 100644
--- a/sound/soc/uniphier/aio.h
+++ b/sound/soc/uniphier/aio.h
@@ -285,6 +285,7 @@ struct uniphier_aio_chip {
 
 	struct uniphier_aio *aios;
 	int num_aios;
+	int num_wup_aios;
 	struct uniphier_aio_pll *plls;
 	int num_plls;
 
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index 78c2d6c..5315642 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -554,6 +554,15 @@ int line6_init_pcm(struct usb_line6 *line6,
 	line6pcm->volume_monitor = 255;
 	line6pcm->line6 = line6;
 
+	spin_lock_init(&line6pcm->out.lock);
+	spin_lock_init(&line6pcm->in.lock);
+	line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
+
+	line6->line6pcm = line6pcm;
+
+	pcm->private_data = line6pcm;
+	pcm->private_free = line6_cleanup_pcm;
+
 	line6pcm->max_packet_size_in =
 		usb_maxpacket(line6->usbdev,
 			usb_rcvisocpipe(line6->usbdev, ep_read), 0);
@@ -566,15 +575,6 @@ int line6_init_pcm(struct usb_line6 *line6,
 		return -EINVAL;
 	}
 
-	spin_lock_init(&line6pcm->out.lock);
-	spin_lock_init(&line6pcm->in.lock);
-	line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
-
-	line6->line6pcm = line6pcm;
-
-	pcm->private_data = line6pcm;
-	pcm->private_free = line6_cleanup_pcm;
-
 	err = line6_create_audio_out_urbs(line6pcm);
 	if (err < 0)
 		return err;
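
The reordering above attaches pcm->private_free (and the fields it relies on)
before the max-packet-size checks can fail, so an early -EINVAL no longer
leaks the half-initialized object. A generic sketch of the pattern, with
hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct pcm {
	void *priv;
	void (*private_free)(struct pcm *);
};

static void cleanup_pcm(struct pcm *p)
{
	free(p->priv);
	printf("priv freed\n");
}

static int init_pcm(struct pcm *p)
{
	int bad_packet_size = 1;	/* stand-in for the failing check */

	p->priv = malloc(64);
	if (!p->priv)
		return -1;
	/* register the destructor before anything below can fail */
	p->private_free = cleanup_pcm;

	if (bad_packet_size)
		return -1;	/* priv is still reclaimed via private_free */
	return 0;
}

int main(void)
{
	struct pcm p = { 0 };

	if (init_pcm(&p) < 0 && p.private_free)
		p.private_free(&p);
	return 0;
}
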
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index e63a7d3..0b902bd 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -83,6 +83,7 @@ struct mixer_build {
 	unsigned char *buffer;
 	unsigned int buflen;
 	DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
+	DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
 	struct usb_audio_term oterm;
 	const struct usbmix_name_map *map;
 	const struct usbmix_selector_map *selector_map;
@@ -753,12 +754,13 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
 				       struct uac_mixer_unit_descriptor *desc)
 {
 	int mu_channels;
-	void *c;
 
 	if (desc->bLength < sizeof(*desc))
 		return -EINVAL;
 	if (!desc->bNrInPins)
 		return -EINVAL;
+	if (desc->bLength < sizeof(*desc) + desc->bNrInPins)
+		return -EINVAL;
 
 	switch (state->mixer->protocol) {
 	case UAC_VERSION_1:
@@ -774,13 +776,6 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
 		break;
 	}
 
-	if (!mu_channels)
-		return 0;
-
-	c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
-	if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
-		return 0; /* no bmControls -> skip */
-
 	return mu_channels;
 }
 
@@ -788,16 +783,25 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
  * parse the source unit recursively until it reaches to a terminal
  * or a branched unit.
  */
-static int check_input_term(struct mixer_build *state, int id,
+static int __check_input_term(struct mixer_build *state, int id,
 			    struct usb_audio_term *term)
 {
 	int protocol = state->mixer->protocol;
 	int err;
 	void *p1;
+	unsigned char *hdr;
 
 	memset(term, 0, sizeof(*term));
-	while ((p1 = find_audio_control_unit(state, id)) != NULL) {
-		unsigned char *hdr = p1;
+	for (;;) {
+		/* a loop in the terminal chain? */
+		if (test_and_set_bit(id, state->termbitmap))
+			return -EINVAL;
+
+		p1 = find_audio_control_unit(state, id);
+		if (!p1)
+			break;
+
+		hdr = p1;
 		term->id = id;
 
 		if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) {
@@ -815,7 +819,7 @@ static int check_input_term(struct mixer_build *state, int id,
 
 					/* call recursively to verify that the
 					 * referenced clock entity is valid */
-					err = check_input_term(state, d->bCSourceID, term);
+					err = __check_input_term(state, d->bCSourceID, term);
 					if (err < 0)
 						return err;
 
@@ -849,7 +853,7 @@ static int check_input_term(struct mixer_build *state, int id,
 			case UAC2_CLOCK_SELECTOR: {
 				struct uac_selector_unit_descriptor *d = p1;
 				/* call recursively to retrieve the channel info */
-				err = check_input_term(state, d->baSourceID[0], term);
+				err = __check_input_term(state, d->baSourceID[0], term);
 				if (err < 0)
 					return err;
 				term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -912,7 +916,7 @@ static int check_input_term(struct mixer_build *state, int id,
 
 				/* call recursively to verify that the
 				 * referenced clock entity is valid */
-				err = check_input_term(state, d->bCSourceID, term);
+				err = __check_input_term(state, d->bCSourceID, term);
 				if (err < 0)
 					return err;
 
@@ -963,7 +967,7 @@ static int check_input_term(struct mixer_build *state, int id,
 			case UAC3_CLOCK_SELECTOR: {
 				struct uac_selector_unit_descriptor *d = p1;
 				/* call recursively to retrieve the channel info */
-				err = check_input_term(state, d->baSourceID[0], term);
+				err = __check_input_term(state, d->baSourceID[0], term);
 				if (err < 0)
 					return err;
 				term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -979,7 +983,7 @@ static int check_input_term(struct mixer_build *state, int id,
 					return -EINVAL;
 
 				/* call recursively to retrieve the channel info */
-				err = check_input_term(state, d->baSourceID[0], term);
+				err = __check_input_term(state, d->baSourceID[0], term);
 				if (err < 0)
 					return err;
 
@@ -997,6 +1001,15 @@ static int check_input_term(struct mixer_build *state, int id,
 	return -ENODEV;
 }
 
+
+static int check_input_term(struct mixer_build *state, int id,
+			    struct usb_audio_term *term)
+{
+	memset(term, 0, sizeof(*term));
+	memset(state->termbitmap, 0, sizeof(state->termbitmap));
+	return __check_input_term(state, id, term);
+}
+
 /*
  * Feature Unit
  */
@@ -2009,6 +2022,31 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
  * Mixer Unit
  */
 
+/* check whether the given in/out overflows bmMixerControls matrix */
+static bool mixer_bitmap_overflow(struct uac_mixer_unit_descriptor *desc,
+				  int protocol, int num_ins, int num_outs)
+{
+	u8 *hdr = (u8 *)desc;
+	u8 *c = uac_mixer_unit_bmControls(desc, protocol);
+	size_t rest; /* remaining bytes after bmMixerControls */
+
+	switch (protocol) {
+	case UAC_VERSION_1:
+	default:
+		rest = 1; /* iMixer */
+		break;
+	case UAC_VERSION_2:
+		rest = 2; /* bmControls + iMixer */
+		break;
+	case UAC_VERSION_3:
+		rest = 6; /* bmControls + wMixerDescrStr */
+		break;
+	}
+
+	/* overflow? */
+	return c + (num_ins * num_outs + 7) / 8 + rest > hdr + hdr[0];
+}
+
 /*
  * build a mixer unit control
  *
@@ -2137,6 +2175,9 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
 		if (err < 0)
 			return err;
 		num_ins += iterm.channels;
+		if (mixer_bitmap_overflow(desc, state->mixer->protocol,
+					  num_ins, num_outs))
+			break;
 		for (; ich < num_ins; ich++) {
 			int och, ich_has_controls = 0;
 
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 5b342fe..10c6971 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -1167,17 +1167,17 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
 {
 	struct usb_mixer_interface *mixer;
 	struct usb_mixer_elem_info *cval;
-	int unitid = 12; /* SamleRate ExtensionUnit ID */
+	int unitid = 12; /* SampleRate ExtensionUnit ID */
 
 	list_for_each_entry(mixer, &chip->mixer_list, list) {
-		cval = mixer_elem_list_to_info(mixer->id_elems[unitid]);
-		if (cval) {
+		if (mixer->id_elems[unitid]) {
+			cval = mixer_elem_list_to_info(mixer->id_elems[unitid]);
 			snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR,
 						    cval->control << 8,
 						    samplerate_id);
 			snd_usb_mixer_notify_id(mixer, unitid);
+			break;
 		}
-		break;
 	}
 }
 
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index a893f5a..5e67b99 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -39,6 +39,8 @@
 #define SUBSTREAM_FLAG_DATA_EP_STARTED	0
 #define SUBSTREAM_FLAG_SYNC_EP_STARTED	1
 
+#define MAX_SETALT_TIMEOUT_MS 1000
+
 /* return the estimated delay based on USB frame counters */
 snd_pcm_uframes_t snd_usb_pcm_delay(struct snd_usb_substream *subs,
 				    unsigned int rate)
@@ -413,10 +415,14 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
 		ep = 0x81;
 		ifnum = 2;
 		goto add_sync_ep_from_ifnum;
+	case USB_ID(0x1397, 0x0001): /* Behringer UFX1604 */
 	case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */
 		ep = 0x81;
 		ifnum = 1;
 		goto add_sync_ep_from_ifnum;
+	case USB_ID(0x0582, 0x01d8): /* BOSS Katana */
+		/* BOSS Katana amplifiers do not need quirks */
+		return 0;
 	}
 
 	if (attr == USB_ENDPOINT_SYNC_ASYNC &&
@@ -526,6 +532,7 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
 	}
 	ep = get_endpoint(alts, 1)->bEndpointAddress;
 	if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
+	    get_endpoint(alts, 0)->bSynchAddress != 0 &&
 	    ((is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
 	     (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
 		dev_err(&dev->dev,
@@ -581,7 +588,8 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
 	/* close the old interface */
 	if (subs->interface >= 0 && subs->interface != fmt->iface) {
 		if (!subs->stream->chip->keep_iface) {
-			err = usb_set_interface(subs->dev, subs->interface, 0);
+			err = usb_set_interface_timeout(subs->dev,
+				subs->interface, 0, MAX_SETALT_TIMEOUT_MS);
 			if (err < 0) {
 				dev_err(&dev->dev,
 					"%d:%d: return to setting 0 failed (%d)\n",
@@ -599,7 +607,8 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
 		if (err < 0)
 			return -EIO;
 
-		err = usb_set_interface(dev, fmt->iface, fmt->altsetting);
+		err = usb_set_interface_timeout(dev, fmt->iface,
+				fmt->altsetting, MAX_SETALT_TIMEOUT_MS);
 		if (err < 0) {
 			dev_err(&dev->dev,
 				"%d:%d: usb_set_interface failed (%d)\n",
@@ -645,7 +654,8 @@ int snd_usb_enable_audio_stream(struct snd_usb_substream *subs,
 
 	if (!enable) {
 		if (subs->interface >= 0) {
-			usb_set_interface(subs->dev, subs->interface, 0);
+			usb_set_interface_timeout(subs->dev, subs->interface, 0,
+				MAX_SETALT_TIMEOUT_MS);
 			subs->altset_idx = 0;
 			subs->interface = -1;
 			subs->cur_audiofmt = NULL;
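
The new bSynchAddress != 0 test in set_sync_endpoint() tolerates devices that leave the sync-endpoint field of the data endpoint descriptor zeroed; previously such devices tripped the mismatch error even when the chosen sync endpoint was usable. A standalone sketch of just that predicate, with a simplified descriptor struct and helper name of my own (not the driver's):

#include <stdbool.h>
#include <stdio.h>

#define USB_DIR_IN 0x80

struct ep_desc {
	unsigned char bLength;
	unsigned char bEndpointAddress;
	unsigned char bSynchAddress;	/* 0: device left it unspecified */
};

/* Only report a mismatch when the device actually filled in the field. */
static bool sync_addr_mismatch(const struct ep_desc *data_ep,
			       unsigned char sync_ep, bool is_playback)
{
	if (data_ep->bSynchAddress == 0)
		return false;
	if (is_playback)
		return sync_ep !=
		       (unsigned char)(data_ep->bSynchAddress | USB_DIR_IN);
	return sync_ep !=
	       (unsigned char)(data_ep->bSynchAddress & ~USB_DIR_IN);
}

int main(void)
{
	struct ep_desc d = { 9, 0x01, 0x00 };	/* zeroed bSynchAddress */

	printf("mismatch: %d\n", sync_addr_mismatch(&d, 0x81, true)); /* 0 */
	return 0;
}
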
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index d71e019..60d0009 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1449,6 +1449,8 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
 	case 0x152a:  /* Thesycon devices */
 	case 0x25ce:  /* Mytek devices */
 	case 0x2ab6:  /* T+A devices */
+	case 0x3842:  /* EVGA */
+	case 0xc502:  /* HiBy devices */
 		if (fp->dsd_raw)
 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
 		break;
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index fcaf006..be7aebf 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -238,7 +238,7 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
 
 	fd = get_fd_by_id(id);
 	if (fd < 0) {
-		p_err("can't get prog by id (%u): %s", id, strerror(errno));
+		p_err("can't open object by id (%u): %s", id, strerror(errno));
 		return -1;
 	}
 
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index bbba0d6..4f9611a 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -381,7 +381,9 @@ static int do_show(int argc, char **argv)
 		if (fd < 0)
 			return -1;
 
-		return show_prog(fd);
+		err = show_prog(fd);
+		close(fd);
+		return err;
 	}
 
 	if (argc)
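
The do_show() change is the usual capture-release-return shape: returning show_prog(fd) directly leaked one descriptor per query. The same pattern in a generic sketch (use_fd() and show_one() are illustrative stand-ins, not bpftool code):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int use_fd(int fd)
{
	/* stand-in for show_prog(): query some property of the object */
	return (int)lseek(fd, 0, SEEK_END) >= 0 ? 0 : -1;
}

static int show_one(const char *path)
{
	int fd = open(path, O_RDONLY);
	int err;

	if (fd < 0)
		return -1;

	err = use_fd(fd);	/* capture the result ... */
	close(fd);		/* ... release the descriptor ... */
	return err;		/* ... then return */
}

int main(void)
{
	printf("show_one: %d\n", show_one("/etc/hostname"));
	return 0;
}
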
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index d7e06fe..ef8a82f 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -809,7 +809,7 @@ kvp_get_ip_info(int family, char *if_name, int op,
 	int sn_offset = 0;
 	int error = 0;
 	char *buffer;
-	struct hv_kvp_ipaddr_value *ip_buffer;
+	struct hv_kvp_ipaddr_value *ip_buffer = NULL;
 	char cidr_mask[5]; /* /xyz */
 	int weight;
 	int i;
@@ -1386,6 +1386,8 @@ int main(int argc, char *argv[])
 			daemonize = 0;
 			break;
 		case 'h':
+			print_usage(argv);
+			exit(0);
 		default:
 			print_usage(argv);
 			exit(EXIT_FAILURE);
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index b1330017..c2bb8a3 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -229,6 +229,8 @@ int main(int argc, char *argv[])
 			daemonize = 0;
 			break;
 		case 'h':
+			print_usage(argv);
+			exit(0);
 		default:
 			print_usage(argv);
 			exit(EXIT_FAILURE);
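
Both Hyper-V daemons previously let 'h' fall through to the default case, so asking for help exited with EXIT_FAILURE. The two hunks above give -h its own success path; the shape, reduced to a self-contained getopt() loop with illustrative options:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void print_usage(char *argv[])
{
	fprintf(stderr, "usage: %s [-n] [-h]\n", argv[0]);
}

int main(int argc, char *argv[])
{
	int opt, daemonize = 1;

	while ((opt = getopt(argc, argv, "nh")) != -1) {
		switch (opt) {
		case 'n':
			daemonize = 0;
			break;
		case 'h':		/* help requested: succeed */
			print_usage(argv);
			exit(0);
		default:		/* bad option: fail */
			print_usage(argv);
			exit(EXIT_FAILURE);
		}
	}
	printf("daemonize=%d\n", daemonize);
	return 0;
}
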
diff --git a/tools/hv/lsvmbus b/tools/hv/lsvmbus
index 55e7374..099f2c4 100644
--- a/tools/hv/lsvmbus
+++ b/tools/hv/lsvmbus
@@ -4,10 +4,10 @@
 import os
 from optparse import OptionParser
 
+help_msg = "print verbose messages. Try -vv, -vvv for more verbose messages"
 parser = OptionParser()
-parser.add_option("-v", "--verbose", dest="verbose",
-		   help="print verbose messages. Try -vv, -vvv for \
-			more verbose messages", action="count")
+parser.add_option(
+	"-v", "--verbose", dest="verbose", help=help_msg, action="count")
 
 (options, args) = parser.parse_args()
 
@@ -21,27 +21,28 @@
 	exit(-1)
 
 vmbus_dev_dict = {
-	'{0e0b6031-5213-4934-818b-38d90ced39db}' : '[Operating system shutdown]',
-	'{9527e630-d0ae-497b-adce-e80ab0175caf}' : '[Time Synchronization]',
-	'{57164f39-9115-4e78-ab55-382f3bd5422d}' : '[Heartbeat]',
-	'{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}' : '[Data Exchange]',
-	'{35fa2e29-ea23-4236-96ae-3a6ebacba440}' : '[Backup (volume checkpoint)]',
-	'{34d14be3-dee4-41c8-9ae7-6b174977c192}' : '[Guest services]',
-	'{525074dc-8985-46e2-8057-a307dc18a502}' : '[Dynamic Memory]',
-	'{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}' : 'Synthetic mouse',
-	'{f912ad6d-2b17-48ea-bd65-f927a61c7684}' : 'Synthetic keyboard',
-	'{da0a7802-e377-4aac-8e77-0558eb1073f8}' : 'Synthetic framebuffer adapter',
-	'{f8615163-df3e-46c5-913f-f2d2f965ed0e}' : 'Synthetic network adapter',
-	'{32412632-86cb-44a2-9b5c-50d1417354f5}' : 'Synthetic IDE Controller',
-	'{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}' : 'Synthetic SCSI Controller',
-	'{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}' : 'Synthetic fiber channel adapter',
-	'{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}' : 'Synthetic RDMA adapter',
-	'{44c4f61d-4444-4400-9d52-802e27ede19f}' : 'PCI Express pass-through',
-	'{276aacf4-ac15-426c-98dd-7521ad3f01fe}' : '[Reserved system device]',
-	'{f8e65716-3cb3-4a06-9a60-1889c5cccab5}' : '[Reserved system device]',
-	'{3375baf4-9e15-4b30-b765-67acb10d607b}' : '[Reserved system device]',
+	'{0e0b6031-5213-4934-818b-38d90ced39db}': '[Operating system shutdown]',
+	'{9527e630-d0ae-497b-adce-e80ab0175caf}': '[Time Synchronization]',
+	'{57164f39-9115-4e78-ab55-382f3bd5422d}': '[Heartbeat]',
+	'{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}': '[Data Exchange]',
+	'{35fa2e29-ea23-4236-96ae-3a6ebacba440}': '[Backup (volume checkpoint)]',
+	'{34d14be3-dee4-41c8-9ae7-6b174977c192}': '[Guest services]',
+	'{525074dc-8985-46e2-8057-a307dc18a502}': '[Dynamic Memory]',
+	'{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}': 'Synthetic mouse',
+	'{f912ad6d-2b17-48ea-bd65-f927a61c7684}': 'Synthetic keyboard',
+	'{da0a7802-e377-4aac-8e77-0558eb1073f8}': 'Synthetic framebuffer adapter',
+	'{f8615163-df3e-46c5-913f-f2d2f965ed0e}': 'Synthetic network adapter',
+	'{32412632-86cb-44a2-9b5c-50d1417354f5}': 'Synthetic IDE Controller',
+	'{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}': 'Synthetic SCSI Controller',
+	'{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}': 'Synthetic fiber channel adapter',
+	'{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}': 'Synthetic RDMA adapter',
+	'{44c4f61d-4444-4400-9d52-802e27ede19f}': 'PCI Express pass-through',
+	'{276aacf4-ac15-426c-98dd-7521ad3f01fe}': '[Reserved system device]',
+	'{f8e65716-3cb3-4a06-9a60-1889c5cccab5}': '[Reserved system device]',
+	'{3375baf4-9e15-4b30-b765-67acb10d607b}': '[Reserved system device]',
 }
 
+
 def get_vmbus_dev_attr(dev_name, attr):
 	try:
 		f = open('%s/%s/%s' % (vmbus_sys_path, dev_name, attr), 'r')
@@ -52,6 +53,7 @@
 
 	return lines
 
+
 class VMBus_Dev:
 	pass
 
@@ -66,12 +68,13 @@
 
 	chn_vp_mapping = get_vmbus_dev_attr(f, 'channel_vp_mapping')
 	chn_vp_mapping = [c.strip() for c in chn_vp_mapping]
-	chn_vp_mapping = sorted(chn_vp_mapping,
-		key = lambda c : int(c.split(':')[0]))
+	chn_vp_mapping = sorted(
+		chn_vp_mapping, key=lambda c: int(c.split(':')[0]))
 
-	chn_vp_mapping = ['\tRel_ID=%s, target_cpu=%s' %
-				(c.split(':')[0], c.split(':')[1])
-					for c in chn_vp_mapping]
+	chn_vp_mapping = [
+		'\tRel_ID=%s, target_cpu=%s' %
+		(c.split(':')[0], c.split(':')[1]) for c in chn_vp_mapping
+	]
 	d = VMBus_Dev()
 	d.sysfs_path = '%s/%s' % (vmbus_sys_path, f)
 	d.vmbus_id = vmbus_id
@@ -85,7 +88,7 @@
 	vmbus_dev_list.append(d)
 
 
-vmbus_dev_list  = sorted(vmbus_dev_list, key = lambda d : int(d.vmbus_id))
+vmbus_dev_list = sorted(vmbus_dev_list, key=lambda d: int(d.vmbus_id))
 
 format0 = '%2s: %s'
 format1 = '%2s: Class_ID = %s - %s\n%s'
@@ -95,9 +98,15 @@
 	if verbose == 0:
 		print(('VMBUS ID ' + format0) % (d.vmbus_id, d.dev_desc))
 	elif verbose == 1:
-		print (('VMBUS ID ' + format1) %	\
-			(d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping))
+		print(
+			('VMBUS ID ' + format1) %
+			(d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping)
+		)
 	else:
-		print (('VMBUS ID ' + format2) % \
-			(d.vmbus_id, d.class_id, d.dev_desc, \
-			d.device_id, d.sysfs_path, d.chn_vp_mapping))
+		print(
+			('VMBUS ID ' + format2) %
+			(
+				d.vmbus_id, d.class_id, d.dev_desc,
+				d.device_id, d.sysfs_path, d.chn_vp_mapping
+			)
+		)
diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
index 57aaeaf8..edba4d9 100644
--- a/tools/include/uapi/asm/bitsperlong.h
+++ b/tools/include/uapi/asm/bitsperlong.h
@@ -1,22 +1,22 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #if defined(__i386__) || defined(__x86_64__)
-#include "../../arch/x86/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/x86/include/uapi/asm/bitsperlong.h"
 #elif defined(__aarch64__)
-#include "../../arch/arm64/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/arm64/include/uapi/asm/bitsperlong.h"
 #elif defined(__powerpc__)
-#include "../../arch/powerpc/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/powerpc/include/uapi/asm/bitsperlong.h"
 #elif defined(__s390__)
-#include "../../arch/s390/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/s390/include/uapi/asm/bitsperlong.h"
 #elif defined(__sparc__)
-#include "../../arch/sparc/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/sparc/include/uapi/asm/bitsperlong.h"
 #elif defined(__mips__)
-#include "../../arch/mips/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/mips/include/uapi/asm/bitsperlong.h"
 #elif defined(__ia64__)
-#include "../../arch/ia64/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/ia64/include/uapi/asm/bitsperlong.h"
 #elif defined(__riscv)
-#include "../../arch/riscv/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/riscv/include/uapi/asm/bitsperlong.h"
 #elif defined(__alpha__)
-#include "../../arch/alpha/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/alpha/include/uapi/asm/bitsperlong.h"
 #else
 #include <asm-generic/bitsperlong.h>
 #endif
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index dd0b68d..482025b 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -75,6 +75,17 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 	return syscall(__NR_bpf, cmd, attr, size);
 }
 
+static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
+{
+	int fd;
+
+	do {
+		fd = sys_bpf(BPF_PROG_LOAD, attr, size);
+	} while (fd < 0 && errno == EAGAIN);
+
+	return fd;
+}
+
 int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
 {
 	__u32 name_len = create_attr->name ? strlen(create_attr->name) : 0;
@@ -218,7 +229,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
 	memcpy(attr.prog_name, load_attr->name,
 	       min(name_len, BPF_OBJ_NAME_LEN - 1));
 
-	fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+	fd = sys_bpf_prog_load(&attr, sizeof(attr));
 	if (fd >= 0 || !log_buf || !log_buf_sz)
 		return fd;
 
@@ -227,7 +238,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
 	attr.log_size = log_buf_sz;
 	attr.log_level = 1;
 	log_buf[0] = 0;
-	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+	return sys_bpf_prog_load(&attr, sizeof(attr));
 }
 
 int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
@@ -268,7 +279,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
 	attr.kern_version = kern_version;
 	attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0;
 
-	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+	return sys_bpf_prog_load(&attr, sizeof(attr));
 }
 
 int bpf_map_update_elem(int fd, const void *key, const void *value,
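
sys_bpf_prog_load() above retries BPF_PROG_LOAD only while errno is EAGAIN, which the kernel may return for transient conditions during program load; any other failure still surfaces immediately. The same loop against a stand-in for the syscall (flaky_load() is illustrative):

#include <errno.h>
#include <stdio.h>

static int attempts;

/* stand-in for sys_bpf(BPF_PROG_LOAD, ...): fails twice with EAGAIN */
static int flaky_load(void)
{
	if (attempts++ < 2) {
		errno = EAGAIN;
		return -1;
	}
	return 42;	/* the new program fd */
}

static int load_with_retry(void)
{
	int fd;

	do {
		fd = flaky_load();
	} while (fd < 0 && errno == EAGAIN);	/* retry only transient errors */

	return fd;
}

int main(void)
{
	printf("fd=%d after %d attempt(s)\n", load_with_retry(), attempts);
	return 0;
}
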
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 0b4e833..bca0c9e 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -55,15 +55,15 @@
 
 # Set plugin_dir to preferred global plugin location
 # If we install under $HOME directory we go under
-# $(HOME)/.traceevent/plugins
+# $(HOME)/.local/lib/traceevent/plugins
 #
 # We don't set PLUGIN_DIR in case we install under $HOME
 # directory, because by default the code looks under:
-# $(HOME)/.traceevent/plugins by default.
+# $(HOME)/.local/lib/traceevent/plugins by default.
 #
 ifeq ($(plugin_dir),)
 ifeq ($(prefix),$(HOME))
-override plugin_dir = $(HOME)/.traceevent/plugins
+override plugin_dir = $(HOME)/.local/lib/traceevent/plugins
 set_plugin_dir := 0
 else
 override plugin_dir = $(libdir)/traceevent/plugins
@@ -259,8 +259,8 @@
 
 define do_generate_dynamic_list_file
 	symbol_type=`$(NM) -u -D $1 | awk 'NF>1 {print $$1}' | \
-	xargs echo "U W w" | tr ' ' '\n' | sort -u | xargs echo`;\
-	if [ "$$symbol_type" = "U W w" ];then				\
+	xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
+	if [ "$$symbol_type" = "U W" ];then				\
 		(echo '{';						\
 		$(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
 		echo '};';						\
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 6ccfd13..382e476 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -254,10 +254,10 @@ static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid)
 		errno = ENOMEM;
 		return -1;
 	}
+	pevent->cmdlines = cmdlines;
 
 	cmdlines[pevent->cmdline_count].comm = strdup(comm);
 	if (!cmdlines[pevent->cmdline_count].comm) {
-		free(cmdlines);
 		errno = ENOMEM;
 		return -1;
 	}
@@ -268,7 +268,6 @@ static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid)
 		pevent->cmdline_count++;
 
 	qsort(cmdlines, pevent->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
-	pevent->cmdlines = cmdlines;
 
 	return 0;
 }
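
The add_new_comm() fix commits the realloc() result to pevent->cmdlines immediately: before, a later strdup() failure freed the just-reallocated array while pevent->cmdlines could still reference memory realloc() had already released, and the qsort() ran on the stale pointer. The safe ordering in isolation (struct table and table_add() are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct table {
	char **items;
	size_t count;
};

static int table_add(struct table *t, const char *s)
{
	char **items = realloc(t->items, (t->count + 1) * sizeof(*items));

	if (!items)
		return -1;
	t->items = items;	/* commit the realloc result at once */

	items[t->count] = strdup(s);
	if (!items[t->count])
		return -1;	/* t->items stays valid; nothing to free here */
	t->count++;
	return 0;
}

int main(void)
{
	struct table t = { NULL, 0 };

	table_add(&t, "hello");
	printf("%zu item(s), first = %s\n", t.count, t.items[0]);
	return 0;
}
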
diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c
index f17e250..52874eb 100644
--- a/tools/lib/traceevent/event-plugin.c
+++ b/tools/lib/traceevent/event-plugin.c
@@ -16,7 +16,7 @@
 #include "event-parse.h"
 #include "event-utils.h"
 
-#define LOCAL_PLUGIN_DIR ".traceevent/plugins"
+#define LOCAL_PLUGIN_DIR ".local/lib/traceevent/plugins/"
 
 static struct registered_plugin_options {
 	struct registered_plugin_options	*next;
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 8815823..20f67fc 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -35,7 +35,7 @@
 	    -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
 	    -I$(srctree)/tools/objtool/arch/$(ARCH)/include
 WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
-CFLAGS   += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
+CFLAGS   := -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
 LDFLAGS  += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
 
 # Allow old libelf to be used:
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 849b3be..510caed 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -837,7 +837,7 @@
     JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
   else
     ifneq (,$(wildcard /usr/sbin/alternatives))
-      JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
+      JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed -e 's%/jre/bin/java.%%g' -e 's%/bin/java.%%g')
     endif
   endif
   ifndef JDIR
diff --git a/tools/perf/arch/x86/util/unwind-libunwind.c b/tools/perf/arch/x86/util/unwind-libunwind.c
index 05920e3e..4735797 100644
--- a/tools/perf/arch/x86/util/unwind-libunwind.c
+++ b/tools/perf/arch/x86/util/unwind-libunwind.c
@@ -1,11 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include <errno.h>
+#include "../../util/debug.h"
 #ifndef REMOTE_UNWIND_LIBUNWIND
 #include <libunwind.h>
 #include "perf_regs.h"
 #include "../../util/unwind.h"
-#include "../../util/debug.h"
 #endif
 
 #ifdef HAVE_ARCH_X86_64_SUPPORT
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index fa56fde..91c0a44 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -378,8 +378,10 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
 
 	/* Allocate and initialize all memory on CPU#0: */
 	if (init_cpu0) {
-		orig_mask = bind_to_node(0);
-		bind_to_memnode(0);
+		int node = numa_node_of_cpu(0);
+
+		orig_mask = bind_to_node(node);
+		bind_to_memnode(node);
 	}
 
 	bytes = bytes0 + HPSIZE;
diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
index f42f228..1379551 100644
--- a/tools/perf/builtin-ftrace.c
+++ b/tools/perf/builtin-ftrace.c
@@ -174,7 +174,7 @@ static int set_tracing_cpumask(struct cpu_map *cpumap)
 	int last_cpu;
 
 	last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
-	mask_size = (last_cpu + 3) / 4 + 1;
+	mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
 	mask_size += last_cpu / 32; /* ',' is needed for every 32nd cpu */
 
 	cpumask = malloc(mask_size);
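
Buffer math for the ftrace cpumask string: CPUs 0..last_cpu need last_cpu / 4 + 1 hex digits plus a terminating NUL, i.e. last_cpu / 4 + 2 bytes before the comma allowance; the old (last_cpu + 3) / 4 + 1 came up one byte short whenever last_cpu is a multiple of 4. A quick check of both formulas:

#include <stdio.h>

int main(void)
{
	int last_cpu;

	for (last_cpu = 0; last_cpu <= 8; last_cpu += 4) {
		int old_sz = (last_cpu + 3) / 4 + 1;	/* no room for NUL */
		int new_sz = last_cpu / 4 + 2;		/* hex digits + NUL */

		printf("last_cpu=%d: old=%d new=%d\n",
		       last_cpu, old_sz, new_sz);
	}
	return 0;
}
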
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 7899625..6aae10f 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -3090,8 +3090,11 @@ int cmd_stat(int argc, const char **argv)
 			fprintf(output, "[ perf stat: executing run #%d ... ]\n",
 				run_idx + 1);
 
+		if (run_idx != 0)
+			perf_evlist__reset_prev_raw_counts(evsel_list);
+
 		status = run_perf_stat(argc, argv, run_idx);
-		if (forever && status != -1) {
+		if (forever && status != -1 && !interval) {
 			print_counters(NULL, argc, argv);
 			perf_stat__reset_stats();
 		}
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index a11cb00..80f8ae8 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -439,6 +439,9 @@ int main(int argc, const char **argv)
 
 	srandom(time(NULL));
 
+	/* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
+	config_exclusive_filename = getenv("PERF_CONFIG");
+
 	err = perf_config(perf_default_config, NULL);
 	if (err)
 		return err;
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 68c92bb..6b36b71 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -450,6 +450,7 @@ static struct fixed {
 	{ "inst_retired.any_p", "event=0xc0" },
 	{ "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
 	{ "cpu_clk_unhalted.thread", "event=0x3c" },
+	{ "cpu_clk_unhalted.core", "event=0x3c" },
 	{ "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
 	{ NULL, NULL},
 };
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 4ce276e..fe223fc 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -29,6 +29,10 @@
 	exit $err
 fi
 
+# Do not use the user's ~/.perfconfig file, as it may change the output
+# via trace.{show_timestamp,show_prefix,etc}
+export PERF_CONFIG=/dev/null
+
 trace_open_vfs_getname
 err=$?
 rm -f ${file}
diff --git a/tools/perf/trace/beauty/ioctl.c b/tools/perf/trace/beauty/ioctl.c
index 1be3b4c..82346ca 100644
--- a/tools/perf/trace/beauty/ioctl.c
+++ b/tools/perf/trace/beauty/ioctl.c
@@ -22,7 +22,7 @@
 static size_t ioctl__scnprintf_tty_cmd(int nr, int dir, char *bf, size_t size)
 {
 	static const char *ioctl_tty_cmd[] = {
-	"TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
+	[_IOC_NR(TCGETS)] = "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
 	"TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL", "TIOCSCTTY",
 	"TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI", "TIOCGWINSZ", "TIOCSWINSZ",
 	"TIOCMGET", "TIOCMBIS", "TIOCMBIC", "TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR",
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 383674f..f93846e 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -701,7 +701,10 @@ size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
 	unsigned char *bitmap;
 	int last_cpu = cpu_map__cpu(map, map->nr - 1);
 
-	bitmap = zalloc((last_cpu + 7) / 8);
+	if (buf == NULL)
+		return 0;
+
+	bitmap = zalloc(last_cpu / 8 + 1);
 	if (bitmap == NULL) {
 		buf[0] = '\0';
 		return 0;
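
Same class of off-by-one in cpu_map__snprint_mask(): a bitmap covering CPUs 0..last_cpu needs last_cpu + 1 bits, hence last_cpu / 8 + 1 bytes; the old (last_cpu + 7) / 8 dropped a byte whenever last_cpu is a multiple of 8, and for last_cpu == 0 it allocated nothing at all. Worked numbers:

#include <stdio.h>

int main(void)
{
	int last_cpu = 8;	/* nine CPUs, 0..8: needs 9 bits */

	printf("old: %d byte(s), new: %d byte(s)\n",
	       (last_cpu + 7) / 8,	/* 1: drops the 9th bit */
	       last_cpu / 8 + 1);	/* 2: correct */
	return 0;
}
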
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index a94bd68..3c0d74fc1 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1114,7 +1114,7 @@ static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 lev
 
 	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
 	if (sysfs__read_str(file, &cache->map, &len)) {
-		free(cache->map);
+		free(cache->size);
 		free(cache->type);
 		return -1;
 	}
@@ -2184,8 +2184,10 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
 	/* On s390 the socket_id number is not related to the numbers of cpus.
 	 * The socket_id number might be higher than the numbers of cpus.
 	 * This depends on the configuration.
+	 * AArch64 is the same.
 	 */
-	if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
+	if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
+			  || !strncmp(ph->env.arch, "aarch64", 7)))
 		do_core_id_test = false;
 
 	for (i = 0; i < (u32)cpu_nr; i++) {
@@ -3285,6 +3287,13 @@ int perf_session__read_header(struct perf_session *session)
 			   data->file.path);
 	}
 
+	if (f_header.attr_size == 0) {
+		pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
+		       "Was the 'perf record' command properly terminated?\n",
+		       data->file.path);
+		return -EINVAL;
+	}
+
 	nr_attrs = f_header.attrs.size / f_header.attr_size;
 	lseek(fd, f_header.attrs.offset, SEEK_SET);
 
@@ -3365,7 +3374,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
 	size += sizeof(struct perf_event_header);
 	size += ids * sizeof(u64);
 
-	ev = malloc(size);
+	ev = zalloc(size);
 
 	if (ev == NULL)
 		return -ENOMEM;
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index a186300..663e790 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -394,7 +394,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
 	size_t size;
 	u16 idr_size;
 	const char *sym;
-	uint32_t count;
+	uint64_t count;
 	int ret, csize, usize;
 	pid_t pid, tid;
 	struct {
@@ -417,7 +417,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
 		return -1;
 
 	filename = event->mmap2.filename;
-	size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%u.so",
+	size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
 			jd->dir,
 			pid,
 			count);
@@ -530,7 +530,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
 		return -1;
 
 	filename = event->mmap2.filename;
-	size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%"PRIu64,
+	size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
 	         jd->dir,
 	         pid,
 		 jr->move.code_index);
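
jit_repipe_code_load() stored a 64-bit code index in a u32 and printed it with %u, truncating indices past 4G, while the code_move path also gains here the ".so" suffix the load path already used, so both now generate the same file names. Printing a u64 portably takes PRIu64:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t count = 1ULL << 40;	/* would truncate through %u */

	printf("jitted-%d-%" PRIu64 ".so\n", 1234, count);
	return 0;
}
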
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index 19262f9..2344d86 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -230,14 +230,14 @@ static int detect_kbuild_dir(char **kbuild_dir)
 	const char *prefix_dir = "";
 	const char *suffix_dir = "";
 
+	/* _UTSNAME_LENGTH is 65 */
+	char release[128];
+
 	char *autoconf_path;
 
 	int err;
 
 	if (!test_dir) {
-		/* _UTSNAME_LENGTH is 65 */
-		char release[128];
-
 		err = fetch_kernel_version(NULL, release,
 					   sizeof(release));
 		if (err)
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index a0061e0..6917ba8 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -154,6 +154,15 @@ static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
 	evsel->prev_raw_counts = NULL;
 }
 
+static void perf_evsel__reset_prev_raw_counts(struct perf_evsel *evsel)
+{
+	if (evsel->prev_raw_counts) {
+		evsel->prev_raw_counts->aggr.val = 0;
+		evsel->prev_raw_counts->aggr.ena = 0;
+		evsel->prev_raw_counts->aggr.run = 0;
+	}
+}
+
 static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
 {
 	int ncpus = perf_evsel__nr_cpus(evsel);
@@ -204,6 +213,14 @@ void perf_evlist__reset_stats(struct perf_evlist *evlist)
 	}
 }
 
+void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel;
+
+	evlist__for_each_entry(evlist, evsel)
+		perf_evsel__reset_prev_raw_counts(evsel);
+}
+
 static void zero_per_pkg(struct perf_evsel *counter)
 {
 	if (counter->per_pkg_mask)
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 36efb98..e19abb1 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -158,6 +158,7 @@ void perf_stat__collect_metric_expr(struct perf_evlist *);
 int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
 void perf_evlist__free_stats(struct perf_evlist *evlist);
 void perf_evlist__reset_stats(struct perf_evlist *evlist);
+void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist);
 
 int perf_stat_process_counter(struct perf_stat_config *config,
 			      struct perf_evsel *counter);
diff --git a/tools/perf/util/xyarray.h b/tools/perf/util/xyarray.h
index 7ffe562..2627b03 100644
--- a/tools/perf/util/xyarray.h
+++ b/tools/perf/util/xyarray.h
@@ -2,6 +2,7 @@
 #ifndef _PERF_XYARRAY_H_
 #define _PERF_XYARRAY_H_ 1
 
+#include <linux/compiler.h>
 #include <sys/types.h>
 
 struct xyarray {
@@ -10,7 +11,7 @@ struct xyarray {
 	size_t entries;
 	size_t max_x;
 	size_t max_y;
-	char contents[];
+	char contents[] __aligned(8);
 };
 
 struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size);
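
__aligned(8) on the flexible contents[] array guarantees that entries read through u64 (and wider) pointers are 8-byte aligned even on 32-bit builds, where the struct's natural alignment alone would only give 4. A sketch with the underlying GCC/Clang attribute spelled out (struct blob is illustrative; error handling elided):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct blob {
	uint32_t len;
	/* force 8-byte alignment so 64-bit loads from data[] are safe */
	char data[] __attribute__((aligned(8)));
};

int main(void)
{
	struct blob *b = malloc(sizeof(*b) + 2 * sizeof(uint64_t));
	uint64_t *vals = (uint64_t *)b->data;

	printf("offsetof(data) = %zu\n", offsetof(struct blob, data)); /* 8 */
	vals[0] = 0xdeadbeefULL;	/* an aligned 64-bit store */
	printf("vals[0] = %llx\n", (unsigned long long)vals[0]);
	free(b);
	return 0;
}
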
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index fbb53c9..71cf7e77 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -4953,7 +4953,7 @@ int initialize_counters(int cpu_id)
 
 void allocate_output_buffer()
 {
-	output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
+	output_buffer = calloc(1, (1 + topo.num_cpus) * 2048);
 	outp = output_buffer;
 	if (outp == NULL)
 		err(-1, "calloc output buffer");
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
index 65bbe62..2aba622 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
@@ -546,7 +546,7 @@ void cmdline(int argc, char **argv)
 
 	progname = argv[0];
 
-	while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw",
+	while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw:",
 				long_options, &option_index)) != -1) {
 		switch (opt) {
 		case 'a':
@@ -1260,6 +1260,15 @@ void probe_dev_msr(void)
 		if (system("/sbin/modprobe msr > /dev/null 2>&1"))
 			err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
 }
+
+static void get_cpuid_or_exit(unsigned int leaf,
+			     unsigned int *eax, unsigned int *ebx,
+			     unsigned int *ecx, unsigned int *edx)
+{
+	if (!__get_cpuid(leaf, eax, ebx, ecx, edx))
+		errx(1, "Processor not supported\n");
+}
+
 /*
  * early_cpuid()
  * initialize turbo_is_enabled, has_hwp, has_epb
@@ -1267,15 +1276,10 @@ void probe_dev_msr(void)
  */
 void early_cpuid(void)
 {
-	unsigned int eax, ebx, ecx, edx, max_level;
+	unsigned int eax, ebx, ecx, edx;
 	unsigned int fms, family, model;
 
-	__get_cpuid(0, &max_level, &ebx, &ecx, &edx);
-
-	if (max_level < 6)
-		errx(1, "Processor not supported\n");
-
-	__get_cpuid(1, &fms, &ebx, &ecx, &edx);
+	get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
 	family = (fms >> 8) & 0xf;
 	model = (fms >> 4) & 0xf;
 	if (family == 6 || family == 0xf)
@@ -1289,7 +1293,7 @@ void early_cpuid(void)
 		bdx_highest_ratio = msr & 0xFF;
 	}
 
-	__get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
+	get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
 	turbo_is_enabled = (eax >> 1) & 1;
 	has_hwp = (eax >> 7) & 1;
 	has_epb = (ecx >> 3) & 1;
@@ -1307,7 +1311,7 @@ void parse_cpuid(void)
 
 	eax = ebx = ecx = edx = 0;
 
-	__get_cpuid(0, &max_level, &ebx, &ecx, &edx);
+	get_cpuid_or_exit(0, &max_level, &ebx, &ecx, &edx);
 
 	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
 		genuine_intel = 1;
@@ -1316,7 +1320,7 @@ void parse_cpuid(void)
 		fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
 			(char *)&ebx, (char *)&edx, (char *)&ecx);
 
-	__get_cpuid(1, &fms, &ebx, &ecx, &edx);
+	get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
 	family = (fms >> 8) & 0xf;
 	model = (fms >> 4) & 0xf;
 	stepping = fms & 0xf;
@@ -1341,7 +1345,7 @@ void parse_cpuid(void)
 		errx(1, "CPUID: no MSR");
 
 
-	__get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
+	get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
 	/* turbo_is_enabled already set */
 	/* has_hwp already set */
 	has_hwp_notify = eax & (1 << 8);
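
get_cpuid_or_exit() leans on the fact that GCC's __get_cpuid() returns 0 when the requested leaf exceeds the processor's maximum, which is why the separate max_level < 6 probe in early_cpuid() could go away: every call site now checks the leaf it actually needs. A compilable, x86-only sketch using <cpuid.h>:

#include <cpuid.h>
#include <err.h>
#include <stdio.h>

static void get_cpuid_or_exit(unsigned int leaf, unsigned int *eax,
			      unsigned int *ebx, unsigned int *ecx,
			      unsigned int *edx)
{
	/* __get_cpuid() returns 0 if 'leaf' exceeds the max supported leaf */
	if (!__get_cpuid(leaf, eax, ebx, ecx, edx))
		errx(1, "Processor not supported");
}

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	get_cpuid_or_exit(0, &eax, &ebx, &ecx, &edx);
	printf("max basic leaf: %u, vendor: %.4s%.4s%.4s\n", eax,
	       (char *)&ebx, (char *)&edx, (char *)&ecx);
	return 0;
}
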
diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h
index 33752e0..3de57cc 100644
--- a/tools/testing/nvdimm/test/nfit_test.h
+++ b/tools/testing/nvdimm/test/nfit_test.h
@@ -12,6 +12,7 @@
  */
 #ifndef __NFIT_TEST_H__
 #define __NFIT_TEST_H__
+#include <linux/acpi.h>
 #include <linux/list.h>
 #include <linux/uuid.h>
 #include <linux/ioport.h>
@@ -234,9 +235,6 @@ struct nd_intel_lss {
 	__u32 status;
 } __packed;
 
-union acpi_object;
-typedef void *acpi_handle;
-
 typedef struct nfit_test_resource *(*nfit_test_lookup_fn)(resource_size_t);
 typedef union acpi_object *(*nfit_test_evaluate_dsm_fn)(acpi_handle handle,
 		 const guid_t *guid, u64 rev, u64 func,
diff --git a/tools/testing/selftests/bpf/sendmsg6_prog.c b/tools/testing/selftests/bpf/sendmsg6_prog.c
index 5aeaa28..a680628 100644
--- a/tools/testing/selftests/bpf/sendmsg6_prog.c
+++ b/tools/testing/selftests/bpf/sendmsg6_prog.c
@@ -41,8 +41,7 @@ int sendmsg_v6_prog(struct bpf_sock_addr *ctx)
 	}
 
 	/* Rewrite destination. */
-	if ((ctx->user_ip6[0] & 0xFFFF) == bpf_htons(0xFACE) &&
-	     ctx->user_ip6[0] >> 16 == bpf_htons(0xB00C)) {
+	if (ctx->user_ip6[0] == bpf_htonl(0xFACEB00C)) {
 		ctx->user_ip6[0] = bpf_htonl(DST_REWRITE_IP6_0);
 		ctx->user_ip6[1] = bpf_htonl(DST_REWRITE_IP6_1);
 		ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_2);
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index b8ebe2f..e956712 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -13,6 +13,7 @@
 #include <bpf/bpf.h>
 
 #include "cgroup_helpers.h"
+#include "bpf_endian.h"
 #include "bpf_rlimit.h"
 #include "bpf_util.h"
 
@@ -231,7 +232,8 @@ static struct sock_test tests[] = {
 			/* if (ip == expected && port == expected) */
 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
 				    offsetof(struct bpf_sock, src_ip6[3])),
-			BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x01000000, 4),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+				    __bpf_constant_ntohl(0x00000001), 4),
 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
 				    offsetof(struct bpf_sock, src_port)),
 			BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2),
@@ -260,7 +262,8 @@ static struct sock_test tests[] = {
 			/* if (ip == expected && port == expected) */
 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
 				    offsetof(struct bpf_sock, src_ip4)),
-			BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x0100007F, 4),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+				    __bpf_constant_ntohl(0x7F000001), 4),
 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
 				    offsetof(struct bpf_sock, src_port)),
 			BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
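
The raw immediates 0x01000000 and 0x0100007F in these tests encoded ::1 and 127.0.0.1 as little-endian memory words, so the checks only passed on little-endian hosts; __bpf_constant_ntohl(0x00000001) / __bpf_constant_ntohl(0x7F000001) state the host-order value and byte-swap only where needed (for a constant, ntohl and htonl are the same swap). A quick demonstration with plain htonl():

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	/* 127.0.0.1 in host order; on little-endian x86 the in-memory
	 * (network-order) word reads back as 0x0100007F. */
	printf("htonl(0x7F000001) = 0x%08X\n", htonl(0x7F000001));
	return 0;
}
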
diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config
new file mode 100644
index 0000000..63ed533
--- /dev/null
+++ b/tools/testing/selftests/kvm/config
@@ -0,0 +1,3 @@
+CONFIG_KVM=y
+CONFIG_KVM_INTEL=y
+CONFIG_KVM_AMD=y
diff --git a/tools/testing/selftests/kvm/lib/x86.c b/tools/testing/selftests/kvm/lib/x86.c
index a3122f1..4d35eba 100644
--- a/tools/testing/selftests/kvm/lib/x86.c
+++ b/tools/testing/selftests/kvm/lib/x86.c
@@ -809,9 +809,11 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
         TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
                 r);
 
-	r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
-        TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
-                r);
+	if (kvm_check_cap(KVM_CAP_XCRS)) {
+		r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
+		TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
+			    r);
+	}
 
 	r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
         TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
@@ -858,9 +860,11 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
         TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
                 r);
 
-	r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
-        TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
-                r);
+	if (kvm_check_cap(KVM_CAP_XCRS)) {
+		r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
+		TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
+			    r);
+	}
 
 	r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
         TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
diff --git a/tools/testing/selftests/kvm/platform_info_test.c b/tools/testing/selftests/kvm/platform_info_test.c
index 3764e71..65db510 100644
--- a/tools/testing/selftests/kvm/platform_info_test.c
+++ b/tools/testing/selftests/kvm/platform_info_test.c
@@ -100,8 +100,8 @@ int main(int argc, char *argv[])
 	msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);
 	vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,
 		msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
-	test_msr_platform_info_disabled(vm);
 	test_msr_platform_info_enabled(vm);
+	test_msr_platform_info_disabled(vm);
 	vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
 
 	kvm_vm_free(vm);
diff --git a/tools/testing/selftests/net/fib_rule_tests.sh b/tools/testing/selftests/net/fib_rule_tests.sh
index 1ba0699..ba2d9fa 100755
--- a/tools/testing/selftests/net/fib_rule_tests.sh
+++ b/tools/testing/selftests/net/fib_rule_tests.sh
@@ -15,6 +15,7 @@
 SRC_IP6=2001:db8:1::3
 
 DEV_ADDR=192.51.100.1
+DEV_ADDR6=2001:db8:1::1
 DEV=dummy0
 
 log_test()
@@ -55,8 +56,8 @@
 
 	$IP link add dummy0 type dummy
 	$IP link set dev dummy0 up
-	$IP address add 192.51.100.1/24 dev dummy0
-	$IP -6 address add 2001:db8:1::1/64 dev dummy0
+	$IP address add $DEV_ADDR/24 dev dummy0
+	$IP -6 address add $DEV_ADDR6/64 dev dummy0
 
 	set +e
 }
diff --git a/tools/testing/selftests/net/forwarding/gre_multipath.sh b/tools/testing/selftests/net/forwarding/gre_multipath.sh
index cca2baa..a8d8e8b 100755
--- a/tools/testing/selftests/net/forwarding/gre_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/gre_multipath.sh
@@ -93,18 +93,10 @@
 	ip route add vrf v$ol1 192.0.2.16/28 \
 	   nexthop dev g1a \
 	   nexthop dev g1b
-
-	tc qdisc add dev $ul1 clsact
-	tc filter add dev $ul1 egress pref 111 prot ipv4 \
-	   flower dst_ip 192.0.2.66 action pass
-	tc filter add dev $ul1 egress pref 222 prot ipv4 \
-	   flower dst_ip 192.0.2.82 action pass
 }
 
 sw1_destroy()
 {
-	tc qdisc del dev $ul1 clsact
-
 	ip route del vrf v$ol1 192.0.2.16/28
 
 	ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146
@@ -139,10 +131,18 @@
 	ip route add vrf v$ol2 192.0.2.0/28 \
 	   nexthop dev g2a \
 	   nexthop dev g2b
+
+	tc qdisc add dev $ul2 clsact
+	tc filter add dev $ul2 ingress pref 111 prot 802.1Q \
+	   flower vlan_id 111 action pass
+	tc filter add dev $ul2 ingress pref 222 prot 802.1Q \
+	   flower vlan_id 222 action pass
 }
 
 sw2_destroy()
 {
+	tc qdisc del dev $ul2 clsact
+
 	ip route del vrf v$ol2 192.0.2.0/28
 
 	ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145
@@ -187,12 +187,16 @@
 	sw1_create
 	sw2_create
 	h2_create
+
+	forwarding_enable
 }
 
 cleanup()
 {
 	pre_cleanup
 
+	forwarding_restore
+
 	h2_destroy
 	sw2_destroy
 	sw1_destroy
@@ -211,15 +215,15 @@
 	   nexthop dev g1a weight $weight1 \
 	   nexthop dev g1b weight $weight2
 
-	local t0_111=$(tc_rule_stats_get $ul1 111 egress)
-	local t0_222=$(tc_rule_stats_get $ul1 222 egress)
+	local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
+	local t0_222=$(tc_rule_stats_get $ul2 222 ingress)
 
 	ip vrf exec v$h1 \
 	   $MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
 	       -d 1msec -t udp "sp=1024,dp=0-32768"
 
-	local t1_111=$(tc_rule_stats_get $ul1 111 egress)
-	local t1_222=$(tc_rule_stats_get $ul1 222 egress)
+	local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
+	local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
 
 	local d111=$((t1_111 - t0_111))
 	local d222=$((t1_222 - t0_222))
diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
index e279051..270c17a 100644
--- a/tools/testing/selftests/net/udpgso.c
+++ b/tools/testing/selftests/net/udpgso.c
@@ -90,12 +90,9 @@ struct testcase testcases_v4[] = {
 		.tfail = true,
 	},
 	{
-		/* send a single MSS: will fail with GSO, because the segment
-		 * logic in udp4_ufo_fragment demands a gso skb to be > MTU
-		 */
+		/* send a single MSS: will fall back to no GSO */
 		.tlen = CONST_MSS_V4,
 		.gso_len = CONST_MSS_V4,
-		.tfail = true,
 		.r_num_mss = 1,
 	},
 	{
@@ -140,10 +137,9 @@ struct testcase testcases_v4[] = {
 		.tfail = true,
 	},
 	{
-		/* send a single 1B MSS: will fail, see single MSS above */
+		/* send a single 1B MSS: will fall back to no GSO */
 		.tlen = 1,
 		.gso_len = 1,
-		.tfail = true,
 		.r_num_mss = 1,
 	},
 	{
@@ -197,12 +193,9 @@ struct testcase testcases_v6[] = {
 		.tfail = true,
 	},
 	{
-		/* send a single MSS: will fail with GSO, because the segment
-		 * logic in udp4_ufo_fragment demands a gso skb to be > MTU
-		 */
+		/* send a single MSS: will fall back to no GSO */
 		.tlen = CONST_MSS_V6,
 		.gso_len = CONST_MSS_V6,
-		.tfail = true,
 		.r_num_mss = 1,
 	},
 	{
@@ -247,10 +240,9 @@ struct testcase testcases_v6[] = {
 		.tfail = true,
 	},
 	{
-		/* send a single 1B MSS: will fail, see single MSS above */
+		/* send a single 1B MSS: will fall back to no GSO */
 		.tlen = 1,
 		.gso_len = 1,
-		.tfail = true,
 		.r_num_mss = 1,
 	},
 	{
diff --git a/usr/Makefile b/usr/Makefile
index 748f6a6..138c18c 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -11,6 +11,9 @@
 datafile_d_y = .$(datafile_y).d
 AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/$(datafile_y)"
 
+# clean rules do not have CONFIG_INITRAMFS_COMPRESSION.  So clean up after all
+# possible compression formats.
+clean-files += initramfs_data.cpio*
 
 # Generate builtin.o based on initramfs_data.o
 obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 02bac8a..d982650 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -338,6 +338,17 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
 	kvm_timer_schedule(vcpu);
+	/*
+	 * If we're about to block (most likely because we've just hit a
+	 * WFI), we need to sync back the state of the GIC CPU interface
+	 * so that we have the latest PMR and group enables. This ensures
+	 * that kvm_arch_vcpu_runnable has up-to-date data to decide
+	 * whether we have pending interrupts.
+	 */
+	preempt_disable();
+	kvm_vgic_vmcr_sync(vcpu);
+	preempt_enable();
+
 	kvm_vgic_v4_enable_doorbell(vcpu);
 }
 
diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
index 08443a1..3caee91 100644
--- a/virt/kvm/arm/mmio.c
+++ b/virt/kvm/arm/mmio.c
@@ -98,6 +98,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	unsigned int len;
 	int mask;
 
+	/* Detect an already handled MMIO return */
+	if (unlikely(!vcpu->mmio_needed))
+		return 0;
+
+	vcpu->mmio_needed = 0;
+
 	if (!run->mmio.is_write) {
 		len = run->mmio.len;
 		if (len > sizeof(unsigned long))
@@ -200,6 +206,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	run->mmio.is_write	= is_write;
 	run->mmio.phys_addr	= fault_ipa;
 	run->mmio.len		= len;
+	vcpu->mmio_needed	= 1;
 
 	if (!ret) {
 		/* We handled the access successfully in the kernel. */
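
io_mem_abort() now raises vcpu->mmio_needed and kvm_handle_mmio_return() consumes it exactly once, so a second KVM_RUN after an already-completed (or never-raised) MMIO exit becomes a no-op instead of decoding stale run->mmio state. The consume-once guard in a generic sketch (this struct vcpu is illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct vcpu {
	bool mmio_needed;	/* set when an MMIO exit is in flight */
	unsigned long mmio_data;
};

static int handle_mmio_return(struct vcpu *v)
{
	if (!v->mmio_needed)	/* already handled, or never raised */
		return 0;
	v->mmio_needed = false;	/* consume exactly once */

	printf("completing MMIO with data %lx\n", v->mmio_data);
	return 1;
}

int main(void)
{
	struct vcpu v = { .mmio_needed = true, .mmio_data = 0xab };

	printf("first:  %d\n", handle_mmio_return(&v));	/* does the work */
	printf("second: %d\n", handle_mmio_return(&v));	/* no-op */
	return 0;
}
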
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 8196e4f..cd75df2 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -19,6 +19,7 @@
 #include <linux/cpu.h>
 #include <linux/kvm_host.h>
 #include <kvm/arm_vgic.h>
+#include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 #include "vgic.h"
 
@@ -175,12 +176,18 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
 		irq->vcpu = NULL;
 		irq->target_vcpu = vcpu0;
 		kref_init(&irq->refcount);
-		if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+		switch (dist->vgic_model) {
+		case KVM_DEV_TYPE_ARM_VGIC_V2:
 			irq->targets = 0;
 			irq->group = 0;
-		} else {
+			break;
+		case KVM_DEV_TYPE_ARM_VGIC_V3:
 			irq->mpidr = 0;
 			irq->group = 1;
+			break;
+		default:
+			kfree(dist->spis);
+			return -EINVAL;
 		}
 	}
 	return 0;
@@ -220,7 +227,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 		irq->intid = i;
 		irq->vcpu = NULL;
 		irq->target_vcpu = vcpu;
-		irq->targets = 1U << vcpu->vcpu_id;
 		kref_init(&irq->refcount);
 		if (vgic_irq_is_sgi(i)) {
 			/* SGIs */
@@ -230,11 +236,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 			/* PPIs */
 			irq->config = VGIC_CONFIG_LEVEL;
 		}
-
-		if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
-			irq->group = 1;
-		else
-			irq->group = 0;
 	}
 
 	if (!irqchip_in_kernel(vcpu->kvm))
@@ -297,10 +298,19 @@ int vgic_init(struct kvm *kvm)
 
 		for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
 			struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
-			if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+			switch (dist->vgic_model) {
+			case KVM_DEV_TYPE_ARM_VGIC_V3:
 				irq->group = 1;
-			else
+				irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+				break;
+			case KVM_DEV_TYPE_ARM_VGIC_V2:
 				irq->group = 0;
+				irq->targets = 1U << idx;
+				break;
+			default:
+				ret = -EINVAL;
+				goto out;
+			}
 		}
 	}
 
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index ceeda7e..762f819 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -203,6 +203,12 @@ static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 	vgic_irq_set_phys_active(irq, true);
 }
 
+static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
+{
+	return (vgic_irq_is_sgi(irq->intid) &&
+		vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
+}
+
 void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
 			      gpa_t addr, unsigned int len,
 			      unsigned long val)
@@ -215,6 +221,12 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
+		/* GICD_ISPENDR0 SGI bits are WI */
+		if (is_vgic_v2_sgi(vcpu, irq)) {
+			vgic_put_irq(vcpu->kvm, irq);
+			continue;
+		}
+
 		spin_lock_irqsave(&irq->irq_lock, flags);
 		if (irq->hw)
 			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
@@ -262,6 +274,12 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
+		/* GICD_ICPENDR0 SGI bits are WI */
+		if (is_vgic_v2_sgi(vcpu, irq)) {
+			vgic_put_irq(vcpu->kvm, irq);
+			continue;
+		}
+
 		spin_lock_irqsave(&irq->irq_lock, flags);
 
 		if (irq->hw)
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 69b892a..91b14df 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -195,7 +195,10 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 		if (vgic_irq_is_sgi(irq->intid)) {
 			u32 src = ffs(irq->source);
 
-			BUG_ON(!src);
+			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
+					   irq->intid))
+				return;
+
 			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
 			irq->source &= ~(1 << (src - 1));
 			if (irq->source) {
@@ -495,10 +498,17 @@ void vgic_v2_load(struct kvm_vcpu *vcpu)
 		       kvm_vgic_global_state.vctrl_base + GICH_APR);
 }
 
-void vgic_v2_put(struct kvm_vcpu *vcpu)
+void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
 
 	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
+}
+
+void vgic_v2_put(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+
+	vgic_v2_vmcr_sync(vcpu);
 	cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
 }
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 3f2350a..8b958ed 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -179,7 +179,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
 			u32 src = ffs(irq->source);
 
-			BUG_ON(!src);
+			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
+					   irq->intid))
+				return;
+
 			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
 			irq->source &= ~(1 << (src - 1));
 			if (irq->source) {
@@ -674,12 +677,17 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
 		__vgic_v3_activate_traps(vcpu);
 }
 
-void vgic_v3_put(struct kvm_vcpu *vcpu)
+void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 
 	if (likely(cpu_if->vgic_sre))
 		cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
+}
+
+void vgic_v3_put(struct kvm_vcpu *vcpu)
+{
+	vgic_v3_vmcr_sync(vcpu);
 
 	kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
 
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index c5165e3..4040a33 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -244,6 +244,13 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	bool penda, pendb;
 	int ret;
 
+	/*
+	 * list_sort may call this function with the same element when
+	 * the list is fairly long.
+	 */
+	if (unlikely(irqa == irqb))
+		return 0;
+
 	spin_lock(&irqa->irq_lock);
 	spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
 
@@ -902,6 +909,17 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
 		vgic_v3_put(vcpu);
 }
 
+void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
+{
+	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+		return;
+
+	if (kvm_vgic_global_state.type == VGIC_V2)
+		vgic_v2_vmcr_sync(vcpu);
+	else
+		vgic_v3_vmcr_sync(vcpu);
+}
+
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index a9002471..d5e4542 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -204,6 +204,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
 void vgic_v2_init_lrs(void);
 void vgic_v2_load(struct kvm_vcpu *vcpu);
 void vgic_v2_put(struct kvm_vcpu *vcpu);
+void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
 
 void vgic_v2_save_state(struct kvm_vcpu *vcpu);
 void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
@@ -234,6 +235,7 @@ bool vgic_v3_check_base(struct kvm *kvm);
 
 void vgic_v3_load(struct kvm_vcpu *vcpu);
 void vgic_v3_put(struct kvm_vcpu *vcpu);
+void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
 
 bool vgic_has_its(struct kvm *kvm);
 int kvm_vgic_register_its_device(void);
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 9e65feb..b933669 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -40,7 +40,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
 	return 1;
 }
 
-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
 {
 	struct kvm_coalesced_mmio_ring *ring;
 	unsigned avail;
@@ -52,7 +52,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
 	 * there is always one unused entry in the buffer
 	 */
 	ring = dev->kvm->coalesced_mmio_ring;
-	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
+	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
 	if (avail == 0) {
 		/* full */
 		return 0;
@@ -67,24 +67,27 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
 {
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+	__u32 insert;
 
 	if (!coalesced_mmio_in_range(dev, addr, len))
 		return -EOPNOTSUPP;
 
 	spin_lock(&dev->kvm->ring_lock);
 
-	if (!coalesced_mmio_has_room(dev)) {
+	insert = READ_ONCE(ring->last);
+	if (!coalesced_mmio_has_room(dev, insert) ||
+	    insert >= KVM_COALESCED_MMIO_MAX) {
 		spin_unlock(&dev->kvm->ring_lock);
 		return -EOPNOTSUPP;
 	}
 
 	/* copy data in first free entry of the ring */
 
-	ring->coalesced_mmio[ring->last].phys_addr = addr;
-	ring->coalesced_mmio[ring->last].len = len;
-	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
+	ring->coalesced_mmio[insert].phys_addr = addr;
+	ring->coalesced_mmio[insert].len = len;
+	memcpy(ring->coalesced_mmio[insert].data, val, len);
 	smp_wmb();
-	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
 	spin_unlock(&dev->kvm->ring_lock);
 	return 0;
 }
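
The coalesced-MMIO ring's last index lives in a page userspace can also write, so the kernel must snapshot it once with READ_ONCE(), validate the snapshot (both for room and against the KVM_COALESCED_MMIO_MAX bound), and index only through the snapshot; re-reading ring->last between check and use would reopen the out-of-bounds write. A user-space sketch of that snapshot-validate-use discipline, with READ_ONCE() approximated by a volatile read:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_MAX 16

struct ring {
	volatile uint32_t first;
	volatile uint32_t last;	/* untrusted: shared with another party */
	uint64_t slot[RING_MAX];
};

static int ring_push(struct ring *r, uint64_t val)
{
	uint32_t insert = r->last;	/* single snapshot of the index */
	uint32_t avail = (r->first - insert - 1) % RING_MAX;

	if (avail == 0 || insert >= RING_MAX)
		return -1;	/* full, or index corrupted out of range */

	r->slot[insert] = val;	/* use only the validated snapshot */
	r->last = (insert + 1) % RING_MAX;
	return 0;
}

int main(void)
{
	struct ring r;

	memset(&r, 0, sizeof(r));
	r.last = 1000;	/* corrupted by the other side */
	printf("push: %d\n", ring_push(&r, 7));	/* rejected, no OOB write */
	return 0;
}
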
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index b20b751..757a17f 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -44,6 +44,12 @@
 
 static struct workqueue_struct *irqfd_cleanup_wq;
 
+bool __attribute__((weak))
+kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
+{
+	return true;
+}
+
 static void
 irqfd_inject(struct work_struct *work)
 {
@@ -297,6 +303,9 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 	if (!kvm_arch_intc_initialized(kvm))
 		return -EAGAIN;
 
+	if (!kvm_arch_irqfd_allowed(kvm, args))
+		return -EINVAL;
+
 	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
 	if (!irqfd)
 		return -ENOMEM;
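
kvm_arch_irqfd_allowed() is given a weak, always-true default so the generic irqfd path can call it unconditionally; an architecture that needs to veto an irqfd supplies a strong definition that wins at link time. The weak/strong mechanism in miniature (policy_allowed() is illustrative; an overriding strong definition would live in another translation unit):

#include <stdbool.h>
#include <stdio.h>

/* Weak default: any other translation unit that defines a strong
 * policy_allowed() overrides this one at link time. */
bool __attribute__((weak)) policy_allowed(int arg)
{
	(void)arg;
	return true;	/* permissive unless an override says otherwise */
}

int main(void)
{
	printf("allowed: %d\n", policy_allowed(0));
	return 0;
}
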