Merge "usb: phy: qusb: Remove usage of module params"
diff --git a/Makefile b/Makefile
index 47b7619..9ed6682 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 66
+SUBLEVEL = 68
 EXTRAVERSION =
 NAME = "People's Front"
 
diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
index 36efe41..9e33c41 100644
--- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
+++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
@@ -125,6 +125,9 @@
 	};
 
 	mdio-bus-mux {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
 		/* BIT(9) = 1 => external mdio */
 		mdio_ext: mdio@200 {
 			reg = <0x200>;
diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S
index cd350de..efcd400 100644
--- a/arch/arm/mach-davinci/sleep.S
+++ b/arch/arm/mach-davinci/sleep.S
@@ -37,6 +37,7 @@
 #define DEEPSLEEP_SLEEPENABLE_BIT	BIT(31)
 
 	.text
+	.arch	armv5te
 /*
  * Move DaVinci into deep sleep state
  *
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index abc6b05..ee0625d 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -29,14 +29,15 @@
 DTB_OBJS := $(shell find $(obj)/dts/ -name \*.dtb)
 
 # Add RTIC DTB to the DTB list if RTIC MPGen is enabled
+# Note: this is kept for compatibility with
+# BUILD_ARM64_APPENDED_DTB_IMAGE targets.
+# rtic_mp.dts is generated along with vmlinux when
+# MPGen is enabled (RTIC_MPGEN defined).
 ifdef RTIC_MPGEN
 DTB_OBJS += rtic_mp.dtb
 endif
 
 rtic_mp.dtb: vmlinux FORCE
-	$(RTIC_MPGEN) --objcopy="${OBJCOPY}" --objdump="${OBJDUMP}" \
-	--binpath="" --vmlinux="vmlinux" --config=${KCONFIG_CONFIG} \
-	--cc="${CC} ${KBUILD_AFLAGS}" --dts=rtic_mp.dts && \
 	$(DTC) -O dtb -o rtic_mp.dtb -b 0 $(DTC_FLAGS) rtic_mp.dts
 
 $(obj)/Image: vmlinux FORCE
diff --git a/arch/arm64/configs/vendor/bengal_defconfig b/arch/arm64/configs/vendor/bengal_defconfig
index 58f039b..d1fe28e 100644
--- a/arch/arm64/configs/vendor/bengal_defconfig
+++ b/arch/arm64/configs/vendor/bengal_defconfig
@@ -241,6 +241,7 @@
 CONFIG_DNS_RESOLVER=y
 CONFIG_QRTR=y
 CONFIG_QRTR_SMD=y
+CONFIG_SOCKEV_NLMCAST=y
 CONFIG_BT=y
 CONFIG_CFG80211=y
 CONFIG_RFKILL=y
@@ -264,6 +265,7 @@
 CONFIG_SCSI_UFS_QCOM=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
@@ -272,6 +274,7 @@
 CONFIG_DUMMY=y
 CONFIG_TUN=y
 CONFIG_RMNET=y
+CONFIG_PHYLIB=y
 CONFIG_PPP=y
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPP_DEFLATE=y
@@ -411,9 +414,11 @@
 CONFIG_RNDIS_IPA=y
 CONFIG_IPA_UT=y
 CONFIG_QCOM_GENI_SE=y
+CONFIG_SM_GCC_BENGAL=y
 CONFIG_HWSPINLOCK=y
 CONFIG_HWSPINLOCK_QCOM=y
 CONFIG_MAILBOX=y
+CONFIG_QCOM_APCS_IPC=y
 CONFIG_MSM_QMP=y
 CONFIG_IOMMU_IO_PGTABLE_FAST=y
 CONFIG_ARM_SMMU=y
@@ -422,10 +427,12 @@
 CONFIG_IOMMU_DEBUG_TRACKING=y
 CONFIG_IOMMU_TESTS=y
 CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
 CONFIG_RPMSG_QCOM_GLINK_SMEM=y
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QCOM_MDT_LOADER=y
 CONFIG_QPNP_PBS=y
 CONFIG_QCOM_QMI_HELPERS=y
 CONFIG_QCOM_SMEM=y
@@ -454,6 +461,7 @@
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QCOM_GLINK=y
 CONFIG_QCOM_GLINK_PKT=y
+CONFIG_QCOM_SMP2P_SLEEPSTATE=y
 CONFIG_MSM_CDSP_LOADER=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
@@ -473,11 +481,13 @@
 CONFIG_RAS=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_QCOM_QFPROM=y
 CONFIG_SLIMBUS=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_F2FS_FS=y
 CONFIG_F2FS_FS_SECURITY=y
+CONFIG_FS_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -485,6 +495,8 @@
 CONFIG_OVERLAY_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_EFIVAR_FS=y
 CONFIG_ECRYPT_FS=y
 CONFIG_ECRYPT_FS_MESSAGING=y
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index e83f690..ad2767a 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -106,6 +106,7 @@
 CONFIG_MEMORY_HOTPLUG_MOVABLE_NODE=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
 CONFIG_CMA_AREAS=16
 CONFIG_ZSMALLOC=y
 CONFIG_BALANCE_ANON_FILE_RECLAIM=y
@@ -680,10 +681,10 @@
 CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
 CONFIG_CORESIGHT_STM=y
 CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_CTI_SAVE_DISABLE=y
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_TGU=y
-CONFIG_CORESIGHT_LINK_LATE_DISABLE=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index 1e1049a..5e3ed0d 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -113,6 +113,7 @@
 CONFIG_CMA=y
 CONFIG_CMA_DEBUG=y
 CONFIG_CMA_DEBUGFS=y
+CONFIG_CMA_ALLOW_WRITE_DEBUGFS=y
 CONFIG_CMA_AREAS=16
 CONFIG_ZSMALLOC=y
 CONFIG_BALANCE_ANON_FILE_RECLAIM=y
@@ -565,6 +566,7 @@
 CONFIG_RPMSG_QCOM_GLINK_SPSS=y
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_QCOM_MEM_OFFLINE=y
+CONFIG_BUG_ON_HW_MEM_ONLINE_FAIL=y
 CONFIG_OVERRIDE_MEMORY_LIMIT=y
 CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_RUN_QUEUE_STATS=y
@@ -766,4 +768,3 @@
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_TGU=y
-CONFIG_CORESIGHT_LINK_LATE_DISABLE=y
diff --git a/arch/arm64/configs/vendor/lito-perf_defconfig b/arch/arm64/configs/vendor/lito-perf_defconfig
index b745cb6..db1a214 100644
--- a/arch/arm64/configs/vendor/lito-perf_defconfig
+++ b/arch/arm64/configs/vendor/lito-perf_defconfig
@@ -63,7 +63,6 @@
 # CONFIG_EFI is not set
 CONFIG_BUILD_ARM64_UNCOMPRESSED_KERNEL=y
 CONFIG_COMPAT=y
-CONFIG_PM_AUTOSLEEP=y
 CONFIG_PM_WAKELOCKS=y
 CONFIG_PM_WAKELOCKS_LIMIT=0
 # CONFIG_PM_WAKELOCKS_GC is not set
@@ -309,6 +308,7 @@
 CONFIG_PPPOL2TP=y
 CONFIG_PPP_ASYNC=y
 CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_RTL8152=y
 CONFIG_WCNSS_MEM_PRE_ALLOC=y
 CONFIG_CLD_LL_CORE=y
 CONFIG_CNSS_GENL=y
@@ -665,4 +665,3 @@
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_TGU=y
-CONFIG_CORESIGHT_LINK_LATE_DISABLE=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index a4302f6..4bcc713 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -63,7 +63,6 @@
 CONFIG_RANDOMIZE_BASE=y
 CONFIG_BUILD_ARM64_UNCOMPRESSED_KERNEL=y
 CONFIG_COMPAT=y
-CONFIG_PM_AUTOSLEEP=y
 CONFIG_PM_WAKELOCKS=y
 CONFIG_PM_WAKELOCKS_LIMIT=0
 # CONFIG_PM_WAKELOCKS_GC is not set
@@ -316,6 +315,7 @@
 CONFIG_PPPOL2TP=y
 CONFIG_PPP_ASYNC=y
 CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_RTL8152=y
 CONFIG_WCNSS_MEM_PRE_ALLOC=y
 CONFIG_CLD_LL_CORE=y
 CONFIG_CNSS_GENL=y
@@ -732,4 +732,3 @@
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_TGU=y
-CONFIG_CORESIGHT_LINK_LATE_DISABLE=y
diff --git a/arch/arm64/include/asm/dma-iommu.h b/arch/arm64/include/asm/dma-iommu.h
index 4cb442c..80397db 100644
--- a/arch/arm64/include/asm/dma-iommu.h
+++ b/arch/arm64/include/asm/dma-iommu.h
@@ -28,42 +28,8 @@ struct dma_iommu_mapping {
 };
 
 #ifdef CONFIG_ARM64_DMA_USE_IOMMU
-
-struct dma_iommu_mapping *
-__depr_arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base,
-				size_t size);
-
-void __depr_arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
-
-int __depr_arm_iommu_attach_device(struct device *dev,
-					struct dma_iommu_mapping *mapping);
-void __depr_arm_iommu_detach_device(struct device *dev);
-
 void arm_iommu_put_dma_cookie(struct iommu_domain *domain);
 #else  /* !CONFIG_ARM64_DMA_USE_IOMMU */
-
-static inline struct dma_iommu_mapping *
-__depr_arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base,
-				size_t size)
-{
-	return NULL;
-}
-
-static inline void
-__depr_arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
-{
-}
-
-static inline int __depr_arm_iommu_attach_device(struct device *dev,
-			struct dma_iommu_mapping *mapping)
-{
-	return -ENODEV;
-}
-
-static inline void __depr_arm_iommu_detach_device(struct device *dev)
-{
-}
-
 static inline void arm_iommu_put_dma_cookie(struct iommu_domain *domain) {}
 #endif	/* CONFIG_ARM64_DMA_USE_IOMMU */
 
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 7ed3208..f52a296 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -94,7 +94,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
 	((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
 
 #define alloc_screen_info(x...)		&screen_info
-#define free_screen_info(x...)
+
+static inline void free_screen_info(efi_system_table_t *sys_table_arg,
+				    struct screen_info *si)
+{
+}
 
 /* redeclare as 'hidden' so the compiler will generate relative references */
 extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
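
Replacing the empty function-like macro above with a typed static inline keeps the no-op behavior while restoring argument type checking. A minimal sketch of the difference, using hypothetical names rather than the kernel's:

#define free_info_macro(x...)			/* expands to nothing */

static inline void free_info_inline(int *si) { (void)si; }

int main(void)
{
	free_info_macro(undeclared_symbol);	/* vanishes before compilation */
	/* free_info_inline(undeclared_symbol);	   would fail to compile */
	return 0;
}
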
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index f64d4e3..c9a1d5f 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -448,8 +448,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				 PMD_TYPE_SECT)
 
 #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
-#define pud_sect(pud)		(0)
-#define pud_table(pud)		(1)
+static inline bool pud_sect(pud_t pud) { return false; }
+static inline bool pud_table(pud_t pud) { return true; }
 #else
 #define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
 				 PUD_TYPE_SECT)
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 57e9622..7eff8af 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
 	if (offset < -SZ_128M || offset >= SZ_128M) {
 #ifdef CONFIG_ARM64_MODULE_PLTS
-		struct plt_entry trampoline;
+		struct plt_entry trampoline, *dst;
 		struct module *mod;
 
 		/*
@@ -104,24 +104,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 		 * is added in the future, but for now, the pr_err() below
 		 * deals with a theoretical issue only.
 		 */
+		dst = mod->arch.ftrace_trampoline;
 		trampoline = get_plt_entry(addr);
-		if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-				       &trampoline)) {
-			if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-					       &(struct plt_entry){})) {
+		if (!plt_entries_equal(dst, &trampoline)) {
+			if (!plt_entries_equal(dst, &(struct plt_entry){})) {
 				pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
 				return -EINVAL;
 			}
 
 			/* point the trampoline to our ftrace entry point */
 			module_disable_ro(mod);
-			*mod->arch.ftrace_trampoline = trampoline;
+			*dst = trampoline;
 			module_enable_ro(mod, true);
 
-			/* update trampoline before patching in the branch */
-			smp_wmb();
+			/*
+			 * Ensure updated trampoline is visible to instruction
+			 * fetch before we patch in the branch.
+			 */
+			__flush_icache_range((unsigned long)&dst[0],
+					     (unsigned long)&dst[1]);
 		}
-		addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
+		addr = (unsigned long)dst;
 #else /* CONFIG_ARM64_MODULE_PLTS */
 		return -EINVAL;
 #endif /* CONFIG_ARM64_MODULE_PLTS */
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 933adbc..0311fe5 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -11,6 +11,7 @@
 
 #include <linux/export.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
@@ -32,6 +33,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
 		return 0;
 	}
 }
+NOKPROBE_SYMBOL(save_return_addr);
 
 void *return_address(unsigned int level)
 {
@@ -55,3 +57,4 @@ void *return_address(unsigned int level)
 		return NULL;
 }
 EXPORT_SYMBOL_GPL(return_address);
+NOKPROBE_SYMBOL(return_address);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 4989f7e..bb482ec 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -18,6 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
@@ -85,6 +86,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 
 	return 0;
 }
+NOKPROBE_SYMBOL(unwind_frame);
 
 void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
 		     int (*fn)(struct stackframe *, void *), void *data)
@@ -99,6 +101,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
 			break;
 	}
 }
+NOKPROBE_SYMBOL(walk_stackframe);
 
 #ifdef CONFIG_STACKTRACE
 struct stack_trace_data {
diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c
index 7a5173e..4c2e96e 100644
--- a/arch/arm64/kvm/regmap.c
+++ b/arch/arm64/kvm/regmap.c
@@ -189,13 +189,18 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
 	switch (spsr_idx) {
 	case KVM_SPSR_SVC:
 		write_sysreg_el1(v, spsr);
+		break;
 	case KVM_SPSR_ABT:
 		write_sysreg(v, spsr_abt);
+		break;
 	case KVM_SPSR_UND:
 		write_sysreg(v, spsr_und);
+		break;
 	case KVM_SPSR_IRQ:
 		write_sysreg(v, spsr_irq);
+		break;
 	case KVM_SPSR_FIQ:
 		write_sysreg(v, spsr_fiq);
+		break;
 	}
 }
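
The added break statements stop the switch from falling through: previously a write aimed at the SVC bank also clobbered every banked SPSR listed after it. The bug class in miniature (hypothetical, not kernel code):

#include <stdio.h>

int main(void)
{
	int bank = 0;			/* request: write bank 0 only */

	switch (bank) {
	case 0:
		puts("write bank 0");	/* missing break: falls through */
	case 1:
		puts("write bank 1");	/* executed as well -- the bug */
		break;
	}
	return 0;
}
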
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index d12667d..430ea0e 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -1151,179 +1151,6 @@ static void arm_iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
 	set_dma_ops(dev, mapping.ops);
 }
 
-/**
- * DEPRECATED
- * arm_iommu_create_mapping
- * @bus: pointer to the bus holding the client device (for IOMMU calls)
- * @base: start address of the valid IO address space
- * @size: maximum size of the valid IO address space
- *
- * Creates a mapping structure which holds information about used/unused
- * IO address ranges, which is required to perform memory allocation and
- * mapping with IOMMU aware functions.
- *
- * Clients may use iommu_domain_set_attr() to set additional flags prior
- * to calling arm_iommu_attach_device() to complete initialization.
- */
-struct dma_iommu_mapping *
-__depr_arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base,
-				size_t size)
-{
-	unsigned int bits = size >> PAGE_SHIFT;
-	struct dma_iommu_mapping *mapping;
-
-	if (!bits)
-		return ERR_PTR(-EINVAL);
-
-	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
-	if (!mapping)
-		return ERR_PTR(-ENOMEM);
-
-	mapping->base = base;
-	mapping->bits = bits;
-
-	mapping->domain = iommu_domain_alloc(bus);
-	if (!mapping->domain)
-		goto err_domain_alloc;
-
-	mapping->init = false;
-	return mapping;
-
-err_domain_alloc:
-	kfree(mapping);
-	return ERR_PTR(-ENOMEM);
-}
-EXPORT_SYMBOL(__depr_arm_iommu_create_mapping);
-
-/*
- * DEPRECATED
- * arm_iommu_release_mapping
- * @mapping: allocted via arm_iommu_create_mapping()
- *
- * Frees all resources associated with the iommu mapping.
- * The device associated with this mapping must be in the 'detached' state
- */
-void __depr_arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
-{
-	if (!mapping)
-		return;
-
-	if (mapping->domain)
-		iommu_domain_free(mapping->domain);
-
-	kfree(mapping);
-}
-EXPORT_SYMBOL(__depr_arm_iommu_release_mapping);
-
-/**
- * DEPRECATED
- * arm_iommu_attach_device
- * @dev: valid struct device pointer
- * @mapping: io address space mapping structure (returned from
- *	arm_iommu_create_mapping)
- *
- * Attaches specified io address space mapping to the provided device,
- * this replaces the dma operations (dma_map_ops pointer) with the
- * IOMMU aware version.
- *
- * Only configures dma_ops for a single device in the iommu_group.
- */
-int __depr_arm_iommu_attach_device(struct device *dev,
-			    struct dma_iommu_mapping *mapping)
-{
-	int err;
-	struct iommu_domain *domain;
-	struct iommu_group *group;
-
-	if (!dev || !mapping) {
-		pr_err("%s: Error input is NULL\n", __func__);
-		return -EINVAL;
-	}
-
-	group = dev->iommu_group;
-	if (!group) {
-		dev_err(dev, "No iommu associated with device\n");
-		return -EINVAL;
-	}
-
-	domain = iommu_get_domain_for_dev(dev);
-	if (domain) {
-		int dynamic = 0;
-
-		iommu_domain_get_attr(domain, DOMAIN_ATTR_DYNAMIC, &dynamic);
-
-		if ((domain->type == IOMMU_DOMAIN_DMA) && dynamic) {
-			dev_warn(dev, "Deprecated API %s in use! Continuing anyway\n",
-				__func__);
-		} else {
-			dev_err(dev, "Device already attached to other iommu_domain\n");
-			return -EINVAL;
-		}
-	}
-
-	err = iommu_attach_group(mapping->domain, group);
-	if (err) {
-		dev_err(dev, "iommu_attach_group failed\n");
-		return err;
-	}
-
-	err = arm_iommu_get_dma_cookie(dev, mapping);
-	if (err) {
-		dev_err(dev, "arm_iommu_get_dma_cookie failed\n");
-		iommu_detach_group(domain, group);
-		return err;
-	}
-
-	dev->archdata.mapping = mapping;
-	set_dma_ops(dev, mapping->ops);
-
-	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
-	return 0;
-}
-EXPORT_SYMBOL(__depr_arm_iommu_attach_device);
-
-/**
- * DEPRECATED
- * arm_iommu_detach_device
- * @dev: valid struct device pointer
- *
- * Detaches the provided device from a previously attached map.
- * This voids the dma operations (dma_map_ops pointer)
- */
-void __depr_arm_iommu_detach_device(struct device *dev)
-{
-	struct iommu_domain *domain;
-	int s1_bypass = 0;
-
-	if (!dev->iommu_group) {
-		dev_err(dev, "No iommu associated with device\n");
-		return;
-	}
-
-	domain = iommu_get_domain_for_dev(dev);
-	if (!domain) {
-		dev_warn(dev, "Not attached\n");
-		return;
-	}
-
-	iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
-
-	/*
-	 * ION defers dma_unmap calls. Ensure they have all completed prior to
-	 * setting dma_ops to NULL.
-	 */
-	if (msm_dma_unmap_all_for_dev(dev))
-		dev_warn(dev, "IOMMU detach with outstanding mappings\n");
-
-	iommu_detach_group(domain, dev->iommu_group);
-	dev->archdata.mapping = NULL;
-	if (!s1_bypass)
-		set_dma_ops(dev, NULL);
-
-	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
-}
-EXPORT_SYMBOL(__depr_arm_iommu_detach_device);
-
 #else /*!CONFIG_ARM64_DMA_USE_IOMMU */
 
 static void arm_iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 578174a3..51cd66d 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -61,6 +61,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
 }
 
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+	return kvm_arch_vcpu_runnable(vcpu);
+}
+
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 {
 	return false;
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index dd6b05b..d911a8c 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -23,7 +23,7 @@ extern void __fstate_restore(struct task_struct *restore_from);
 
 static inline void __fstate_clean(struct pt_regs *regs)
 {
-	regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
+	regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
 }
 
 static inline void fstate_save(struct task_struct *task,
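
The |= made __fstate_clean() a no-op whenever FS was already dirty: OR-ing the cleaned value back into the original sstatus preserves any bits that were set, so the field could never be downgraded to clean. Worked through with assumed RISC-V encodings (SR_FS = 0x6000, clean = 0x4000, dirty = 0x6000):

#include <stdint.h>
#include <stdio.h>

#define SR_FS		0x6000UL
#define SR_FS_CLEAN	0x4000UL

int main(void)
{
	uint64_t sstatus = 0x6000UL;	/* FS = dirty */
	uint64_t buggy = sstatus | ((sstatus & ~SR_FS) | SR_FS_CLEAN);
	uint64_t fixed = (sstatus & ~SR_FS) | SR_FS_CLEAN;

	printf("buggy: 0x%llx\n", (unsigned long long)buggy);	/* 0x6000: still dirty */
	printf("fixed: 0x%llx\n", (unsigned long long)fixed);	/* 0x4000: clean */
	return 0;
}
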
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 41e3908..0d75329 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -176,6 +176,8 @@ static inline int devmem_is_allowed(unsigned long pfn)
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | \
 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
+#define ARCH_ZONE_DMA_BITS	31
+
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
 
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
index d9ff3b4..2569ffc 100644
--- a/arch/sh/kernel/hw_breakpoint.c
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -160,6 +160,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
 	switch (sh_type) {
 	case SH_BREAKPOINT_READ:
 		*gen_type = HW_BREAKPOINT_R;
+		break;
 	case SH_BREAKPOINT_WRITE:
 		*gen_type = HW_BREAKPOINT_W;
 		break;
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index c4428a1..2622c07 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -34,6 +34,14 @@ int memcmp(const void *s1, const void *s2, size_t len)
 	return diff;
 }
 
+/*
+ * Clang may lower `memcmp == 0` to `bcmp == 0`.
+ */
+int bcmp(const void *s1, const void *s2, size_t len)
+{
+	return memcmp(s1, s2, len);
+}
+
 int strcmp(const char *str1, const char *str2)
 {
 	const unsigned char *s1 = (const unsigned char *)str1;
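
The bcmp() definition matters because at -O2 and above Clang may rewrite a memcmp() call whose result is only tested against zero into a bcmp() call, and the freestanding boot code would then fail to link. A minimal sketch of the kind of call site affected:

#include <string.h>

/* Only the zero/non-zero result is used, so the compiler is free to
 * emit bcmp(a, b, len) == 0 in place of memcmp(a, b, len) == 0. */
static int blocks_equal(const void *a, const void *b, size_t len)
{
	return memcmp(a, b, len) == 0;
}

int main(void)
{
	char x[4] = "abc", y[4] = "abc";

	return blocks_equal(x, y, sizeof(x)) ? 0 : 1;
}
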
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2877e1fba..3245b95 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1113,6 +1113,7 @@ struct kvm_x86_ops {
 	int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
 			      uint32_t guest_irq, bool set);
 	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
+	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
 
 	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
 	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ea454d3..0f33f00 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5146,6 +5146,11 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
 		kvm_vcpu_wake_up(vcpu);
 }
 
+static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
 {
 	unsigned long flags;
@@ -7203,6 +7208,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 
 	.pmu_ops = &amd_pmu_ops,
 	.deliver_posted_interrupt = svm_deliver_avic_intr,
+	.dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
 	.update_pi_irte = svm_update_pi_irte,
 	.setup_mce = svm_setup_mce,
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4cf16378..2e310ea 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10411,6 +10411,11 @@ static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
 	return ((rvi & 0xf0) > (vppr & 0xf0));
 }
 
+static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+	return pi_test_on(vcpu_to_pi_desc(vcpu));
+}
+
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 {
 	if (!kvm_vcpu_apicv_active(vcpu))
@@ -14387,6 +14392,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
 	.sync_pir_to_irr = vmx_sync_pir_to_irr,
 	.deliver_posted_interrupt = vmx_deliver_posted_interrupt,
+	.dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
 
 	.set_tss_addr = vmx_set_tss_addr,
 	.set_identity_map_addr = vmx_set_identity_map_addr,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cea6568..e10a7a4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9336,6 +9336,22 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }
 
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+	if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
+		return true;
+
+	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+		kvm_test_request(KVM_REQ_SMI, vcpu) ||
+		 kvm_test_request(KVM_REQ_EVENT, vcpu))
+		return true;
+
+	if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu))
+		return true;
+
+	return false;
+}
+
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.preempted_in_kernel;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9d9765e..1bcb724 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -261,13 +261,14 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 
 	pmd = pmd_offset(pud, address);
 	pmd_k = pmd_offset(pud_k, address);
+
+	if (pmd_present(*pmd) != pmd_present(*pmd_k))
+		set_pmd(pmd, *pmd_k);
+
 	if (!pmd_present(*pmd_k))
 		return NULL;
-
-	if (!pmd_present(*pmd))
-		set_pmd(pmd, *pmd_k);
 	else
-		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
 
 	return pmd_k;
 }
@@ -287,17 +288,13 @@ void vmalloc_sync_all(void)
 		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
 			spinlock_t *pgt_lock;
-			pmd_t *ret;
 
 			/* the pgt_lock only for Xen */
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 
 			spin_lock(pgt_lock);
-			ret = vmalloc_sync_one(page_address(page), address);
+			vmalloc_sync_one(page_address(page), address);
 			spin_unlock(pgt_lock);
-
-			if (!ret)
-				break;
 		}
 		spin_unlock(&pgd_lock);
 	}
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 3cf302b..8901a1f 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -6,6 +6,9 @@
 targets += $(purgatory-y)
 PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
 
+$(obj)/string.o: $(srctree)/arch/x86/boot/compressed/string.c FORCE
+	$(call if_changed_rule,cc_o_c)
+
 $(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
 	$(call if_changed_rule,cc_o_c)
 
@@ -17,11 +20,34 @@
 
 # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
 # in turn leaves some undefined symbols like __fentry__ in purgatory and not
-# sure how to relocate those. Like kexec-tools, use custom flags.
+# sure how to relocate those.
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_sha256.o		+= $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_purgatory.o	+= $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_string.o		+= $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_kexec-purgatory.o	+= $(CC_FLAGS_FTRACE)
+endif
 
-KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -Os -mcmodel=large
-KBUILD_CFLAGS += -m$(BITS)
-KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
+ifdef CONFIG_STACKPROTECTOR
+CFLAGS_REMOVE_sha256.o		+= -fstack-protector
+CFLAGS_REMOVE_purgatory.o	+= -fstack-protector
+CFLAGS_REMOVE_string.o		+= -fstack-protector
+CFLAGS_REMOVE_kexec-purgatory.o	+= -fstack-protector
+endif
+
+ifdef CONFIG_STACKPROTECTOR_STRONG
+CFLAGS_REMOVE_sha256.o		+= -fstack-protector-strong
+CFLAGS_REMOVE_purgatory.o	+= -fstack-protector-strong
+CFLAGS_REMOVE_string.o		+= -fstack-protector-strong
+CFLAGS_REMOVE_kexec-purgatory.o	+= -fstack-protector-strong
+endif
+
+ifdef CONFIG_RETPOLINE
+CFLAGS_REMOVE_sha256.o		+= $(RETPOLINE_CFLAGS)
+CFLAGS_REMOVE_purgatory.o	+= $(RETPOLINE_CFLAGS)
+CFLAGS_REMOVE_string.o		+= $(RETPOLINE_CFLAGS)
+CFLAGS_REMOVE_kexec-purgatory.o	+= $(RETPOLINE_CFLAGS)
+endif
 
 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
 		$(call if_changed,ld)
diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
index 025c34a..7971f7a 100644
--- a/arch/x86/purgatory/purgatory.c
+++ b/arch/x86/purgatory/purgatory.c
@@ -70,3 +70,9 @@ void purgatory(void)
 	}
 	copy_backup_region();
 }
+
+/*
+ * Defined in order to reuse memcpy() and memset() from
+ * arch/x86/boot/compressed/string.c
+ */
+void warn(const char *msg) {}
diff --git a/arch/x86/purgatory/string.c b/arch/x86/purgatory/string.c
deleted file mode 100644
index 795ca4f..0000000
--- a/arch/x86/purgatory/string.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Simple string functions.
- *
- * Copyright (C) 2014 Red Hat Inc.
- *
- * Author:
- *       Vivek Goyal <vgoyal@redhat.com>
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2.  See the file COPYING for more details.
- */
-
-#include <linux/types.h>
-
-#include "../boot/string.c"
-
-void *memcpy(void *dst, const void *src, size_t len)
-{
-	return __builtin_memcpy(dst, src, len);
-}
-
-void *memset(void *dst, int c, size_t len)
-{
-	return __builtin_memset(dst, c, len);
-}
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index a285fbd..15580e4 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -515,6 +515,7 @@ void cpu_reset(void)
 				      "add	%2, %2, %7\n\t"
 				      "addi	%0, %0, -1\n\t"
 				      "bnez	%0, 1b\n\t"
+				      "isync\n\t"
 				      /* Jump to identity mapping */
 				      "jx	%3\n"
 				      "2:\n\t"
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 43c2615..e11b5da 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -616,8 +616,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
 
 	/* Move to ITS specific data */
 	its = (struct acpi_iort_its_group *)node->node_data;
-	if (idx > its->its_count) {
-		dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
+	if (idx >= its->its_count) {
+		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
 			idx, its->its_count);
 		return -ENXIO;
 	}
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index c92c10d..5bece97 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -313,6 +313,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
 		hpriv->phys[port] = NULL;
 		rc = 0;
 		break;
+	case -EPROBE_DEFER:
+		/* Do not complain yet */
+		break;
 
 	default:
 		dev_err(dev,
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 173e6f2..eefda51 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
 	unsigned int ret;
 	struct rm_feature_desc *desc;
 	struct ata_taskfile tf;
-	static const char cdb[] = {  GPCMD_GET_CONFIGURATION,
+	static const char cdb[ATAPI_CDB_LEN] = {  GPCMD_GET_CONFIGURATION,
 			2,      /* only 1 feature descriptor requested */
 			0, 3,   /* 3, removable medium feature */
 			0, 0, 0,/* reserved */
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 81f9bd6..8ebe99b 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -5241,7 +5241,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
 	unsigned int key_len;
 	char secret[SHARED_SECRET_MAX]; /* 64 byte */
 	unsigned int resp_size;
-	SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
+	struct shash_desc *desc;
 	struct packet_info pi;
 	struct net_conf *nc;
 	int err, rv;
@@ -5254,6 +5254,13 @@ static int drbd_do_auth(struct drbd_connection *connection)
 	memcpy(secret, nc->shared_secret, key_len);
 	rcu_read_unlock();
 
+	desc = kmalloc(sizeof(struct shash_desc) +
+		       crypto_shash_descsize(connection->cram_hmac_tfm),
+		       GFP_KERNEL);
+	if (!desc) {
+		rv = -1;
+		goto fail;
+	}
 	desc->tfm = connection->cram_hmac_tfm;
 	desc->flags = 0;
 
@@ -5396,7 +5403,10 @@ static int drbd_do_auth(struct drbd_connection *connection)
 	kfree(peers_ch);
 	kfree(response);
 	kfree(right_response);
-	shash_desc_zero(desc);
+	if (desc) {
+		shash_desc_zero(desc);
+		kfree(desc);
+	}
 
 	return rv;
 }
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f1e63eb..cef8e00 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -886,7 +886,7 @@ static void loop_unprepare_queue(struct loop_device *lo)
 
 static int loop_kthread_worker_fn(void *worker_ptr)
 {
-	current->flags |= PF_LESS_THROTTLE;
+	current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO;
 	return kthread_worker_fn(worker_ptr);
 }
 
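Adding PF_MEMALLOC_NOIO makes every allocation the loop worker performs implicitly GFP_NOIO, so direct reclaim triggered by those allocations cannot recurse into the I/O path the worker itself is servicing. The scoped form of the same mechanism, as a sketch (not part of this patch):

#include <linux/sched/mm.h>

static void touch_backing_file(void)
{
	unsigned int noio_flags = memalloc_noio_save();

	/*
	 * Allocations in this region behave as GFP_NOIO, avoiding a
	 * reclaim -> writeback -> loop-worker deadlock.
	 */

	memalloc_noio_restore(noio_flags);
}
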
diff --git a/drivers/bus/mhi/controllers/mhi_arch_qcom.c b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
index cdb334b..a19b806 100644
--- a/drivers/bus/mhi/controllers/mhi_arch_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
@@ -31,8 +31,6 @@ struct arch_info {
 	void *boot_ipc_log;
 	void *tsync_ipc_log;
 	struct mhi_device *boot_dev;
-	struct mhi_link_info current_link_info;
-	struct work_struct bw_scale_work;
 	bool drv_connected;
 	struct notifier_block pm_notifier;
 	struct completion pm_completion;
@@ -346,7 +344,7 @@ static  int mhi_arch_pcie_scale_bw(struct mhi_controller *mhi_cntrl,
 				   struct pci_dev *pci_dev,
 				   struct mhi_link_info *link_info)
 {
-	int ret, scale;
+	int ret;
 
 	mhi_cntrl->lpm_disable(mhi_cntrl, mhi_cntrl->priv_data);
 	ret = msm_pcie_set_link_bandwidth(pci_dev, link_info->target_link_speed,
@@ -356,60 +354,22 @@ static  int mhi_arch_pcie_scale_bw(struct mhi_controller *mhi_cntrl,
 	if (ret)
 		return ret;
 
-	/* if we switch to low bw release bus scale voting */
-	scale = !(link_info->target_link_speed == PCI_EXP_LNKSTA_CLS_2_5GB);
-	mhi_arch_set_bus_request(mhi_cntrl, scale);
+	/* do a bus scale vote based on gen speeds */
+	mhi_arch_set_bus_request(mhi_cntrl, link_info->target_link_speed);
 
-	MHI_VERB("bw changed to speed:0x%x width:0x%x bus_scale:%d\n",
-		 link_info->target_link_speed, link_info->target_link_width,
-		 scale);
+	MHI_VERB("bw changed to speed:0x%x width:0x%x\n",
+		 link_info->target_link_speed, link_info->target_link_width);
 
 	return 0;
 }
 
-static void mhi_arch_pcie_bw_scale_work(struct work_struct *work)
+static int mhi_arch_bw_scale(struct mhi_controller *mhi_cntrl,
+			     struct mhi_link_info *link_info)
 {
-	struct arch_info *arch_info = container_of(work,
-						   struct arch_info,
-						   bw_scale_work);
-	struct mhi_dev *mhi_dev = arch_info->mhi_dev;
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
 	struct pci_dev *pci_dev = mhi_dev->pci_dev;
-	struct device *dev = &pci_dev->dev;
-	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
-	struct mhi_link_info mhi_link_info;
-	struct mhi_link_info *cur_info = &arch_info->current_link_info;
-	int ret;
 
-	mutex_lock(&mhi_cntrl->pm_mutex);
-	if (!mhi_dev->powered_on || MHI_IS_SUSPENDED(mhi_dev->suspend_mode))
-		goto exit_work;
-
-	/* copy the latest speed change */
-	write_lock_irq(&mhi_cntrl->pm_lock);
-	mhi_link_info = mhi_cntrl->mhi_link_info;
-	write_unlock_irq(&mhi_cntrl->pm_lock);
-
-	/* link is already set to current settings */
-	if (cur_info->target_link_speed == mhi_link_info.target_link_speed &&
-	    cur_info->target_link_width == mhi_link_info.target_link_width)
-		goto exit_work;
-
-	ret = mhi_arch_pcie_scale_bw(mhi_cntrl, pci_dev, &mhi_link_info);
-	if (ret)
-		goto exit_work;
-
-	*cur_info = mhi_link_info;
-
-exit_work:
-	mutex_unlock(&mhi_cntrl->pm_mutex);
-}
-
-static void mhi_arch_pcie_bw_scale_cb(struct mhi_controller *mhi_cntrl,
-				      struct mhi_dev *mhi_dev)
-{
-	struct arch_info *arch_info = mhi_dev->arch_info;
-
-	schedule_work(&arch_info->bw_scale_work);
+	return mhi_arch_pcie_scale_bw(mhi_cntrl, pci_dev, link_info);
 }
 
 static int mhi_bl_probe(struct mhi_device *mhi_device,
@@ -454,6 +414,7 @@ int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
 {
 	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
 	struct arch_info *arch_info = mhi_dev->arch_info;
+	struct mhi_link_info *cur_link_info;
 	char node[32];
 	int ret;
 	u16 linkstat;
@@ -462,7 +423,6 @@ int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
 		struct msm_pcie_register_event *reg_event;
 		struct pci_dev *root_port;
 		struct device_node *root_ofnode;
-		struct mhi_link_info *cur_link_info;
 
 		arch_info = devm_kzalloc(&mhi_dev->pci_dev->dev,
 					 sizeof(*arch_info), GFP_KERNEL);
@@ -566,28 +526,24 @@ int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
 				    mhi_dev->pci_dev, NULL, 0);
 		mhi_dev->pci_dev->no_d3hot = true;
 
-		INIT_WORK(&arch_info->bw_scale_work,
-			  mhi_arch_pcie_bw_scale_work);
-		mhi_dev->bw_scale = mhi_arch_pcie_bw_scale_cb;
-
-		/* store the current bw info */
-		ret = pcie_capability_read_word(mhi_dev->pci_dev,
-						PCI_EXP_LNKSTA, &linkstat);
-		if (ret)
-			return ret;
-
-		cur_link_info = &arch_info->current_link_info;
-		cur_link_info->target_link_speed =
-			linkstat & PCI_EXP_LNKSTA_CLS;
-		cur_link_info->target_link_width =
-			(linkstat & PCI_EXP_LNKSTA_NLW) >>
-			PCI_EXP_LNKSTA_NLW_SHIFT;
-		mhi_cntrl->mhi_link_info = *cur_link_info;
+		mhi_cntrl->bw_scale = mhi_arch_bw_scale;
 
 		mhi_driver_register(&mhi_bl_driver);
 	}
 
-	return mhi_arch_set_bus_request(mhi_cntrl, 1);
+	/* store the current bw info */
+	ret = pcie_capability_read_word(mhi_dev->pci_dev,
+					PCI_EXP_LNKSTA, &linkstat);
+	if (ret)
+		return ret;
+
+	cur_link_info = &mhi_cntrl->mhi_link_info;
+	cur_link_info->target_link_speed = linkstat & PCI_EXP_LNKSTA_CLS;
+	cur_link_info->target_link_width = (linkstat & PCI_EXP_LNKSTA_NLW) >>
+					    PCI_EXP_LNKSTA_NLW_SHIFT;
+
+	return mhi_arch_set_bus_request(mhi_cntrl,
+					cur_link_info->target_link_speed);
 }
 
 void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
@@ -598,13 +554,12 @@ void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
 static int mhi_arch_drv_suspend(struct mhi_controller *mhi_cntrl)
 {
 	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
-	struct arch_info *arch_info = mhi_dev->arch_info;
 	struct pci_dev *pci_dev = mhi_dev->pci_dev;
 	struct mhi_link_info link_info, *cur_link_info;
 	bool bw_switched = false;
 	int ret;
 
-	cur_link_info = &arch_info->current_link_info;
+	cur_link_info = &mhi_cntrl->mhi_link_info;
 	/* if link is not in gen 1 we need to switch to gen 1 */
 	if (cur_link_info->target_link_speed != PCI_EXP_LNKSTA_CLS_2_5GB) {
 		link_info.target_link_speed = PCI_EXP_LNKSTA_CLS_2_5GB;
@@ -630,9 +585,6 @@ static int mhi_arch_drv_suspend(struct mhi_controller *mhi_cntrl)
 		return ret;
 	}
 
-	if (bw_switched)
-		*cur_link_info = link_info;
-
 	return ret;
 }
 
@@ -689,17 +641,16 @@ static int __mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
 	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
 	struct arch_info *arch_info = mhi_dev->arch_info;
 	struct pci_dev *pci_dev = mhi_dev->pci_dev;
-	struct mhi_link_info *cur_info = &arch_info->current_link_info;
+	struct mhi_link_info *cur_info = &mhi_cntrl->mhi_link_info;
 	int ret;
 
 	MHI_LOG("Entered\n");
 
-	/* request bus scale voting if we're on Gen 2 or higher speed */
-	if (cur_info->target_link_speed != PCI_EXP_LNKSTA_CLS_2_5GB) {
-		ret = mhi_arch_set_bus_request(mhi_cntrl, 1);
-		if (ret)
-			MHI_LOG("Could not set bus frequency, ret:%d\n", ret);
-	}
+	/* request bus scale voting based on higher gen speed */
+	ret = mhi_arch_set_bus_request(mhi_cntrl,
+				       cur_info->target_link_speed);
+	if (ret)
+		MHI_LOG("Could not set bus frequency, ret:%d\n", ret);
 
 	ret = msm_pcie_pm_control(MSM_PCIE_RESUME, mhi_cntrl->bus, pci_dev,
 				  NULL, 0);
@@ -733,10 +684,8 @@ static int __mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
 int mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
 {
 	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
-	struct arch_info *arch_info = mhi_dev->arch_info;
 	struct pci_dev *pci_dev = mhi_dev->pci_dev;
-	struct mhi_link_info *cur_info = &arch_info->current_link_info;
-	struct mhi_link_info *updated_info = &mhi_cntrl->mhi_link_info;
+	struct mhi_link_info *cur_info = &mhi_cntrl->mhi_link_info;
 	int ret = 0;
 
 	MHI_LOG("Entered\n");
@@ -748,6 +697,19 @@ int mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
 	case MHI_FAST_LINK_OFF:
 		ret = msm_pcie_pm_control(MSM_PCIE_RESUME, mhi_cntrl->bus,
 					  pci_dev, NULL, 0);
+		if (ret ||
+		    cur_info->target_link_speed == PCI_EXP_LNKSTA_CLS_2_5GB)
+			break;
+
+		/*
+		 * BW request from device isn't for gen 1 link speed; if
+		 * honoring it fails, we can only print an error here.
+		 */
+		if (mhi_arch_pcie_scale_bw(mhi_cntrl, pci_dev, cur_info))
+			MHI_ERR(
+			"Failed to honor bw request: speed:0x%x width:0x%x\n",
+			cur_info->target_link_speed,
+			cur_info->target_link_width);
 		break;
 	case MHI_ACTIVE_STATE:
 	case MHI_FAST_LINK_ON:
@@ -759,14 +721,6 @@ int mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
 		return ret;
 	}
 
-	/* BW request from device doesn't match current link speed */
-	if (cur_info->target_link_speed != updated_info->target_link_speed ||
-	    cur_info->target_link_width != updated_info->target_link_width) {
-		ret = mhi_arch_pcie_scale_bw(mhi_cntrl, pci_dev, updated_info);
-		if (!ret)
-			*cur_info = *updated_info;
-	}
-
 	msm_pcie_l1ss_timeout_enable(pci_dev);
 
 	MHI_LOG("Exited\n");
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.c b/drivers/bus/mhi/controllers/mhi_qcom.c
index c7002ec..67e12b5 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_qcom.c
@@ -609,10 +609,6 @@ static void mhi_status_cb(struct mhi_controller *mhi_cntrl,
 		pm_runtime_mark_last_busy(dev);
 		pm_request_autosuspend(dev);
 		break;
-	case MHI_CB_BW_REQ:
-		if (mhi_dev->bw_scale)
-			mhi_dev->bw_scale(mhi_cntrl, mhi_dev);
-		break;
 	case MHI_CB_EE_MISSION_MODE:
 		/*
 		 * we need to force a suspend so device can switch to
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.h b/drivers/bus/mhi/controllers/mhi_qcom.h
index fdab799..6fbbac9 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.h
+++ b/drivers/bus/mhi/controllers/mhi_qcom.h
@@ -48,9 +48,6 @@ struct mhi_dev {
 	dma_addr_t iova_stop;
 	enum mhi_suspend_mode suspend_mode;
 
-	/* if set, soc support dynamic bw scaling */
-	void (*bw_scale)(struct mhi_controller *mhi_cntrl,
-			 struct mhi_dev *mhi_dev);
 	unsigned int lpm_disable_depth;
 	/* lock to toggle low power modes */
 	spinlock_t lpm_lock;
diff --git a/drivers/bus/mhi/core/mhi_dtr.c b/drivers/bus/mhi/core/mhi_dtr.c
index 73cf01e..db33e95 100644
--- a/drivers/bus/mhi/core/mhi_dtr.c
+++ b/drivers/bus/mhi/core/mhi_dtr.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
 
 #include <linux/debugfs.h>
 #include <linux/device.h>
@@ -164,6 +164,9 @@ static void mhi_dtr_dl_xfer_cb(struct mhi_device *mhi_dev,
 	if (dtr_msg->msg & CTRL_MSG_RI)
 		mhi_dev->tiocm |= TIOCM_RI;
 	spin_unlock_irq(res_lock);
+
+	/* Notify the update */
+	mhi_notify(mhi_dev, MHI_CB_DTR_SIGNAL);
 }
 
 static void mhi_dtr_ul_xfer_cb(struct mhi_device *mhi_dev,
diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c
index c76d3a4..16ac408 100644
--- a/drivers/bus/mhi/core/mhi_init.c
+++ b/drivers/bus/mhi/core/mhi_init.c
@@ -14,6 +14,14 @@
 #include <linux/mhi.h>
 #include "mhi_internal.h"
 
+const char * const mhi_log_level_str[MHI_MSG_LVL_MAX] = {
+	[MHI_MSG_LVL_VERBOSE] = "Verbose",
+	[MHI_MSG_LVL_INFO] = "Info",
+	[MHI_MSG_LVL_ERROR] = "Error",
+	[MHI_MSG_LVL_CRITICAL] = "Critical",
+	[MHI_MSG_LVL_MASK_ALL] = "Mask all",
+};
+
 const char * const mhi_ee_str[MHI_EE_MAX] = {
 	[MHI_EE_PBL] = "PBL",
 	[MHI_EE_SBL] = "SBL",
@@ -58,6 +66,7 @@ static const char * const mhi_pm_state_str[] = {
 	[MHI_PM_BIT_SYS_ERR_PROCESS] = "SYS_ERR Process",
 	[MHI_PM_BIT_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
 	[MHI_PM_BIT_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
+	[MHI_PM_BIT_SHUTDOWN_NO_ACCESS] = "SHUTDOWN No Access",
 };
 
 struct mhi_bus mhi_bus;
@@ -72,6 +81,38 @@ const char *to_mhi_pm_state_str(enum MHI_PM_STATE state)
 	return mhi_pm_state_str[index];
 }
 
+static ssize_t log_level_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			TO_MHI_LOG_LEVEL_STR(mhi_cntrl->log_lvl));
+}
+
+static ssize_t log_level_store(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf,
+			       size_t count)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	enum MHI_DEBUG_LEVEL log_level;
+
+	if (kstrtou32(buf, 0, &log_level) < 0)
+		return -EINVAL;
+
+	mhi_cntrl->log_lvl = log_level;
+
+	MHI_LOG("IPC log level changed to: %s\n",
+		TO_MHI_LOG_LEVEL_STR(log_level));
+
+	return count;
+}
+static DEVICE_ATTR_RW(log_level);
+
 static ssize_t bus_vote_show(struct device *dev,
 			     struct device_attribute *attr,
 			     char *buf)
@@ -130,27 +171,28 @@ static ssize_t device_vote_store(struct device *dev,
 }
 static DEVICE_ATTR_RW(device_vote);
 
-static struct attribute *mhi_vote_attrs[] = {
+static struct attribute *mhi_sysfs_attrs[] = {
+	&dev_attr_log_level.attr,
 	&dev_attr_bus_vote.attr,
 	&dev_attr_device_vote.attr,
 	NULL,
 };
 
-static const struct attribute_group mhi_vote_group = {
-	.attrs = mhi_vote_attrs,
+static const struct attribute_group mhi_sysfs_group = {
+	.attrs = mhi_sysfs_attrs,
 };
 
-int mhi_create_vote_sysfs(struct mhi_controller *mhi_cntrl)
+int mhi_create_sysfs(struct mhi_controller *mhi_cntrl)
 {
 	return sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj,
-				  &mhi_vote_group);
+				  &mhi_sysfs_group);
 }
 
-void mhi_destroy_vote_sysfs(struct mhi_controller *mhi_cntrl)
+void mhi_destroy_sysfs(struct mhi_controller *mhi_cntrl)
 {
 	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
 
-	sysfs_remove_group(&mhi_dev->dev.kobj, &mhi_vote_group);
+	sysfs_remove_group(&mhi_dev->dev.kobj, &mhi_sysfs_group);
 
 	/* relinquish any pending votes for device */
 	while (atomic_read(&mhi_dev->dev_vote))
@@ -183,7 +225,7 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
 	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
 
 	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
-		if (mhi_event->offload_ev)
+		if (!mhi_event->request_irq)
 			continue;
 
 		free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event);
@@ -207,7 +249,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
 		return ret;
 
 	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
-		if (mhi_event->offload_ev)
+		if (!mhi_event->request_irq)
 			continue;
 
 		ret = request_irq(mhi_cntrl->irq[mhi_event->msi],
@@ -224,7 +266,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
 
 error_request:
 	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
-		if (mhi_event->offload_ev)
+		if (!mhi_event->request_irq)
 			continue;
 
 		free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event);
@@ -496,15 +538,18 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
 	return ret;
 }
 
-static int mhi_get_tsync_er_cfg(struct mhi_controller *mhi_cntrl)
+/* to be used only if a single event ring of the given type is present */
+static int mhi_get_er_index(struct mhi_controller *mhi_cntrl,
+			    enum mhi_er_data_type type)
 {
 	int i;
 	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
 
-	/* find event ring with timesync support */
-	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++)
-		if (mhi_event->data_type == MHI_ER_TSYNC_ELEMENT_TYPE)
+	/* find event ring for requested type */
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		if (mhi_event->data_type == type)
 			return mhi_event->er_index;
+	}
 
 	return -ENOENT;
 }
@@ -581,7 +626,7 @@ int mhi_init_timesync(struct mhi_controller *mhi_cntrl)
 	read_unlock_bh(&mhi_cntrl->pm_lock);
 
 	/* get time-sync event ring configuration */
-	ret = mhi_get_tsync_er_cfg(mhi_cntrl);
+	ret = mhi_get_er_index(mhi_cntrl, MHI_ER_TSYNC_ELEMENT_TYPE);
 	if (ret < 0) {
 		MHI_LOG("Could not find timesync event ring\n");
 		return ret;
@@ -611,6 +656,36 @@ int mhi_init_timesync(struct mhi_controller *mhi_cntrl)
 	return ret;
 }
 
+static int mhi_init_bw_scale(struct mhi_controller *mhi_cntrl)
+{
+	int ret, er_index;
+	u32 bw_cfg_offset;
+
+	/* controller doesn't support dynamic bw switch */
+	if (!mhi_cntrl->bw_scale)
+		return -ENODEV;
+
+	ret = mhi_get_capability_offset(mhi_cntrl, BW_SCALE_CAP_ID,
+					&bw_cfg_offset);
+	if (ret)
+		return ret;
+
+	/* an event ring must be configured to support BW scale */
+	er_index = mhi_get_er_index(mhi_cntrl, MHI_ER_BW_SCALE_ELEMENT_TYPE);
+	if (er_index < 0)
+		return er_index;
+
+	bw_cfg_offset += BW_SCALE_CFG_OFFSET;
+
+	MHI_LOG("BW_CFG OFFSET:0x%x\n", bw_cfg_offset);
+
+	/* advertise host support */
+	mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, bw_cfg_offset,
+		      MHI_BW_SCALE_SETUP(er_index));
+
+	return 0;
+}
+
 int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
 {
 	u32 val;
@@ -707,6 +782,9 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
 	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
 	mhi_cntrl->wake_set = false;
 
+	/* setup bw scale db */
+	mhi_cntrl->bw_scale_db = base + val + (8 * MHI_BW_SCALE_CHAN_DB);
+
 	/* setup channel db addresses */
 	mhi_chan = mhi_cntrl->mhi_chan;
 	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
@@ -737,6 +815,9 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
 				    reg_info[i].mask, reg_info[i].shift,
 				    reg_info[i].val);
 
+	/* setup bandwidth scaling features */
+	mhi_init_bw_scale(mhi_cntrl);
+
 	return 0;
 }
 
@@ -887,6 +968,8 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
 	if (!mhi_cntrl->mhi_event)
 		return -ENOMEM;
 
+	INIT_LIST_HEAD(&mhi_cntrl->lp_ev_rings);
+
 	/* populate ev ring */
 	mhi_event = mhi_cntrl->mhi_event;
 	i = 0;
@@ -952,6 +1035,9 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
 		case MHI_ER_TSYNC_ELEMENT_TYPE:
 			mhi_event->process_event = mhi_process_tsync_event_ring;
 			break;
+		case MHI_ER_BW_SCALE_ELEMENT_TYPE:
+			mhi_event->process_event = mhi_process_bw_scale_ev_ring;
+			break;
 		}
 
 		mhi_event->hw_ring = of_property_read_bool(child, "mhi,hw-ev");
@@ -963,6 +1049,19 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
 							"mhi,client-manage");
 		mhi_event->offload_ev = of_property_read_bool(child,
 							      "mhi,offload");
+
+		/*
+		 * low priority events are handled in a separate worker thread
+		 * to allow for sleeping functions to be called.
+		 */
+		if (!mhi_event->offload_ev) {
+			if (IS_MHI_ER_PRIORITY_LOW(mhi_event))
+				list_add_tail(&mhi_event->node,
+						&mhi_cntrl->lp_ev_rings);
+			else
+				mhi_event->request_irq = true;
+		}
+
 		mhi_event++;
 	}
 
@@ -1242,6 +1341,7 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
 	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
 	INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
 	INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
+	INIT_WORK(&mhi_cntrl->low_priority_worker, mhi_low_priority_worker);
 	init_waitqueue_head(&mhi_cntrl->state_event);
 
 	mhi_cmd = mhi_cntrl->mhi_cmd;
@@ -1255,6 +1355,10 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
 
 		mhi_event->mhi_cntrl = mhi_cntrl;
 		spin_lock_init(&mhi_event->lock);
+
+		if (IS_MHI_ER_PRIORITY_LOW(mhi_event))
+			continue;
+
 		if (mhi_event->data_type == MHI_ER_CTRL_ELEMENT_TYPE)
 			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
 				     (ulong)mhi_event);
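
Low-priority event rings are kept off the tasklet path because tasklets run in softirq context, where sleeping is forbidden; draining them from a work item trades latency for the ability to block. The split, reduced to a sketch with hypothetical names:

#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

static struct tasklet_struct hp_task;
static struct work_struct lp_work;
static DEFINE_MUTEX(lp_ring_lock);

static void hp_ring_fn(unsigned long data)
{
	/* softirq context: must not sleep, keep it short */
}

static void lp_ring_fn(struct work_struct *work)
{
	mutex_lock(&lp_ring_lock);	/* may sleep: process context */
	/* drain the low-priority event ring here */
	mutex_unlock(&lp_ring_lock);
}

static void rings_init(void)
{
	tasklet_init(&hp_task, hp_ring_fn, 0);
	INIT_WORK(&lp_work, lp_ring_fn);
}
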
diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h
index e06e76e..735e4152 100644
--- a/drivers/bus/mhi/core/mhi_internal.h
+++ b/drivers/bus/mhi/core/mhi_internal.h
@@ -8,6 +8,7 @@ extern struct bus_type mhi_bus_type;
 
 /* MHI mmio register mapping */
 #define PCI_INVALID_READ(val) (val == U32_MAX)
+#define MHI_REG_SIZE (SZ_4K)
 
 #define MHIREGLEN (0x0)
 #define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF)
@@ -153,6 +154,17 @@ extern struct bus_type mhi_bus_type;
 
 #define TIMESYNC_CAP_ID (2)
 
+/* MHI Bandwidth scaling offsets */
+#define BW_SCALE_CFG_OFFSET (0x04)
+#define BW_SCALE_CFG_CHAN_DB_ID_MASK (0xFE000000)
+#define BW_SCALE_CFG_CHAN_DB_ID_SHIFT (25)
+#define BW_SCALE_CFG_ENABLED_MASK (0x01000000)
+#define BW_SCALE_CFG_ENABLED_SHIFT (24)
+#define BW_SCALE_CFG_ER_ID_MASK (0x00F80000)
+#define BW_SCALE_CFG_ER_ID_SHIFT (19)
+
+#define BW_SCALE_CAP_ID (3)
+
 /* MHI BHI offsets */
 #define BHI_BHIVERSION_MINOR (0x00)
 #define BHI_BHIVERSION_MAJOR (0x04)
@@ -329,12 +341,13 @@ enum mhi_cmd_type {
 #define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
 #define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF)
 #define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF)
-#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0])
+#define MHI_TRE_GET_EV_TSYNC_SEQ(tre) ((tre)->dword[0])
 #define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr)
 #define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr)
 #define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF)
 #define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF)
 #define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF)
+#define MHI_TRE_GET_EV_BW_REQ_SEQ(tre) (((tre)->dword[0] >> 8) & 0xFF)
 
 /* transfer descriptor macros */
 #define MHI_TRE_DATA_PTR(ptr) (ptr)
@@ -428,6 +441,11 @@ extern const char * const mhi_state_str[MHI_STATE_MAX];
 				  !mhi_state_str[state]) ? \
 				"INVALID_STATE" : mhi_state_str[state])
 
+extern const char * const mhi_log_level_str[MHI_MSG_LVL_MAX];
+#define TO_MHI_LOG_LEVEL_STR(level) ((level >= MHI_MSG_LVL_MAX || \
+				  !mhi_log_level_str[level]) ? \
+				"Mask all" : mhi_log_level_str[level])
+
 enum {
 	MHI_PM_BIT_DISABLE,
 	MHI_PM_BIT_POR,
@@ -441,6 +459,7 @@ enum {
 	MHI_PM_BIT_SYS_ERR_PROCESS,
 	MHI_PM_BIT_SHUTDOWN_PROCESS,
 	MHI_PM_BIT_LD_ERR_FATAL_DETECT,
+	MHI_PM_BIT_SHUTDOWN_NO_ACCESS,
 	MHI_PM_BIT_MAX
 };
 
@@ -460,6 +479,7 @@ enum MHI_PM_STATE {
 	MHI_PM_SHUTDOWN_PROCESS = BIT(MHI_PM_BIT_SHUTDOWN_PROCESS),
 	/* link not accessible */
 	MHI_PM_LD_ERR_FATAL_DETECT = BIT(MHI_PM_BIT_LD_ERR_FATAL_DETECT),
+	MHI_PM_SHUTDOWN_NO_ACCESS = BIT(MHI_PM_BIT_SHUTDOWN_NO_ACCESS),
 };
 
 #define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
@@ -467,7 +487,7 @@ enum MHI_PM_STATE {
 		MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
 		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
 #define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
-#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
+#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state >= MHI_PM_LD_ERR_FATAL_DETECT)
 #define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \
 					mhi_cntrl->db_access)
 #define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
@@ -492,19 +512,38 @@ enum MHI_XFER_TYPE {
 #define NR_OF_CMD_RINGS (1)
 #define CMD_EL_PER_RING (128)
 #define PRIMARY_CMD_RING (0)
+#define MHI_BW_SCALE_CHAN_DB (126)
 #define MHI_DEV_WAKE_DB (127)
 #define MHI_MAX_MTU (0xffff)
 
+#define MHI_BW_SCALE_SETUP(er_index) ((MHI_BW_SCALE_CHAN_DB << \
+	BW_SCALE_CFG_CHAN_DB_ID_SHIFT) & BW_SCALE_CFG_CHAN_DB_ID_MASK | \
+	(1 << BW_SCALE_CFG_ENABLED_SHIFT) & BW_SCALE_CFG_ENABLED_MASK | \
+	((er_index) << BW_SCALE_CFG_ER_ID_SHIFT) & BW_SCALE_CFG_ER_ID_MASK)
+
+#define MHI_BW_SCALE_RESULT(status, seq) ((status & 0xF) << 8 | (seq & 0xFF))
+#define MHI_BW_SCALE_NACK 0xF
+
 enum MHI_ER_TYPE {
 	MHI_ER_TYPE_INVALID = 0x0,
 	MHI_ER_TYPE_VALID = 0x1,
 };
 
+enum mhi_er_priority {
+	MHI_ER_PRIORITY_HIGH,
+	MHI_ER_PRIORITY_MEDIUM,
+	MHI_ER_PRIORITY_LOW,
+};
+
+#define IS_MHI_ER_PRIORITY_LOW(ev) (ev->priority >= MHI_ER_PRIORITY_LOW)
+#define IS_MHI_ER_PRIORITY_HIGH(ev) (ev->priority == MHI_ER_PRIORITY_HIGH)
+
 enum mhi_er_data_type {
 	MHI_ER_DATA_ELEMENT_TYPE,
 	MHI_ER_CTRL_ELEMENT_TYPE,
 	MHI_ER_TSYNC_ELEMENT_TYPE,
-	MHI_ER_DATA_TYPE_MAX = MHI_ER_TSYNC_ELEMENT_TYPE,
+	MHI_ER_BW_SCALE_ELEMENT_TYPE,
+	MHI_ER_DATA_TYPE_MAX = MHI_ER_BW_SCALE_ELEMENT_TYPE,
 };
 
 enum mhi_ch_ee_mask {
@@ -587,17 +626,19 @@ struct mhi_buf_info {
 };
 
 struct mhi_event {
+	struct list_head node;
 	u32 er_index;
 	u32 intmod;
 	u32 msi;
 	int chan; /* this event ring is dedicated to a channel */
-	u32 priority;
+	enum mhi_er_priority priority;
 	enum mhi_er_data_type data_type;
 	struct mhi_ring ring;
 	struct db_cfg db_cfg;
 	bool hw_ring;
 	bool cl_manage;
 	bool offload_ev; /* managed by a device driver */
+	bool request_irq; /* has dedicated interrupt handler */
 	spinlock_t lock;
 	struct mhi_chan *mhi_chan; /* dedicated to channel */
 	struct tasklet_struct task;
@@ -700,6 +741,7 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
 void mhi_pm_st_worker(struct work_struct *work);
 void mhi_fw_load_worker(struct work_struct *work);
 void mhi_pm_sys_err_worker(struct work_struct *work);
+void mhi_low_priority_worker(struct work_struct *work);
 int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
 void mhi_ctrl_ev_task(unsigned long data);
 int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
@@ -712,6 +754,8 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 			     struct mhi_event *mhi_event, u32 event_quota);
 int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl,
 				 struct mhi_event *mhi_event, u32 event_quota);
+int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
+				 struct mhi_event *mhi_event, u32 event_quota);
 int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
 		 enum MHI_CMD cmd);
 int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
@@ -760,11 +804,12 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
 		      struct mhi_chan *mhi_chan);
 int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, u32 capability,
 			      u32 *offset);
+void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr);
 int mhi_init_timesync(struct mhi_controller *mhi_cntrl);
 int mhi_create_timesync_sysfs(struct mhi_controller *mhi_cntrl);
 void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl);
-int mhi_create_vote_sysfs(struct mhi_controller *mhi_cntrl);
-void mhi_destroy_vote_sysfs(struct mhi_controller *mhi_cntrl);
+int mhi_create_sysfs(struct mhi_controller *mhi_cntrl);
+void mhi_destroy_sysfs(struct mhi_controller *mhi_cntrl);
 int mhi_early_notify_device(struct device *dev, void *data);
 
 /* timesync log support */
diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c
index 7eb387d..8e1e2fd 100644
--- a/drivers/bus/mhi/core/mhi_main.c
+++ b/drivers/bus/mhi/core/mhi_main.c
@@ -81,7 +81,9 @@ int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl,
 		if (ret)
 			return ret;
 
-		*offset += next_offset;
+		*offset = next_offset;
+		if (*offset >= MHI_REG_SIZE)
+			return -ENXIO;
 	} while (next_offset);
 
 	return -ENXIO;
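
The hunk above fixes the capability walk: each capability register's NEXT
field holds an absolute offset, so accumulating with += compounded the links
and could run past the register space. A minimal sketch of the corrected
traversal, assuming a hypothetical read_reg() helper and CAP_* field offsets
(the real accessors differ):

u32 offset = first_cap_offset, next, cap_id;

do {
	cap_id = read_reg(base, offset + CAP_CAPID_OFFSET);
	if (cap_id == capability)
		return offset;		/* found */

	next = read_reg(base, offset + CAP_NEXT_OFFSET);
	offset = next;			/* absolute link, not "+=" */
	if (offset >= MHI_REG_SIZE)
		return -ENXIO;		/* malformed list guard */
} while (next);

return -ENXIO;				/* end of list, not found */
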
@@ -255,7 +257,7 @@ static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
 	return nr_el;
 }
 
-static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
+void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
 {
 	return (addr - ring->iommu_base) + ring->base;
 }
@@ -1133,25 +1135,6 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 			local_rp->ptr, local_rp->dword[0], local_rp->dword[1]);
 
 		switch (type) {
-		case MHI_PKT_TYPE_BW_REQ_EVENT:
-		{
-			struct mhi_link_info *link_info;
-
-			link_info = &mhi_cntrl->mhi_link_info;
-			write_lock_irq(&mhi_cntrl->pm_lock);
-			link_info->target_link_speed =
-				MHI_TRE_GET_EV_LINKSPEED(local_rp);
-			link_info->target_link_width =
-				MHI_TRE_GET_EV_LINKWIDTH(local_rp);
-			write_unlock_irq(&mhi_cntrl->pm_lock);
-			MHI_VERB(
-				 "Received BW_REQ with link speed:0x%x width:0x%x\n",
-				 link_info->target_link_speed,
-				 link_info->target_link_width);
-			mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
-					     MHI_CB_BW_REQ);
-			break;
-		}
 		case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
 		{
 			enum mhi_dev_state new_state;
@@ -1241,7 +1224,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 			break;
 		}
 		default:
-			MHI_ASSERT(1, "Unsupported ev type");
+			MHI_ERR("Unhandled Event: 0x%x\n", type);
 			break;
 		}
 
@@ -1346,7 +1329,7 @@ int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl,
 
 		MHI_ASSERT(type != MHI_PKT_TYPE_TSYNC_EVENT, "!TSYNC event");
 
-		sequence = MHI_TRE_GET_EV_SEQ(local_rp);
+		sequence = MHI_TRE_GET_EV_TSYNC_SEQ(local_rp);
 		remote_time = MHI_TRE_GET_EV_TIME(local_rp);
 
 		do {
@@ -1392,6 +1375,94 @@ int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl,
 	return count;
 }
 
+int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
+				 struct mhi_event *mhi_event,
+				 u32 event_quota)
+{
+	struct mhi_tre *dev_rp;
+	struct mhi_ring *ev_ring = &mhi_event->ring;
+	struct mhi_event_ctxt *er_ctxt =
+		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+	struct mhi_link_info link_info, *cur_info = &mhi_cntrl->mhi_link_info;
+	int result, ret = 0;
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+
+	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
+		MHI_LOG("No EV access, PM_STATE:%s\n",
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		ret = -EIO;
+		goto exit_bw_process;
+	}
+
+	/*
+	 * BW change is not processed during suspend since we're suspending
+	 * the link; the host will process it during resume
+	 */
+	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+		ret = -EACCES;
+		goto exit_bw_process;
+	}
+
+	spin_lock_bh(&mhi_event->lock);
+	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+	if (ev_ring->rp == dev_rp) {
+		spin_unlock_bh(&mhi_event->lock);
+		goto exit_bw_process;
+	}
+
+	/* if rp points to base, we need to wrap it around */
+	if (dev_rp == ev_ring->base)
+		dev_rp = ev_ring->base + ev_ring->len;
+	dev_rp--;
+
+	MHI_ASSERT(MHI_TRE_GET_EV_TYPE(dev_rp) != MHI_PKT_TYPE_BW_REQ_EVENT,
+		   "!BW SCALE REQ event");
+
+	link_info.target_link_speed = MHI_TRE_GET_EV_LINKSPEED(dev_rp);
+	link_info.target_link_width = MHI_TRE_GET_EV_LINKWIDTH(dev_rp);
+	link_info.sequence_num = MHI_TRE_GET_EV_BW_REQ_SEQ(dev_rp);
+
+	MHI_VERB("Received BW_REQ with seq:%d link speed:0x%x width:0x%x\n",
+		 link_info.sequence_num,
+		 link_info.target_link_speed,
+		 link_info.target_link_width);
+
+	/* fast forward to currently processed element and recycle er */
+	ev_ring->rp = dev_rp;
+	ev_ring->wp = dev_rp - 1;
+	if (ev_ring->wp < ev_ring->base)
+		ev_ring->wp = ev_ring->base + ev_ring->len - ev_ring->el_size;
+	mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+		mhi_ring_er_db(mhi_event);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+	spin_unlock_bh(&mhi_event->lock);
+
+	ret = mhi_cntrl->bw_scale(mhi_cntrl, &link_info);
+	if (!ret)
+		*cur_info = link_info;
+
+	result = ret ? MHI_BW_SCALE_NACK : 0;
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+		mhi_write_reg(mhi_cntrl, mhi_cntrl->bw_scale_db, 0,
+			      MHI_BW_SCALE_RESULT(result,
+						  link_info.sequence_num));
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+exit_bw_process:
+	MHI_VERB("exit er_index:%u\n", mhi_event->er_index);
+
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	return ret;
+}
+
 void mhi_ev_task(unsigned long data)
 {
 	struct mhi_event *mhi_event = (struct mhi_event *)data;
@@ -1471,7 +1542,13 @@ irqreturn_t mhi_msi_handlr(int irq_number, void *dev)
 
 		if (mhi_dev)
 			mhi_dev->status_cb(mhi_dev, MHI_CB_PENDING_DATA);
-	} else
+
+		return IRQ_HANDLED;
+	}
+
+	if (IS_MHI_ER_PRIORITY_HIGH(mhi_event))
+		tasklet_hi_schedule(&mhi_event->task);
+	else
 		tasklet_schedule(&mhi_event->task);
 
 	return IRQ_HANDLED;
@@ -1541,6 +1618,8 @@ irqreturn_t mhi_intvec_handlr(int irq_number, void *dev)
 	wake_up_all(&mhi_cntrl->state_event);
 	MHI_VERB("Exit\n");
 
+	schedule_work(&mhi_cntrl->low_priority_worker);
+
 	return IRQ_WAKE_THREAD;
 }
 
diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c
index 29b5cad..95f496a 100644
--- a/drivers/bus/mhi/core/mhi_pm.c
+++ b/drivers/bus/mhi/core/mhi_pm.c
@@ -34,9 +34,11 @@
  *     M0 -> FW_DL_ERR
  *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
  * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
- * L2: SHUTDOWN_PROCESS -> DISABLE
+ * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
+ *     SHUTDOWN_PROCESS -> DISABLE
  * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
- *     LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
+ *     LD_ERR_FATAL_DETECT -> SHUTDOWN_NO_ACCESS
+ *     SHUTDOWN_NO_ACCESS -> DISABLE
  */
 static struct mhi_pm_transitions const mhi_state_transitions[] = {
 	/* L0 States */
@@ -48,49 +50,52 @@ static struct mhi_pm_transitions const mhi_state_transitions[] = {
 		MHI_PM_POR,
 		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
 		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR |
+		MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_M0,
 		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
 		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR |
+		MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_M2,
 		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_M3_ENTER,
 		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_M3,
 		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
-		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT |
+		MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_M3_EXIT,
 		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_FW_DL_ERR,
 		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
-		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT |
+		MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	/* L1 States */
 	{
 		MHI_PM_SYS_ERR_DETECT,
 		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_SYS_ERR_PROCESS,
 		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	/* L2 States */
 	{
@@ -100,7 +105,11 @@ static struct mhi_pm_transitions const mhi_state_transitions[] = {
 	/* L3 States */
 	{
 		MHI_PM_LD_ERR_FATAL_DETECT,
-		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
+	},
+	{
+		MHI_PM_SHUTDOWN_NO_ACCESS,
+		MHI_PM_DISABLE
 	},
 };
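
Because every MHI_PM_* state is a distinct bit, the core can index this table
by the position of the current state's bit. Roughly how the lookup gates a
transition (a simplified sketch of the tryset helper; the real function is
called with pm_lock held and its name/shape may differ slightly):

static enum MHI_PM_STATE mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
					     enum MHI_PM_STATE state)
{
	unsigned long cur_state = mhi_cntrl->pm_state;
	int index = find_last_bit(&cur_state, 32);

	if (index >= ARRAY_SIZE(mhi_state_transitions) ||
	    mhi_state_transitions[index].from_state != cur_state ||
	    !(mhi_state_transitions[index].to_states & state))
		return cur_state;	/* transition rejected */

	mhi_cntrl->pm_state = state;	/* transition allowed */
	return state;
}
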
 
@@ -492,7 +501,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
 	mhi_create_devices(mhi_cntrl);
 
 	/* setup sysfs nodes for userspace votes */
-	mhi_create_vote_sysfs(mhi_cntrl);
+	mhi_create_sysfs(mhi_cntrl);
 
 	read_lock_bh(&mhi_cntrl->pm_lock);
 
@@ -589,7 +598,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 	MHI_LOG("Waiting for all pending event ring processing to complete\n");
 	mhi_event = mhi_cntrl->mhi_event;
 	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
-		if (mhi_event->offload_ev)
+		if (!mhi_event->request_irq)
 			continue;
 		tasklet_kill(&mhi_event->task);
 	}
@@ -602,12 +611,13 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 	MHI_LOG("Finish resetting channels\n");
 
 	/* remove support for userspace votes */
-	mhi_destroy_vote_sysfs(mhi_cntrl);
+	mhi_destroy_sysfs(mhi_cntrl);
 
 	MHI_LOG("Waiting for all pending threads to complete\n");
 	wake_up_all(&mhi_cntrl->state_event);
 	flush_work(&mhi_cntrl->st_worker);
 	flush_work(&mhi_cntrl->fw_worker);
+	flush_work(&mhi_cntrl->low_priority_worker);
 
 	mutex_lock(&mhi_cntrl->pm_mutex);
 
@@ -720,6 +730,44 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
 	return 0;
 }
 
+static void mhi_low_priority_events_pending(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_event *mhi_event;
+
+	list_for_each_entry(mhi_event, &mhi_cntrl->lp_ev_rings, node) {
+		struct mhi_event_ctxt *er_ctxt =
+			&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+		struct mhi_ring *ev_ring = &mhi_event->ring;
+
+		spin_lock_bh(&mhi_event->lock);
+		if (ev_ring->rp != mhi_to_virtual(ev_ring, er_ctxt->rp)) {
+			schedule_work(&mhi_cntrl->low_priority_worker);
+			spin_unlock_bh(&mhi_event->lock);
+			break;
+		}
+		spin_unlock_bh(&mhi_event->lock);
+	}
+}
+
+void mhi_low_priority_worker(struct work_struct *work)
+{
+	struct mhi_controller *mhi_cntrl = container_of(work,
+							struct mhi_controller,
+							low_priority_worker);
+	struct mhi_event *mhi_event;
+
+	MHI_VERB("Enter with pm_state:%s MHI_STATE:%s ee:%s\n",
+		 to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		 TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		 TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	/* check low priority event rings and process events */
+	list_for_each_entry(mhi_event, &mhi_cntrl->lp_ev_rings, node) {
+		if (MHI_IN_MISSION_MODE(mhi_cntrl->ee))
+			mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+	}
+}
+
 void mhi_pm_sys_err_worker(struct work_struct *work)
 {
 	struct mhi_controller *mhi_cntrl = container_of(work,
@@ -920,6 +968,7 @@ EXPORT_SYMBOL(mhi_control_error);
 void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
 {
 	enum MHI_PM_STATE cur_state;
+	enum MHI_PM_STATE transition_state = MHI_PM_SHUTDOWN_PROCESS;
 
 	/* if it's not graceful shutdown, force MHI to a linkdown state */
 	if (!graceful) {
@@ -933,8 +982,10 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
 			MHI_ERR("Failed to move to state:%s from:%s\n",
 				to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
 				to_mhi_pm_state_str(mhi_cntrl->pm_state));
+
+		transition_state = MHI_PM_SHUTDOWN_NO_ACCESS;
 	}
-	mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
+	mhi_pm_disable_transition(mhi_cntrl, transition_state);
 
 	mhi_deinit_debugfs(mhi_cntrl);
 
@@ -1244,6 +1295,14 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
 		return -EIO;
 	}
 
+	/*
+	 * If MHI on the host is in a suspending/suspended state, we do not
+	 * process any low priority requests, for example, bandwidth scaling
+	 * events from the device. Check for low priority event rings and
+	 * handle the pending events upon resume.
+	 */
+	mhi_low_priority_events_pending(mhi_cntrl);
+
 	return 0;
 }
 
@@ -1288,6 +1347,7 @@ int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_client)
 	switch (mhi_cntrl->pm_state) {
 	case MHI_PM_M0:
 		mhi_pm_m0_transition(mhi_cntrl);
+		break;
 	case MHI_PM_M2:
 		read_lock_bh(&mhi_cntrl->pm_lock);
 		mhi_cntrl->wake_get(mhi_cntrl, true);
@@ -1306,12 +1366,15 @@ int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_client)
 	 */
 	mhi_event = mhi_cntrl->mhi_event;
 	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
-		if (mhi_event->offload_ev)
+		if (!mhi_event->request_irq)
 			continue;
 
 		mhi_msi_handlr(0, mhi_event);
 	}
 
+	/* schedules worker if any low priority events need to be handled */
+	mhi_low_priority_events_pending(mhi_cntrl);
+
 	MHI_LOG("Exit with pm_state:%s dev_state:%s\n",
 		to_mhi_pm_state_str(mhi_cntrl->pm_state),
 		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
diff --git a/drivers/bus/mhi/devices/mhi_netdev.c b/drivers/bus/mhi/devices/mhi_netdev.c
index ce1e28f..2e6dcde 100644
--- a/drivers/bus/mhi/devices/mhi_netdev.c
+++ b/drivers/bus/mhi/devices/mhi_netdev.c
@@ -14,6 +14,7 @@
 #include <linux/errno.h>
 #include <linux/of_device.h>
 #include <linux/rtnetlink.h>
+#include <linux/kthread.h>
 #include <linux/mhi.h>
 
 #define MHI_NETDEV_DRIVER_NAME "mhi_netdev"
@@ -80,6 +81,7 @@ struct mhi_netdev {
 	int alias;
 	struct mhi_device *mhi_dev;
 	struct mhi_netdev *rsc_dev; /* rsc linked node */
+	struct mhi_netdev *rsc_parent;
 	bool is_rsc_dev;
 	int wake;
 
@@ -89,16 +91,26 @@ struct mhi_netdev {
 	struct napi_struct *napi;
 	struct net_device *ndev;
 
-	struct mhi_netbuf **netbuf_pool;
-	int pool_size; /* must be power of 2 */
-	int current_index;
+	struct list_head *recycle_pool;
+	int pool_size;
 	bool chain_skb;
 	struct mhi_net_chain *chain;
 
+	struct task_struct *alloc_task;
+	wait_queue_head_t alloc_event;
+	int bg_pool_limit; /* minimum pool size */
+	int bg_pool_size; /* current size of the pool */
+	struct list_head *bg_pool;
+	spinlock_t bg_lock; /* lock to access list */
+
 	struct dentry *dentry;
 	enum MHI_DEBUG_LEVEL msg_lvl;
 	enum MHI_DEBUG_LEVEL ipc_log_lvl;
 	void *ipc_log;
+
+	/* debug stats */
+	u32 abuffers, kbuffers, rbuffers;
 };
 
 struct mhi_netdev_priv {
@@ -111,6 +123,7 @@ struct mhi_netdev_priv {
  */
 struct mhi_netbuf {
 	struct mhi_buf mhi_buf; /* this must be first element */
+	bool recycle;
 	void (*unmap)(struct device *dev, dma_addr_t addr, size_t size,
 		      enum dma_data_direction dir);
 };
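
The bookkeeping struct is carved out of the tail of the receive page itself,
which is why mhi_buf must stay the first element: the struct and the data
buffer share one allocation. The layout mhi_netdev_alloc() produces, inferred
from its pointer math:

/*
 * |<-------------- PAGE_SIZE << order --------------->|
 * |  receive data (mhi_buf->len bytes)  | mhi_netbuf  |
 * ^ vaddr                               ^ netbuf
 *
 * netbuf       = vaddr + (PAGE_SIZE << order) - sizeof(*netbuf);
 * mhi_buf->len = (void *)netbuf - vaddr;
 */
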
@@ -147,7 +160,7 @@ static struct mhi_netbuf *mhi_netdev_alloc(struct device *dev,
 	struct mhi_buf *mhi_buf;
 	void *vaddr;
 
-	page = __dev_alloc_pages(gfp, order);
+	page = __dev_alloc_pages(gfp | __GFP_NOMEMALLOC, order);
 	if (!page)
 		return NULL;
 
@@ -155,11 +168,15 @@ static struct mhi_netbuf *mhi_netdev_alloc(struct device *dev,
 
 	/* we are going to use the end of the page to store cached data */
 	netbuf = vaddr + (PAGE_SIZE << order) - sizeof(*netbuf);
-
+	netbuf->recycle = false;
 	mhi_buf = (struct mhi_buf *)netbuf;
 	mhi_buf->page = page;
 	mhi_buf->buf = vaddr;
 	mhi_buf->len = (void *)netbuf - vaddr;
+
+	if (!dev)
+		return netbuf;
+
 	mhi_buf->dma_addr = dma_map_page(dev, page, 0, mhi_buf->len,
 					 DMA_FROM_DEVICE);
 	if (dma_mapping_error(dev, mhi_buf->dma_addr)) {
@@ -178,9 +195,10 @@ static void mhi_netdev_unmap_page(struct device *dev,
 	dma_unmap_page(dev, dma_addr, len, dir);
 }
 
-static int mhi_netdev_tmp_alloc(struct mhi_netdev *mhi_netdev, int nr_tre)
+static int mhi_netdev_tmp_alloc(struct mhi_netdev *mhi_netdev,
+				struct mhi_device *mhi_dev,
+				int nr_tre)
 {
-	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
 	struct device *dev = mhi_dev->dev.parent;
 	const u32 order = mhi_netdev->order;
 	int i, ret;
@@ -204,21 +222,73 @@ static int mhi_netdev_tmp_alloc(struct mhi_netdev *mhi_netdev, int nr_tre)
 			__free_pages(mhi_buf->page, order);
 			return ret;
 		}
+		mhi_netdev->abuffers++;
 	}
 
 	return 0;
 }
 
-static void mhi_netdev_queue(struct mhi_netdev *mhi_netdev)
+static int mhi_netdev_queue_bg_pool(struct mhi_netdev *mhi_netdev,
+				    struct mhi_device *mhi_dev,
+				    int nr_tre)
 {
-	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+	struct device *dev = mhi_dev->dev.parent;
+	int i, ret;
+	LIST_HEAD(head);
+
+	spin_lock_bh(&mhi_netdev->bg_lock);
+	list_splice_init(mhi_netdev->bg_pool, &head);
+	spin_unlock_bh(&mhi_netdev->bg_lock);
+
+	for (i = 0; i < nr_tre; i++) {
+		struct mhi_buf *mhi_buf =
+			list_first_entry_or_null(&head, struct mhi_buf, node);
+		struct mhi_netbuf *netbuf = (struct mhi_netbuf *)mhi_buf;
+
+		if (!mhi_buf)
+			break;
+
+		mhi_buf->dma_addr = dma_map_page(dev, mhi_buf->page, 0,
+						 mhi_buf->len, DMA_FROM_DEVICE);
+		if (dma_mapping_error(dev, mhi_buf->dma_addr))
+			break;
+
+		netbuf->unmap = mhi_netdev_unmap_page;
+		ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf,
+					 mhi_buf->len, MHI_EOT);
+		if (unlikely(ret)) {
+			MSG_ERR("Failed to queue transfer, ret:%d\n", ret);
+			mhi_netdev_unmap_page(dev, mhi_buf->dma_addr,
+					      mhi_buf->len, DMA_FROM_DEVICE);
+			break;
+		}
+		list_del(&mhi_buf->node);
+		mhi_netdev->kbuffers++;
+	}
+
+	/* add remaining buffers back to main pool */
+	spin_lock_bh(&mhi_netdev->bg_lock);
+	list_splice(&head, mhi_netdev->bg_pool);
+	mhi_netdev->bg_pool_size -= i;
+	spin_unlock_bh(&mhi_netdev->bg_lock);
+
+	/* wake up the bg thread to allocate more buffers */
+	wake_up_interruptible(&mhi_netdev->alloc_event);
+
+	return i;
+}
+
+static void mhi_netdev_queue(struct mhi_netdev *mhi_netdev,
+			     struct mhi_device *mhi_dev)
+{
 	struct device *dev = mhi_dev->dev.parent;
 	struct mhi_netbuf *netbuf;
 	struct mhi_buf *mhi_buf;
-	struct mhi_netbuf **netbuf_pool = mhi_netdev->netbuf_pool;
+	struct list_head *pool = mhi_netdev->recycle_pool;
 	int nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
-	int i, peak, cur_index, ret;
-	const int pool_size = mhi_netdev->pool_size - 1, max_peak = 4;
+	int i, ret;
+	const int  max_peek = 4;
 
 	MSG_VERB("Enter free_desc:%d\n", nr_tre);
 
@@ -227,23 +297,21 @@ static void mhi_netdev_queue(struct mhi_netdev *mhi_netdev)
 
 	/* try going thru reclaim pool first */
 	for (i = 0; i < nr_tre; i++) {
-		/* peak for the next buffer, we going to peak several times,
+		/* peek for the next buffer, we are going to peek several times,
 		 * and we are going to give up if buffers are not yet free
 		 */
-		cur_index = mhi_netdev->current_index;
+		int peek = 0;
+
 		netbuf = NULL;
-		for (peak = 0; peak < max_peak; peak++) {
-			struct mhi_netbuf *tmp = netbuf_pool[cur_index];
-
-			mhi_buf = &tmp->mhi_buf;
-
-			cur_index = (cur_index + 1) & pool_size;
-
+		list_for_each_entry(mhi_buf, pool, node) {
 			/* page == 1 idle, buffer is free to reclaim */
 			if (page_ref_count(mhi_buf->page) == 1) {
-				netbuf = tmp;
+				netbuf = (struct mhi_netbuf *)mhi_buf;
 				break;
 			}
+
+			if (peek++ >= max_peek)
+				break;
 		}
 
 		/* could not find a free buffer */
@@ -254,6 +322,7 @@ static void mhi_netdev_queue(struct mhi_netdev *mhi_netdev)
 		 * with buffer, the buffer won't be freed
 		 */
 		page_ref_inc(mhi_buf->page);
+		list_del(&mhi_buf->node);
 		dma_sync_single_for_device(dev, mhi_buf->dma_addr, mhi_buf->len,
 					   DMA_FROM_DEVICE);
 		ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf,
@@ -263,30 +332,36 @@ static void mhi_netdev_queue(struct mhi_netdev *mhi_netdev)
 			netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len,
 				      DMA_FROM_DEVICE);
 			page_ref_dec(mhi_buf->page);
+			list_add(&mhi_buf->node, pool);
 			return;
 		}
-		mhi_netdev->current_index = cur_index;
+		mhi_netdev->rbuffers++;
 	}
 
+	/* recycling did not work, buffers are still busy, use the bg pool */
+	if (i < nr_tre)
+		i += mhi_netdev_queue_bg_pool(mhi_netdev, mhi_dev, nr_tre - i);
+
 	/* recycling did not work, buffers are still busy, allocate temp pkts */
 	if (i < nr_tre)
-		mhi_netdev_tmp_alloc(mhi_netdev, nr_tre - i);
+		mhi_netdev_tmp_alloc(mhi_netdev, mhi_dev, nr_tre - i);
 }
 
 /* allocating pool of memory */
 static int mhi_netdev_alloc_pool(struct mhi_netdev *mhi_netdev)
 {
 	int i;
-	struct mhi_netbuf *netbuf, **netbuf_pool;
-	struct mhi_buf *mhi_buf;
+	struct mhi_netbuf *netbuf;
+	struct mhi_buf *mhi_buf, *tmp;
 	const u32 order = mhi_netdev->order;
 	struct device *dev = mhi_netdev->mhi_dev->dev.parent;
+	struct list_head *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
 
-	netbuf_pool = kmalloc_array(mhi_netdev->pool_size, sizeof(*netbuf_pool),
-				    GFP_KERNEL);
-	if (!netbuf_pool)
+	if (!pool)
 		return -ENOMEM;
 
+	INIT_LIST_HEAD(pool);
+
 	for (i = 0; i < mhi_netdev->pool_size; i++) {
 		/* allocate paged data */
 		netbuf = mhi_netdev_alloc(dev, GFP_KERNEL, order);
@@ -294,44 +369,100 @@ static int mhi_netdev_alloc_pool(struct mhi_netdev *mhi_netdev)
 			goto error_alloc_page;
 
 		netbuf->unmap = dma_sync_single_for_cpu;
-		netbuf_pool[i] = netbuf;
+		netbuf->recycle = true;
+		mhi_buf = (struct mhi_buf *)netbuf;
+		list_add(&mhi_buf->node, pool);
 	}
 
-	mhi_netdev->netbuf_pool = netbuf_pool;
+	mhi_netdev->recycle_pool = pool;
 
 	return 0;
 
 error_alloc_page:
-	for (--i; i >= 0; i--) {
-		netbuf = netbuf_pool[i];
-		mhi_buf = &netbuf->mhi_buf;
+	list_for_each_entry_safe(mhi_buf, tmp, pool, node) {
+		list_del(&mhi_buf->node);
 		dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len,
 			       DMA_FROM_DEVICE);
 		__free_pages(mhi_buf->page, order);
 	}
 
-	kfree(netbuf_pool);
+	kfree(pool);
 
 	return -ENOMEM;
 }
 
 static void mhi_netdev_free_pool(struct mhi_netdev *mhi_netdev)
 {
-	int i;
-	struct mhi_netbuf *netbuf, **netbuf_pool = mhi_netdev->netbuf_pool;
 	struct device *dev = mhi_netdev->mhi_dev->dev.parent;
-	struct mhi_buf *mhi_buf;
+	struct mhi_buf *mhi_buf, *tmp;
 
-	for (i = 0; i < mhi_netdev->pool_size; i++) {
-		netbuf = netbuf_pool[i];
-		mhi_buf = &netbuf->mhi_buf;
+	list_for_each_entry_safe(mhi_buf, tmp, mhi_netdev->recycle_pool, node) {
+		list_del(&mhi_buf->node);
 		dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len,
 			       DMA_FROM_DEVICE);
 		__free_pages(mhi_buf->page, mhi_netdev->order);
 	}
 
-	kfree(mhi_netdev->netbuf_pool);
-	mhi_netdev->netbuf_pool = NULL;
+	kfree(mhi_netdev->recycle_pool);
+
+	/* free the bg pool */
+	list_for_each_entry_safe(mhi_buf, tmp, mhi_netdev->bg_pool, node) {
+		list_del(&mhi_buf->node);
+		__free_pages(mhi_buf->page, mhi_netdev->order);
+		mhi_netdev->bg_pool_size--;
+	}
+}
+
+static int mhi_netdev_alloc_thread(void *data)
+{
+	struct mhi_netdev *mhi_netdev = data;
+	struct mhi_netbuf *netbuf;
+	struct mhi_buf *mhi_buf, *tmp_buf;
+	const u32 order = mhi_netdev->order;
+	LIST_HEAD(head);
+
+	while (!kthread_should_stop()) {
+		while (mhi_netdev->bg_pool_size <= mhi_netdev->bg_pool_limit) {
+			int buffers = 0, i;
+
+			/* do a bulk allocation */
+			for (i = 0; i < NAPI_POLL_WEIGHT; i++) {
+				if (kthread_should_stop())
+					goto exit_alloc;
+
+				netbuf = mhi_netdev_alloc(NULL, GFP_KERNEL,
+							  order);
+				if (!netbuf)
+					continue;
+
+				mhi_buf = (struct mhi_buf *)netbuf;
+				list_add(&mhi_buf->node, &head);
+				buffers++;
+			}
+
+			/* add the list to main pool */
+			spin_lock_bh(&mhi_netdev->bg_lock);
+			list_splice_init(&head, mhi_netdev->bg_pool);
+			mhi_netdev->bg_pool_size += buffers;
+			spin_unlock_bh(&mhi_netdev->bg_lock);
+		}
+
+		/* replenish the ring */
+		napi_schedule(mhi_netdev->napi);
+
+		/* wait for buffers to run low or thread to stop */
+		wait_event_interruptible(mhi_netdev->alloc_event,
+			kthread_should_stop() ||
+			mhi_netdev->bg_pool_size <= mhi_netdev->bg_pool_limit);
+	}
+
+exit_alloc:
+	list_for_each_entry_safe(mhi_buf, tmp_buf, &head, node) {
+		list_del(&mhi_buf->node);
+		__free_pages(mhi_buf->page, order);
+	}
+
+	return 0;
 }
 
 static int mhi_netdev_poll(struct napi_struct *napi, int budget)
@@ -361,10 +492,10 @@ static int mhi_netdev_poll(struct napi_struct *napi, int budget)
 	}
 
 	/* queue new buffers */
-	mhi_netdev_queue(mhi_netdev);
+	mhi_netdev_queue(mhi_netdev, mhi_dev);
 
 	if (rsc_dev)
-		mhi_netdev_queue(rsc_dev);
+		mhi_netdev_queue(mhi_netdev, rsc_dev->mhi_dev);
 
 	/* complete work if # of packets processed is less than the budget */
 	if (rx_work < budget)
@@ -658,6 +789,8 @@ static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev,
 	struct mhi_net_chain *chain = mhi_netdev->chain;
 
 	netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len, DMA_FROM_DEVICE);
+	if (likely(netbuf->recycle))
+		list_add_tail(&mhi_buf->node, mhi_netdev->recycle_pool);
 
 	/* modem is down, drop the buffer */
 	if (mhi_result->transaction_status == -ENOTCONN) {
@@ -708,6 +841,47 @@ static void mhi_netdev_status_cb(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb)
 
 struct dentry *dentry;
 
+static int mhi_netdev_debugfs_stats_show(struct seq_file *m, void *d)
+{
+	struct mhi_netdev *mhi_netdev = m->private;
+
+	seq_printf(m,
+		   "mru:%u order:%u pool_size:%d, bg_pool_size:%d bg_pool_limit:%d abuf:%u kbuf:%u rbuf:%u\n",
+		   mhi_netdev->mru, mhi_netdev->order, mhi_netdev->pool_size,
+		   mhi_netdev->bg_pool_size, mhi_netdev->bg_pool_limit,
+		   mhi_netdev->abuffers, mhi_netdev->kbuffers,
+		   mhi_netdev->rbuffers);
+
+	return 0;
+}
+
+static int mhi_netdev_debugfs_stats_open(struct inode *inode, struct file *fp)
+{
+	return single_open(fp, mhi_netdev_debugfs_stats_show, inode->i_private);
+}
+
+static const struct file_operations debugfs_stats = {
+	.open = mhi_netdev_debugfs_stats_open,
+	.release = single_release,
+	.read = seq_read,
+};
+
+static int mhi_netdev_debugfs_chain(void *data, u64 val)
+{
+	struct mhi_netdev *mhi_netdev = data;
+	struct mhi_netdev *rsc_dev = mhi_netdev->rsc_dev;
+
+	mhi_netdev->chain = NULL;
+
+	if (rsc_dev)
+		rsc_dev->chain = NULL;
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(debugfs_chain, NULL,
+			 mhi_netdev_debugfs_chain, "%llu\n");
+
 static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev)
 {
 	char node_name[32];
@@ -724,6 +898,11 @@ static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev)
 	mhi_netdev->dentry = debugfs_create_dir(node_name, dentry);
 	if (IS_ERR_OR_NULL(mhi_netdev->dentry))
 		return;
+
+	debugfs_create_file_unsafe("stats", 0444, mhi_netdev->dentry,
+				   mhi_netdev, &debugfs_stats);
+	debugfs_create_file_unsafe("chain", 0200, mhi_netdev->dentry,
+				   mhi_netdev, &debugfs_chain);
 }
 
 static void mhi_netdev_create_debugfs_dir(void)
@@ -755,12 +934,12 @@ static void mhi_netdev_remove(struct mhi_device *mhi_dev)
 		return;
 	}
 
+	kthread_stop(mhi_netdev->alloc_task);
 	netif_stop_queue(mhi_netdev->ndev);
 	napi_disable(mhi_netdev->napi);
 	unregister_netdev(mhi_netdev->ndev);
 	netif_napi_del(mhi_netdev->napi);
 	free_netdev(mhi_netdev->ndev);
-	mhi_netdev_free_pool(mhi_netdev);
 
 	if (!IS_ERR_OR_NULL(mhi_netdev->dentry))
 		debugfs_remove_recursive(mhi_netdev->dentry);
@@ -782,6 +961,9 @@ static void mhi_netdev_clone_dev(struct mhi_netdev *mhi_netdev,
 	mhi_netdev->ipc_log_lvl = parent->ipc_log_lvl;
 	mhi_netdev->is_rsc_dev = true;
 	mhi_netdev->chain = parent->chain;
+	mhi_netdev->rsc_parent = parent;
+	mhi_netdev->recycle_pool = parent->recycle_pool;
+	mhi_netdev->bg_pool = parent->bg_pool;
 }
 
 static int mhi_netdev_probe(struct mhi_device *mhi_dev,
@@ -803,6 +985,13 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
 	if (!mhi_netdev)
 		return -ENOMEM;
 
+	/* move mhi channels to start state */
+	ret = mhi_prepare_for_transfer(mhi_dev);
+	if (ret) {
+		MSG_ERR("Failed to start channels ret %d\n", ret);
+		return ret;
+	}
+
 	mhi_netdev->mhi_dev = mhi_dev;
 	mhi_device_set_devdata(mhi_dev, mhi_netdev);
 
@@ -850,6 +1039,38 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
 		if (ret)
 			return ret;
 
+		/* setup pool size ~2x ring length */
+		nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
+		mhi_netdev->pool_size = 1 << __ilog2_u32(nr_tre);
+		if (nr_tre > mhi_netdev->pool_size)
+			mhi_netdev->pool_size <<= 1;
+		mhi_netdev->pool_size <<= 1;
+
+		/* if we expect a child device to share it, double the pool */
+		if (of_parse_phandle(of_node, "mhi,rsc-child", 0))
+			mhi_netdev->pool_size <<= 1;
+
+		/* allocate memory pool */
+		ret = mhi_netdev_alloc_pool(mhi_netdev);
+		if (ret)
+			return -ENOMEM;
+
+		/* create a background task to allocate memory */
+		mhi_netdev->bg_pool = kmalloc(sizeof(*mhi_netdev->bg_pool),
+					      GFP_KERNEL);
+		if (!mhi_netdev->bg_pool)
+			return -ENOMEM;
+
+		init_waitqueue_head(&mhi_netdev->alloc_event);
+		INIT_LIST_HEAD(mhi_netdev->bg_pool);
+		spin_lock_init(&mhi_netdev->bg_lock);
+		mhi_netdev->bg_pool_limit = mhi_netdev->pool_size / 4;
+		mhi_netdev->alloc_task = kthread_run(mhi_netdev_alloc_thread,
+						     mhi_netdev,
+						     mhi_netdev->ndev->name);
+		if (IS_ERR(mhi_netdev->alloc_task))
+			return PTR_ERR(mhi_netdev->alloc_task);
+
 		/* create ipc log buffer */
 		snprintf(node_name, sizeof(node_name),
 			 "%s_%04x_%02u.%02u.%02u_%u",
@@ -863,25 +1084,6 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
 		mhi_netdev_create_debugfs(mhi_netdev);
 	}
 
-	/* move mhi channels to start state */
-	ret = mhi_prepare_for_transfer(mhi_dev);
-	if (ret) {
-		MSG_ERR("Failed to start channels ret %d\n", ret);
-		goto error_start;
-	}
-
-	/* setup pool size ~2x ring length*/
-	nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
-	mhi_netdev->pool_size = 1 << __ilog2_u32(nr_tre);
-	if (nr_tre > mhi_netdev->pool_size)
-		mhi_netdev->pool_size <<= 1;
-	mhi_netdev->pool_size <<= 1;
-
-	/* allocate memory pool */
-	ret = mhi_netdev_alloc_pool(mhi_netdev);
-	if (ret)
-		goto error_start;
-
 	/* link child node with parent node if it's a child device */
 	if (p_netdev)
 		p_netdev->rsc_dev = mhi_netdev;
@@ -892,18 +1094,6 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
 	napi_schedule(mhi_netdev->napi);
 
 	return 0;
-
-error_start:
-	if (phandle)
-		return ret;
-
-	netif_stop_queue(mhi_netdev->ndev);
-	napi_disable(mhi_netdev->napi);
-	unregister_netdev(mhi_netdev->ndev);
-	netif_napi_del(mhi_netdev->napi);
-	free_netdev(mhi_netdev->ndev);
-
-	return ret;
 }
 
 static const struct mhi_device_id mhi_netdev_match_table[] = {
diff --git a/drivers/bus/mhi/devices/mhi_satellite.c b/drivers/bus/mhi/devices/mhi_satellite.c
index 33338f4..9fff109 100644
--- a/drivers/bus/mhi/devices/mhi_satellite.c
+++ b/drivers/bus/mhi/devices/mhi_satellite.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2019, The Linux Foundation. All rights reserved.*/
 
-#include <linux/debugfs.h>
+#include <linux/async.h>
 #include <linux/device.h>
 #include <linux/dma-direction.h>
 #include <linux/dma-mapping.h>
@@ -19,8 +19,6 @@
 
 #define MHI_SAT_DRIVER_NAME "mhi_satellite"
 
-static bool mhi_sat_defer_init;
-
 /* logging macros */
 #define IPC_LOG_PAGES (10)
 #define IPC_LOG_LVL (MHI_MSG_LVL_INFO)
@@ -147,17 +145,13 @@ enum mhi_ev_ccs {
 /* satellite subsystem definitions */
 enum subsys_id {
 	SUBSYS_ADSP,
-	SUBSYS_CDSP,
 	SUBSYS_SLPI,
-	SUBSYS_MODEM,
 	SUBSYS_MAX,
 };
 
 static const char * const subsys_names[SUBSYS_MAX] = {
 	[SUBSYS_ADSP] = "adsp",
-	[SUBSYS_CDSP] = "cdsp",
 	[SUBSYS_SLPI] = "slpi",
-	[SUBSYS_MODEM] = "modem",
 };
 
 struct mhi_sat_subsys {
@@ -235,6 +229,21 @@ struct mhi_sat_packet {
 	void *msg; /* incoming message */
 };
 
+enum mhi_sat_state {
+	SAT_READY, /* initial state when device is presented to driver */
+	SAT_RUNNING, /* subsystem can communicate with the device */
+	SAT_DISCONNECTED, /* rpmsg link is down */
+	SAT_FATAL_DETECT, /* device is down as fatal error was detected early */
+	SAT_ERROR, /* device is down after error or graceful shutdown */
+	SAT_DISABLED, /* set if rpmsg link goes down after device is down */
+};
+
+#define MHI_SAT_ACTIVE(cntrl) ((cntrl)->state == SAT_RUNNING)
+#define MHI_SAT_FATAL_DETECT(cntrl) ((cntrl)->state == SAT_FATAL_DETECT)
+#define MHI_SAT_ALLOW_CONNECTION(cntrl) ((cntrl)->state == SAT_READY || \
+					 (cntrl)->state == SAT_DISCONNECTED)
+#define MHI_SAT_IN_ERROR_STATE(cntrl) ((cntrl)->state >= SAT_FATAL_DETECT)
+
 struct mhi_sat_cntrl {
 	struct list_head node;
 
@@ -250,6 +259,7 @@ struct mhi_sat_cntrl {
 
 	struct work_struct connect_work; /* subsystem connection worker */
 	struct work_struct process_work; /* incoming packets processor */
+	async_cookie_t error_cookie; /* synchronize device error handling */
 
 	/* mhi core/controller configurations */
 	u32 dev_id; /* unique device ID with BDF as per connection topology */
@@ -261,7 +271,8 @@ struct mhi_sat_cntrl {
 	int num_devices; /* mhi devices current count */
 	int max_devices; /* count of maximum devices for subsys/controller */
 	u16 seq; /* internal sequence number for all outgoing packets */
-	bool active; /* flag set if hello packet/MHI_CFG event was sent */
+	enum mhi_sat_state state; /* controller state manager */
+	spinlock_t state_lock; /* lock to change controller state */
 
 	/* command completion variables */
 	u16 last_cmd_seq; /* sequence number of last sent command packet */
@@ -285,9 +296,6 @@ struct mhi_sat_driver {
 
 	struct mhi_sat_subsys *subsys; /* pointer to subsystem array */
 	unsigned int num_subsys;
-
-	struct dentry *dentry; /* debugfs directory */
-	bool deferred_init_done; /* flag for deferred init protection */
 };
 
 static struct mhi_sat_driver mhi_sat_driver;
@@ -566,6 +574,83 @@ static void mhi_sat_process_cmds(struct mhi_sat_cntrl *sat_cntrl,
 	}
 }
 
+/* send sys_err command to subsystem if device asserts or is powered off */
+static void mhi_sat_send_sys_err(struct mhi_sat_cntrl *sat_cntrl)
+{
+	struct mhi_sat_subsys *subsys = sat_cntrl->subsys;
+	struct sat_tre *pkt;
+	void *msg;
+	int ret;
+
+	/* flush all pending work */
+	flush_work(&sat_cntrl->connect_work);
+	flush_work(&sat_cntrl->process_work);
+
+	msg = kmalloc(SAT_MSG_SIZE(1), GFP_KERNEL);
+
+	MHI_SAT_ASSERT(!msg, "Unable to malloc for SYS_ERR message!\n");
+	if (!msg)
+		return;
+
+	pkt = SAT_TRE_OFFSET(msg);
+	pkt->ptr = MHI_TRE_CMD_SYS_ERR_PTR;
+	pkt->dword[0] = MHI_TRE_CMD_SYS_ERR_D0;
+	pkt->dword[1] = MHI_TRE_CMD_SYS_ERR_D1;
+
+	mutex_lock(&sat_cntrl->cmd_wait_mutex);
+
+	ret = mhi_sat_send_msg(sat_cntrl, SAT_MSG_ID_CMD,
+			       SAT_RESERVED_SEQ_NUM, msg,
+			       SAT_MSG_SIZE(1));
+	kfree(msg);
+	if (ret) {
+		MHI_SAT_ERR("Failed to notify SYS_ERR cmd\n");
+		mutex_unlock(&sat_cntrl->cmd_wait_mutex);
+		return;
+	}
+
+	MHI_SAT_LOG("SYS_ERR command sent\n");
+
+	/* blocking call to wait for command completion event */
+	mhi_sat_wait_cmd_completion(sat_cntrl);
+
+	mutex_unlock(&sat_cntrl->cmd_wait_mutex);
+}
+
+static void mhi_sat_error_worker(void *data, async_cookie_t cookie)
+{
+	struct mhi_sat_cntrl *sat_cntrl = data;
+	struct mhi_sat_subsys *subsys = sat_cntrl->subsys;
+	struct sat_tre *pkt;
+	void *msg;
+	int ret;
+
+	MHI_SAT_LOG("Entered\n");
+
+	/* flush all pending work */
+	flush_work(&sat_cntrl->connect_work);
+	flush_work(&sat_cntrl->process_work);
+
+	msg = kmalloc(SAT_MSG_SIZE(1), GFP_KERNEL);
+
+	MHI_SAT_ASSERT(!msg, "Unable to malloc for SYS_ERR message!\n");
+	if (!msg)
+		return;
+
+	pkt = SAT_TRE_OFFSET(msg);
+	pkt->ptr = MHI_TRE_EVT_MHI_STATE_PTR;
+	pkt->dword[0] = MHI_TRE_EVT_MHI_STATE_D0(MHI_STATE_SYS_ERR);
+	pkt->dword[1] = MHI_TRE_EVT_MHI_STATE_D1;
+
+	ret = mhi_sat_send_msg(sat_cntrl, SAT_MSG_ID_EVT,
+			       SAT_RESERVED_SEQ_NUM, msg,
+			       SAT_MSG_SIZE(1));
+	kfree(msg);
+
+	MHI_SAT_LOG("SYS_ERROR state change event send %s\n",
+		    ret ? "failed" : "succeeded");
+}
+
 static void mhi_sat_process_worker(struct work_struct *work)
 {
 	struct mhi_sat_cntrl *sat_cntrl = container_of(work,
@@ -588,6 +673,9 @@ static void mhi_sat_process_worker(struct work_struct *work)
 
 		list_del(&packet->node);
 
+		if (!MHI_SAT_ACTIVE(sat_cntrl))
+			goto process_next;
+
 		mhi_sat_process_cmds(sat_cntrl, hdr, pkt);
 
 		/* send response event(s) */
@@ -596,6 +684,7 @@ static void mhi_sat_process_worker(struct work_struct *work)
 				 SAT_MSG_SIZE(SAT_TRE_NUM_PKTS(
 					      hdr->payload_size)));
 
+process_next:
 		kfree(packet);
 	}
 
@@ -607,21 +696,26 @@ static void mhi_sat_connect_worker(struct work_struct *work)
 	struct mhi_sat_cntrl *sat_cntrl = container_of(work,
 					struct mhi_sat_cntrl, connect_work);
 	struct mhi_sat_subsys *subsys = sat_cntrl->subsys;
+	enum mhi_sat_state prev_state;
 	struct sat_tre *pkt;
 	void *msg;
 	int ret;
 
+	spin_lock_irq(&sat_cntrl->state_lock);
 	if (!subsys->rpdev || sat_cntrl->max_devices != sat_cntrl->num_devices
-	    || sat_cntrl->active)
+	    || !(MHI_SAT_ALLOW_CONNECTION(sat_cntrl))) {
+		spin_unlock_irq(&sat_cntrl->state_lock);
 		return;
+	}
+	prev_state = sat_cntrl->state;
+	sat_cntrl->state = SAT_RUNNING;
+	spin_unlock_irq(&sat_cntrl->state_lock);
 
 	MHI_SAT_LOG("Entered\n");
 
 	msg = kmalloc(SAT_MSG_SIZE(3), GFP_ATOMIC);
 	if (!msg)
-		return;
-
-	sat_cntrl->active = true;
+		goto error_connect_work;
 
 	pkt = SAT_TRE_OFFSET(msg);
 
@@ -648,11 +742,18 @@ static void mhi_sat_connect_worker(struct work_struct *work)
 	kfree(msg);
 	if (ret) {
 		MHI_SAT_ERR("Failed to send hello packet:%d\n", ret);
-		sat_cntrl->active = false;
-		return;
+		goto error_connect_work;
 	}
 
 	MHI_SAT_LOG("Device 0x%x sent hello packet\n", sat_cntrl->dev_id);
+
+	return;
+
+error_connect_work:
+	spin_lock_irq(&sat_cntrl->state_lock);
+	if (MHI_SAT_ACTIVE(sat_cntrl))
+		sat_cntrl->state = prev_state;
+	spin_unlock_irq(&sat_cntrl->state_lock);
 }
 
 static void mhi_sat_process_events(struct mhi_sat_cntrl *sat_cntrl,
@@ -697,7 +798,7 @@ static int mhi_sat_rpmsg_cb(struct rpmsg_device *rpdev, void *data, int len,
 	}
 
 	/* Inactive controller cannot process incoming commands */
-	if (unlikely(!sat_cntrl->active)) {
+	if (unlikely(!MHI_SAT_ACTIVE(sat_cntrl))) {
 		MHI_SAT_ERR("Message for inactive controller!\n");
 		return 0;
 	}
@@ -732,10 +833,21 @@ static void mhi_sat_rpmsg_remove(struct rpmsg_device *rpdev)
 	/* unprepare each controller/device from transfer */
 	mutex_lock(&subsys->cntrl_mutex);
 	list_for_each_entry(sat_cntrl, &subsys->cntrl_list, node) {
-		if (!sat_cntrl->active)
-			continue;
+		async_synchronize_cookie(sat_cntrl->error_cookie + 1);
 
-		sat_cntrl->active = false;
+		spin_lock_irq(&sat_cntrl->state_lock);
+		/*
+		 * move to disabled state if an early fatal error is detected
+		 * and the rpmsg link goes down before the device remove
+		 * call from mhi is received
+		 */
+		if (MHI_SAT_IN_ERROR_STATE(sat_cntrl)) {
+			sat_cntrl->state = SAT_DISABLED;
+			spin_unlock_irq(&sat_cntrl->state_lock);
+			continue;
+		}
+		sat_cntrl->state = SAT_DISCONNECTED;
+		spin_unlock_irq(&sat_cntrl->state_lock);
 
 		flush_work(&sat_cntrl->connect_work);
 		flush_work(&sat_cntrl->process_work);
@@ -781,6 +893,8 @@ static int mhi_sat_rpmsg_probe(struct rpmsg_device *rpdev)
 	if (!subsys)
 		return -EINVAL;
 
+	mutex_lock(&subsys->cntrl_mutex);
+
 	MHI_SUBSYS_LOG("Received RPMSG probe\n");
 
 	dev_set_drvdata(&rpdev->dev, subsys);
@@ -793,6 +907,8 @@ static int mhi_sat_rpmsg_probe(struct rpmsg_device *rpdev)
 		schedule_work(&sat_cntrl->connect_work);
 	spin_unlock_irq(&subsys->cntrl_lock);
 
+	mutex_unlock(&subsys->cntrl_mutex);
+
 	return 0;
 }
 
@@ -814,6 +930,21 @@ static struct rpmsg_driver mhi_sat_rpmsg_driver = {
 static void mhi_sat_dev_status_cb(struct mhi_device *mhi_dev,
 				  enum MHI_CB mhi_cb)
 {
+	struct mhi_sat_device *sat_dev = mhi_device_get_devdata(mhi_dev);
+	struct mhi_sat_cntrl *sat_cntrl = sat_dev->cntrl;
+	struct mhi_sat_subsys *subsys = sat_cntrl->subsys;
+	unsigned long flags;
+
+	if (mhi_cb != MHI_CB_FATAL_ERROR)
+		return;
+
+	MHI_SAT_LOG("Device fatal error detected\n");
+	spin_lock_irqsave(&sat_cntrl->state_lock, flags);
+	if (MHI_SAT_ACTIVE(sat_cntrl))
+		sat_cntrl->error_cookie = async_schedule(mhi_sat_error_worker,
+							 sat_cntrl);
+	sat_cntrl->state = SAT_FATAL_DETECT;
+	spin_unlock_irqrestore(&sat_cntrl->state_lock, flags);
 }
 
 static void mhi_sat_dev_remove(struct mhi_device *mhi_dev)
@@ -822,9 +953,7 @@ static void mhi_sat_dev_remove(struct mhi_device *mhi_dev)
 	struct mhi_sat_cntrl *sat_cntrl = sat_dev->cntrl;
 	struct mhi_sat_subsys *subsys = sat_cntrl->subsys;
 	struct mhi_buf *buf, *tmp;
-	struct sat_tre *pkt;
-	void *msg;
-	int ret;
+	bool send_sys_err = false;
 
 	/* remove device node from probed list */
 	mutex_lock(&sat_cntrl->list_mutex);
@@ -834,51 +963,32 @@ static void mhi_sat_dev_remove(struct mhi_device *mhi_dev)
 	sat_cntrl->num_devices--;
 
 	mutex_lock(&subsys->cntrl_mutex);
-	/* prepare SYS_ERR command if first device is being removed */
-	if (sat_cntrl->active) {
-		sat_cntrl->active = false;
 
-		/* flush all pending work */
-		flush_work(&sat_cntrl->connect_work);
-		flush_work(&sat_cntrl->process_work);
+	async_synchronize_cookie(sat_cntrl->error_cookie + 1);
 
-		msg = kmalloc(SAT_MSG_SIZE(1), GFP_KERNEL);
+	/* send sys_err if first device is removed */
+	spin_lock_irq(&sat_cntrl->state_lock);
+	if (MHI_SAT_ACTIVE(sat_cntrl) || MHI_SAT_FATAL_DETECT(sat_cntrl))
+		send_sys_err = true;
+	sat_cntrl->state = SAT_ERROR;
+	spin_unlock_irq(&sat_cntrl->state_lock);
 
-		MHI_SAT_ASSERT(!msg, "Unable to malloc for SYS_ERR message!\n");
+	if (send_sys_err)
+		mhi_sat_send_sys_err(sat_cntrl);
 
-		pkt = SAT_TRE_OFFSET(msg);
-		pkt->ptr = MHI_TRE_CMD_SYS_ERR_PTR;
-		pkt->dword[0] = MHI_TRE_CMD_SYS_ERR_D0;
-		pkt->dword[1] = MHI_TRE_CMD_SYS_ERR_D1;
-
-		/* acquire cmd_wait_mutex before sending command */
-		mutex_lock(&sat_cntrl->cmd_wait_mutex);
-
-		ret = mhi_sat_send_msg(sat_cntrl, SAT_MSG_ID_CMD,
-				       SAT_RESERVED_SEQ_NUM, msg,
-				       SAT_MSG_SIZE(1));
-		kfree(msg);
-		if (ret) {
-			MHI_SAT_ERR("Failed to notify SYS_ERR\n");
-			mutex_unlock(&sat_cntrl->cmd_wait_mutex);
-			goto exit_sys_err_send;
-		}
-
-		MHI_SAT_LOG("SYS_ERR command sent\n");
-
-		/* blocking call to wait for command completion event */
-		mhi_sat_wait_cmd_completion(sat_cntrl);
-
-		mutex_unlock(&sat_cntrl->cmd_wait_mutex);
-	}
-
-exit_sys_err_send:
 	/* exit if some devices are still present */
 	if (sat_cntrl->num_devices) {
 		mutex_unlock(&subsys->cntrl_mutex);
 		return;
 	}
 
+	/*
+	 * cancel any pending work as it is possible that work gets queued
+	 * when rpmsg probe comes in before controller is removed
+	 */
+	cancel_work_sync(&sat_cntrl->connect_work);
+	cancel_work_sync(&sat_cntrl->process_work);
+
 	/* remove address mappings */
 	mutex_lock(&sat_cntrl->list_mutex);
 	list_for_each_entry_safe(buf, tmp, &sat_cntrl->addr_map_list, node) {
@@ -937,6 +1047,7 @@ static int mhi_sat_dev_probe(struct mhi_device *mhi_dev,
 		mutex_init(&sat_cntrl->list_mutex);
 		mutex_init(&sat_cntrl->cmd_wait_mutex);
 		spin_lock_init(&sat_cntrl->pkt_lock);
+		spin_lock_init(&sat_cntrl->state_lock);
 		INIT_WORK(&sat_cntrl->connect_work, mhi_sat_connect_worker);
 		INIT_WORK(&sat_cntrl->process_work, mhi_sat_process_worker);
 		INIT_LIST_HEAD(&sat_cntrl->dev_list);
@@ -1006,17 +1117,6 @@ static const struct mhi_device_id mhi_sat_dev_match_table[] = {
 	{ .chan = "ADSP_7", .driver_data = SUBSYS_ADSP },
 	{ .chan = "ADSP_8", .driver_data = SUBSYS_ADSP },
 	{ .chan = "ADSP_9", .driver_data = SUBSYS_ADSP },
-	/* CDSP */
-	{ .chan = "CDSP_0", .driver_data = SUBSYS_CDSP },
-	{ .chan = "CDSP_1", .driver_data = SUBSYS_CDSP },
-	{ .chan = "CDSP_2", .driver_data = SUBSYS_CDSP },
-	{ .chan = "CDSP_3", .driver_data = SUBSYS_CDSP },
-	{ .chan = "CDSP_4", .driver_data = SUBSYS_CDSP },
-	{ .chan = "CDSP_5", .driver_data = SUBSYS_CDSP },
-	{ .chan = "CDSP_6", .driver_data = SUBSYS_CDSP },
-	{ .chan = "CDSP_7", .driver_data = SUBSYS_CDSP },
-	{ .chan = "CDSP_8", .driver_data = SUBSYS_CDSP },
-	{ .chan = "CDSP_9", .driver_data = SUBSYS_CDSP },
 	/* SLPI */
 	{ .chan = "SLPI_0", .driver_data = SUBSYS_SLPI },
 	{ .chan = "SLPI_1", .driver_data = SUBSYS_SLPI },
@@ -1028,17 +1128,6 @@ static const struct mhi_device_id mhi_sat_dev_match_table[] = {
 	{ .chan = "SLPI_7", .driver_data = SUBSYS_SLPI },
 	{ .chan = "SLPI_8", .driver_data = SUBSYS_SLPI },
 	{ .chan = "SLPI_9", .driver_data = SUBSYS_SLPI },
-	/* MODEM */
-	{ .chan = "MODEM_0", .driver_data = SUBSYS_MODEM },
-	{ .chan = "MODEM_1", .driver_data = SUBSYS_MODEM },
-	{ .chan = "MODEM_2", .driver_data = SUBSYS_MODEM },
-	{ .chan = "MODEM_3", .driver_data = SUBSYS_MODEM },
-	{ .chan = "MODEM_4", .driver_data = SUBSYS_MODEM },
-	{ .chan = "MODEM_5", .driver_data = SUBSYS_MODEM },
-	{ .chan = "MODEM_6", .driver_data = SUBSYS_MODEM },
-	{ .chan = "MODEM_7", .driver_data = SUBSYS_MODEM },
-	{ .chan = "MODEM_8", .driver_data = SUBSYS_MODEM },
-	{ .chan = "MODEM_9", .driver_data = SUBSYS_MODEM },
 	{},
 };
 
@@ -1053,44 +1142,6 @@ static struct mhi_driver mhi_sat_dev_driver = {
 	},
 };
 
-int mhi_sat_trigger_init(void *data, u64 val)
-{
-	struct mhi_sat_subsys *subsys;
-	int i, ret;
-
-	if (mhi_sat_driver.deferred_init_done)
-		return -EIO;
-
-	ret = register_rpmsg_driver(&mhi_sat_rpmsg_driver);
-	if (ret)
-		goto error_sat_trigger_init;
-
-	ret = mhi_driver_register(&mhi_sat_dev_driver);
-	if (ret)
-		goto error_sat_trigger_register;
-
-	mhi_sat_driver.deferred_init_done = true;
-
-	return 0;
-
-error_sat_trigger_register:
-	unregister_rpmsg_driver(&mhi_sat_rpmsg_driver);
-
-error_sat_trigger_init:
-	subsys = mhi_sat_driver.subsys;
-	for (i = 0; i < mhi_sat_driver.num_subsys; i++, subsys++) {
-		ipc_log_context_destroy(subsys->ipc_log);
-		mutex_destroy(&subsys->cntrl_mutex);
-	}
-	kfree(mhi_sat_driver.subsys);
-	mhi_sat_driver.subsys = NULL;
-
-	return ret;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(mhi_sat_debugfs_fops, NULL,
-			mhi_sat_trigger_init, "%llu\n");
-
 static int mhi_sat_init(void)
 {
 	struct mhi_sat_subsys *subsys;
@@ -1116,20 +1167,6 @@ static int mhi_sat_init(void)
 		subsys->ipc_log = ipc_log_context_create(IPC_LOG_PAGES, log, 0);
 	}
 
-	/* create debugfs entry if defer_init is enabled */
-	if (mhi_sat_defer_init) {
-		mhi_sat_driver.dentry = debugfs_create_dir("mhi_sat", NULL);
-		if (IS_ERR_OR_NULL(mhi_sat_driver.dentry)) {
-			ret = -ENODEV;
-			goto error_sat_init;
-		}
-
-		debugfs_create_file("debug", 0444, mhi_sat_driver.dentry, NULL,
-				    &mhi_sat_debugfs_fops);
-
-		return 0;
-	}
-
 	ret = register_rpmsg_driver(&mhi_sat_rpmsg_driver);
 	if (ret)
 		goto error_sat_init;
diff --git a/drivers/bus/mhi/devices/mhi_uci.c b/drivers/bus/mhi/devices/mhi_uci.c
index 978c627..e31eaa4 100644
--- a/drivers/bus/mhi/devices/mhi_uci.c
+++ b/drivers/bus/mhi/devices/mhi_uci.c
@@ -12,6 +12,7 @@
 #include <linux/of_device.h>
 #include <linux/poll.h>
 #include <linux/slab.h>
+#include <linux/termios.h>
 #include <linux/types.h>
 #include <linux/wait.h>
 #include <linux/uaccess.h>
@@ -46,6 +47,7 @@ struct uci_dev {
 	size_t mtu;
 	int ref_count;
 	bool enabled;
+	u32 tiocm;
 	void *ipc_log;
 };
 
@@ -145,11 +147,20 @@ static long mhi_uci_ioctl(struct file *file,
 {
 	struct uci_dev *uci_dev = file->private_data;
 	struct mhi_device *mhi_dev = uci_dev->mhi_dev;
+	struct uci_chan *uci_chan = &uci_dev->dl_chan;
 	long ret = -ERESTARTSYS;
 
 	mutex_lock(&uci_dev->mutex);
-	if (uci_dev->enabled)
+
+	if (cmd == TIOCMGET) {
+		spin_lock_bh(&uci_chan->lock);
+		ret = uci_dev->tiocm;
+		uci_dev->tiocm = 0;
+		spin_unlock_bh(&uci_chan->lock);
+	} else if (uci_dev->enabled) {
 		ret = mhi_ioctl(mhi_dev, cmd, arg);
+	}
+
 	mutex_unlock(&uci_dev->mutex);
 
 	return ret;
@@ -212,9 +223,16 @@ static unsigned int mhi_uci_poll(struct file *file, poll_table *wait)
 	spin_lock_bh(&uci_chan->lock);
 	if (!uci_dev->enabled) {
 		mask = POLLERR;
-	} else if (!list_empty(&uci_chan->pending) || uci_chan->cur_buf) {
-		MSG_VERB("Client can read from node\n");
-		mask |= POLLIN | POLLRDNORM;
+	} else {
+		if (!list_empty(&uci_chan->pending) || uci_chan->cur_buf) {
+			MSG_VERB("Client can read from node\n");
+			mask |= POLLIN | POLLRDNORM;
+		}
+
+		if (uci_dev->tiocm) {
+			MSG_VERB("Line status changed\n");
+			mask |= POLLPRI;
+		}
 	}
 	spin_unlock_bh(&uci_chan->lock);
 
@@ -646,6 +664,20 @@ static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
 	wake_up(&uci_chan->wq);
 }
 
+static void mhi_status_cb(struct mhi_device *mhi_dev, enum MHI_CB reason)
+{
+	struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
+	struct uci_chan *uci_chan = &uci_dev->dl_chan;
+	unsigned long flags;
+
+	if (reason == MHI_CB_DTR_SIGNAL) {
+		spin_lock_irqsave(&uci_chan->lock, flags);
+		uci_dev->tiocm = mhi_dev->tiocm;
+		spin_unlock_irqrestore(&uci_chan->lock, flags);
+		wake_up(&uci_chan->wq);
+	}
+}
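
With POLLPRI and TIOCMGET wired up, a userspace client can watch for DTR
line-state changes roughly as below (a hedged sketch: the device node path is
illustrative, and note this driver returns the latched bits as the ioctl
return value rather than through a pointer, unlike a regular tty):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <termios.h>

int main(void)
{
	int fd = open("/dev/mhi_0000_00.01.00_DUN", O_RDWR); /* example node */
	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
	int tiocm;

	if (fd < 0)
		return 1;

	while (poll(&pfd, 1, -1) > 0) {
		if (!(pfd.revents & POLLPRI))
			continue;

		/* read and clear the latched line-state bits */
		tiocm = ioctl(fd, TIOCMGET, 0);
		printf("DCD:%d DSR:%d RI:%d\n",
		       !!(tiocm & TIOCM_CD),
		       !!(tiocm & TIOCM_DSR),
		       !!(tiocm & TIOCM_RI));
	}
	return 0;
}
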
+
 /* .driver_data stores max mtu */
 static const struct mhi_device_id mhi_uci_match_table[] = {
 	{ .chan = "LOOPBACK", .driver_data = 0x1000 },
@@ -664,6 +696,7 @@ static struct mhi_driver mhi_uci_driver = {
 	.probe = mhi_uci_probe,
 	.ul_xfer_cb = mhi_ul_xfer_cb,
 	.dl_xfer_cb = mhi_dl_xfer_cb,
+	.status_cb = mhi_status_cb,
 	.driver = {
 		.name = MHI_UCI_DRIVER_NAME,
 		.owner = THIS_MODULE,
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 2819868..f8c47e9 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -551,13 +551,21 @@
 	  subsystems crash.
 
 config MSM_ADSPRPC
-        tristate "QTI ADSP RPC driver"
-        depends on QCOM_GLINK
-        help
-          Provides a communication mechanism that allows clients to
-          make remote method invocations across processor boundary to
-          applications/compute DSP processor.
-		  Say M if you want to enable this module.
+	tristate "QTI FastRPC driver"
+	depends on QCOM_GLINK
+	help
+	  Provides a communication mechanism that allows clients to
+	  make remote method invocations across processor boundary to
+	  applications/compute DSP processor.
+	  Say M if you want to enable this module.
+
+config ADSPRPC_DEBUG
+	bool "Debug logs in FastRPC driver"
+	help
+	  Enable debug logs in the FastRPC driver. The flag is disabled
+	  by default to maximize RPC performance, as debug logging adds
+	  RPC overhead.
+	  Say Y here if you want to enable the logs.
 
 config MSM_RDBG
 	tristate "QTI Remote debug driver"
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index a457a86..bc0de32 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -17,6 +17,7 @@
 #include <linux/msm_ion.h>
 #include <soc/qcom/secure_buffer.h>
 #include <linux/rpmsg.h>
+#include <linux/ipc_logging.h>
 #include <soc/qcom/subsystem_notif.h>
 #include <soc/qcom/subsystem_restart.h>
 #include <soc/qcom/service-notifier.h>
@@ -129,6 +130,15 @@
 			(int64_t *)(perf_ptr + offset)\
 				: (int64_t *)NULL) : (int64_t *)NULL)
 
+#define FASTRPC_GLINK_LOG_PAGES 8
+#define LOG_FASTRPC_GLINK_MSG(ctx, x, ...)	\
+	do {				\
+		if (ctx)		\
+			ipc_log_string(ctx, "%s (%d, %d): "x,	\
+				current->comm, current->tgid, current->pid, \
+				##__VA_ARGS__); \
+	} while (0)
+
 static int fastrpc_pdr_notifier_cb(struct notifier_block *nb,
 					unsigned long code,
 					void *data);
@@ -295,6 +305,7 @@ struct fastrpc_channel_ctx {
 	/* Indicates, if channel is restricted to secure node only */
 	int secure;
 	struct fastrpc_dsp_capabilities dsp_cap_kernel;
+	void *ipc_log_ctx;
 };
 
 struct fastrpc_apps {
@@ -796,7 +807,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
 	struct fastrpc_session_ctx *sess;
 	struct fastrpc_apps *apps = fl->apps;
 	int cid = fl->cid;
-	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
+	struct fastrpc_channel_ctx *chan = NULL;
 	struct fastrpc_mmap *map = NULL;
 	dma_addr_t region_phys = 0;
 	void *region_vaddr = NULL;
@@ -804,6 +815,11 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
 	int err = 0, vmid, sgl_index = 0;
 	struct scatterlist *sgl = NULL;
 
+	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
+	if (err)
+		goto bail;
+	chan = &apps->channel[cid];
+
 	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
 		return 0;
 	map = kzalloc(sizeof(*map), GFP_KERNEL);
@@ -1423,7 +1439,7 @@ static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
 
 static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 {
-	remote_arg64_t *rpra, *lrpra;
+	remote_arg64_t *rpra;
 	remote_arg_t *lpra = ctx->lpra;
 	struct smq_invoke_buf *list;
 	struct smq_phy_page *pages, *ipage;
@@ -1438,7 +1454,11 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 	int mflags = 0;
 	uint64_t *fdlist;
 	uint32_t *crclist;
-	int64_t *perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
+	uint32_t earlyHint;
+	int64_t *perf_counter = NULL;
+
+	if (ctx->fl->profile)
+		perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
 
 	/* calculate size of the metadata */
 	rpra = NULL;
@@ -1477,8 +1497,10 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 		ipage += 1;
 	}
 	mutex_unlock(&ctx->fl->map_mutex);
+
+	/* metalen includes metadata, fds, crc and early wakeup hint */
 	metalen = copylen = (size_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
-				 (sizeof(uint32_t) * M_CRCLIST);
+			(sizeof(uint32_t) * M_CRCLIST) + sizeof(earlyHint);
 
 	/* allocate new local rpra buffer */
 	lrpralen = (size_t)&list[0];
@@ -1487,11 +1509,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 		if (err)
 			goto bail;
 	}
-	if (ctx->lbuf->virt)
-		memset(ctx->lbuf->virt, 0, lrpralen);
-
-	lrpra = ctx->lbuf->virt;
-	ctx->lrpra = lrpra;
+	ctx->lrpra = ctx->lbuf->virt;
 
 	/* calculate len required for copying */
 	for (oix = 0; oix < inbufs + outbufs; ++oix) {
@@ -1541,13 +1559,13 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 
 	/* map ion buffers */
 	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
-	for (i = 0; rpra && lrpra && i < inbufs + outbufs; ++i) {
+	for (i = 0; rpra && i < inbufs + outbufs; ++i) {
 		struct fastrpc_mmap *map = ctx->maps[i];
 		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
 		size_t len = lpra[i].buf.len;
 
-		rpra[i].buf.pv = lrpra[i].buf.pv = 0;
-		rpra[i].buf.len = lrpra[i].buf.len = len;
+		rpra[i].buf.pv = 0;
+		rpra[i].buf.len = len;
 		if (!len)
 			continue;
 		if (map) {
@@ -1575,7 +1593,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 			pages[idx].addr = map->phys + offset;
 			pages[idx].size = num << PAGE_SHIFT;
 		}
-		rpra[i].buf.pv = lrpra[i].buf.pv = buf;
+		rpra[i].buf.pv = buf;
 	}
 	PERF_END);
 	for (i = bufs; i < bufs + handles; ++i) {
@@ -1585,15 +1603,16 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 		pages[i].size = map->size;
 	}
 	fdlist = (uint64_t *)&pages[bufs + handles];
-	for (i = 0; i < M_FDLIST; i++)
-		fdlist[i] = 0;
 	crclist = (uint32_t *)&fdlist[M_FDLIST];
-	memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
+	/*
+	 * reset fds, crc and early wakeup hint memory;
+	 * the remote process updates these values before responding
+	 */
+	memset(fdlist, 0, sizeof(uint64_t)*M_FDLIST +
+			sizeof(uint32_t)*M_CRCLIST + sizeof(earlyHint));
 
 	/* copy non ion buffers */
 	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
 	rlen = copylen - metalen;
-	for (oix = 0; rpra && lrpra && oix < inbufs + outbufs; ++oix) {
+	for (oix = 0; rpra && oix < inbufs + outbufs; ++oix) {
 		int i = ctx->overps[oix]->raix;
 		struct fastrpc_mmap *map = ctx->maps[i];
 		size_t mlen;
@@ -1612,7 +1631,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 		VERIFY(err, rlen >= mlen);
 		if (err)
 			goto bail;
-		rpra[i].buf.pv = lrpra[i].buf.pv =
+		rpra[i].buf.pv =
 			 (args - ctx->overps[oix]->offset);
 		pages[list[i].pgidx].addr = ctx->buf->phys -
 					    ctx->overps[oix]->offset +
@@ -1645,7 +1664,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
 			continue;
 
-		if (rpra && lrpra && rpra[i].buf.len &&
+		if (rpra && rpra[i].buf.len &&
 			ctx->overps[oix]->mstart) {
 			if (map && map->buf) {
 				dma_buf_begin_cpu_access(map->buf,
@@ -1659,13 +1678,15 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 		}
 	}
 	PERF_END);
-	for (i = bufs; rpra && lrpra && i < bufs + handles; i++) {
-		rpra[i].dma.fd = lrpra[i].dma.fd = ctx->fds[i];
-		rpra[i].dma.len = lrpra[i].dma.len = (uint32_t)lpra[i].buf.len;
-		rpra[i].dma.offset = lrpra[i].dma.offset =
-			 (uint32_t)(uintptr_t)lpra[i].buf.pv;
+	for (i = bufs; rpra && i < bufs + handles; i++) {
+		rpra[i].dma.fd = ctx->fds[i];
+		rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
+		rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
 	}
 
+	/* Copy rpra to local buffer */
+	if (ctx->lrpra && rpra && lrpralen > 0)
+		memcpy(ctx->lrpra, rpra, lrpralen);
  bail:
 	return err;
 }
@@ -1755,13 +1776,14 @@ static void inv_args_pre(struct smq_invoke_ctx *ctx)
 				uint64_to_ptr(rpra[i].buf.pv))) {
 			if (map && map->buf) {
 				dma_buf_begin_cpu_access(map->buf,
-					DMA_TO_DEVICE);
+					DMA_BIDIRECTIONAL);
 				dma_buf_end_cpu_access(map->buf,
-					DMA_TO_DEVICE);
-			} else
+					DMA_BIDIRECTIONAL);
+			} else {
 				dmac_flush_range(
 					uint64_to_ptr(rpra[i].buf.pv), (char *)
 					uint64_to_ptr(rpra[i].buf.pv + 1));
+			}
 		}
 
 		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
@@ -1769,12 +1791,13 @@ static void inv_args_pre(struct smq_invoke_ctx *ctx)
 		if (!IS_CACHE_ALIGNED(end)) {
 			if (map && map->buf) {
 				dma_buf_begin_cpu_access(map->buf,
-					DMA_TO_DEVICE);
+					DMA_BIDIRECTIONAL);
 				dma_buf_end_cpu_access(map->buf,
-					DMA_TO_DEVICE);
-			} else
+					DMA_BIDIRECTIONAL);
+			} else {
 				dmac_flush_range((char *)end,
 					(char *)end + 1);
+			}
 		}
 	}
 }
@@ -1853,6 +1876,10 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
 		goto bail;
 	}
 	err = rpmsg_send(channel_ctx->rpdev->ept, (void *)msg, sizeof(*msg));
+	LOG_FASTRPC_GLINK_MSG(channel_ctx->ipc_log_ctx,
+		"sent pkt %pK (sz %zu): ctx 0x%llx, handle 0x%x, sc 0x%x (rpmsg err %d)",
+		(void *)msg, sizeof(*msg),
+		msg->invoke.header.ctx, handle, ctx->sc, err);
 	mutex_unlock(&channel_ctx->rpmsg_mutex);
  bail:
 	return err;
@@ -1942,11 +1969,13 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 			goto bail;
 	}
 
-	if (!fl->sctx->smmu.coherent) {
-		PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
-		inv_args_pre(ctx);
-		PERF_END);
-	}
+	PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
+	inv_args_pre(ctx);
+	PERF_END);
+
+	PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
+	inv_args(ctx);
+	PERF_END);
 
 	PERF(fl->profile, GET_COUNTER(perf_counter, PERF_LINK),
 	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
@@ -1965,8 +1994,7 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 	}
 
 	PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
-	if (!fl->sctx->smmu.coherent)
-		inv_args(ctx);
+	inv_args(ctx);
 	PERF_END);
 
 	VERIFY(err, 0 == (err = ctx->retval));
@@ -2945,10 +2973,9 @@ static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
 	return err;
 }
 
-static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
+static inline int get_cid_from_rpdev(struct rpmsg_device *rpdev)
 {
-	int err = 0;
-	int cid = -1;
+	int err = 0, cid = -1;
 
 	VERIFY(err, !IS_ERR_OR_NULL(rpdev));
 	if (err)
@@ -2963,6 +2990,19 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
 	else if (!strcmp(rpdev->dev.parent->of_node->name, "mdsp"))
 		cid = MDSP_DOMAIN_ID;
 
+	return cid;
+}
+
+static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
+{
+	int err = 0;
+	int cid = -1;
+
+	VERIFY(err, !IS_ERR_OR_NULL(rpdev));
+	if (err)
+		return -EINVAL;
+
+	cid = get_cid_from_rpdev(rpdev);
 	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
 	if (err)
 		goto bail;
@@ -2971,6 +3011,19 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
 	mutex_unlock(&gcinfo[cid].rpmsg_mutex);
 	pr_info("adsprpc: %s: opened rpmsg channel for %s\n",
 		__func__, gcinfo[cid].subsys);
+
+#if IS_ENABLED(CONFIG_ADSPRPC_DEBUG)
+	if (!gcinfo[cid].ipc_log_ctx)
+		gcinfo[cid].ipc_log_ctx =
+			ipc_log_context_create(FASTRPC_GLINK_LOG_PAGES,
+				gcinfo[cid].name, 0);
+	if (!gcinfo[cid].ipc_log_ctx)
+		pr_warn("adsprpc: %s: failed to create IPC log context for %s\n",
+			__func__, gcinfo[cid].subsys);
+	else
+		pr_info("adsprpc: %s: enabled IPC logging for %s\n",
+			__func__, gcinfo[cid].subsys);
+#endif
 bail:
 	if (err)
 		pr_err("adsprpc: rpmsg probe of %s cid %d failed\n",
@@ -2988,15 +3041,7 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
 	if (err)
 		return;
 
-	if (!strcmp(rpdev->dev.parent->of_node->name, "cdsp"))
-		cid = CDSP_DOMAIN_ID;
-	else if (!strcmp(rpdev->dev.parent->of_node->name, "adsp"))
-		cid = ADSP_DOMAIN_ID;
-	else if (!strcmp(rpdev->dev.parent->of_node->name, "dsps"))
-		cid = SDSP_DOMAIN_ID;
-	else if (!strcmp(rpdev->dev.parent->of_node->name, "mdsp"))
-		cid = MDSP_DOMAIN_ID;
-
+	cid = get_cid_from_rpdev(rpdev);
 	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
 	if (err)
 		goto bail;
@@ -3024,6 +3069,17 @@ static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
 	if (err)
 		goto bail;
 
+#if IS_ENABLED(CONFIG_ADSPRPC_DEBUG)
+	{
+		int cid = get_cid_from_rpdev(rpdev);
+
+		if (cid >= 0 && cid < NUM_CHANNELS) {
+			LOG_FASTRPC_GLINK_MSG(gcinfo[cid].ipc_log_ctx,
+				"recvd pkt %pK (sz %d): ctx 0x%llx, retVal %d",
+				data, len, rsp->ctx, rsp->retval);
+		}
+	}
+#endif
+
 	index = (uint32_t)((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
 	VERIFY(err, index < FASTRPC_CTX_MAX);
 	if (err)
@@ -3041,7 +3097,8 @@ static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
 	context_notify_user(me->ctxtable[index], rsp->retval);
 bail:
 	if (err)
-		pr_err("adsprpc: invalid response or context (err %d)\n", err);
+		pr_err("adsprpc: ERROR: %s: invalid response (data %pK, len %d) from remote subsystem (err %d)\n",
+				__func__, data, len, err);
 	return err;
 }
 
@@ -4500,6 +4557,8 @@ static void __exit fastrpc_device_exit(void)
 	for (i = 0; i < NUM_CHANNELS; i++) {
 		if (!gcinfo[i].name)
 			continue;
+		if (me->channel[i].ipc_log_ctx)
+			ipc_log_context_destroy(me->channel[i].ipc_log_ctx);
 		subsys_notif_unregister_notifier(me->channel[i].handle,
 						&me->channel[i].nb);
 	}
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 5ba540c..2ae87eb 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -2091,6 +2091,11 @@ static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
 	if ((ret == DIAG_DCI_NO_ERROR && !common_cmd) || ret < 0)
 		return ret;
 
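+	/* Zero-init: each field below is filled only if header_len covers it */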
+	reg_entry.cmd_code = 0;
+	reg_entry.subsys_id = 0;
+	reg_entry.cmd_code_hi = 0;
+	reg_entry.cmd_code_lo = 0;
+
 	if (header_len >= (sizeof(uint8_t)))
 		reg_entry.cmd_code = header->cmd_code;
 	if (header_len >= (2 * sizeof(uint8_t)))
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index c45de725..6d84722 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -780,7 +780,7 @@ struct diagchar_dev {
 	int dci_tag;
 	int dci_client_id[MAX_DCI_CLIENTS];
 	struct mutex dci_mutex;
-	spinlock_t rpmsginfo_lock[NUM_PERIPHERALS];
+	struct mutex rpmsginfo_mutex[NUM_PERIPHERALS];
 	int num_dci_client;
 	unsigned char *apps_dci_buf;
 	int dci_state;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index e3e6d75..9d5417d 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -4360,7 +4360,7 @@ static int __init diagchar_init(void)
 	mutex_init(&driver->hdlc_recovery_mutex);
 	for (i = 0; i < NUM_PERIPHERALS; i++) {
 		mutex_init(&driver->diagfwd_channel_mutex[i]);
-		spin_lock_init(&driver->rpmsginfo_lock[i]);
+		mutex_init(&driver->rpmsginfo_mutex[i]);
 		driver->diag_id_sent[i] = 0;
 	}
 	init_waitqueue_head(&driver->wait_q);
diff --git a/drivers/char/diag/diagfwd_rpmsg.c b/drivers/char/diag/diagfwd_rpmsg.c
index 6dda72a..c1262c1 100644
--- a/drivers/char/diag/diagfwd_rpmsg.c
+++ b/drivers/char/diag/diagfwd_rpmsg.c
@@ -391,17 +391,12 @@ static void diag_state_open_rpmsg(void *ctxt)
 static void diag_rpmsg_queue_read(void *ctxt)
 {
 	struct diag_rpmsg_info *rpmsg_info = NULL;
-	unsigned long flags;
 
 	if (!ctxt)
 		return;
 
 	rpmsg_info = (struct diag_rpmsg_info *)ctxt;
-	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
-	if (rpmsg_info->hdl && rpmsg_info->wq &&
-		atomic_read(&rpmsg_info->opened))
-		queue_work(rpmsg_info->wq, &(rpmsg_info->read_work));
-	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	queue_work(rpmsg_info->wq, &(rpmsg_info->read_work));
 }
 
 static void diag_state_close_rpmsg(void *ctxt)
@@ -435,7 +430,6 @@ static int diag_rpmsg_read(void *ctxt, unsigned char *buf, int buf_len)
 	struct diag_rpmsg_info *rpmsg_info =  NULL;
 	struct diagfwd_info *fwd_info = NULL;
 	int ret_val = 0;
-	unsigned long flags;
 
 	if (!ctxt || !buf || buf_len <= 0)
 		return -EIO;
@@ -446,16 +440,15 @@ static int diag_rpmsg_read(void *ctxt, unsigned char *buf, int buf_len)
 		return -EIO;
 	}
 
-	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 	if (!atomic_read(&rpmsg_info->opened) ||
 		!rpmsg_info->hdl || !rpmsg_info->inited) {
 		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
 			"diag:RPMSG channel not opened");
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		return -EIO;
 	}
-	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 
 	fwd_info = rpmsg_info->fwd_ctxt;
 
@@ -479,25 +472,22 @@ static void diag_rpmsg_read_work_fn(struct work_struct *work)
 	struct diag_rpmsg_info *rpmsg_info = container_of(work,
 							struct diag_rpmsg_info,
 							read_work);
-	unsigned long flags;
 
 	if (!rpmsg_info)
 		return;
 
-	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 
 	if (!atomic_read(&rpmsg_info->opened)) {
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		return;
 	}
 	if (!rpmsg_info->inited) {
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		diag_ws_release();
 		return;
 	}
-	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 
 	diagfwd_channel_read(rpmsg_info->fwd_ctxt);
 }
@@ -507,7 +497,6 @@ static int  diag_rpmsg_write(void *ctxt, unsigned char *buf, int len)
 	struct diag_rpmsg_info *rpmsg_info = NULL;
 	int err = 0;
 	struct rpmsg_device *rpdev = NULL;
-	unsigned long flags;
 
 	if (!ctxt || !buf)
 		return -EIO;
@@ -519,16 +508,14 @@ static int  diag_rpmsg_write(void *ctxt, unsigned char *buf, int len)
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 	if (!rpmsg_info->inited || !rpmsg_info->hdl ||
 		!atomic_read(&rpmsg_info->opened)) {
 		pr_err_ratelimited("diag: In %s, rpmsg not inited, rpmsg_info: %pK, buf: %pK, len: %d\n",
 				 __func__, rpmsg_info, buf, len);
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		return -ENODEV;
 	}
-	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
 
 	rpdev = (struct rpmsg_device *)rpmsg_info->hdl;
 	err = rpmsg_send(rpdev->ept, buf, len);
@@ -538,6 +525,7 @@ static int  diag_rpmsg_write(void *ctxt, unsigned char *buf, int len)
 	} else
 		err = -ENOMEM;
 
+	mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 	return err;
 
 }
@@ -547,18 +535,16 @@ static void diag_rpmsg_late_init_work_fn(struct work_struct *work)
 	struct diag_rpmsg_info *rpmsg_info = container_of(work,
 							struct diag_rpmsg_info,
 							late_init_work);
-	unsigned long flags;
 
 	if (!rpmsg_info)
 		return;
 
-	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 	if (!rpmsg_info->hdl) {
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		return;
 	}
-	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 
 	diagfwd_channel_open(rpmsg_info->fwd_ctxt);
 	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "rpmsg late init p: %d t: %d\n",
@@ -571,18 +557,16 @@ static void diag_rpmsg_open_work_fn(struct work_struct *work)
 	struct diag_rpmsg_info *rpmsg_info = container_of(work,
 							struct diag_rpmsg_info,
 							open_work);
-	unsigned long flags;
 
 	if (!rpmsg_info)
 		return;
 
-	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 	if (!rpmsg_info->inited) {
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		return;
 	}
-	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 
 	if (rpmsg_info->type != TYPE_CNTL) {
 		diagfwd_channel_open(rpmsg_info->fwd_ctxt);
@@ -597,19 +581,17 @@ static void diag_rpmsg_close_work_fn(struct work_struct *work)
 	struct diag_rpmsg_info *rpmsg_info = container_of(work,
 							struct diag_rpmsg_info,
 							close_work);
-	unsigned long flags;
 
 	if (!rpmsg_info)
 		return;
 
-	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 	if (!rpmsg_info->inited || !rpmsg_info->hdl) {
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		return;
 	}
 	rpmsg_info->hdl = NULL;
-	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 	diagfwd_channel_close(rpmsg_info->fwd_ctxt);
 }
 
@@ -722,20 +704,18 @@ static void rpmsg_late_init(struct diag_rpmsg_info *rpmsg_info)
 
 int diag_rpmsg_init_peripheral(uint8_t peripheral)
 {
-	unsigned long flags;
-
 	if (peripheral >= NUM_PERIPHERALS) {
 		pr_err("diag: In %s, invalid peripheral %d\n", __func__,
 			peripheral);
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&driver->rpmsginfo_lock[peripheral], flags);
+	mutex_lock(&driver->rpmsginfo_mutex[peripheral]);
 	rpmsg_late_init(&rpmsg_data[peripheral]);
 	rpmsg_late_init(&rpmsg_dci[peripheral]);
 	rpmsg_late_init(&rpmsg_cmd[peripheral]);
 	rpmsg_late_init(&rpmsg_dci_cmd[peripheral]);
-	spin_unlock_irqrestore(&driver->rpmsginfo_lock[peripheral], flags);
+	mutex_unlock(&driver->rpmsginfo_mutex[peripheral]);
 
 	return 0;
 }
@@ -743,7 +723,6 @@ int diag_rpmsg_init_peripheral(uint8_t peripheral)
 static void __diag_rpmsg_init(struct diag_rpmsg_info *rpmsg_info)
 {
 	char wq_name[DIAG_RPMSG_NAME_SZ + 12];
-	unsigned long flags;
 
 	if (!rpmsg_info)
 		return;
@@ -763,7 +742,7 @@ static void __diag_rpmsg_init(struct diag_rpmsg_info *rpmsg_info)
 	INIT_WORK(&(rpmsg_info->close_work), diag_rpmsg_close_work_fn);
 	INIT_WORK(&(rpmsg_info->read_work), diag_rpmsg_read_work_fn);
 	INIT_WORK(&(rpmsg_info->late_init_work), diag_rpmsg_late_init_work_fn);
-	spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 	rpmsg_info->hdl = NULL;
 	rpmsg_info->fwd_ctxt = NULL;
 	atomic_set(&rpmsg_info->opened, 0);
@@ -772,7 +751,7 @@ static void __diag_rpmsg_init(struct diag_rpmsg_info *rpmsg_info)
 		"%s initialized fwd_ctxt: %pK hdl: %pK\n",
 		rpmsg_info->name, rpmsg_info->fwd_ctxt,
 		rpmsg_info->hdl);
-	spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+	mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 }
 
 void diag_rpmsg_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt)
@@ -790,7 +769,6 @@ int diag_rpmsg_init(void)
 {
 	uint8_t peripheral;
 	struct diag_rpmsg_info *rpmsg_info = NULL;
-	unsigned long flags;
 
 	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
 		if (peripheral != PERIPHERAL_WDSP)
@@ -800,10 +778,9 @@ int diag_rpmsg_init(void)
 		diagfwd_cntl_register(TRANSPORT_RPMSG, rpmsg_info->peripheral,
 					(void *)rpmsg_info, &rpmsg_ops,
 					&(rpmsg_info->fwd_ctxt));
-		spin_lock_irqsave(&driver->rpmsginfo_lock[peripheral], flags);
+		mutex_lock(&driver->rpmsginfo_mutex[peripheral]);
 		rpmsg_info->inited = 1;
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[peripheral],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[peripheral]);
 		diagfwd_channel_open(rpmsg_info->fwd_ctxt);
 		diagfwd_late_open(rpmsg_info->fwd_ctxt);
 		__diag_rpmsg_init(&rpmsg_data[peripheral]);
@@ -836,31 +813,27 @@ static void __diag_rpmsg_exit(struct diag_rpmsg_info *rpmsg_info)
 void diag_rpmsg_early_exit(void)
 {
 	int peripheral = 0;
-	unsigned long flags;
 
 	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
 		if (peripheral != PERIPHERAL_WDSP)
 			continue;
-		spin_lock_irqsave(&driver->rpmsginfo_lock[peripheral], flags);
+		mutex_lock(&driver->rpmsginfo_mutex[peripheral]);
 		__diag_rpmsg_exit(&rpmsg_cntl[peripheral]);
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[peripheral],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[peripheral]);
 	}
 }
 
 void diag_rpmsg_exit(void)
 {
 	int peripheral = 0;
-	unsigned long flags;
 
 	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
-		spin_lock_irqsave(&driver->rpmsginfo_lock[peripheral], flags);
+		mutex_lock(&driver->rpmsginfo_mutex[peripheral]);
 		__diag_rpmsg_exit(&rpmsg_data[peripheral]);
 		__diag_rpmsg_exit(&rpmsg_cmd[peripheral]);
 		__diag_rpmsg_exit(&rpmsg_dci[peripheral]);
 		__diag_rpmsg_exit(&rpmsg_dci_cmd[peripheral]);
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[peripheral],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[peripheral]);
 	}
 }
 
@@ -886,7 +859,6 @@ static struct diag_rpmsg_info *diag_get_rpmsg_ptr(char *name)
 static int diag_rpmsg_probe(struct rpmsg_device *rpdev)
 {
 	struct diag_rpmsg_info *rpmsg_info = NULL;
-	unsigned long flags;
 
 	if (!rpdev)
 		return 0;
@@ -896,11 +868,10 @@ static int diag_rpmsg_probe(struct rpmsg_device *rpdev)
 	rpmsg_info = diag_get_rpmsg_ptr(rpdev->id.name);
 	if (rpmsg_info) {
 
-		spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+		mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		rpmsg_info->hdl = rpdev;
 		atomic_set(&rpmsg_info->opened, 1);
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 
 		dev_set_drvdata(&rpdev->dev, rpmsg_info);
 		diagfwd_channel_read(rpmsg_info->fwd_ctxt);
@@ -913,17 +884,15 @@ static int diag_rpmsg_probe(struct rpmsg_device *rpdev)
 static void diag_rpmsg_remove(struct rpmsg_device *rpdev)
 {
 	struct diag_rpmsg_info *rpmsg_info = NULL;
-	unsigned long flags;
 
 	if (!rpdev)
 		return;
 
 	rpmsg_info = diag_get_rpmsg_ptr(rpdev->id.name);
 	if (rpmsg_info) {
-		spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+		mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		atomic_set(&rpmsg_info->opened, 0);
-		spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
-			flags);
+		mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
 		queue_work(rpmsg_info->wq, &rpmsg_info->close_work);
 	}
 }
diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c
index 27c1f64..23e8e3d 100644
--- a/drivers/char/diag/diagfwd_socket.c
+++ b/drivers/char/diag/diagfwd_socket.c
@@ -652,13 +652,10 @@ static void handle_ctrl_pkt(struct diag_socket_info *info, void *buf, int len)
 				 info->name);
 
 			mutex_lock(&driver->diag_notifier_mutex);
-			if (bootup_req[info->peripheral] == PERIPHERAL_SSR_UP) {
+			if (bootup_req[info->peripheral] == PERIPHERAL_SSR_UP)
 				DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
-				"diag: %s is up, stopping cleanup: bootup_req = %d\n",
+				"diag: %s is up, bootup_req = %d\n",
 				info->name, (int)bootup_req[info->peripheral]);
-				mutex_unlock(&driver->diag_notifier_mutex);
-				break;
-			}
 			mutex_unlock(&driver->diag_notifier_mutex);
 			socket_close_channel(info);
 		}
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 3348136..1131524 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -153,6 +153,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
 			continue;
 
 		div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
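+		/* DIV_ROUND_CLOSEST() can exceed the divider field's range */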
+		if (div > GENERATED_MAX_DIV + 1)
+			div = GENERATED_MAX_DIV + 1;
 
 		clk_generated_best_diff(req, parent, parent_rate, div,
 					&best_diff, &best_rate);
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index a72c058..bd73664 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -407,3 +407,29 @@
 	  Support for the debug clock controller on Qualcomm Technologies, Inc
 	  LITO devices.
 	  Say Y if you want to support the clock measurement functionality.
+
+config SM_GCC_BENGAL
+	tristate "BENGAL Global Clock Controller"
+	select QCOM_GDSC
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the global clock controller on Bengal devices.
+	  Say Y if you want to use peripheral devices such as UART, SPI,
+	  I2C, USB, UFS, SDCC, PCIe, Camera, Video, etc.
+
+config SM_GPUCC_BENGAL
+	tristate "BENGAL Graphics Clock Controller"
+	select SM_GCC_BENGAL
+	help
+	  Support for the graphics clock controller on Qualcomm Technologies, Inc.
+	  BENGAL devices.
+	  Say Y if you want to support graphics controller devices.
+
+config SM_DISPCC_BENGAL
+	tristate "BENGAL Display Clock Controller"
+	select SM_GCC_BENGAL
+	help
+	  Support for the display clock controller on Qualcomm Technologies, Inc.
+	  BENGAL devices.
+	  Say Y if you want to support display devices and functionality such as
+	  splash screen.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 40dca85..3e58b7b 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -54,9 +54,12 @@
 obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
 obj-$(CONFIG_SM_CAMCC_LITO) += camcc-lito.o
 obj-$(CONFIG_SM_DEBUGCC_LITO) += debugcc-lito.o
+obj-$(CONFIG_SM_DISPCC_BENGAL) += dispcc-bengal.o
 obj-$(CONFIG_SM_DISPCC_LITO) += dispcc-lito.o
+obj-$(CONFIG_SM_GCC_BENGAL) += gcc-bengal.o
 obj-$(CONFIG_SM_GCC_LITO) += gcc-lito.o
+obj-$(CONFIG_SM_GPUCC_BENGAL) += gpucc-bengal.o
+obj-$(CONFIG_SM_GPUCC_LITO) += gpucc-lito.o
 obj-$(CONFIG_SM_NPUCC_LITO) += npucc-lito.o
 obj-$(CONFIG_SM_VIDEOCC_LITO) += videocc-lito.o
-obj-$(CONFIG_SM_GPUCC_LITO) += gpucc-lito.o
 obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
diff --git a/drivers/clk/qcom/camcc-kona.c b/drivers/clk/qcom/camcc-kona.c
index 95637b2..f15be8e 100644
--- a/drivers/clk/qcom/camcc-kona.c
+++ b/drivers/clk/qcom/camcc-kona.c
@@ -328,7 +328,6 @@ static const struct alpha_pll_config cam_cc_pll2_config_sm8250_v2 = {
 	.config_ctl_val = 0x08200920,
 	.config_ctl_hi_val = 0x05008011,
 	.config_ctl_hi1_val = 0x00000000,
-	.test_ctl_val = 0x00010000,
 	.user_ctl_val = 0x00000100,
 	.user_ctl_hi_val = 0x00000000,
 	.user_ctl_hi1_val = 0x00000000,
diff --git a/drivers/clk/qcom/dispcc-bengal.c b/drivers/clk/qcom/dispcc-bengal.c
new file mode 100644
index 0000000..4f48c64
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-bengal.c
@@ -0,0 +1,722 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,dispcc-bengal.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "vdd-level-bengal.h"
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
+
+enum {
+	P_BI_TCXO,
+	P_CHIP_SLEEP_CLK,
+	P_CORE_BI_PLL_TEST_SE,
+	P_DISP_CC_PLL0_OUT_MAIN,
+	P_DSI0_PHY_PLL_OUT_BYTECLK,
+	P_DSI0_PHY_PLL_OUT_DSICLK,
+	P_DSI1_PHY_PLL_OUT_DSICLK,
+	P_GPLL0_OUT_MAIN,
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_0[] = {
+	"bi_tcxo",
+	"dsi0_phy_pll_out_byteclk",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_DISP_CC_PLL0_OUT_MAIN, 1 },
+	{ P_GPLL0_OUT_MAIN, 4 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_1[] = {
+	"bi_tcxo",
+	"disp_cc_pll0_out_main",
+	"gpll0_out_main",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_2[] = {
+	"bi_tcxo",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_MAIN, 4 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_3[] = {
+	"bi_tcxo",
+	"gpll0_out_main",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+	{ P_DSI1_PHY_PLL_OUT_DSICLK, 2 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_4[] = {
+	"bi_tcxo",
+	"dsi0_phy_pll_out_dsiclk",
+	"dsi1_phy_pll_out_dsiclk",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+	{ P_CHIP_SLEEP_CLK, 0 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_5[] = {
+	"chip_sleep_clk",
+	"core_bi_pll_test_se",
+};
+
+static struct pll_vco spark_vco[] = {
+	{ 500000000, 1000000000, 2 },
+};
+
+/* 768MHz configuration */
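+/* Assuming the 19.2 MHz bi_tcxo reference: L = 0x28 (40) -> 40 * 19.2 MHz = 768 MHz */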
+static const struct alpha_pll_config disp_cc_pll0_config = {
+	.l = 0x28,
+	.alpha = 0x0,
+	.alpha_en_mask = BIT(24),
+	.vco_val = 0x2 << 20,
+	.vco_mask = GENMASK(21, 20),
+	.main_output_mask = BIT(0),
+	.config_ctl_val = 0x40008529,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+	.offset = 0x0,
+	.vco_table = spark_vco,
+	.num_vco = ARRAY_SIZE(spark_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_pll0",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 1000000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_disp_cc_pll0_out_main[] = {
+	{ 0x0, 1 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv disp_cc_pll0_out_main = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_disp_cc_pll0_out_main,
+	.num_post_div = ARRAY_SIZE(post_div_table_disp_cc_pll0_out_main),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_pll0_out_main",
+		.parent_names = (const char *[]){ "disp_cc_pll0" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_ops,
+	},
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+	.reg = 0x20d4,
+	.shift = 0,
+	.width = 2,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "disp_cc_mdss_byte0_div_clk_src",
+		.parent_names =
+			(const char *[]){ "disp_cc_mdss_byte0_clk_src" },
+		.num_parents = 1,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(37500000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+	F(75000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+	{ }
+};
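+/* The divisors above assume gpll0_out_main runs at 300 MHz (8 * 37.5 MHz, 4 * 75 MHz) */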
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+	.cmd_rcgr = 0x2154,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_3,
+	.freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_ahb_clk_src",
+		.parent_names = disp_cc_parent_names_3,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 19200000,
+			[VDD_LOW] = 37500000,
+			[VDD_NOMINAL] = 75000000},
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+	.cmd_rcgr = 0x20bc,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_0,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_byte0_clk_src",
+		.parent_names = disp_cc_parent_names_0,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_byte2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 19200000,
+			[VDD_LOWER] = 164000000,
+			[VDD_LOW] = 187500000},
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_esc0_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+	.cmd_rcgr = 0x20d8,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_0,
+	.freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_esc0_clk_src",
+		.parent_names = disp_cc_parent_names_0,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 19200000},
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(192000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+	F(256000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+	F(307200000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+	F(384000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0),
+	{ }
+};
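+/* The PLL rates above are disp_cc_pll0_out_main (768 MHz) divided by 4, 3, 2.5 and 2 */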
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+	.cmd_rcgr = 0x2074,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_1,
+	.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_mdp_clk_src",
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 19200000,
+			[VDD_LOWER] = 192000000,
+			[VDD_LOW] = 256000000,
+			[VDD_LOW_L1] = 307200000,
+			[VDD_NOMINAL] = 384000000},
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+	.cmd_rcgr = 0x205c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_4,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_pclk0_clk_src",
+		.parent_names = disp_cc_parent_names_4,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_pixel_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 19200000,
+			[VDD_LOWER] = 183310056,
+			[VDD_LOW] = 250000000},
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_rot_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(192000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+	F(256000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+	F(307200000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+	.cmd_rcgr = 0x208c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_1,
+	.freq_tbl = ftbl_disp_cc_mdss_rot_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_rot_clk_src",
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 19200000,
+			[VDD_LOWER] = 192000000,
+			[VDD_LOW] = 256000000,
+			[VDD_LOW_L1] = 307200000},
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+	.cmd_rcgr = 0x20a4,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_2,
+	.freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_vsync_clk_src",
+		.parent_names = disp_cc_parent_names_2,
+		.num_parents = 2,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 19200000},
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+	F(32764, P_CHIP_SLEEP_CLK, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+	.cmd_rcgr = 0x6050,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_5,
+	.freq_tbl = ftbl_disp_cc_sleep_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_sleep_clk_src",
+		.parent_names = disp_cc_parent_names_5,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 32000},
+	},
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+	.cmd_rcgr = 0x6034,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_2,
+	.freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_xo_clk_src",
+		.parent_names = disp_cc_parent_names_2,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 19200000},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+	.halt_reg = 0x2044,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2044,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_ahb_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+	.halt_reg = 0x2024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_byte0_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_byte0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+	.halt_reg = 0x2028,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_byte0_intf_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_byte0_div_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+	.halt_reg = 0x202c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x202c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_esc0_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_esc0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+	.halt_reg = 0x2008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_mdp_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_mdp_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+	.halt_reg = 0x2018,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x2018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_mdp_lut_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_mdp_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+	.halt_reg = 0x4004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x4004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_non_gdsc_ahb_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+	.halt_reg = 0x2004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_pclk0_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_pclk0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+	.halt_reg = 0x2010,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_rot_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_rot_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+	.halt_reg = 0x2020,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2020,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_vsync_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_vsync_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_sleep_clk = {
+	.halt_reg = 0x6068,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6068,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_sleep_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_sleep_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_xo_clk = {
+	.halt_reg = 0x604c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x604c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_xo_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_xo_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap *disp_cc_bengal_clocks[] = {
+	[DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+	[DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+	[DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+	[DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+	[DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+	[DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+	[DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+	[DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+	[DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+	[DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+	[DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+	[DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+	[DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+	[DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+	[DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+	[DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+	[DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+	[DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+	[DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+	[DISP_CC_PLL0_OUT_MAIN] = &disp_cc_pll0_out_main.clkr,
+	[DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr,
+	[DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+	[DISP_CC_XO_CLK] = &disp_cc_xo_clk.clkr,
+	[DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static const struct regmap_config disp_cc_bengal_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x10000,
+	.fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_bengal_desc = {
+	.config = &disp_cc_bengal_regmap_config,
+	.clks = disp_cc_bengal_clocks,
+	.num_clks = ARRAY_SIZE(disp_cc_bengal_clocks),
+};
+
+static const struct of_device_id dispcc_bengal_match_table[] = {
+	{ .compatible = "qcom,bengal-dispcc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, dispcc_bengal_match_table);
+
+static int dispcc_bengal_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	struct clk *clk;
+	int ret;
+
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+					"Unable to get vdd_cx regulator\n");
+		return PTR_ERR(vdd_cx.regulator[0]);
+	}
+
+	regmap = qcom_cc_map(pdev, &disp_cc_bengal_desc);
+	if (IS_ERR(regmap)) {
+		pr_err("Failed to map the disp_cc registers\n");
+		return PTR_ERR(regmap);
+	}
+
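+	/* Only verify the AHB clock is available here, so probe can defer */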
+	clk = clk_get(&pdev->dev, "cfg_ahb_clk");
+	if (IS_ERR(clk)) {
+		if (PTR_ERR(clk) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get ahb clock handle\n");
+		return PTR_ERR(clk);
+	}
+	clk_put(clk);
+
+	clk_alpha_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+
+	ret = qcom_cc_really_probe(pdev, &disp_cc_bengal_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register Display CC clocks\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered Display CC clocks\n");
+	return 0;
+}
+
+static struct platform_driver dispcc_bengal_driver = {
+	.probe = dispcc_bengal_probe,
+	.driver = {
+		.name = "bengal-dispcc",
+		.of_match_table = dispcc_bengal_match_table,
+	},
+};
+
+static int __init disp_cc_bengal_init(void)
+{
+	return platform_driver_register(&dispcc_bengal_driver);
+}
+subsys_initcall(disp_cc_bengal_init);
+
+static void __exit disp_cc_bengal_exit(void)
+{
+	platform_driver_unregister(&dispcc_bengal_driver);
+}
+module_exit(disp_cc_bengal_exit);
+
+MODULE_DESCRIPTION("QTI DISPCC bengal Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-bengal.c b/drivers/clk/qcom/gcc-bengal.c
new file mode 100644
index 0000000..b2cad01
--- /dev/null
+++ b/drivers/clk/qcom/gcc-bengal.c
@@ -0,0 +1,3897 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-bengal.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "reset.h"
+#include "vdd-level-bengal.h"
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_cx_ao, VDD_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner);
+
+enum {
+	P_BI_TCXO,
+	P_CORE_BI_PLL_TEST_SE,
+	P_GPLL0_OUT_AUX2,
+	P_GPLL0_OUT_EARLY,
+	P_GPLL10_OUT_MAIN,
+	P_GPLL11_OUT_AUX,
+	P_GPLL11_OUT_AUX2,
+	P_GPLL11_OUT_MAIN,
+	P_GPLL3_OUT_EARLY,
+	P_GPLL3_OUT_MAIN,
+	P_GPLL4_OUT_MAIN,
+	P_GPLL6_OUT_EARLY,
+	P_GPLL6_OUT_MAIN,
+	P_GPLL7_OUT_MAIN,
+	P_GPLL8_OUT_EARLY,
+	P_GPLL8_OUT_MAIN,
+	P_GPLL9_OUT_EARLY,
+	P_GPLL9_OUT_MAIN,
+	P_SLEEP_CLK,
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_EARLY, 1 },
+	{ P_GPLL0_OUT_AUX2, 2 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_0[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll0_out_aux2",
+	"core_bi_pll_test_se",
+};
+
+static const char * const gcc_parent_names_0_ao[] = {
+	"bi_tcxo_ao",
+	"gpll0",
+	"gpll0_out_aux2",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_EARLY, 1 },
+	{ P_GPLL0_OUT_AUX2, 2 },
+	{ P_GPLL6_OUT_MAIN, 4 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_1[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll0_out_aux2",
+	"gpll6_out_main",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_EARLY, 1 },
+	{ P_GPLL0_OUT_AUX2, 2 },
+	{ P_SLEEP_CLK, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_2[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll0_out_aux2",
+	"sleep_clk",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_EARLY, 1 },
+	{ P_GPLL9_OUT_EARLY, 2 },
+	{ P_GPLL10_OUT_MAIN, 3 },
+	{ P_GPLL9_OUT_MAIN, 5 },
+	{ P_GPLL3_OUT_MAIN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_3[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll9",
+	"gpll10_out_main",
+	"gpll9_out_main",
+	"gpll3_out_main",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_EARLY, 1 },
+	{ P_GPLL0_OUT_AUX2, 2 },
+	{ P_GPLL4_OUT_MAIN, 5 },
+	{ P_GPLL3_OUT_MAIN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_4[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll0_out_aux2",
+	"gpll4_out_main",
+	"gpll3_out_main",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_EARLY, 1 },
+	{ P_GPLL8_OUT_EARLY, 2 },
+	{ P_GPLL10_OUT_MAIN, 3 },
+	{ P_GPLL8_OUT_MAIN, 4 },
+	{ P_GPLL9_OUT_MAIN, 5 },
+	{ P_GPLL3_OUT_MAIN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_5[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll8",
+	"gpll10_out_main",
+	"gpll8_out_main",
+	"gpll9_out_main",
+	"gpll3_out_main",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_EARLY, 1 },
+	{ P_GPLL8_OUT_EARLY, 2 },
+	{ P_GPLL10_OUT_MAIN, 3 },
+	{ P_GPLL6_OUT_MAIN, 4 },
+	{ P_GPLL9_OUT_MAIN, 5 },
+	{ P_GPLL3_OUT_EARLY, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_6[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll8",
+	"gpll10_out_main",
+	"gpll6_out_main",
+	"gpll9_out_main",
+	"gpll3",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_EARLY, 1 },
+	{ P_GPLL0_OUT_AUX2, 2 },
+	{ P_GPLL10_OUT_MAIN, 3 },
+	{ P_GPLL4_OUT_MAIN, 5 },
+	{ P_GPLL3_OUT_EARLY, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_7[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll0_out_aux2",
+	"gpll10_out_main",
+	"gpll4_out_main",
+	"gpll3",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_EARLY, 1 },
+	{ P_GPLL8_OUT_EARLY, 2 },
+	{ P_GPLL10_OUT_MAIN, 3 },
+	{ P_GPLL8_OUT_MAIN, 4 },
+	{ P_GPLL9_OUT_MAIN, 5 },
+	{ P_GPLL3_OUT_EARLY, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_8[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll8",
+	"gpll10_out_main",
+	"gpll8_out_main",
+	"gpll9_out_main",
+	"gpll3",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_EARLY, 1 },
+	{ P_GPLL0_OUT_AUX2, 2 },
+	{ P_GPLL10_OUT_MAIN, 3 },
+	{ P_GPLL8_OUT_MAIN, 4 },
+	{ P_GPLL9_OUT_MAIN, 5 },
+	{ P_GPLL3_OUT_EARLY, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_9[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll0_out_aux2",
+	"gpll10_out_main",
+	"gpll8_out_main",
+	"gpll9_out_main",
+	"gpll3",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_EARLY, 1 },
+	{ P_GPLL8_OUT_EARLY, 2 },
+	{ P_GPLL10_OUT_MAIN, 3 },
+	{ P_GPLL6_OUT_EARLY, 4 },
+	{ P_GPLL9_OUT_MAIN, 5 },
+	{ P_GPLL3_OUT_MAIN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_10[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll8",
+	"gpll10_out_main",
+	"gpll6",
+	"gpll9_out_main",
+	"gpll3_out_main",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_EARLY, 1 },
+	{ P_GPLL0_OUT_AUX2, 2 },
+	{ P_GPLL7_OUT_MAIN, 3 },
+	{ P_GPLL4_OUT_MAIN, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_11[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll0_out_aux2",
+	"gpll7_out_main",
+	"gpll4_out_main",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_SLEEP_CLK, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_12[] = {
+	"bi_tcxo",
+	"sleep_clk",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL11_OUT_MAIN, 1 },
+	{ P_GPLL11_OUT_AUX, 2 },
+	{ P_GPLL11_OUT_AUX2, 3 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_13[] = {
+	"bi_tcxo",
+	"gpll11_out_main",
+	"gpll11_out_aux",
+	"gpll11_out_aux2",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_EARLY, 1 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_14[] = {
+	"bi_tcxo",
+	"gpll0",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_15[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_EARLY, 1 },
+	{ P_GPLL6_OUT_MAIN, 4 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_15[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll6_out_main",
+	"core_bi_pll_test_se",
+};
+
+static struct pll_vco brammo_vco[] = {
+	{ 500000000, 1250000000, 0 },
+};
+
+static struct pll_vco default_vco[] = {
+	{ 1000000000, 2000000000, 0 },
+	{ 750000000, 1500000000, 1 },
+	{ 500000000, 1000000000, 2 },
+	{ 250000000, 500000000, 3 },
+};
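+/* Each pll_vco entry above is { min_freq, max_freq, VCO select value } */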
+
+static const u8 clk_alpha_pll_regs_offset[][PLL_OFF_MAX_REGS] = {
+	[CLK_ALPHA_PLL_TYPE_DEFAULT] =  {
+		[PLL_OFF_L_VAL] = 0x04,
+		[PLL_OFF_ALPHA_VAL] = 0x08,
+		[PLL_OFF_ALPHA_VAL_U] = 0x0c,
+		[PLL_OFF_TEST_CTL] = 0x10,
+		[PLL_OFF_TEST_CTL_U] = 0x14,
+		[PLL_OFF_USER_CTL] = 0x18,
+		[PLL_OFF_USER_CTL_U] = 0x1C,
+		[PLL_OFF_CONFIG_CTL] = 0x20,
+		[PLL_OFF_STATUS] = 0x24,
+	},
+	[CLK_ALPHA_PLL_TYPE_BRAMMO] =  {
+		[PLL_OFF_L_VAL] = 0x04,
+		[PLL_OFF_ALPHA_VAL] = 0x08,
+		[PLL_OFF_ALPHA_VAL_U] = 0x0c,
+		[PLL_OFF_TEST_CTL] = 0x10,
+		[PLL_OFF_TEST_CTL_U] = 0x14,
+		[PLL_OFF_USER_CTL] = 0x18,
+		[PLL_OFF_CONFIG_CTL] = 0x1C,
+		[PLL_OFF_STATUS] = 0x20,
+	},
+};
+
+static struct clk_alpha_pll gpll0 = {
+	.offset = 0x0,
+	.vco_table = default_vco,
+	.num_vco = ARRAY_SIZE(default_vco),
+	.regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr = {
+		.enable_reg = 0x79000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll0",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 1000000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_aux2[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_aux2 = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_gpll0_out_aux2,
+	.num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_aux2),
+	.width = 4,
+	.regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll0_out_aux2",
+		.parent_names = (const char *[]){ "gpll0" },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_ro_ops,
+	},
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_main[] = {
+	{ 0x0, 1 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_main = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_gpll0_out_main,
+	.num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_main),
+	.width = 4,
+	.regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll0_out_main",
+		.parent_names = (const char *[]){ "gpll0" },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_ro_ops,
+	},
+};
+
+/* 1152MHz configuration */
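+/* Assuming the 19.2 MHz bi_tcxo reference: L = 0x3c (60) -> 60 * 19.2 MHz = 1152 MHz */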
+static const struct alpha_pll_config gpll10_config = {
+	.l = 0x3c,
+	.vco_val = 0x1 << 20,
+	.vco_mask = GENMASK(21, 20),
+	.main_output_mask = BIT(0),
+	.config_ctl_val = 0x40008529,
+};
+
+static struct clk_alpha_pll gpll10 = {
+	.offset = 0xa000,
+	.vco_table = default_vco,
+	.num_vco = ARRAY_SIZE(default_vco),
+	.regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr = {
+		.enable_reg = 0x79000,
+		.enable_mask = BIT(10),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll10",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+			.vdd_class = &vdd_mx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 1000000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_gpll10_out_main[] = {
+	{ 0x0, 1 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv gpll10_out_main = {
+	.offset = 0xa000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_gpll10_out_main,
+	.num_post_div = ARRAY_SIZE(post_div_table_gpll10_out_main),
+	.width = 4,
+	.regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll10_out_main",
+		.parent_names = (const char *[]){ "gpll10" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_ops,
+	},
+};
+
+/* 600MHz configuration */
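+/* Assuming the 19.2 MHz bi_tcxo reference: L + alpha = 31.25 -> 31.25 * 19.2 MHz = 600 MHz */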
+static const struct alpha_pll_config gpll11_config = {
+	.l = 0x1F,
+	.alpha = 0x0,
+	.alpha_hi = 0x40,
+	.alpha_en_mask = BIT(24),
+	.vco_val = 0x2 << 20,
+	.vco_mask = GENMASK(21, 20),
+	.config_ctl_val = 0x40008529,
+};
+
+static struct clk_alpha_pll gpll11 = {
+	.offset = 0xb000,
+	.vco_table = default_vco,
+	.num_vco = ARRAY_SIZE(default_vco),
+	.flags = SUPPORTS_DYNAMIC_UPDATE,
+	.regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr = {
+		.enable_reg = 0x79000,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll11",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 1000000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_gpll11_out_main[] = {
+	{ 0x0, 1 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv gpll11_out_main = {
+	.offset = 0xb000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_gpll11_out_main,
+	.num_post_div = ARRAY_SIZE(post_div_table_gpll11_out_main),
+	.width = 4,
+	.regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll11_out_main",
+		.parent_names = (const char *[]){ "gpll11" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_ops,
+	},
+};
+
+static struct clk_alpha_pll gpll3 = {
+	.offset = 0x3000,
+	.vco_table = default_vco,
+	.num_vco = ARRAY_SIZE(default_vco),
+	.regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr = {
+		.enable_reg = 0x79000,
+		.enable_mask = BIT(3),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll3",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 1000000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static struct clk_alpha_pll gpll4 = {
+	.offset = 0x4000,
+	.vco_table = default_vco,
+	.num_vco = ARRAY_SIZE(default_vco),
+	.regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr = {
+		.enable_reg = 0x79000,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll4",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 1000000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_gpll4_out_main[] = {
+	{ 0x0, 1 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv gpll4_out_main = {
+	.offset = 0x4000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_gpll4_out_main,
+	.num_post_div = ARRAY_SIZE(post_div_table_gpll4_out_main),
+	.width = 4,
+	.regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll4_out_main",
+		.parent_names = (const char *[]){ "gpll4" },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_ro_ops,
+	},
+};
+
+static struct clk_alpha_pll gpll6 = {
+	.offset = 0x6000,
+	.vco_table = default_vco,
+	.num_vco = ARRAY_SIZE(default_vco),
+	.regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr = {
+		.enable_reg = 0x79000,
+		.enable_mask = BIT(6),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll6",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 1000000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_gpll6_out_main[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv gpll6_out_main = {
+	.offset = 0x6000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_gpll6_out_main,
+	.num_post_div = ARRAY_SIZE(post_div_table_gpll6_out_main),
+	.width = 4,
+	.regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll6_out_main",
+		.parent_names = (const char *[]){ "gpll6" },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_ro_ops,
+	},
+};
+
+static struct clk_alpha_pll gpll7 = {
+	.offset = 0x7000,
+	.vco_table = default_vco,
+	.num_vco = ARRAY_SIZE(default_vco),
+	.regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr = {
+		.enable_reg = 0x79000,
+		.enable_mask = BIT(7),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll7",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 1000000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_gpll7_out_main[] = {
+	{ 0x0, 1 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv gpll7_out_main = {
+	.offset = 0x7000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_gpll7_out_main,
+	.num_post_div = ARRAY_SIZE(post_div_table_gpll7_out_main),
+	.width = 4,
+	.regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll7_out_main",
+		.parent_names = (const char *[]){ "gpll7" },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_ro_ops,
+	},
+};
+
+/* 800MHz configuration */
+static const struct alpha_pll_config gpll8_config = {
+	.l = 0x29,
+	.alpha = 0xAA000000,
+	.alpha_hi = 0xAA,
+	.alpha_en_mask = BIT(24),
+	.vco_val = 0x2 << 20,
+	.vco_mask = GENMASK(21, 20),
+	.main_output_mask = BIT(0),
+	.early_output_mask = BIT(3),
+	.post_div_val = 0x1 << 8,
+	.post_div_mask = GENMASK(11, 8),
+	.config_ctl_val = 0x40008529,
+};
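+
+/*
+ * Rate check: 19.2 MHz * (0x29 + 0xaaaa000000 / 2^40) ~= 800 MHz; the
+ * post_div field programmed here (0x1 -> /2) leaves gpll8_out_main at
+ * 400 MHz until the PLL is retuned at runtime.
+ */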
+
+static struct clk_alpha_pll gpll8 = {
+	.offset = 0x8000,
+	.vco_table = default_vco,
+	.num_vco = ARRAY_SIZE(default_vco),
+	.regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.flags = SUPPORTS_DYNAMIC_UPDATE,
+	.clkr = {
+		.enable_reg = 0x79000,
+		.enable_mask = BIT(8),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll8",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 1000000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_gpll8_out_main[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv gpll8_out_main = {
+	.offset = 0x8000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_gpll8_out_main,
+	.num_post_div = ARRAY_SIZE(post_div_table_gpll8_out_main),
+	.width = 4,
+	.regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll8_out_main",
+		.parent_names = (const char *[]){ "gpll8" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_ro_ops,
+	},
+};
+
+/* 1152MHz configuration */
+static const struct alpha_pll_config gpll9_config = {
+	.l = 0x3C,
+	.alpha = 0x0,
+	.post_div_val = 0x1 << 8,
+	.post_div_mask = GENMASK(9, 8),
+	.main_output_mask = BIT(0),
+	.config_ctl_val = 0x000040C9,
+};
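+
+/*
+ * Rate check: with a zero alpha, 19.2 MHz * 0x3c = 1152 MHz; the post_div
+ * value (0x1 -> /2) gives 576 MHz on gpll9_out_main, which the camss mclk
+ * M/N entries below divide down (576 / 9 = 64 MHz, 576 / 24 = 24 MHz).
+ */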
+
+static struct clk_alpha_pll gpll9 = {
+	.offset = 0x9000,
+	.vco_table = brammo_vco,
+	.num_vco = ARRAY_SIZE(brammo_vco),
+	.regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_BRAMMO],
+	.clkr = {
+		.enable_reg = 0x79000,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll9",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+			.vdd_class = &vdd_mx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_LOWER] = 1250000000,
+				[VDD_LOW] = 1250000000,
+				[VDD_NOMINAL] = 1250000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_gpll9_out_main[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv gpll9_out_main = {
+	.offset = 0x9000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_gpll9_out_main,
+	.num_post_div = ARRAY_SIZE(post_div_table_gpll9_out_main),
+	.width = 2,
+	.regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_BRAMMO],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll9_out_main",
+		.parent_names = (const char *[]){ "gpll9" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_ops,
+	},
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+	.reg = 0x1a04c,
+	.shift = 0,
+	.width = 2,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+		.parent_names =
+			(const char *[]){ "gcc_usb30_prim_mock_utmi_clk_src" },
+		.num_parents = 1,
+		.ops = &clk_regmap_div_ro_ops,
+	},
+};
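+
+/*
+ * The read-only divider above reports a (val + 1) division from the 2-bit
+ * field at bit 0 of 0x1a04c, i.e. the kernel only reflects the /1../4
+ * setting already programmed, presumably by the boot chain.
+ */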
+
+static const struct freq_tbl ftbl_gcc_camss_axi_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(150000000, P_GPLL0_OUT_AUX2, 2, 0, 0),
+	F(200000000, P_GPLL0_OUT_AUX2, 1.5, 0, 0),
+	F(300000000, P_GPLL0_OUT_AUX2, 1, 0, 0),
+	{ }
+};
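+
+/*
+ * freq_tbl entries are F(rate, source, pre_div, m, n); half-integer
+ * pre-dividers are valid since F() encodes 2 * pre_div - 1. The rates above
+ * are consistent with gpll0_out_aux2 at 300 MHz, e.g. 300 MHz / 1.5 = 200 MHz.
+ */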
+
+static struct clk_rcg2 gcc_camss_axi_clk_src = {
+	.cmd_rcgr = 0x5802c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_7,
+	.freq_tbl = ftbl_gcc_camss_axi_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_camss_axi_clk_src",
+		.parent_names = gcc_parent_names_7,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 150000000,
+			[VDD_LOW_L1] = 200000000,
+			[VDD_NOMINAL] = 300000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_camss_cci_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(37500000, P_GPLL0_OUT_AUX2, 8, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_camss_cci_clk_src = {
+	.cmd_rcgr = 0x56000,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_9,
+	.freq_tbl = ftbl_gcc_camss_cci_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_camss_cci_clk_src",
+		.parent_names = gcc_parent_names_9,
+		.num_parents = 8,
+		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 37500000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_camss_csi0phytimer_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+	F(200000000, P_GPLL0_OUT_AUX2, 1.5, 0, 0),
+	F(268800000, P_GPLL4_OUT_MAIN, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_camss_csi0phytimer_clk_src = {
+	.cmd_rcgr = 0x59000,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_4,
+	.freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_camss_csi0phytimer_clk_src",
+		.parent_names = gcc_parent_names_4,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 200000000,
+			[VDD_NOMINAL] = 268800000},
+	},
+};
+
+static struct clk_rcg2 gcc_camss_csi1phytimer_clk_src = {
+	.cmd_rcgr = 0x5901c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_4,
+	.freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_camss_csi1phytimer_clk_src",
+		.parent_names = gcc_parent_names_4,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 200000000,
+			[VDD_NOMINAL] = 268800000},
+	},
+};
+
+static struct clk_rcg2 gcc_camss_csi2phytimer_clk_src = {
+	.cmd_rcgr = 0x59038,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_4,
+	.freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_camss_csi2phytimer_clk_src",
+		.parent_names = gcc_parent_names_4,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 200000000,
+			[VDD_NOMINAL] = 268800000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_camss_mclk0_clk_src[] = {
+	F(24000000, P_GPLL9_OUT_MAIN, 1, 1, 24),
+	F(64000000, P_GPLL9_OUT_MAIN, 1, 1, 9),
+	{ }
+};
+
+static struct clk_rcg2 gcc_camss_mclk0_clk_src = {
+	.cmd_rcgr = 0x51000,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_3,
+	.freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_camss_mclk0_clk_src",
+		.parent_names = gcc_parent_names_3,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 64000000},
+	},
+};
+
+static struct clk_rcg2 gcc_camss_mclk1_clk_src = {
+	.cmd_rcgr = 0x5101c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_3,
+	.freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_camss_mclk1_clk_src",
+		.parent_names = gcc_parent_names_3,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 64000000},
+	},
+};
+
+static struct clk_rcg2 gcc_camss_mclk2_clk_src = {
+	.cmd_rcgr = 0x51038,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_3,
+	.freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_camss_mclk2_clk_src",
+		.parent_names = gcc_parent_names_3,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 64000000},
+	},
+};
+
+static struct clk_rcg2 gcc_camss_mclk3_clk_src = {
+	.cmd_rcgr = 0x51054,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_3,
+	.freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_camss_mclk3_clk_src",
+		.parent_names = gcc_parent_names_3,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 64000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_camss_ope_ahb_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(171428571, P_GPLL0_OUT_EARLY, 3.5, 0, 0),
+	F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_camss_ope_ahb_clk_src = {
+	.cmd_rcgr = 0x55024,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_8,
+	.freq_tbl = ftbl_gcc_camss_ope_ahb_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_camss_ope_ahb_clk_src",
+		.parent_names = gcc_parent_names_8,
+		.num_parents = 8,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 171428571,
+			[VDD_NOMINAL] = 240000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_camss_ope_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(200000000, P_GPLL8_OUT_MAIN, 2, 0, 0),
+	F(266600000, P_GPLL8_OUT_MAIN, 1, 0, 0),
+	F(465000000, P_GPLL8_OUT_MAIN, 1, 0, 0),
+	F(580000000, P_GPLL8_OUT_EARLY, 1, 0, 0),
+	{ }
+};
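+
+/*
+ * The upper entries above imply retuning gpll8 itself (465 MHz through
+ * gpll8_out_main needs gpll8 at 930 MHz), presumably relying on gpll8's
+ * SUPPORTS_DYNAMIC_UPDATE flag plus CLK_SET_RATE_PARENT propagation
+ * through the postdiv and this RCG.
+ */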
+
+static struct clk_rcg2 gcc_camss_ope_clk_src = {
+	.cmd_rcgr = 0x55004,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_8,
+	.freq_tbl = ftbl_gcc_camss_ope_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_camss_ope_clk_src",
+		.parent_names = gcc_parent_names_8,
+		.num_parents = 8,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 200000000,
+			[VDD_LOW_L1] = 266600000,
+			[VDD_NOMINAL] = 465000000,
+			[VDD_HIGH] = 580000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_camss_tfe_0_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(256000000, P_GPLL10_OUT_MAIN, 4.5, 0, 0),
+	F(460800000, P_GPLL10_OUT_MAIN, 2.5, 0, 0),
+	F(576000000, P_GPLL10_OUT_MAIN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_0_clk_src = {
+	.cmd_rcgr = 0x52004,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_5,
+	.freq_tbl = ftbl_gcc_camss_tfe_0_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_camss_tfe_0_clk_src",
+		.parent_names = gcc_parent_names_5,
+		.num_parents = 8,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 256000000,
+			[VDD_LOW_L1] = 460800000,
+			[VDD_NOMINAL] = 576000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_camss_tfe_0_csid_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+	F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+	F(426400000, P_GPLL3_OUT_EARLY, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_0_csid_clk_src = {
+	.cmd_rcgr = 0x52094,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_6,
+	.freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_camss_tfe_0_csid_clk_src",
+		.parent_names = gcc_parent_names_6,
+		.num_parents = 8,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 240000000,
+			[VDD_LOW_L1] = 384000000,
+			[VDD_HIGH] = 426400000},
+	},
+};
+
+static struct clk_rcg2 gcc_camss_tfe_1_clk_src = {
+	.cmd_rcgr = 0x52024,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_5,
+	.freq_tbl = ftbl_gcc_camss_tfe_0_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_camss_tfe_1_clk_src",
+		.parent_names = gcc_parent_names_5,
+		.num_parents = 8,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 256000000,
+			[VDD_LOW_L1] = 460800000,
+			[VDD_NOMINAL] = 576000000},
+	},
+};
+
+static struct clk_rcg2 gcc_camss_tfe_1_csid_clk_src = {
+	.cmd_rcgr = 0x520b4,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_6,
+	.freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_camss_tfe_1_csid_clk_src",
+		.parent_names = gcc_parent_names_6,
+		.num_parents = 8,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 240000000,
+			[VDD_LOW_L1] = 384000000,
+			[VDD_HIGH] = 426400000},
+	},
+};
+
+static struct clk_rcg2 gcc_camss_tfe_2_clk_src = {
+	.cmd_rcgr = 0x52044,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_5,
+	.freq_tbl = ftbl_gcc_camss_tfe_0_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_camss_tfe_2_clk_src",
+		.parent_names = gcc_parent_names_5,
+		.num_parents = 8,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 256000000,
+			[VDD_LOW_L1] = 460800000,
+			[VDD_NOMINAL] = 576000000},
+	},
+};
+
+static struct clk_rcg2 gcc_camss_tfe_2_csid_clk_src = {
+	.cmd_rcgr = 0x520d4,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_6,
+	.freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_camss_tfe_2_csid_clk_src",
+		.parent_names = gcc_parent_names_6,
+		.num_parents = 8,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 240000000,
+			[VDD_LOW_L1] = 384000000,
+			[VDD_HIGH] = 426400000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_camss_tfe_cphy_rx_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+	F(341333333, P_GPLL6_OUT_EARLY, 1, 4, 9),
+	F(384000000, P_GPLL6_OUT_EARLY, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_cphy_rx_clk_src = {
+	.cmd_rcgr = 0x52064,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_10,
+	.freq_tbl = ftbl_gcc_camss_tfe_cphy_rx_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_camss_tfe_cphy_rx_clk_src",
+		.parent_names = gcc_parent_names_10,
+		.num_parents = 8,
+		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 240000000,
+			[VDD_LOW_L1] = 341333333,
+			[VDD_HIGH] = 384000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_camss_top_ahb_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(40000000, P_GPLL0_OUT_AUX2, 7.5, 0, 0),
+	F(80000000, P_GPLL0_OUT_EARLY, 7.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_camss_top_ahb_clk_src = {
+	.cmd_rcgr = 0x58010,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_7,
+	.freq_tbl = ftbl_gcc_camss_top_ahb_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_camss_top_ahb_clk_src",
+		.parent_names = gcc_parent_names_7,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 80000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+	F(25000000, P_GPLL0_OUT_AUX2, 12, 0, 0),
+	F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0),
+	F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+	F(200000000, P_GPLL0_OUT_AUX2, 1.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+	.cmd_rcgr = 0x4d004,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_2,
+	.freq_tbl = ftbl_gcc_gp1_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_gp1_clk_src",
+		.parent_names = gcc_parent_names_2,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 50000000,
+			[VDD_LOW] = 100000000,
+			[VDD_NOMINAL] = 200000000},
+	},
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+	.cmd_rcgr = 0x4e004,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_2,
+	.freq_tbl = ftbl_gcc_gp1_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_gp2_clk_src",
+		.parent_names = gcc_parent_names_2,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 50000000,
+			[VDD_LOW] = 100000000,
+			[VDD_NOMINAL] = 200000000},
+	},
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+	.cmd_rcgr = 0x4f004,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_2,
+	.freq_tbl = ftbl_gcc_gp1_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_gp3_clk_src",
+		.parent_names = gcc_parent_names_2,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 50000000,
+			[VDD_LOW] = 100000000,
+			[VDD_NOMINAL] = 200000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(60000000, P_GPLL0_OUT_AUX2, 5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+	.cmd_rcgr = 0x20010,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_pdm2_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_pdm2_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 60000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+	F(7372800, P_GPLL0_OUT_AUX2, 1, 384, 15625),
+	F(14745600, P_GPLL0_OUT_AUX2, 1, 768, 15625),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(29491200, P_GPLL0_OUT_AUX2, 1, 1536, 15625),
+	F(32000000, P_GPLL0_OUT_AUX2, 1, 8, 75),
+	F(48000000, P_GPLL0_OUT_AUX2, 1, 4, 25),
+	F(64000000, P_GPLL0_OUT_AUX2, 1, 16, 75),
+	F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0),
+	F(80000000, P_GPLL0_OUT_AUX2, 1, 4, 15),
+	F(96000000, P_GPLL0_OUT_AUX2, 1, 8, 25),
+	F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+	F(102400000, P_GPLL0_OUT_AUX2, 1, 128, 375),
+	F(112000000, P_GPLL0_OUT_AUX2, 1, 28, 75),
+	F(117964800, P_GPLL0_OUT_AUX2, 1, 6144, 15625),
+	F(120000000, P_GPLL0_OUT_AUX2, 2.5, 0, 0),
+	F(128000000, P_GPLL6_OUT_MAIN, 3, 0, 0),
+	{ }
+};
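+
+/*
+ * With a non-zero M/N pair, rate = source / pre_div * m / n; the
+ * UART-friendly rates derive from the 300 MHz gpll0_out_aux2, e.g.
+ * 300 MHz * 384 / 15625 = 7.3728 MHz.
+ */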
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+	.name = "gcc_qupv3_wrap0_s0_clk_src",
+	.parent_names = gcc_parent_names_1,
+	.num_parents = 5,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
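+
+/*
+ * wrap0_s1..s5 below repeat this template: one named clk_init_data per
+ * serial engine, all sharing ftbl_gcc_qupv3_wrap0_s0_clk_src, with only
+ * the cmd_rcgr offset differing between the RCGs.
+ */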
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+	.cmd_rcgr = 0x1f148,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_1,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+	.name = "gcc_qupv3_wrap0_s1_clk_src",
+	.parent_names = gcc_parent_names_1,
+	.num_parents = 5,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+	.cmd_rcgr = 0x1f278,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_1,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+	.name = "gcc_qupv3_wrap0_s2_clk_src",
+	.parent_names = gcc_parent_names_1,
+	.num_parents = 5,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+	.cmd_rcgr = 0x1f3a8,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_1,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+	.name = "gcc_qupv3_wrap0_s3_clk_src",
+	.parent_names = gcc_parent_names_1,
+	.num_parents = 5,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+	.cmd_rcgr = 0x1f4d8,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_1,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+	.name = "gcc_qupv3_wrap0_s4_clk_src",
+	.parent_names = gcc_parent_names_1,
+	.num_parents = 5,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+	.cmd_rcgr = 0x1f608,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_1,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+	.name = "gcc_qupv3_wrap0_s5_clk_src",
+	.parent_names = gcc_parent_names_1,
+	.num_parents = 5,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+	.cmd_rcgr = 0x1f738,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_1,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+	F(144000, P_BI_TCXO, 16, 3, 25),
+	F(400000, P_BI_TCXO, 12, 1, 4),
+	F(20000000, P_GPLL0_OUT_AUX2, 5, 1, 3),
+	F(25000000, P_GPLL0_OUT_AUX2, 6, 1, 2),
+	F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0),
+	F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+	F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+	F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+	{ }
+};
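+
+/*
+ * The sub-MHz entries come straight off the XO with M/N scaling for card
+ * initialization: 19.2 MHz / 16 * 3 / 25 = 144 kHz and
+ * 19.2 MHz / 12 * 1 / 4 = 400 kHz.
+ */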
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+	.cmd_rcgr = 0x38028,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_1,
+	.freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_sdcc1_apps_clk_src",
+		.parent_names = gcc_parent_names_1,
+		.num_parents = 5,
+		.flags = CLK_OPS_PARENT_ENABLE,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 100000000,
+			[VDD_LOW_L1] = 384000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+	F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0),
+	F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+	F(150000000, P_GPLL0_OUT_AUX2, 2, 0, 0),
+	F(200000000, P_GPLL0_OUT_EARLY, 3, 0, 0),
+	F(300000000, P_GPLL0_OUT_AUX2, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+	.cmd_rcgr = 0x38010,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_sdcc1_ice_core_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 100000000,
+			[VDD_LOW] = 150000000,
+			[VDD_LOW_L1] = 300000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+	F(400000, P_BI_TCXO, 12, 1, 4),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(25000000, P_GPLL0_OUT_AUX2, 12, 0, 0),
+	F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0),
+	F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+	F(202000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+	.cmd_rcgr = 0x1e00c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_11,
+	.freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_sdcc2_apps_clk_src",
+		.parent_names = gcc_parent_names_11,
+		.num_parents = 6,
+		.flags = CLK_OPS_PARENT_ENABLE,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 100000000,
+			[VDD_LOW_L1] = 202000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+	F(25000000, P_GPLL0_OUT_AUX2, 12, 0, 0),
+	F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0),
+	F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+	F(200000000, P_GPLL0_OUT_EARLY, 3, 0, 0),
+	F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+	.cmd_rcgr = 0x45020,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_ufs_phy_axi_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 50000000,
+			[VDD_LOW] = 100000000,
+			[VDD_NOMINAL] = 200000000,
+			[VDD_HIGH] = 240000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+	F(37500000, P_GPLL0_OUT_AUX2, 8, 0, 0),
+	F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0),
+	F(150000000, P_GPLL0_OUT_AUX2, 2, 0, 0),
+	F(300000000, P_GPLL0_OUT_AUX2, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+	.cmd_rcgr = 0x45048,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_ufs_phy_ice_core_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 75000000,
+			[VDD_LOW] = 150000000,
+			[VDD_NOMINAL] = 300000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+	F(9600000, P_BI_TCXO, 2, 0, 0),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+	.cmd_rcgr = 0x4507c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_ufs_phy_phy_aux_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+	F(37500000, P_GPLL0_OUT_AUX2, 8, 0, 0),
+	F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0),
+	F(150000000, P_GPLL0_OUT_AUX2, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+	.cmd_rcgr = 0x45060,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_ufs_phy_unipro_core_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 37500000,
+			[VDD_LOW] = 75000000,
+			[VDD_NOMINAL] = 150000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+	F(66666667, P_GPLL0_OUT_AUX2, 4.5, 0, 0),
+	F(133333333, P_GPLL0_OUT_EARLY, 4.5, 0, 0),
+	F(200000000, P_GPLL0_OUT_EARLY, 3, 0, 0),
+	F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+	.cmd_rcgr = 0x1a01c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_usb30_prim_master_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 66666667,
+			[VDD_LOW] = 133333333,
+			[VDD_NOMINAL] = 200000000,
+			[VDD_HIGH] = 240000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+	.cmd_rcgr = 0x1a034,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_usb30_prim_mock_utmi_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000},
+	},
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+	.cmd_rcgr = 0x1a060,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_12,
+	.freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_usb3_prim_phy_aux_clk_src",
+		.parent_names = gcc_parent_names_12,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_video_venus_clk_src[] = {
+	F(133000000, P_GPLL11_OUT_MAIN, 4.5, 0, 0),
+	F(240000000, P_GPLL11_OUT_MAIN, 2.5, 0, 0),
+	F(300000000, P_GPLL11_OUT_MAIN, 2, 0, 0),
+	F(384000000, P_GPLL11_OUT_MAIN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_video_venus_clk_src = {
+	.cmd_rcgr = 0x58060,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_13,
+	.freq_tbl = ftbl_gcc_video_venus_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_video_venus_clk_src",
+		.parent_names = gcc_parent_names_13,
+		.num_parents = 5,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 133000000,
+			[VDD_LOW] = 240000000,
+			[VDD_LOW_L1] = 300000000,
+			[VDD_NOMINAL] = 384000000},
+	},
+};
+
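+/*
+ * Branch (CBCR) clocks. halt_reg/halt_check describe how completion is
+ * polled after an enable or disable: BRANCH_HALT reads the CLK_OFF status
+ * bit, BRANCH_HALT_DELAY only waits (no reliable status bit), and
+ * BRANCH_HALT_VOTED enables through a shared vote register (0x79004/0x7900c
+ * here) and skips the poll on disable, since other voters may keep the
+ * clock running. hwcg_bit = 1 points at the CBCR HW_CTL bit for autonomous
+ * hardware gating.
+ */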
+static struct clk_branch gcc_ahb2phy_csi_clk = {
+	.halt_reg = 0x1d004,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x1d004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x1d004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ahb2phy_csi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ahb2phy_usb_clk = {
+	.halt_reg = 0x1d008,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x1d008,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x1d008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ahb2phy_usb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_bimc_gpu_axi_clk = {
+	.halt_reg = 0x71154,
+	.halt_check = BRANCH_HALT_DELAY,
+	.hwcg_reg = 0x71154,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x71154,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_bimc_gpu_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+	.halt_reg = 0x23004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x23004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x79004,
+		.enable_mask = BIT(10),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_boot_rom_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_cam_throttle_nrt_clk = {
+	.halt_reg = 0x17070,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x17070,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x79004,
+		.enable_mask = BIT(27),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cam_throttle_nrt_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_cam_throttle_rt_clk = {
+	.halt_reg = 0x1706c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x1706c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x79004,
+		.enable_mask = BIT(26),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cam_throttle_rt_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camera_ahb_clk = {
+	.halt_reg = 0x17008,
+	.halt_check = BRANCH_HALT_DELAY,
+	.hwcg_reg = 0x17008,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x17008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camera_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camera_xo_clk = {
+	.halt_reg = 0x17028,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x17028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camera_xo_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_axi_clk = {
+	.halt_reg = 0x58044,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x58044,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_axi_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_camnoc_atb_clk = {
+	.halt_reg = 0x5804c,
+	.halt_check = BRANCH_HALT_DELAY,
+	.hwcg_reg = 0x5804c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x5804c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_camnoc_atb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_camnoc_nts_xo_clk = {
+	.halt_reg = 0x58050,
+	.halt_check = BRANCH_HALT_DELAY,
+	.hwcg_reg = 0x58050,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x58050,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_camnoc_nts_xo_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_cci_0_clk = {
+	.halt_reg = 0x56018,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x56018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_cci_0_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_cci_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_cphy_0_clk = {
+	.halt_reg = 0x52088,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x52088,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_cphy_0_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_tfe_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_cphy_1_clk = {
+	.halt_reg = 0x5208c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x5208c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_cphy_1_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_tfe_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_cphy_2_clk = {
+	.halt_reg = 0x52090,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x52090,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_cphy_2_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_tfe_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_csi0phytimer_clk = {
+	.halt_reg = 0x59018,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x59018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_csi0phytimer_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_csi0phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_csi1phytimer_clk = {
+	.halt_reg = 0x59034,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x59034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_csi1phytimer_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_csi1phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_csi2phytimer_clk = {
+	.halt_reg = 0x59050,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x59050,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_csi2phytimer_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_csi2phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_mclk0_clk = {
+	.halt_reg = 0x51018,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x51018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_mclk0_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_mclk0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_mclk1_clk = {
+	.halt_reg = 0x51034,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x51034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_mclk1_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_mclk1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_mclk2_clk = {
+	.halt_reg = 0x51050,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x51050,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_mclk2_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_mclk2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_mclk3_clk = {
+	.halt_reg = 0x5106c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x5106c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_mclk3_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_mclk3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_nrt_axi_clk = {
+	.halt_reg = 0x58054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x58054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_nrt_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_ope_ahb_clk = {
+	.halt_reg = 0x5503c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x5503c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_ope_ahb_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_ope_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_ope_clk = {
+	.halt_reg = 0x5501c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x5501c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_ope_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_ope_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_rt_axi_clk = {
+	.halt_reg = 0x5805c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x5805c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_rt_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_tfe_0_clk = {
+	.halt_reg = 0x5201c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x5201c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_tfe_0_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_tfe_0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_tfe_0_cphy_rx_clk = {
+	.halt_reg = 0x5207c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x5207c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_tfe_0_cphy_rx_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_tfe_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_tfe_0_csid_clk = {
+	.halt_reg = 0x520ac,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x520ac,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_tfe_0_csid_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_tfe_0_csid_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_tfe_1_clk = {
+	.halt_reg = 0x5203c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x5203c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_tfe_1_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_tfe_1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_tfe_1_cphy_rx_clk = {
+	.halt_reg = 0x52080,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x52080,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_tfe_1_cphy_rx_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_tfe_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_tfe_1_csid_clk = {
+	.halt_reg = 0x520cc,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x520cc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_tfe_1_csid_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_tfe_1_csid_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_tfe_2_clk = {
+	.halt_reg = 0x5205c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x5205c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_tfe_2_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_tfe_2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_tfe_2_cphy_rx_clk = {
+	.halt_reg = 0x52084,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x52084,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_tfe_2_cphy_rx_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_tfe_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_tfe_2_csid_clk = {
+	.halt_reg = 0x520ec,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x520ec,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_tfe_2_csid_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_tfe_2_csid_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camss_top_ahb_clk = {
+	.halt_reg = 0x58028,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x58028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camss_top_ahb_clk",
+			.parent_names = (const char *[]){
+				"gcc_camss_top_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+	.halt_reg = 0x1a084,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x1a084,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x1a084,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cfg_noc_usb3_prim_axi_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb30_prim_master_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_cpuss_ahb_clk = {
+	.halt_reg = 0x2b000,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x2b000,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x79004,
+		.enable_mask = BIT(21),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cpuss_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_cpuss_gnoc_clk = {
+	.halt_reg = 0x2b004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x2b004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x79004,
+		.enable_mask = BIT(22),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cpuss_gnoc_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_cpuss_throttle_core_clk = {
+	.halt_reg = 0x2b180,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x2b180,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x79004,
+		.enable_mask = BIT(30),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cpuss_throttle_core_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_cpuss_throttle_xo_clk = {
+	.halt_reg = 0x2b17c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2b17c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cpuss_throttle_xo_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_disp_ahb_clk = {
+	.halt_reg = 0x1700c,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x1700c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x1700c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_disp_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x79004,
+		.enable_mask = BIT(20),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_disp_gpll0_div_clk_src",
+			.parent_names = (const char *[]){
+				"gpll0",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+	.halt_reg = 0x17020,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x17020,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x17020,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_disp_hf_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_disp_throttle_core_clk = {
+	.halt_reg = 0x17064,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x17064,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x7900c,
+		.enable_mask = BIT(5),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_disp_throttle_core_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_disp_xo_clk = {
+	.halt_reg = 0x1702c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1702c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_disp_xo_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gp1_clk = {
+	.halt_reg = 0x4d000,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x4d000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gp1_clk",
+			.parent_names = (const char *[]){
+				"gcc_gp1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gp2_clk = {
+	.halt_reg = 0x4e000,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x4e000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gp2_clk",
+			.parent_names = (const char *[]){
+				"gcc_gp2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gp3_clk = {
+	.halt_reg = 0x4f000,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x4f000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gp3_clk",
+			.parent_names = (const char *[]){
+				"gcc_gp3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+	.halt_reg = 0x36004,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x36004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x36004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_cfg_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x79004,
+		.enable_mask = BIT(15),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_gpll0_clk_src",
+			.parent_names = (const char *[]){
+				"gpll0",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x79004,
+		.enable_mask = BIT(16),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_gpll0_div_clk_src",
+			.parent_names = (const char *[]){
+				"gpll0_out_aux2",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gpu_iref_clk = {
+	.halt_reg = 0x36100,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x36100,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_iref_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+	.halt_reg = 0x3600c,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x3600c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x3600c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_memnoc_gfx_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+	.halt_reg = 0x36018,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x36018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_snoc_dvm_gfx_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gpu_throttle_core_clk = {
+	.halt_reg = 0x36048,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x36048,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x79004,
+		.enable_mask = BIT(31),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_throttle_core_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gpu_throttle_xo_clk = {
+	.halt_reg = 0x36044,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x36044,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_throttle_xo_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+	.halt_reg = 0x2000c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2000c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pdm2_clk",
+			.parent_names = (const char *[]){
+				"gcc_pdm2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+	.halt_reg = 0x20004,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x20004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x20004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pdm_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+	.halt_reg = 0x20008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x20008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pdm_xo4_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+	.halt_reg = 0x21004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x21004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x79004,
+		.enable_mask = BIT(13),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_prng_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+	.halt_reg = 0x17014,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x17014,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x7900c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qmip_camera_nrt_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+	.halt_reg = 0x17060,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x17060,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x7900c,
+		.enable_mask = BIT(2),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qmip_camera_rt_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qmip_cpuss_cfg_ahb_clk = {
+	.halt_reg = 0x2b178,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x2b178,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x79004,
+		.enable_mask = BIT(18),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qmip_cpuss_cfg_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+	.halt_reg = 0x17018,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x17018,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x7900c,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qmip_disp_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qmip_gpu_cfg_ahb_clk = {
+	.halt_reg = 0x36040,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x36040,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x7900c,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qmip_gpu_cfg_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+	.halt_reg = 0x17010,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x17010,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x79004,
+		.enable_mask = BIT(25),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qmip_video_vcodec_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+	.halt_reg = 0x1f014,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x7900c,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_core_2x_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+	.halt_reg = 0x1f00c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x7900c,
+		.enable_mask = BIT(8),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_core_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+	.halt_reg = 0x1f144,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x7900c,
+		.enable_mask = BIT(10),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_s0_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap0_s0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+	.halt_reg = 0x1f274,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x7900c,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_s1_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap0_s1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+	.halt_reg = 0x1f3a4,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x7900c,
+		.enable_mask = BIT(12),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_s2_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap0_s2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+	.halt_reg = 0x1f4d4,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x7900c,
+		.enable_mask = BIT(13),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_s3_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap0_s3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+	.halt_reg = 0x1f604,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x7900c,
+		.enable_mask = BIT(14),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_s4_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap0_s4_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+	.halt_reg = 0x1f734,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x7900c,
+		.enable_mask = BIT(15),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_s5_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap0_s5_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+	.halt_reg = 0x1f004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x1f004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x7900c,
+		.enable_mask = BIT(6),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap_0_m_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+	.halt_reg = 0x1f008,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x1f008,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x7900c,
+		.enable_mask = BIT(7),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap_0_s_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+	.halt_reg = 0x38008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x38008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc1_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+	.halt_reg = 0x38004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x38004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc1_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_sdcc1_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_ENABLE_HAND_OFF,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+	.halt_reg = 0x3800c,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x3800c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x3800c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc1_ice_core_clk",
+			.parent_names = (const char *[]){
+				"gcc_sdcc1_ice_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+	.halt_reg = 0x1e008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1e008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc2_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+	.halt_reg = 0x1e004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1e004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc2_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_sdcc2_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+	.halt_reg = 0x2b06c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x2b06c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x79004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sys_noc_cpuss_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sys_noc_ufs_phy_axi_clk = {
+	.halt_reg = 0x45098,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x45098,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sys_noc_ufs_phy_axi_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sys_noc_usb3_prim_axi_clk = {
+	.halt_reg = 0x1a080,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x1a080,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x1a080,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sys_noc_usb3_prim_axi_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb30_prim_master_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+	.halt_reg = 0x45014,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x45014,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x45014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+	.halt_reg = 0x45010,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x45010,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x45010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_axi_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+	.halt_reg = 0x45044,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x45044,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x45044,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_ice_core_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_ice_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+	.halt_reg = 0x45078,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x45078,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x45078,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_phy_aux_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_phy_aux_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+	.halt_reg = 0x4501c,
+	.halt_check = BRANCH_HALT_SKIP,
+	.clkr = {
+		.enable_reg = 0x4501c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_rx_symbol_0_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+	.halt_reg = 0x45018,
+	.halt_check = BRANCH_HALT_SKIP,
+	.clkr = {
+		.enable_reg = 0x45018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_tx_symbol_0_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+	.halt_reg = 0x45040,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x45040,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x45040,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_unipro_core_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_unipro_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+	.halt_reg = 0x1a010,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1a010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb30_prim_master_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb30_prim_master_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+	.halt_reg = 0x1a018,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1a018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb30_prim_mock_utmi_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+	.halt_reg = 0x1a014,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1a014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb30_prim_sleep_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+	.halt_reg = 0x9f000,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9f000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb3_prim_clkref_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+	.halt_reg = 0x1a054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1a054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb3_prim_phy_com_aux_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb3_prim_phy_aux_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+	.halt_reg = 0x1a058,
+	.halt_check = BRANCH_HALT_SKIP,
+	.hwcg_reg = 0x1a058,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x1a058,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb3_prim_phy_pipe_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_vcodec0_axi_clk = {
+	.halt_reg = 0x6e008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6e008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_vcodec0_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_venus_ahb_clk = {
+	.halt_reg = 0x6e010,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6e010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_venus_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_venus_ctl_axi_clk = {
+	.halt_reg = 0x6e004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6e004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_venus_ctl_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_video_ahb_clk = {
+	.halt_reg = 0x17004,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x17004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x17004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_video_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+	.halt_reg = 0x1701c,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x1701c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x1701c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_video_axi0_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_video_throttle_core_clk = {
+	.halt_reg = 0x17068,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x17068,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x79004,
+		.enable_mask = BIT(28),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_video_throttle_core_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_video_vcodec0_sys_clk = {
+	.halt_reg = 0x580a4,
+	.halt_check = BRANCH_HALT_DELAY,
+	.hwcg_reg = 0x580a4,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x580a4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_video_vcodec0_sys_clk",
+			.parent_names = (const char *[]){
+				"gcc_video_venus_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_video_venus_ctl_clk = {
+	.halt_reg = 0x5808c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x5808c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_video_venus_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_video_venus_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_video_xo_clk = {
+	.halt_reg = 0x17024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x17024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_video_xo_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
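The branch clocks above follow a small set of patterns: BRANCH_HALT polls the
clock's own halt_reg after toggling BIT(0) of enable_reg, BRANCH_HALT_VOTED is
used where the enable lives in a shared vote register (0x79004 / 0x7900c here)
so other voters may keep the branch running, and BRANCH_HALT_DELAY/_SKIP cover
clocks whose status bit cannot be polled reliably. None of this is visible to
consumers; a peripheral driver just goes through the common clock API. A
minimal sketch (the "iface" con-id and the device are illustrative, not taken
from this patch):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int example_enable_iface(struct platform_device *pdev)
	{
		/* e.g. gcc_sdcc1_ahb_clk, routed to the device via DT */
		struct clk *ahb = devm_clk_get(&pdev->dev, "iface");

		if (IS_ERR(ahb))
			return PTR_ERR(ahb);
		/* sets BIT(0) of enable_reg; for BRANCH_HALT branches the
		 * framework then polls halt_reg until the clock is running */
		return clk_prepare_enable(ahb);
	}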
+static struct clk_regmap *gcc_bengal_clocks[] = {
+	[GCC_AHB2PHY_CSI_CLK] = &gcc_ahb2phy_csi_clk.clkr,
+	[GCC_AHB2PHY_USB_CLK] = &gcc_ahb2phy_usb_clk.clkr,
+	[GCC_BIMC_GPU_AXI_CLK] = &gcc_bimc_gpu_axi_clk.clkr,
+	[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+	[GCC_CAM_THROTTLE_NRT_CLK] = &gcc_cam_throttle_nrt_clk.clkr,
+	[GCC_CAM_THROTTLE_RT_CLK] = &gcc_cam_throttle_rt_clk.clkr,
+	[GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
+	[GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+	[GCC_CAMSS_AXI_CLK] = &gcc_camss_axi_clk.clkr,
+	[GCC_CAMSS_AXI_CLK_SRC] = &gcc_camss_axi_clk_src.clkr,
+	[GCC_CAMSS_CAMNOC_ATB_CLK] = &gcc_camss_camnoc_atb_clk.clkr,
+	[GCC_CAMSS_CAMNOC_NTS_XO_CLK] = &gcc_camss_camnoc_nts_xo_clk.clkr,
+	[GCC_CAMSS_CCI_0_CLK] = &gcc_camss_cci_0_clk.clkr,
+	[GCC_CAMSS_CCI_CLK_SRC] = &gcc_camss_cci_clk_src.clkr,
+	[GCC_CAMSS_CPHY_0_CLK] = &gcc_camss_cphy_0_clk.clkr,
+	[GCC_CAMSS_CPHY_1_CLK] = &gcc_camss_cphy_1_clk.clkr,
+	[GCC_CAMSS_CPHY_2_CLK] = &gcc_camss_cphy_2_clk.clkr,
+	[GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+	[GCC_CAMSS_CSI0PHYTIMER_CLK_SRC] = &gcc_camss_csi0phytimer_clk_src.clkr,
+	[GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr,
+	[GCC_CAMSS_CSI1PHYTIMER_CLK_SRC] = &gcc_camss_csi1phytimer_clk_src.clkr,
+	[GCC_CAMSS_CSI2PHYTIMER_CLK] = &gcc_camss_csi2phytimer_clk.clkr,
+	[GCC_CAMSS_CSI2PHYTIMER_CLK_SRC] = &gcc_camss_csi2phytimer_clk_src.clkr,
+	[GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+	[GCC_CAMSS_MCLK0_CLK_SRC] = &gcc_camss_mclk0_clk_src.clkr,
+	[GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+	[GCC_CAMSS_MCLK1_CLK_SRC] = &gcc_camss_mclk1_clk_src.clkr,
+	[GCC_CAMSS_MCLK2_CLK] = &gcc_camss_mclk2_clk.clkr,
+	[GCC_CAMSS_MCLK2_CLK_SRC] = &gcc_camss_mclk2_clk_src.clkr,
+	[GCC_CAMSS_MCLK3_CLK] = &gcc_camss_mclk3_clk.clkr,
+	[GCC_CAMSS_MCLK3_CLK_SRC] = &gcc_camss_mclk3_clk_src.clkr,
+	[GCC_CAMSS_NRT_AXI_CLK] = &gcc_camss_nrt_axi_clk.clkr,
+	[GCC_CAMSS_OPE_AHB_CLK] = &gcc_camss_ope_ahb_clk.clkr,
+	[GCC_CAMSS_OPE_AHB_CLK_SRC] = &gcc_camss_ope_ahb_clk_src.clkr,
+	[GCC_CAMSS_OPE_CLK] = &gcc_camss_ope_clk.clkr,
+	[GCC_CAMSS_OPE_CLK_SRC] = &gcc_camss_ope_clk_src.clkr,
+	[GCC_CAMSS_RT_AXI_CLK] = &gcc_camss_rt_axi_clk.clkr,
+	[GCC_CAMSS_TFE_0_CLK] = &gcc_camss_tfe_0_clk.clkr,
+	[GCC_CAMSS_TFE_0_CLK_SRC] = &gcc_camss_tfe_0_clk_src.clkr,
+	[GCC_CAMSS_TFE_0_CPHY_RX_CLK] = &gcc_camss_tfe_0_cphy_rx_clk.clkr,
+	[GCC_CAMSS_TFE_0_CSID_CLK] = &gcc_camss_tfe_0_csid_clk.clkr,
+	[GCC_CAMSS_TFE_0_CSID_CLK_SRC] = &gcc_camss_tfe_0_csid_clk_src.clkr,
+	[GCC_CAMSS_TFE_1_CLK] = &gcc_camss_tfe_1_clk.clkr,
+	[GCC_CAMSS_TFE_1_CLK_SRC] = &gcc_camss_tfe_1_clk_src.clkr,
+	[GCC_CAMSS_TFE_1_CPHY_RX_CLK] = &gcc_camss_tfe_1_cphy_rx_clk.clkr,
+	[GCC_CAMSS_TFE_1_CSID_CLK] = &gcc_camss_tfe_1_csid_clk.clkr,
+	[GCC_CAMSS_TFE_1_CSID_CLK_SRC] = &gcc_camss_tfe_1_csid_clk_src.clkr,
+	[GCC_CAMSS_TFE_2_CLK] = &gcc_camss_tfe_2_clk.clkr,
+	[GCC_CAMSS_TFE_2_CLK_SRC] = &gcc_camss_tfe_2_clk_src.clkr,
+	[GCC_CAMSS_TFE_2_CPHY_RX_CLK] = &gcc_camss_tfe_2_cphy_rx_clk.clkr,
+	[GCC_CAMSS_TFE_2_CSID_CLK] = &gcc_camss_tfe_2_csid_clk.clkr,
+	[GCC_CAMSS_TFE_2_CSID_CLK_SRC] = &gcc_camss_tfe_2_csid_clk_src.clkr,
+	[GCC_CAMSS_TFE_CPHY_RX_CLK_SRC] = &gcc_camss_tfe_cphy_rx_clk_src.clkr,
+	[GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+	[GCC_CAMSS_TOP_AHB_CLK_SRC] = &gcc_camss_top_ahb_clk_src.clkr,
+	[GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+	[GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+	[GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
+	[GCC_CPUSS_THROTTLE_CORE_CLK] = &gcc_cpuss_throttle_core_clk.clkr,
+	[GCC_CPUSS_THROTTLE_XO_CLK] = &gcc_cpuss_throttle_xo_clk.clkr,
+	[GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
+	[GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+	[GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+	[GCC_DISP_THROTTLE_CORE_CLK] = &gcc_disp_throttle_core_clk.clkr,
+	[GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+	[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+	[GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+	[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+	[GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+	[GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+	[GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+	[GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+	[GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+	[GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+	[GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
+	[GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+	[GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+	[GCC_GPU_THROTTLE_CORE_CLK] = &gcc_gpu_throttle_core_clk.clkr,
+	[GCC_GPU_THROTTLE_XO_CLK] = &gcc_gpu_throttle_xo_clk.clkr,
+	[GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+	[GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+	[GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+	[GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+	[GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+	[GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+	[GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+	[GCC_QMIP_CPUSS_CFG_AHB_CLK] = &gcc_qmip_cpuss_cfg_ahb_clk.clkr,
+	[GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+	[GCC_QMIP_GPU_CFG_AHB_CLK] = &gcc_qmip_gpu_cfg_ahb_clk.clkr,
+	[GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+	[GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+	[GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+	[GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+	[GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+	[GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+	[GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+	[GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+	[GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+	[GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+	[GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+	[GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+	[GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+	[GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+	[GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+	[GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+	[GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+	[GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+	[GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+	[GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+	[GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+	[GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+	[GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+	[GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+	[GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+	[GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+	[GCC_SYS_NOC_UFS_PHY_AXI_CLK] = &gcc_sys_noc_ufs_phy_axi_clk.clkr,
+	[GCC_SYS_NOC_USB3_PRIM_AXI_CLK] = &gcc_sys_noc_usb3_prim_axi_clk.clkr,
+	[GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+	[GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+	[GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+	[GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+	[GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+	[GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+	[GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+	[GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+	[GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+	[GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+	[GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
+		&gcc_ufs_phy_unipro_core_clk_src.clkr,
+	[GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+	[GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+	[GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+	[GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+		&gcc_usb30_prim_mock_utmi_clk_src.clkr,
+	[GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] =
+		&gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+	[GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+	[GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+	[GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+	[GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+	[GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+	[GCC_VCODEC0_AXI_CLK] = &gcc_vcodec0_axi_clk.clkr,
+	[GCC_VENUS_AHB_CLK] = &gcc_venus_ahb_clk.clkr,
+	[GCC_VENUS_CTL_AXI_CLK] = &gcc_venus_ctl_axi_clk.clkr,
+	[GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
+	[GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+	[GCC_VIDEO_THROTTLE_CORE_CLK] = &gcc_video_throttle_core_clk.clkr,
+	[GCC_VIDEO_VCODEC0_SYS_CLK] = &gcc_video_vcodec0_sys_clk.clkr,
+	[GCC_VIDEO_VENUS_CLK_SRC] = &gcc_video_venus_clk_src.clkr,
+	[GCC_VIDEO_VENUS_CTL_CLK] = &gcc_video_venus_ctl_clk.clkr,
+	[GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+	[GPLL0] = &gpll0.clkr,
+	[GPLL0_OUT_AUX2] = &gpll0_out_aux2.clkr,
+	[GPLL0_OUT_MAIN] = &gpll0_out_main.clkr,
+	[GPLL10] = &gpll10.clkr,
+	[GPLL10_OUT_MAIN] = &gpll10_out_main.clkr,
+	[GPLL11] = &gpll11.clkr,
+	[GPLL11_OUT_MAIN] = &gpll11_out_main.clkr,
+	[GPLL3] = &gpll3.clkr,
+	[GPLL4] = &gpll4.clkr,
+	[GPLL4_OUT_MAIN] = &gpll4_out_main.clkr,
+	[GPLL6] = &gpll6.clkr,
+	[GPLL6_OUT_MAIN] = &gpll6_out_main.clkr,
+	[GPLL7] = &gpll7.clkr,
+	[GPLL7_OUT_MAIN] = &gpll7_out_main.clkr,
+	[GPLL8] = &gpll8.clkr,
+	[GPLL8_OUT_MAIN] = &gpll8_out_main.clkr,
+	[GPLL9] = &gpll9.clkr,
+	[GPLL9_OUT_MAIN] = &gpll9_out_main.clkr,
+};
+
+static const struct qcom_reset_map gcc_bengal_resets[] = {
+	[GCC_QUSB2PHY_PRIM_BCR] = { 0x1c000 },
+	[GCC_QUSB2PHY_SEC_BCR] = { 0x1c004 },
+	[GCC_UFS_PHY_BCR] = { 0x45000 },
+	[GCC_USB30_PRIM_BCR] = { 0x1a000 },
+	[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x1d000 },
+	[GCC_VCODEC0_BCR] = { 0x58094 },
+	[GCC_VENUS_BCR] = { 0x58078 },
+	[GCC_VIDEO_INTERFACE_BCR] = { 0x6e000 },
+};
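Each entry in the reset map names a Block Control Register (BCR); with no bit
specified, the qcom reset controller asserts the block by setting BIT(0) of
the listed offset. Consumers reach these through the generic reset framework.
A hedged sketch (the "usb30" name and the hold time are illustrative):

	#include <linux/delay.h>
	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/reset.h>

	static int example_reset_usb30(struct device *dev)
	{
		struct reset_control *rst =
			devm_reset_control_get_exclusive(dev, "usb30");

		if (IS_ERR(rst))
			return PTR_ERR(rst);
		reset_control_assert(rst);	/* sets BIT(0) of 0x1a000 */
		udelay(10);			/* hold time is block-specific */
		return reset_control_deassert(rst);
	}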
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+};
+
+static const struct regmap_config gcc_bengal_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0xc7000,
+	.fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_bengal_desc = {
+	.config = &gcc_bengal_regmap_config,
+	.clks = gcc_bengal_clocks,
+	.num_clks = ARRAY_SIZE(gcc_bengal_clocks),
+	.resets = gcc_bengal_resets,
+	.num_resets = ARRAY_SIZE(gcc_bengal_resets),
+};
+
+static const struct of_device_id gcc_bengal_match_table[] = {
+	{ .compatible = "qcom,bengal-gcc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, gcc_bengal_match_table);
+
+static int gcc_bengal_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	int ret;
+
+	regmap = qcom_cc_map(pdev, &gcc_bengal_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx regulator\n");
+		return PTR_ERR(vdd_cx.regulator[0]);
+	}
+
+	vdd_cx_ao.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx_ao");
+	if (IS_ERR(vdd_cx_ao.regulator[0])) {
+		if (PTR_ERR(vdd_cx_ao.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx_ao regulator\n");
+		return PTR_ERR(vdd_cx_ao.regulator[0]);
+	}
+
+	vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx");
+	if (IS_ERR(vdd_mx.regulator[0])) {
+		if (PTR_ERR(vdd_mx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_mx regulator\n");
+		return PTR_ERR(vdd_mx.regulator[0]);
+	}
+
+	ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+			ARRAY_SIZE(gcc_dfs_clocks));
+	if (ret)
+		return ret;
+
+	/* Disable the GPLL0 active input to NPU and GPU via MISC registers */
+
+	clk_alpha_pll_configure(&gpll8, regmap, &gpll8_config);
+	clk_alpha_pll_configure(&gpll9, regmap, &gpll9_config);
+	clk_alpha_pll_configure(&gpll10, regmap, &gpll10_config);
+	clk_alpha_pll_configure(&gpll11, regmap, &gpll11_config);
+
+	ret = qcom_cc_really_probe(pdev, &gcc_bengal_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register GCC clocks\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered GCC clocks\n");
+	return ret;
+}
+
+static struct platform_driver gcc_bengal_driver = {
+	.probe = gcc_bengal_probe,
+	.driver = {
+		.name = "gcc-bengal",
+		.of_match_table = gcc_bengal_match_table,
+	},
+};
+
+static int __init gcc_bengal_init(void)
+{
+	return platform_driver_register(&gcc_bengal_driver);
+}
+subsys_initcall(gcc_bengal_init);
+
+static void __exit gcc_bengal_exit(void)
+{
+	platform_driver_unregister(&gcc_bengal_driver);
+}
+module_exit(gcc_bengal_exit);
+
+MODULE_DESCRIPTION("QTI GCC BENGAL Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gpucc-bengal.c b/drivers/clk/qcom/gpucc-bengal.c
new file mode 100644
index 0000000..91f62b2
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-bengal.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gpucc-bengal.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "vdd-level-bengal.h"
+
+#define CX_GMU_CBCR_SLEEP_MASK          0xf
+#define CX_GMU_CBCR_SLEEP_SHIFT         4
+#define CX_GMU_CBCR_WAKE_MASK           0xf
+#define CX_GMU_CBCR_WAKE_SHIFT          8
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
+
+enum {
+	P_BI_TCXO,
+	P_CORE_BI_PLL_TEST_SE,
+	P_GPLL0_OUT_MAIN,
+	P_GPLL0_OUT_MAIN_DIV,
+	P_GPU_CC_PLL0_2X_DIV_CLK_SRC,
+	P_GPU_CC_PLL0_OUT_AUX2,
+	P_GPU_CC_PLL0_OUT_MAIN,
+	P_GPU_CC_PLL1_OUT_AUX,
+	P_GPU_CC_PLL1_OUT_AUX2,
+	P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPU_CC_PLL0_OUT_MAIN, 1 },
+	{ P_GPU_CC_PLL1_OUT_MAIN, 3 },
+	{ P_GPLL0_OUT_MAIN, 5 },
+	{ P_GPLL0_OUT_MAIN_DIV, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpu_cc_parent_names_0[] = {
+	"bi_tcxo",
+	"gpu_cc_pll0_out_main",
+	"gpu_cc_pll1_out_main",
+	"gpll0_out_main",
+	"gpll0_out_main_div",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPU_CC_PLL0_2X_DIV_CLK_SRC, 1 },
+	{ P_GPU_CC_PLL0_OUT_AUX2, 2 },
+	{ P_GPU_CC_PLL1_OUT_AUX, 3 },
+	{ P_GPU_CC_PLL1_OUT_AUX2, 4 },
+	{ P_GPLL0_OUT_MAIN, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpu_cc_parent_names_1[] = {
+	"bi_tcxo",
+	"gpu_cc_pll0_out_aux",
+	"gpu_cc_pll0_out_aux2",
+	"gpu_cc_pll1_out_aux",
+	"gpu_cc_pll1_out_aux2",
+	"gpll0_out_main",
+	"core_bi_pll_test_se",
+};
+
+static struct pll_vco default_vco[] = {
+	{ 1000000000, 2000000000, 0 },
+	{ 750000000, 1500000000, 1 },
+	{ 500000000, 1000000000, 2 },
+	{ 250000000, 500000000, 3 },
+};
+
+/* 532MHz configuration */
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+	.l = 0x1B,
+	.alpha = 0x55000000,
+	.alpha_hi = 0xB5,
+	.alpha_en_mask = BIT(24),
+	.main_output_mask = BIT(0),
+	.aux_output_mask = BIT(1),
+	.aux2_output_mask = BIT(2),
+	.config_ctl_val = 0x40008529,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+	.offset = 0x0,
+	.vco_table = default_vco,
+	.num_vco = ARRAY_SIZE(default_vco),
+	.flags = SUPPORTS_DYNAMIC_UPDATE,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_pll0",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 1000000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_gpu_cc_pll0_out_aux2[] = {
+	{ 0x0, 1 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv gpu_cc_pll0_out_aux2 = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_gpu_cc_pll0_out_aux2,
+	.num_post_div = ARRAY_SIZE(post_div_table_gpu_cc_pll0_out_aux2),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_pll0_out_aux2",
+		.parent_names = (const char *[]){ "gpu_cc_pll0" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_ops,
+	},
+};
+
+/* 640MHz configuration */
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+	.l = 0x21,
+	.alpha = 0x55555555,
+	.alpha_hi = 0x55,
+	.alpha_en_mask = BIT(24),
+	.vco_val = 0x2 << 20,
+	.vco_mask = GENMASK(21, 20),
+	.main_output_mask = BIT(0),
+	.aux_output_mask = BIT(1),
+	.config_ctl_val = 0x40008529,
+};
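The l/alpha pairs in the two configurations encode the target rate as
rate = (l + alpha / 2^40) * parent, where the parent bi_tcxo is assumed to be
the usual 19.2 MHz and the 40-bit alpha width matches
CLK_ALPHA_PLL_TYPE_DEFAULT. A quick check of the rate comments (sketch only;
the helper ignores overflow, which is fine for these fractions):

	#include <linux/types.h>

	static unsigned long alpha_pll_rate(u32 l, u64 alpha40)
	{
		return l * 19200000UL +
		       (unsigned long)((19200000ULL * alpha40) >> 40);
	}

	/*
	 * gpu_cc_pll0: alpha_pll_rate(0x1B, 0xB555000000ULL)  ~= 532 MHz
	 * gpu_cc_pll1: alpha_pll_rate(0x21, 0x5555555555ULL)  ~= 640 MHz
	 */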
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+	.offset = 0x100,
+	.vco_table = default_vco,
+	.num_vco = ARRAY_SIZE(default_vco),
+	.flags = SUPPORTS_DYNAMIC_UPDATE,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_pll1",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 1000000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_gpu_cc_pll1_out_aux[] = {
+	{ 0x0, 1 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv gpu_cc_pll1_out_aux = {
+	.offset = 0x100,
+	.post_div_shift = 15,
+	.post_div_table = post_div_table_gpu_cc_pll1_out_aux,
+	.num_post_div = ARRAY_SIZE(post_div_table_gpu_cc_pll1_out_aux),
+	.width = 3,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_pll1_out_aux",
+		.parent_names = (const char *[]){ "gpu_cc_pll1" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+	F(200000000, P_GPLL0_OUT_MAIN, 1.5, 0, 0),
+	{ }
+};
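The fractional pre-divider in the table above is legal because the qcom F()
macro (as defined in the shared RCG support code) stores the divider
doubled-minus-one:

	#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }

so F(200000000, P_GPLL0_OUT_MAIN, 1.5, 0, 0) encodes pre_div = 2 * 1.5 - 1 = 2
and the RCG divides the selected GPLL0 input by 1.5 to land on 200 MHz.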
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+	.cmd_rcgr = 0x1120,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gpu_cc_parent_map_0,
+	.freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_gmu_clk_src",
+		.parent_names = gpu_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 200000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = {
+	F(320000000, P_GPU_CC_PLL1_OUT_AUX, 2, 0, 0),
+	F(465000000, P_GPU_CC_PLL1_OUT_AUX, 2, 0, 0),
+	F(600000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+	F(745000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+	F(820000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+	F(900000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+	F(950000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+	F(980000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = {
+	.cmd_rcgr = 0x101c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gpu_cc_parent_map_1,
+	.freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_gx_gfx3d_clk_src",
+		.parent_names = gpu_cc_parent_names_1,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 320000000,
+			[VDD_LOW] = 465000000,
+			[VDD_LOW_L1] = 600000000,
+			[VDD_NOMINAL] = 745000000,
+			[VDD_NOMINAL_L1] = 820000000,
+			[VDD_HIGH] = 900000000,
+			[VDD_HIGH_L1] = 980000000},
+	},
+};
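The vdd_class/rate_max pairing above is what ties DVFS to voltage on these
vendor kernels: each ceiling marks the highest rate allowed at a given CX
corner, so a rate request implicitly votes the regulator. An illustrative
consumer (in practice the GPU driver obtains the clock by DT lookup):

	#include <linux/clk.h>

	static int example_set_gfx_rate(struct clk *gfx3d)
	{
		/* 745 MHz falls in the VDD_NOMINAL bucket above, so the
		 * framework raises vdd_cx to at least the NOM corner before
		 * reprogramming the RCG */
		return clk_set_rate(gfx3d, 745000000);
	}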
+
+static struct clk_branch gpu_cc_ahb_clk = {
+	.halt_reg = 0x1078,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x1078,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+	.halt_reg = 0x107c,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x107c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_crc_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_clk = {
+	.halt_reg = 0x10a4,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x10a4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_gfx3d_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_gx_gfx3d_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+	.halt_reg = 0x1098,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1098,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_gmu_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_gmu_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+	.halt_reg = 0x108c,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x108c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_snoc_dvm_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+	.halt_reg = 0x1004,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x1004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cxo_aon_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+	.halt_reg = 0x109c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x109c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cxo_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_gx_cxo_clk = {
+	.halt_reg = 0x1060,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1060,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_gx_cxo_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_gx_gfx3d_clk = {
+	.halt_reg = 0x1054,
+	.halt_check = BRANCH_HALT_SKIP,
+	.clkr = {
+		.enable_reg = 0x1054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_gx_gfx3d_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_gx_gfx3d_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+	.halt_reg = 0x1090,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x1090,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_sleep_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+	.halt_reg = 0x5000,
+	.halt_check = BRANCH_VOTED,
+	.clkr = {
+		.enable_reg = 0x5000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap *gpu_cc_bengal_clocks[] = {
+	[GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+	[GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+	[GPU_CC_CX_GFX3D_CLK] = &gpu_cc_cx_gfx3d_clk.clkr,
+	[GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+	[GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+	[GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+	[GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+	[GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+	[GPU_CC_GX_CXO_CLK] = &gpu_cc_gx_cxo_clk.clkr,
+	[GPU_CC_GX_GFX3D_CLK] = &gpu_cc_gx_gfx3d_clk.clkr,
+	[GPU_CC_GX_GFX3D_CLK_SRC] = &gpu_cc_gx_gfx3d_clk_src.clkr,
+	[GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+	[GPU_CC_PLL0_OUT_AUX2] = &gpu_cc_pll0_out_aux2.clkr,
+	[GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+	[GPU_CC_PLL1_OUT_AUX] = &gpu_cc_pll1_out_aux.clkr,
+	[GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+	[GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+};
+
+static const struct regmap_config gpu_cc_bengal_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x7008,
+	.fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_bengal_desc = {
+	.config = &gpu_cc_bengal_regmap_config,
+	.clks = gpu_cc_bengal_clocks,
+	.num_clks = ARRAY_SIZE(gpu_cc_bengal_clocks),
+};
+
+static const struct of_device_id gpucc_bengal_match_table[] = {
+	{ .compatible = "qcom,bengal-gpucc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, gpucc_bengal_match_table);
+
+static int gpucc_bengal_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	unsigned int value, mask;
+	int ret;
+
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx regulator\n");
+		return PTR_ERR(vdd_cx.regulator[0]);
+	}
+
+	regmap = qcom_cc_map(pdev, &gpu_cc_bengal_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	clk_alpha_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+	clk_alpha_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+	/* Recommended WAKEUP/SLEEP settings for the gpu_cc_cx_gmu_clk */
+	mask = CX_GMU_CBCR_WAKE_MASK << CX_GMU_CBCR_WAKE_SHIFT;
+	mask |= CX_GMU_CBCR_SLEEP_MASK << CX_GMU_CBCR_SLEEP_SHIFT;
+	value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT;
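+	/*
+	 * With both 4-bit fields programmed to 0xf, mask == value == 0x0ff0:
+	 * WAKE occupies bits 11:8 and SLEEP bits 7:4 of the GMU CBCR.
+	 */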
+	regmap_update_bits(regmap, gpu_cc_cx_gmu_clk.clkr.enable_reg,
+								mask, value);
+
+	ret = qcom_cc_really_probe(pdev, &gpu_cc_bengal_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register GPUCC clocks\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered GPUCC clocks\n");
+	return ret;
+}
+
+static struct platform_driver gpucc_bengal_driver = {
+	.probe = gpucc_bengal_probe,
+	.driver = {
+		.name = "bengal-gpucc",
+		.of_match_table = gpucc_bengal_match_table,
+	},
+};
+
+static int __init gpu_cc_bengal_init(void)
+{
+	return platform_driver_register(&gpucc_bengal_driver);
+}
+subsys_initcall(gpu_cc_bengal_init);
+
+static void __exit gpu_cc_bengal_exit(void)
+{
+	platform_driver_unregister(&gpucc_bengal_driver);
+}
+module_exit(gpu_cc_bengal_exit);
+
+MODULE_DESCRIPTION("QTI GPU_CC BENGAL Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/vdd-level-bengal.h b/drivers/clk/qcom/vdd-level-bengal.h
new file mode 100644
index 0000000..619977f
--- /dev/null
+++ b/drivers/clk/qcom/vdd-level-bengal.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_CLK_QCOM_VDD_LEVEL_BENGAL_H
+#define __DRIVERS_CLK_QCOM_VDD_LEVEL_BENGAL_H
+
+#include <linux/regulator/consumer.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
+
+enum vdd_levels {
+	VDD_NONE,
+	VDD_MIN,		/* MIN SVS */
+	VDD_LOWER,		/* SVS2 */
+	VDD_LOW,		/* SVS */
+	VDD_LOW_L1,		/* SVSL1 */
+	VDD_NOMINAL,		/* NOM */
+	VDD_NOMINAL_L1,		/* NOM L1 */
+	VDD_HIGH,		/* TURBO */
+	VDD_HIGH_L1,		/* TURBO L1 */
+	VDD_NUM,
+};
+
+static int vdd_corner[] = {
+	[VDD_NONE]    = 0,
+	[VDD_MIN]     = RPMH_REGULATOR_LEVEL_MIN_SVS,
+	[VDD_LOWER]   = RPMH_REGULATOR_LEVEL_LOW_SVS,
+	[VDD_LOW]     = RPMH_REGULATOR_LEVEL_SVS,
+	[VDD_LOW_L1]  = RPMH_REGULATOR_LEVEL_SVS_L1,
+	[VDD_NOMINAL] = RPMH_REGULATOR_LEVEL_NOM,
+	[VDD_NOMINAL_L1] = RPMH_REGULATOR_LEVEL_NOM_L1,
+	[VDD_HIGH]    = RPMH_REGULATOR_LEVEL_TURBO,
+	[VDD_HIGH_L1] = RPMH_REGULATOR_LEVEL_TURBO_L1,
+};
+
+#endif
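This table is what the vendor vdd_class support votes through: a clock's
rate_max index selects an enum vdd_levels entry, which the array translates
into an RPMh corner handed to the regulator framework. A rough sketch of the
final step (names outside this header are illustrative):

	#include <linux/kernel.h>
	#include <linux/regulator/consumer.h>

	static int example_vote_nominal(struct regulator *cx)
	{
		/* RPMh regulators take corner levels in place of microvolts */
		return regulator_set_voltage(cx, vdd_corner[VDD_NOMINAL],
					     INT_MAX);
	}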
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index f4b013e..24485be 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -535,17 +535,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
 	unsigned int reg = id / 32;
 	unsigned int bit = id % 32;
 	u32 bitmask = BIT(bit);
-	unsigned long flags;
-	u32 value;
 
 	dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
 
 	/* Reset module */
-	spin_lock_irqsave(&priv->rmw_lock, flags);
-	value = readl(priv->base + SRCR(reg));
-	value |= bitmask;
-	writel(value, priv->base + SRCR(reg));
-	spin_unlock_irqrestore(&priv->rmw_lock, flags);
+	writel(bitmask, priv->base + SRCR(reg));
 
 	/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
 	udelay(35);
@@ -562,16 +556,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
 	unsigned int reg = id / 32;
 	unsigned int bit = id % 32;
 	u32 bitmask = BIT(bit);
-	unsigned long flags;
-	u32 value;
 
 	dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
 
-	spin_lock_irqsave(&priv->rmw_lock, flags);
-	value = readl(priv->base + SRCR(reg));
-	value |= bitmask;
-	writel(value, priv->base + SRCR(reg));
-	spin_unlock_irqrestore(&priv->rmw_lock, flags);
+	writel(bitmask, priv->base + SRCR(reg));
 	return 0;
 }
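Dropping the read-modify-write under rmw_lock presumably relies on the SRCR
write semantics: writing 1 sets a reset bit while writing 0 leaves the other
bits untouched, and clearing goes through the separate SRSTCLR register, so a
plain single-bit write is already atomic with respect to concurrent callers.
The consumer-facing behaviour is unchanged; a pulsed reset still flows through
the functions above (device and index illustrative):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/reset.h>

	static int example_pulse(struct device *dev)
	{
		struct reset_control *rstc =
			devm_reset_control_get_exclusive(dev, NULL);

		if (IS_ERR(rstc))
			return PTR_ERR(rstc);
		return reset_control_reset(rstc); /* cpg_mssr_reset() above */
	}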
 
diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
index 8789247..bad8099 100644
--- a/drivers/clk/sprd/Kconfig
+++ b/drivers/clk/sprd/Kconfig
@@ -2,6 +2,7 @@
 	tristate "Clock support for Spreadtrum SoCs"
 	depends on ARCH_SPRD || COMPILE_TEST
 	default ARCH_SPRD
+	select REGMAP_MMIO
 
 if SPRD_COMMON_CLK
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 995685f..ecaf191 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1900,8 +1900,10 @@ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
 	target_freq = clamp_val(target_freq, policy->min, policy->max);
 
 	ret = cpufreq_driver->fast_switch(policy, target_freq);
-	if (ret)
+	if (ret) {
 		cpufreq_times_record_transition(policy, ret);
+		cpufreq_stats_record_transition(policy, ret);
+	}
 
 	return ret;
 }
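->fast_switch() returns the frequency it actually programmed, or 0 on failure,
so the nonzero check doubles as both success test and payload for the two
recorders; the added cpufreq_stats_record_transition() call is what makes
fast-switch transitions visible to the stats code changed below. Callers
already depend on that return convention, roughly as the schedutil governor
does:

	unsigned int new_freq;

	new_freq = cpufreq_driver_fast_switch(policy, target_freq);
	if (!new_freq)
		return;			/* hardware state unchanged */
	policy->cur = new_freq;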
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 1572129..21b919b 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -30,11 +30,12 @@ struct cpufreq_stats {
 static void cpufreq_stats_update(struct cpufreq_stats *stats)
 {
 	unsigned long long cur_time = get_jiffies_64();
+	unsigned long flags;
 
-	spin_lock(&cpufreq_stats_lock);
+	spin_lock_irqsave(&cpufreq_stats_lock, flags);
 	stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
 	stats->last_time = cur_time;
-	spin_unlock(&cpufreq_stats_lock);
+	spin_unlock_irqrestore(&cpufreq_stats_lock, flags);
 }
 
 static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
@@ -58,9 +59,6 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
 	ssize_t len = 0;
 	int i;
 
-	if (policy->fast_switch_enabled)
-		return 0;
-
 	cpufreq_stats_update(stats);
 	for (i = 0; i < stats->state_num; i++) {
 		len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
@@ -84,9 +82,6 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
 	ssize_t len = 0;
 	int i, j;
 
-	if (policy->fast_switch_enabled)
-		return 0;
-
 	len += snprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
 	len += snprintf(buf + len, PAGE_SIZE - len, "         : ");
 	for (i = 0; i < stats->state_num; i++) {
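Two coordinated changes here: cpufreq_stats_update() is now reachable from the
fast-switch path, which runs in scheduler context with interrupts disabled, so
the plain spin_lock() becomes spin_lock_irqsave(); and the fast_switch_enabled
guards are dropped from the sysfs handlers because transitions are recorded
again (see the cpufreq.c hunk above), letting time_in_state and trans_table
report real data instead of coming back empty. A minimal sketch of the hazard
the irqsave variant avoids:

	/* sysfs read, process context */
	spin_lock(&cpufreq_stats_lock);
	/* ... a tick IRQ fires here, schedutil fast-switches and tries to
	 * take cpufreq_stats_lock on the same CPU: deadlock. irqsave closes
	 * the window by keeping IRQs off while the lock is held. */
	spin_unlock(&cpufreq_stats_lock);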
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index c7710c1..a0620c9 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -145,11 +145,19 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	int err = -ENODEV;
 
 	cpu = of_get_cpu_node(policy->cpu, NULL);
-
-	of_node_put(cpu);
 	if (!cpu)
 		goto out;
 
+	max_freqp = of_get_property(cpu, "clock-frequency", NULL);
+	of_node_put(cpu);
+	if (!max_freqp) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* we need the freq in kHz */
+	max_freq = *max_freqp / 1000;
+
 	dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
 	if (!dn)
 		dn = of_find_compatible_node(NULL, NULL,
@@ -185,16 +193,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	}
 
 	pr_debug("init cpufreq on CPU %d\n", policy->cpu);
-
-	max_freqp = of_get_property(cpu, "clock-frequency", NULL);
-	if (!max_freqp) {
-		err = -EINVAL;
-		goto out_unmap_sdcpwr;
-	}
-
-	/* we need the freq in kHz */
-	max_freq = *max_freqp / 1000;
-
 	pr_debug("max clock-frequency is at %u kHz\n", max_freq);
 	pr_debug("initializing frequency table\n");
 
@@ -212,9 +210,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 	return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
 
-out_unmap_sdcpwr:
-	iounmap(sdcpwr_mapbase);
-
 out_unmap_sdcasr:
 	iounmap(sdcasr_mapbase);
 out:
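The underlying bug was a use-after-put: the "clock-frequency" property was
read after of_node_put(cpu) had already dropped the reference. Moving the read
next to the node lookup fixes that and makes the out_unmap_sdcpwr label dead,
since nothing fails after the sdcpwr mapping anymore. The general pattern, in
a slightly stricter form that copies the value out before releasing the node:

	#include <linux/of.h>

	static int example_read_cpu_freq(int cpu_id, u32 *khz)
	{
		struct device_node *cpu = of_get_cpu_node(cpu_id, NULL);
		const __be32 *freqp;

		if (!cpu)
			return -ENODEV;
		freqp = of_get_property(cpu, "clock-frequency", NULL);
		if (freqp)
			*khz = be32_to_cpup(freqp) / 1000;
		of_node_put(cpu);	/* value copied out, ref can go */
		return freqp ? 0 : -EINVAL;
	}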
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index ca1f0d7..e5dcb29 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -61,6 +61,19 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
 static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
 				   unsigned int authsize)
 {
+	switch (authsize) {
+	case 16:
+	case 15:
+	case 14:
+	case 13:
+	case 12:
+	case 8:
+	case 4:
+		break;
+	default:
+		return -EINVAL;
+	}
+
 	return 0;
 }
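The accepted sizes are exactly the tag lengths GCM defines (128 down to 96
bits, plus the truncated 64- and 32-bit variants from NIST SP 800-38D), so the
engine is never asked to produce or verify an unsupported tag. Callers see it
through the usual AEAD API:

	#include <crypto/aead.h>

	/* anything outside {4, 8, 12..16} bytes now fails with -EINVAL */
	static int example_set_tag_len(struct crypto_aead *tfm)
	{
		return crypto_aead_setauthsize(tfm, 12);
	}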
 
@@ -107,6 +120,7 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
 	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
 	INIT_LIST_HEAD(&rctx->cmd.entry);
 	rctx->cmd.engine = CCP_ENGINE_AES;
+	rctx->cmd.u.aes.authsize = crypto_aead_authsize(tfm);
 	rctx->cmd.u.aes.type = ctx->u.aes.type;
 	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
 	rctx->cmd.u.aes.action = encrypt;
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index e212bad..1e2e421 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -625,6 +625,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 
 	unsigned long long *final;
 	unsigned int dm_offset;
+	unsigned int authsize;
 	unsigned int jobid;
 	unsigned int ilen;
 	bool in_place = true; /* Default value */
@@ -646,6 +647,21 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 	if (!aes->key) /* Gotta have a key SGL */
 		return -EINVAL;
 
+	/* Zero defaults to 16 bytes, the maximum size */
+	authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
+	switch (authsize) {
+	case 16:
+	case 15:
+	case 14:
+	case 13:
+	case 12:
+	case 8:
+	case 4:
+		break;
+	default:
+		return -EINVAL;
+	}
+
 	/* First, decompose the source buffer into AAD & PT,
 	 * and the destination buffer into AAD, CT & tag, or
 	 * the input into CT & tag.
@@ -660,7 +676,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 		p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
 	} else {
 		/* Input length for decryption includes tag */
-		ilen = aes->src_len - AES_BLOCK_SIZE;
+		ilen = aes->src_len - authsize;
 		p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
 	}
 
@@ -769,8 +785,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 		while (src.sg_wa.bytes_left) {
 			ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
 			if (!src.sg_wa.bytes_left) {
-				unsigned int nbytes = aes->src_len
-						      % AES_BLOCK_SIZE;
+				unsigned int nbytes = ilen % AES_BLOCK_SIZE;
 
 				if (nbytes) {
 					op.eom = 1;
@@ -842,19 +857,19 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 
 	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
 		/* Put the ciphered tag after the ciphertext. */
-		ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
+		ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
 	} else {
 		/* Does this ciphered tag match the input? */
-		ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
+		ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
 					   DMA_BIDIRECTIONAL);
 		if (ret)
 			goto e_tag;
-		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
+		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
 		if (ret)
 			goto e_tag;
 
 		ret = crypto_memneq(tag.address, final_wa.address,
-				    AES_BLOCK_SIZE) ? -EBADMSG : 0;
+				    authsize) ? -EBADMSG : 0;
 		ccp_dm_free(&tag);
 	}
 
@@ -862,11 +877,11 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 	ccp_dm_free(&final_wa);
 
 e_dst:
-	if (aes->src_len && !in_place)
+	if (ilen > 0 && !in_place)
 		ccp_free_data(&dst, cmd_q);
 
 e_src:
-	if (aes->src_len)
+	if (ilen > 0)
 		ccp_free_data(&src, cmd_q);
 
 e_aad:
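
The authsize whitelist added above enforces the GCM tag lengths permitted by NIST SP 800-38D: 12 through 16 bytes plus the truncated 8- and 4-byte variants. A minimal standalone sketch of the same check (the helper name is illustrative, not part of the ccp driver):

	/* Return 0 when authsize is a tag length GCM allows, else -EINVAL */
	static int gcm_check_authsize(unsigned int authsize)
	{
		switch (authsize) {
		case 16:
		case 15:
		case 14:
		case 13:
		case 12:
		case 8:
		case 4:
			return 0;
		default:
			return -EINVAL;
		}
	}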
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
index afcf2ce..86e47fe 100644
--- a/drivers/esoc/esoc-mdm-4x.c
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -262,7 +262,7 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc)
 		esoc_mdm_log(
 		"ESOC_FORCE_PWR_OFF: Queueing request: ESOC_REQ_SHUTDOWN\n");
 		esoc_clink_queue_request(ESOC_REQ_SHUTDOWN, esoc);
-		mdm_toggle_soft_reset(mdm, false);
+		mdm_power_down(mdm);
 		mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
 		break;
 	case ESOC_RESET:
@@ -484,7 +484,7 @@ static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc)
 		mdm->ready = false;
 		esoc_mdm_log(
 		"ESOC_PRIMARY_REBOOT: Powering down the modem\n");
-		mdm_toggle_soft_reset(mdm, false);
+		mdm_power_down(mdm);
 		break;
 	};
 }
@@ -556,6 +556,7 @@ static irqreturn_t mdm_status_change(int irq, void *dev_id)
 		cancel_delayed_work(&mdm->mdm2ap_status_check_work);
 		dev_dbg(dev, "status = 1: mdm is now ready\n");
 		mdm->ready = true;
+		esoc_clink_evt_notify(ESOC_BOOT_STATE, esoc);
 		mdm_trigger_dbg(mdm);
 		queue_work(mdm->mdm_queue, &mdm->mdm_status_work);
 		if (mdm->get_restart_reason)
@@ -1080,26 +1081,27 @@ static int sdx55m_setup_hw(struct mdm_ctrl *mdm,
 		dev_err(mdm->dev, "Failed to parse DT gpios\n");
 		goto err_destroy_wrkq;
 	}
+	if (!of_property_read_bool(node, "qcom,esoc-spmi-soft-reset")) {
+		ret = mdm_pon_dt_init(mdm);
+		if (ret) {
+			esoc_mdm_log("Failed to parse PON DT gpios\n");
+			dev_err(mdm->dev, "Failed to parse PON DT gpio\n");
+			goto err_destroy_wrkq;
+		}
 
-	ret = mdm_pon_dt_init(mdm);
-	if (ret) {
-		esoc_mdm_log("Failed to parse PON DT gpios\n");
-		dev_err(mdm->dev, "Failed to parse PON DT gpio\n");
-		goto err_destroy_wrkq;
+		ret = mdm_pon_setup(mdm);
+		if (ret) {
+			esoc_mdm_log("Failed to setup PON\n");
+			dev_err(mdm->dev, "Failed to setup PON\n");
+			goto err_destroy_wrkq;
+		}
 	}
 
 	ret = mdm_pinctrl_init(mdm);
 	if (ret) {
 		esoc_mdm_log("Failed to init pinctrl\n");
 		dev_err(mdm->dev, "Failed to init pinctrl\n");
-		goto err_destroy_wrkq;
-	}
-
-	ret = mdm_pon_setup(mdm);
-	if (ret) {
-		esoc_mdm_log("Failed to setup PON\n");
-		dev_err(mdm->dev, "Failed to setup PON\n");
-		goto err_destroy_wrkq;
+		goto err_release_ipc;
 	}
 
 	ret = mdm_configure_ipc(mdm, pdev);
diff --git a/drivers/esoc/esoc-mdm-drv.c b/drivers/esoc/esoc-mdm-drv.c
index b5bc12c..c90b81e 100644
--- a/drivers/esoc/esoc-mdm-drv.c
+++ b/drivers/esoc/esoc-mdm-drv.c
@@ -28,20 +28,21 @@ enum esoc_pon_state {
 
 enum {
 	 PWR_OFF = 0x1,
-	 PWR_ON,
-	 BOOT,
-	 RUN,
-	 CRASH,
-	 IN_DEBUG,
 	 SHUTDOWN,
 	 RESET,
 	 PEER_CRASH,
+	 IN_DEBUG,
+	 CRASH,
+	 PWR_ON,
+	 BOOT,
+	 RUN,
 };
 
 struct mdm_drv {
 	unsigned int mode;
 	struct esoc_eng cmd_eng;
 	struct completion pon_done;
+	struct completion ssr_ready;
 	struct completion req_eng_wait;
 	struct esoc_clink *esoc_clink;
 	enum esoc_pon_state pon_state;
@@ -142,6 +143,14 @@ static void mdm_handle_clink_evt(enum esoc_evt evt,
 		"ESOC_INVALID_STATE: Calling complete with state: PON_FAIL\n");
 		mdm_drv->pon_state = PON_FAIL;
 		complete(&mdm_drv->pon_done);
+		complete(&mdm_drv->ssr_ready);
+		break;
+	case ESOC_BOOT_STATE:
+		if (mdm_drv->mode == PWR_OFF) {
+			esoc_mdm_log(
+			"ESOC_BOOT_STATE: Observed status high from modem.\n");
+			mdm_drv->mode = BOOT;
+		}
 		break;
 	case ESOC_RUN_STATE:
 		esoc_mdm_log(
@@ -149,12 +158,14 @@ static void mdm_handle_clink_evt(enum esoc_evt evt,
 		mdm_drv->pon_state = PON_SUCCESS;
 		mdm_drv->mode = RUN;
 		complete(&mdm_drv->pon_done);
+		complete(&mdm_drv->ssr_ready);
 		break;
 	case ESOC_RETRY_PON_EVT:
 		esoc_mdm_log(
 		"ESOC_RETRY_PON_EVT: Calling complete with state: PON_RETRY\n");
 		mdm_drv->pon_state = PON_RETRY;
 		complete(&mdm_drv->pon_done);
+		complete(&mdm_drv->ssr_ready);
 		break;
 	case ESOC_UNEXPECTED_RESET:
 		esoc_mdm_log("evt_state: ESOC_UNEXPECTED_RESET\n");
@@ -164,19 +175,16 @@ static void mdm_handle_clink_evt(enum esoc_evt evt,
 			esoc_mdm_log("evt_state: ESOC_ERR_FATAL\n");
 
 		/*
-		 * Modem can crash while we are waiting for pon_done during
-		 * a subsystem_get(). Setting mode to CRASH will prevent a
-		 * subsequent subsystem_get() from entering poweron ops. Avoid
-		 * this by seting mode to CRASH only if device was up and
-		 * running.
+		 * Ignore all modem errfatals unless the modem has reached
+		 * the boot or run state.
 		 */
-		if (mdm_drv->mode == CRASH)
+		if (mdm_drv->mode <= CRASH) {
 			esoc_mdm_log(
-			"Modem in crash state already. Ignoring.\n");
-		if (mdm_drv->mode != RUN)
-			esoc_mdm_log("Modem not up. Ignoring.\n");
-		if (mdm_drv->mode == CRASH || mdm_drv->mode != RUN)
+			"Modem in crash state or not booted. Ignoring.\n");
 			return;
+		}
+		esoc_mdm_log("Setting crash flag\n");
+		mdm_drv->mode = CRASH;
 		queue_work(mdm_drv->mdm_queue, &mdm_drv->ssr_work);
 		break;
 	case ESOC_REQ_ENG_ON:
@@ -194,12 +202,16 @@ static void mdm_ssr_fn(struct work_struct *work)
 	struct mdm_drv *mdm_drv = container_of(work, struct mdm_drv, ssr_work);
 	struct mdm_ctrl *mdm = get_esoc_clink_data(mdm_drv->esoc_clink);
 
+	/* Wait for PON to complete. Start SSR only if PON succeeded */
+	wait_for_completion(&mdm_drv->ssr_ready);
+	if (mdm_drv->pon_state != PON_SUCCESS) {
+		esoc_mdm_log("Got errfatal but ignoring as boot failed\n");
+		return;
+	}
+
 	esoc_client_link_mdm_crash(mdm_drv->esoc_clink);
-
 	mdm_wait_for_status_low(mdm, false);
-
-	esoc_mdm_log("Starting SSR work and setting crash state\n");
-	mdm_drv->mode = CRASH;
+	esoc_mdm_log("Starting SSR work\n");
 
 	/*
 	 * If restarting esoc fails, the SSR framework triggers a kernel panic
@@ -282,12 +294,14 @@ static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys,
 	 container_of(crashed_subsys, struct esoc_clink, subsys);
 	struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
 	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+	struct mdm_ctrl *mdm = get_esoc_clink_data(mdm_drv->esoc_clink);
 
 	esoc_mdm_log("Shutdown request from SSR\n");
 
 	mutex_lock(&mdm_drv->poff_lock);
 	if (mdm_drv->mode == CRASH || mdm_drv->mode == PEER_CRASH) {
 		esoc_mdm_log("Shutdown in crash mode\n");
+		mdm_wait_for_status_low(mdm, false);
 		if (mdm_dbg_stall_cmd(ESOC_PREPARE_DEBUG)) {
 			/* We want to mask debug command.
 			 * In this case return success
@@ -360,7 +374,9 @@ static void mdm_subsys_retry_powerup_cleanup(struct esoc_clink *esoc_clink,
 	esoc_client_link_power_off(esoc_clink, poff_flags);
 	mdm_disable_irqs(mdm);
 	mdm_drv->pon_state = PON_INIT;
+	mdm_drv->mode = PWR_OFF;
 	reinit_completion(&mdm_drv->pon_done);
+	reinit_completion(&mdm_drv->ssr_ready);
 	reinit_completion(&mdm_drv->req_eng_wait);
 }
 
@@ -408,6 +424,7 @@ static int mdm_handle_boot_fail(struct esoc_clink *esoc_clink, u8 *pon_trial)
 		break;
 	case BOOT_FAIL_ACTION_NOP:
 		esoc_mdm_log("Leaving the modem in its curent state\n");
+		mdm_drv->mode = PWR_OFF;
 		return -EIO;
 	case BOOT_FAIL_ACTION_SHUTDOWN:
 	default:
@@ -571,6 +588,7 @@ int esoc_ssr_probe(struct esoc_clink *esoc_clink, struct esoc_drv *drv)
 	}
 	esoc_set_drv_data(esoc_clink, mdm_drv);
 	init_completion(&mdm_drv->pon_done);
+	init_completion(&mdm_drv->ssr_ready);
 	init_completion(&mdm_drv->req_eng_wait);
 	INIT_WORK(&mdm_drv->ssr_work, mdm_ssr_fn);
 	mdm_drv->esoc_clink = esoc_clink;
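
The new ssr_ready completion serializes the SSR worker against power-on: every terminal PON event (success, failure, retry) completes it, and the worker bails out unless boot actually succeeded. A minimal sketch of the pattern, with illustrative names rather than the driver's exact structures:

	#include <linux/completion.h>

	enum pon_state { PON_INIT, PON_SUCCESS, PON_FAIL, PON_RETRY };

	struct boot_ctx {
		struct completion ssr_ready;	/* completed on any terminal PON state */
		enum pon_state pon_state;
	};

	static void ssr_worker(struct boot_ctx *ctx)
	{
		/* Block until power-on reaches a terminal state */
		wait_for_completion(&ctx->ssr_ready);
		if (ctx->pon_state != PON_SUCCESS)
			return;	/* errfatal during a failed boot: ignore it */
		/* ... proceed with the subsystem restart ... */
	}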
diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c
index abde8c7..1dfff3a 100644
--- a/drivers/esoc/esoc-mdm-pon.c
+++ b/drivers/esoc/esoc-mdm-pon.c
@@ -206,6 +206,12 @@ static int sdx50m_power_down(struct mdm_ctrl *mdm)
 	return 0;
 }
 
+static int sdx55m_power_down(struct mdm_ctrl *mdm)
+{
+	esoc_mdm_log("Performing warm reset as cold reset is not supported\n");
+	return sdx55m_toggle_soft_reset(mdm, false);
+}
+
 static void mdm9x55_cold_reset(struct mdm_ctrl *mdm)
 {
 	dev_dbg(mdm->dev, "Triggering mdm cold reset");
@@ -318,6 +324,7 @@ struct mdm_pon_ops sdx50m_pon_ops = {
 struct mdm_pon_ops sdx55m_pon_ops = {
 	.pon = mdm4x_do_first_power_on,
 	.soft_reset = sdx55m_toggle_soft_reset,
+	.poff_force = sdx55m_power_down,
 	.dt_init = mdm4x_pon_dt_init,
 	.setup = mdm4x_pon_setup,
 };
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 6da80f3..6da0ab4 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -198,7 +198,7 @@
 
 config ISCSI_IBFT_FIND
 	bool "iSCSI Boot Firmware Table Attributes"
-	depends on X86 && ACPI
+	depends on X86 && ISCSI_IBFT
 	default n
 	help
 	  This option enables the kernel to find the region of memory
@@ -209,7 +209,8 @@
 config ISCSI_IBFT
 	tristate "iSCSI Boot Firmware Table Attributes module"
 	select ISCSI_BOOT_SYSFS
-	depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL
+	select ISCSI_IBFT_FIND if X86
+	depends on ACPI && SCSI && SCSI_LOWLEVEL
 	default	n
 	help
 	  This option enables support for detection and exposing of iSCSI
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index c51462f..966aef3 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -93,6 +93,10 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(IBFT_ISCSI_VERSION);
 
+#ifndef CONFIG_ISCSI_IBFT_FIND
+struct acpi_table_ibft *ibft_addr;
+#endif
+
 struct ibft_hdr {
 	u8 id;
 	u8 version;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index f5fb937..65cecfd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
 	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
 
-	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+	data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index e3f5e5d..f4b89d1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -462,8 +462,10 @@ void dc_link_set_test_pattern(struct dc_link *link,
 
 static void destruct(struct dc *dc)
 {
-	dc_release_state(dc->current_state);
-	dc->current_state = NULL;
+	if (dc->current_state) {
+		dc_release_state(dc->current_state);
+		dc->current_state = NULL;
+	}
 
 	destroy_links(dc);
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index e0a96ab..f0d68aa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -222,7 +222,7 @@ bool resource_construct(
 		 * PORT_CONNECTIVITY == 1 (as instructed by HW team).
 		 */
 		update_num_audio(&straps, &num_audio, &pool->audio_support);
-		for (i = 0; i < pool->pipe_count && i < num_audio; i++) {
+		for (i = 0; i < caps->num_audio; i++) {
 			struct audio *aud = create_funcs->create_audio(ctx, i);
 
 			if (aud == NULL) {
@@ -1713,6 +1713,12 @@ static struct audio *find_first_free_audio(
 			return pool->audios[i];
 		}
 	}
+
+	/* use engine id to find free audio */
+	if (id < pool->audio_count &&
+	    !res_ctx->is_audio_acquired[id])
+		return pool->audios[id];
+
 	/*not found the matching one, first come first serve*/
 	for (i = 0; i < pool->audio_count; i++) {
 		if (res_ctx->is_audio_acquired[i] == false) {
@@ -1866,6 +1872,7 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
 		pix_clk /= 2;
 	if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
 		switch (timing->display_color_depth) {
+		case COLOR_DEPTH_666:
 		case COLOR_DEPTH_888:
 			normalized_pix_clk = pix_clk;
 			break;
@@ -1949,7 +1956,7 @@ enum dc_status resource_map_pool_resources(
 	/* TODO: Add check if ASIC support and EDID audio */
 	if (!stream->sink->converter_disable_audio &&
 	    dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
-	    stream->audio_info.mode_count) {
+	    stream->audio_info.mode_count && stream->audio_info.flags.all) {
 		pipe_ctx->stream_res.audio = find_first_free_audio(
 		&context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
 
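
The find_first_free_audio() change above inserts a middle preference tier: try the audio instance whose index matches the stream encoder's engine id before falling back to first-come-first-served. The selection order in isolation (illustrative types, not the dc resource structures):

	struct audio;

	static struct audio *pick_audio(struct audio **audios, const bool *acquired,
					unsigned int count, unsigned int id)
	{
		unsigned int i;

		/* Prefer the instance matching the encoder's engine id */
		if (id < count && !acquired[id])
			return audios[id];

		/* Otherwise, first come first served */
		for (i = 0; i < count; i++)
			if (!acquired[i])
				return audios[i];

		return NULL;
	}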
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 070ab56..da8b198 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -242,6 +242,10 @@ static void dmcu_set_backlight_level(
 	s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
 
 	REG_WRITE(BIOS_SCRATCH_2, s2);
+
+	/* waitDMCUReadyForCmd */
+	REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
+			0, 1, 80000);
 }
 
 static void dce_abm_init(struct abm *abm)
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index c0b9ca1..f4469fa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -159,7 +159,7 @@ struct resource_pool {
 	struct clock_source *clock_sources[MAX_CLOCK_SOURCES];
 	unsigned int clk_src_count;
 
-	struct audio *audios[MAX_PIPES];
+	struct audio *audios[MAX_AUDIOS];
 	unsigned int audio_count;
 	struct audio_support audio_support;
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index cf7433e..7190174 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -34,6 +34,7 @@
  * Data types shared between different Virtual HW blocks
  ******************************************************************************/
 
+#define MAX_AUDIOS 7
 #define MAX_PIPES 6
 
 struct gamma_curve {
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index bf6cad6..7a3e5a8 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -46,6 +46,7 @@
 config DRM_LVDS_ENCODER
 	tristate "Transparent parallel to LVDS encoder support"
 	depends on OF
+	select DRM_KMS_HELPER
 	select DRM_PANEL_BRIDGE
 	help
 	  Support for transparent parallel to LVDS encoders that don't require
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 77b1800..2fefca4 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -794,7 +794,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb)
 	struct drm_device *dev = fb->dev;
 	struct drm_atomic_state *state;
 	struct drm_plane *plane;
-	struct drm_connector *conn;
+	struct drm_connector *conn __maybe_unused;
 	struct drm_connector_state *conn_state;
 	int i, ret;
 	unsigned plane_mask;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 0ddb6ee..df22843 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -108,12 +108,12 @@ static inline int scaler_reset(struct scaler_context *scaler)
 	scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
 	do {
 		cpu_relax();
-	} while (retry > 1 &&
+	} while (--retry > 1 &&
 		 scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
 	do {
 		cpu_relax();
 		scaler_write(1, SCALER_INT_EN);
-	} while (retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+	} while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
 
 	return retry ? 0 : -EIO;
 }
diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c
index a132a80..77df790 100644
--- a/drivers/gpu/drm/i915/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c
@@ -413,8 +413,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
 	else
 		txesc2_div = 10;
 
-	I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK);
-	I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK);
+	I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
+	I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
 }
 
 /* Program BXT Mipi clocks and dividers */
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 89bd242..610139b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1298,7 +1298,8 @@ static int add_gpu_components(struct device *dev,
 	if (!np)
 		return 0;
 
-	drm_of_component_match_add(dev, matchptr, compare_of, np);
+	if (of_device_is_available(np))
+		drm_of_component_match_add(dev, matchptr, compare_of, np);
 
 	of_node_put(np);
 
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 5cb4cbb..e37611c 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -194,7 +194,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a530v2 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -220,7 +219,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a530v3 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -286,7 +284,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a505 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_128K + SZ_8K),
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 16,
 	},
@@ -306,7 +303,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a506 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_128K + SZ_8K),
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 16,
 	},
@@ -384,7 +380,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a510 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_256K,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 16,
 	},
@@ -510,7 +505,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a540v2 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -593,7 +587,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a512 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_256K + SZ_16K),
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -612,7 +605,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a508 = {
 		.gpudev = &adreno_a5xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_128K + SZ_8K),
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -740,6 +732,43 @@ static const struct adreno_reglist a630_vbif_regs[] = {
 	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
 };
 
+
+/* For a615, a616, a618, a630, a640 and a680 */
+static const struct a6xx_protected_regs a630_protected_regs[] = {
+	{ A6XX_CP_PROTECT_REG + 0, 0x00000, 0x004ff, 0 },
+	{ A6XX_CP_PROTECT_REG + 1, 0x00501, 0x00506, 0 },
+	{ A6XX_CP_PROTECT_REG + 2, 0x0050b, 0x007ff, 0 },
+	{ A6XX_CP_PROTECT_REG + 3, 0x0050e, 0x0050e, 1 },
+	{ A6XX_CP_PROTECT_REG + 4, 0x00510, 0x00510, 1 },
+	{ A6XX_CP_PROTECT_REG + 5, 0x00534, 0x00534, 1 },
+	{ A6XX_CP_PROTECT_REG + 6, 0x00800, 0x00882, 1 },
+	{ A6XX_CP_PROTECT_REG + 7, 0x008a0, 0x008a8, 1 },
+	{ A6XX_CP_PROTECT_REG + 8, 0x008ab, 0x008cf, 1 },
+	{ A6XX_CP_PROTECT_REG + 9, 0x008d0, 0x0098c, 0 },
+	{ A6XX_CP_PROTECT_REG + 10, 0x00900, 0x0094d, 1 },
+	{ A6XX_CP_PROTECT_REG + 11, 0x0098d, 0x00bff, 1 },
+	{ A6XX_CP_PROTECT_REG + 12, 0x00e00, 0x00e0f, 1 },
+	{ A6XX_CP_PROTECT_REG + 13, 0x03c00, 0x03cc3, 1 },
+	{ A6XX_CP_PROTECT_REG + 14, 0x03cc4, 0x05cc3, 0 },
+	{ A6XX_CP_PROTECT_REG + 15, 0x08630, 0x087ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 16, 0x08e00, 0x08e00, 1 },
+	{ A6XX_CP_PROTECT_REG + 17, 0x08e08, 0x08e08, 1 },
+	{ A6XX_CP_PROTECT_REG + 18, 0x08e50, 0x08e6f, 1 },
+	{ A6XX_CP_PROTECT_REG + 19, 0x09624, 0x097ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 20, 0x09e70, 0x09e71, 1 },
+	{ A6XX_CP_PROTECT_REG + 21, 0x09e78, 0x09fff, 1 },
+	{ A6XX_CP_PROTECT_REG + 22, 0x0a630, 0x0a7ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 23, 0x0ae02, 0x0ae02, 1 },
+	{ A6XX_CP_PROTECT_REG + 24, 0x0ae50, 0x0b17f, 1 },
+	{ A6XX_CP_PROTECT_REG + 25, 0x0b604, 0x0b604, 1 },
+	{ A6XX_CP_PROTECT_REG + 26, 0x0be02, 0x0be03, 1 },
+	{ A6XX_CP_PROTECT_REG + 27, 0x0be20, 0x0de1f, 1 },
+	{ A6XX_CP_PROTECT_REG + 28, 0x0f000, 0x0fbff, 1 },
+	{ A6XX_CP_PROTECT_REG + 29, 0x0fc00, 0x11bff, 0 },
+	{ A6XX_CP_PROTECT_REG + 31, 0x11c00, 0x00000, 1 },
+	{ 0 },
+};
+
 static const struct adreno_a6xx_core adreno_gpu_core_a630v2 = {
 	.base = {
 		DEFINE_ADRENO_REV(ADRENO_REV_A630, 6, 3, 0, ANY_ID),
@@ -749,7 +778,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a630v2 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -764,6 +792,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a630v2 = {
 	.hwcg_count = ARRAY_SIZE(a630_hwcg_regs),
 	.vbif = a630_vbif_regs,
 	.vbif_count = ARRAY_SIZE(a630_vbif_regs),
+	.hang_detect_cycles = 0x3fffff,
+	.protected_regs = a630_protected_regs,
 };
 
 /* For a615, a616 and a618 */
@@ -847,7 +877,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a615 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_512K,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -862,6 +891,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a615 = {
 	.hwcg_count = ARRAY_SIZE(a615_hwcg_regs),
 	.vbif = a615_gbif_regs,
 	.vbif_count = ARRAY_SIZE(a615_gbif_regs),
+	.hang_detect_cycles = 0x3fffff,
+	.protected_regs = a630_protected_regs,
 };
 
 static const struct adreno_a6xx_core adreno_gpu_core_a618 = {
@@ -873,7 +904,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a618 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_512K,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -888,6 +918,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a618 = {
 	.hwcg_count = ARRAY_SIZE(a615_hwcg_regs),
 	.vbif = a615_gbif_regs,
 	.vbif_count = ARRAY_SIZE(a615_gbif_regs),
+	.hang_detect_cycles = 0x3fffff,
+	.protected_regs = a630_protected_regs,
 };
 
 static const struct adreno_reglist a620_hwcg_regs[] = {
@@ -951,6 +983,44 @@ static const struct adreno_reglist a650_gbif_regs[] = {
 	{A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3},
 };
 
+/* These are for a620 and a650 */
+static const struct a6xx_protected_regs a620_protected_regs[] = {
+	{ A6XX_CP_PROTECT_REG + 0, 0x00000, 0x004ff, 0 },
+	{ A6XX_CP_PROTECT_REG + 1, 0x00501, 0x00506, 0 },
+	{ A6XX_CP_PROTECT_REG + 2, 0x0050b, 0x007ff, 0 },
+	{ A6XX_CP_PROTECT_REG + 3, 0x0050e, 0x0050e, 1 },
+	{ A6XX_CP_PROTECT_REG + 4, 0x00510, 0x00510, 1 },
+	{ A6XX_CP_PROTECT_REG + 5, 0x00534, 0x00534, 1 },
+	{ A6XX_CP_PROTECT_REG + 6, 0x00800, 0x00882, 1 },
+	{ A6XX_CP_PROTECT_REG + 7, 0x008a0, 0x008a8, 1 },
+	{ A6XX_CP_PROTECT_REG + 8, 0x008ab, 0x008cf, 1 },
+	{ A6XX_CP_PROTECT_REG + 9, 0x008d0, 0x0098c, 0 },
+	{ A6XX_CP_PROTECT_REG + 10, 0x00900, 0x0094d, 1 },
+	{ A6XX_CP_PROTECT_REG + 11, 0x0098d, 0x00bff, 1 },
+	{ A6XX_CP_PROTECT_REG + 12, 0x00e00, 0x00e0f, 1 },
+	{ A6XX_CP_PROTECT_REG + 13, 0x03c00, 0x03cc3, 1 },
+	{ A6XX_CP_PROTECT_REG + 14, 0x03cc4, 0x05cc3, 0 },
+	{ A6XX_CP_PROTECT_REG + 15, 0x08630, 0x087ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 16, 0x08e00, 0x08e00, 1 },
+	{ A6XX_CP_PROTECT_REG + 17, 0x08e08, 0x08e08, 1 },
+	{ A6XX_CP_PROTECT_REG + 18, 0x08e50, 0x08e6f, 1 },
+	{ A6XX_CP_PROTECT_REG + 19, 0x08e80, 0x090ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 20, 0x09624, 0x097ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 21, 0x09e60, 0x09e71, 1 },
+	{ A6XX_CP_PROTECT_REG + 22, 0x09e78, 0x09fff, 1 },
+	{ A6XX_CP_PROTECT_REG + 23, 0x0a630, 0x0a7ff, 1 },
+	{ A6XX_CP_PROTECT_REG + 24, 0x0ae02, 0x0ae02, 1 },
+	{ A6XX_CP_PROTECT_REG + 25, 0x0ae50, 0x0b17f, 1 },
+	{ A6XX_CP_PROTECT_REG + 26, 0x0b604, 0x0b604, 1 },
+	{ A6XX_CP_PROTECT_REG + 27, 0x0b608, 0x0b60f, 1 },
+	{ A6XX_CP_PROTECT_REG + 28, 0x0be02, 0x0be03, 1 },
+	{ A6XX_CP_PROTECT_REG + 29, 0x0be20, 0x0de1f, 1 },
+	{ A6XX_CP_PROTECT_REG + 30, 0x0f000, 0x0fbff, 1 },
+	{ A6XX_CP_PROTECT_REG + 31, 0x0fc00, 0x11bff, 0 },
+	{ A6XX_CP_PROTECT_REG + 47, 0x11c00, 0x00000, 1 },
+	{ 0 },
+};
+
 static const struct adreno_a6xx_core adreno_gpu_core_a620 = {
 	.base = {
 		DEFINE_ADRENO_REV(ADRENO_REV_A620, 6, 2, 0, 0),
@@ -960,7 +1030,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a620 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0,
 		.gmem_size = SZ_512K,
-		.num_protected_regs = 0x30,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -976,6 +1045,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a620 = {
 	.vbif = a650_gbif_regs,
 	.vbif_count = ARRAY_SIZE(a650_gbif_regs),
 	.veto_fal10 = true,
+	.hang_detect_cycles = 0x3ffff,
+	.protected_regs = a620_protected_regs,
 };
 
 static const struct adreno_reglist a640_hwcg_regs[] = {
@@ -1048,7 +1119,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a640 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_1M, //Verified 1MB
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -1063,6 +1133,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a640 = {
 	.hwcg_count = ARRAY_SIZE(a640_hwcg_regs),
 	.vbif = a640_vbif_regs,
 	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
+	.hang_detect_cycles = 0x3fffff,
+	.protected_regs = a630_protected_regs,
 };
 
 static const struct adreno_reglist a650_hwcg_regs[] = {
@@ -1119,14 +1191,13 @@ static const struct adreno_reglist a650_hwcg_regs[] = {
 
 static const struct adreno_a6xx_core adreno_gpu_core_a650 = {
 	{
-		DEFINE_ADRENO_REV(ADRENO_REV_A650, 6, 5, 0, ANY_ID),
+		DEFINE_ADRENO_REV(ADRENO_REV_A650, 6, 5, 0, 0),
 		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU |
 			ADRENO_IOCOHERENT | ADRENO_CONTENT_PROTECTION |
 			ADRENO_IFPC,
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0,
 		.gmem_size = SZ_1M + SZ_128K, /* verified 1152kB */
-		.num_protected_regs = 0x30,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -1141,6 +1212,35 @@ static const struct adreno_a6xx_core adreno_gpu_core_a650 = {
 	.vbif_count = ARRAY_SIZE(a650_gbif_regs),
 	.veto_fal10 = true,
 	.pdc_in_aop = true,
+	.hang_detect_cycles = 0x3fffff,
+	.protected_regs = a620_protected_regs,
+};
+
+static const struct adreno_a6xx_core adreno_gpu_core_a650v2 = {
+	{
+		DEFINE_ADRENO_REV(ADRENO_REV_A650, 6, 5, 0, ANY_ID),
+		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU |
+			ADRENO_IOCOHERENT | ADRENO_CONTENT_PROTECTION |
+			ADRENO_IFPC | ADRENO_PREEMPTION,
+		.gpudev = &adreno_a6xx_gpudev,
+		.gmem_base = 0,
+		.gmem_size = SZ_1M + SZ_128K, /* verified 1152kB */
+		.busy_mask = 0xfffffffe,
+		.bus_width = 32,
+	},
+	.prim_fifo_threshold = 0x00300000,
+	.pdc_address_offset = 0x000300A0,
+	.gmu_major = 2,
+	.gmu_minor = 0,
+	.sqefw_name = "a650_sqe.fw",
+	.gmufw_name = "a650_gmu.bin",
+	.zap_name = "a650_zap",
+	.vbif = a650_gbif_regs,
+	.vbif_count = ARRAY_SIZE(a650_gbif_regs),
+	.veto_fal10 = true,
+	.pdc_in_aop = true,
+	.hang_detect_cycles = 0x3ffff,
+	.protected_regs = a620_protected_regs,
 };
 
 static const struct adreno_a6xx_core adreno_gpu_core_a680 = {
@@ -1150,7 +1250,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a680 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_2M,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -1165,6 +1264,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a680 = {
 	.hwcg_count = ARRAY_SIZE(a640_hwcg_regs),
 	.vbif = a640_vbif_regs,
 	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
+	.hang_detect_cycles = 0x3fffff,
+	.protected_regs = a630_protected_regs,
 };
 
 static const struct adreno_reglist a612_hwcg_regs[] = {
@@ -1221,11 +1322,10 @@ static const struct adreno_a6xx_core adreno_gpu_core_a612 = {
 		DEFINE_ADRENO_REV(ADRENO_REV_A612, 6, 1, 2, ANY_ID),
 		.features = ADRENO_64BIT | ADRENO_CONTENT_PROTECTION |
 			ADRENO_IOCOHERENT | ADRENO_PREEMPTION | ADRENO_GPMU |
-			ADRENO_IFPC | ADRENO_PERFCTRL_RETAIN,
+			ADRENO_IFPC,
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = (SZ_128K + SZ_4K),
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -1238,6 +1338,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a612 = {
 	.hwcg_count = ARRAY_SIZE(a612_hwcg_regs),
 	.vbif = a640_vbif_regs,
 	.vbif_count = ARRAY_SIZE(a640_vbif_regs),
+	.hang_detect_cycles = 0x3fffff,
+	.protected_regs = a630_protected_regs,
 };
 
 static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
@@ -1249,7 +1351,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_base = 0x100000,
 		.gmem_size = SZ_512K,
-		.num_protected_regs = 0x20,
 		.busy_mask = 0xfffffffe,
 		.bus_width = 32,
 	},
@@ -1264,6 +1365,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
 	.hwcg_count = ARRAY_SIZE(a615_hwcg_regs),
 	.vbif = a615_gbif_regs,
 	.vbif_count = ARRAY_SIZE(a615_gbif_regs),
+	.hang_detect_cycles = 0x3fffff,
+	.protected_regs = a630_protected_regs,
 };
 
 static const struct adreno_gpu_core *adreno_gpulist[] = {
@@ -1291,6 +1394,7 @@ static const struct adreno_gpu_core *adreno_gpulist[] = {
 	&adreno_gpu_core_a620.base,
 	&adreno_gpu_core_a640.base,
 	&adreno_gpu_core_a650.base,
+	&adreno_gpu_core_a650v2.base,
 	&adreno_gpu_core_a680.base,
 	&adreno_gpu_core_a612.base,
 	&adreno_gpu_core_a616.base,
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 5c7d91e..a53171e 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -50,7 +50,6 @@ static struct adreno_device device_3d0 = {
 			.irq_name = "kgsl_3d0_irq",
 		},
 		.iomemname = "kgsl_3d0_reg_memory",
-		.shadermemname = "kgsl_3d0_shader_memory",
 		.ftbl = &adreno_functable,
 	},
 	.ft_policy = KGSL_FT_DEFAULT_POLICY,
@@ -1559,7 +1558,6 @@ static int adreno_remove(struct platform_device *pdev)
 	if (efuse_base != NULL)
 		iounmap(efuse_base);
 
-	adreno_perfcounter_close(adreno_dev);
 	kgsl_device_platform_remove(device);
 
 	gmu_core_remove(device);
@@ -1935,17 +1933,6 @@ static int _adreno_start(struct adreno_device *adreno_dev)
 			adreno_support_64bit(adreno_dev))
 		gpudev->enable_64bit(adreno_dev);
 
-	if (adreno_dev->perfctr_pwr_lo == 0) {
-		ret = adreno_perfcounter_get(adreno_dev,
-			KGSL_PERFCOUNTER_GROUP_PWR, 1,
-			&adreno_dev->perfctr_pwr_lo, NULL,
-			PERFCOUNTER_FLAG_KERNEL);
-
-		if (WARN_ONCE(ret, "Unable to get perfcounters for DCVS\n"))
-			adreno_dev->perfctr_pwr_lo = 0;
-	}
-
-
 	if (device->pwrctrl.bus_control) {
 		/* VBIF waiting for RAM */
 		if (adreno_dev->starved_ram_lo == 0) {
@@ -2051,15 +2038,6 @@ static int _adreno_start(struct adreno_device *adreno_dev)
 		}
 	}
 
-	if (gmu_core_isenabled(device) && adreno_dev->perfctr_ifpc_lo == 0) {
-		ret = adreno_perfcounter_get(adreno_dev,
-				KGSL_PERFCOUNTER_GROUP_GPMU_PWR, 4,
-				&adreno_dev->perfctr_ifpc_lo, NULL,
-				PERFCOUNTER_FLAG_KERNEL);
-		if (WARN_ONCE(ret, "Unable to get perf counter for IFPC\n"))
-			adreno_dev->perfctr_ifpc_lo = 0;
-	}
-
 	/* Clear the busy_data stats - we're starting over from scratch */
 	adreno_dev->busy_data.gpu_busy = 0;
 	adreno_dev->busy_data.bif_ram_cycles = 0;
@@ -3037,55 +3015,10 @@ static int adreno_suspend_context(struct kgsl_device *device)
 	return adreno_idle(device);
 }
 
-/**
- * adreno_read - General read function to read adreno device memory
- * @device - Pointer to the GPU device struct (for adreno device)
- * @base - Base address (kernel virtual) where the device memory is mapped
- * @offsetwords - Offset in words from the base address, of the memory that
- * is to be read
- * @value - Value read from the device memory
- * @mem_len - Length of the device memory mapped to the kernel
- */
-static void adreno_read(struct kgsl_device *device, void __iomem *base,
-		unsigned int offsetwords, unsigned int *value,
-		unsigned int mem_len)
-{
-
-	void __iomem *reg;
-
-	/* Make sure we're not reading from invalid memory */
-	if (WARN(offsetwords * sizeof(uint32_t) >= mem_len,
-		"Out of bounds register read: 0x%x/0x%x\n",
-			offsetwords, mem_len >> 2))
-		return;
-
-	reg = (base + (offsetwords << 2));
-
-	if (!in_interrupt())
-		kgsl_pre_hwaccess(device);
-
-	*value = __raw_readl(reg);
-	/*
-	 * ensure this read finishes before the next one.
-	 * i.e. act like normal readl()
-	 */
-	rmb();
-}
-
 static void adreno_retry_rbbm_read(struct kgsl_device *device,
-		void __iomem *base, unsigned int offsetwords,
-		unsigned int *value, unsigned int mem_len)
+		unsigned int offsetwords, unsigned int *value)
 {
 	int i;
-	void __iomem *reg;
-
-	/* Make sure we're not reading from invalid memory */
-	if (WARN(offsetwords * sizeof(uint32_t) >= mem_len,
-		"Out of bounds register read: 0x%x/0x%x\n",
-		offsetwords, mem_len >> 2))
-		return;
-
-	reg = (base + (offsetwords << 2));
 
 	/*
 	 * If 0xdeafbead was transient, second read is expected to return the
@@ -3093,7 +3026,7 @@ static void adreno_retry_rbbm_read(struct kgsl_device *device,
 	 * 0xdeafbead, read it enough times to guarantee that.
 	 */
 	for (i = 0; i < 16; i++) {
-		*value = readl_relaxed(reg);
+		*value = readl_relaxed(device->reg_virt + (offsetwords << 2));
 		/*
 		 * Read barrier needed so that register is read from hardware
 		 * every iteration
@@ -3110,12 +3043,13 @@ static bool adreno_is_rbbm_batch_reg(struct kgsl_device *device,
 {
 	if (adreno_is_a650(ADRENO_DEVICE(device)) ||
 		adreno_is_a620v1(ADRENO_DEVICE(device))) {
-		if (((offsetwords > 0x0) && (offsetwords < 0x3FF)) ||
-			((offsetwords > 0x4FA) && (offsetwords < 0x53F)) ||
-			((offsetwords > 0x556) && (offsetwords < 0x5FF)) ||
-			((offsetwords > 0xF400) && (offsetwords < 0xFFFF)))
+		if (((offsetwords >= 0x0) && (offsetwords <= 0x3FF)) ||
+		((offsetwords >= 0x4FA) && (offsetwords <= 0x53F)) ||
+		((offsetwords >= 0x556) && (offsetwords <= 0x5FF)) ||
+		((offsetwords >= 0xF400) && (offsetwords <= 0xFFFF)))
 			return  true;
 	}
+
 	return false;
 }
 
@@ -3127,26 +3061,22 @@ static bool adreno_is_rbbm_batch_reg(struct kgsl_device *device,
 static void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
 	unsigned int *value)
 {
-	adreno_read(device, device->reg_virt, offsetwords, value,
-						device->reg_len);
+	/* Make sure we're not reading from invalid memory */
+	if (WARN(offsetwords * sizeof(uint32_t) >= device->reg_len,
+		"Out of bounds register read: 0x%x/0x%x\n",
+			offsetwords, device->reg_len >> 2))
+		return;
+
+	if (!in_interrupt())
+		kgsl_pre_hwaccess(device);
+
+	*value = readl_relaxed(device->reg_virt + (offsetwords << 2));
+	/* Order this read with respect to the following memory accesses */
+	rmb();
 
 	if ((*value == 0xdeafbead) &&
 		adreno_is_rbbm_batch_reg(device, offsetwords))
-		adreno_retry_rbbm_read(device, device->reg_virt, offsetwords,
-			value, device->reg_len);
-}
-
-/**
- * adreno_shadermem_regread - Used to read GPU (adreno) shader memory
- * @device - GPU device whose shader memory is to be read
- * @offsetwords - Offset in words, of the shader memory address to be read
- * @value - Pointer to where the read shader mem value is to be stored
- */
-void adreno_shadermem_regread(struct kgsl_device *device,
-	unsigned int offsetwords, unsigned int *value)
-{
-	adreno_read(device, device->shader_mem_virt, offsetwords, value,
-					device->shader_mem_len);
+		adreno_retry_rbbm_read(device, offsetwords, value);
 }
 
 static void adreno_regwrite(struct kgsl_device *device,
@@ -3615,33 +3545,29 @@ static void adreno_power_stats(struct kgsl_device *device,
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
 	struct adreno_busy_data *busy = &adreno_dev->busy_data;
 	int64_t adj = 0;
+	u64 gpu_busy;
 
 	memset(stats, 0, sizeof(*stats));
 
-	/* Get the busy cycles counted since the counter was last reset */
-	if (adreno_dev->perfctr_pwr_lo != 0) {
-		uint64_t gpu_busy;
+	gpu_busy = counter_delta(device, adreno_dev->perfctr_pwr_lo,
+		&busy->gpu_busy);
 
-		gpu_busy = counter_delta(device, adreno_dev->perfctr_pwr_lo,
-			&busy->gpu_busy);
+	if (gpudev->read_throttling_counters) {
+		adj = gpudev->read_throttling_counters(adreno_dev);
+		if (adj < 0 && -adj > gpu_busy)
+			adj = 0;
 
-		if (gpudev->read_throttling_counters) {
-			adj = gpudev->read_throttling_counters(adreno_dev);
-			if (adj < 0 && -adj > gpu_busy)
-				adj = 0;
+		gpu_busy += adj;
+	}
 
-			gpu_busy += adj;
-		}
-
-		if (adreno_is_a6xx(adreno_dev)) {
-			/* clock sourced from XO */
-			stats->busy_time = gpu_busy * 10;
-			do_div(stats->busy_time, 192);
-		} else {
-			/* clock sourced from GFX3D */
-			stats->busy_time = adreno_ticks_to_us(gpu_busy,
-				kgsl_pwrctrl_active_freq(pwr));
-		}
+	if (adreno_is_a6xx(adreno_dev)) {
+		/* clock sourced from XO */
+		stats->busy_time = gpu_busy * 10;
+		do_div(stats->busy_time, 192);
+	} else {
+		/* clock sourced from GFX3D */
+		stats->busy_time = adreno_ticks_to_us(gpu_busy,
+			kgsl_pwrctrl_active_freq(pwr));
 	}
 
 	if (device->pwrctrl.bus_control) {
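
The regread rework above folds the bounds check and the readl_relaxed()/rmb() pair directly into adreno_regread(), and retries reads that return the transient poison value 0xdeafbead on a650 and a620v1. The retry idiom condensed into a sketch (assumes a mapped register base; not the driver's exact helper):

	static u32 read_retrying_poison(void __iomem *base, u32 offsetwords)
	{
		u32 val;
		int i;

		/* Re-read up to 16 times while the transient poison persists */
		for (i = 0; i < 16; i++) {
			val = readl_relaxed(base + (offsetwords << 2));
			rmb();	/* read from hardware on every iteration */
			if (val != 0xdeafbead)
				break;
		}

		return val;
	}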
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 032c7f00..5aacdd0 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -96,8 +96,6 @@
 #define ADRENO_MIN_VOLT BIT(15)
 /* The core supports IO-coherent memory */
 #define ADRENO_IOCOHERENT BIT(16)
-/* To retain RBBM perfcntl enable setting in IFPC */
-#define ADRENO_PERFCTRL_RETAIN BIT(17)
 /*
  * The GMU supports Adaptive Clock Distribution (ACD)
  * for droop mitigation
@@ -352,7 +350,6 @@ struct adreno_reglist {
  * @gpudev: Pointer to the GPU family specific functions for this core
  * @gmem_base: Base address of binning memory (GMEM/OCMEM)
  * @gmem_size: Amount of binning memory (GMEM/OCMEM) to reserve for the core
- * @num_protected_regs: number of protected registers
  * @busy_mask: mask to check if GPU is busy in RBBM_STATUS
  * @bus_width: Bytes transferred in 1 cycle
  */
@@ -363,7 +360,6 @@ struct adreno_gpu_core {
 	struct adreno_gpudev *gpudev;
 	unsigned long gmem_base;
 	size_t gmem_size;
-	unsigned int num_protected_regs;
 	unsigned int busy_mask;
 	u32 bus_width;
 };
@@ -891,7 +887,6 @@ struct adreno_gpudev {
 	int (*rb_start)(struct adreno_device *adreno_dev);
 	int (*microcode_read)(struct adreno_device *adreno_dev);
 	void (*perfcounter_init)(struct adreno_device *adreno_dev);
-	void (*perfcounter_close)(struct adreno_device *adreno_dev);
 	void (*start)(struct adreno_device *adreno_dev);
 	bool (*is_sptp_idle)(struct adreno_device *adreno_dev);
 	int (*regulator_enable)(struct adreno_device *adreno_dev);
@@ -902,8 +897,6 @@ struct adreno_gpudev {
 	int64_t (*read_throttling_counters)(struct adreno_device *adreno_dev);
 	void (*count_throttles)(struct adreno_device *adreno_dev,
 					uint64_t adj);
-	int (*enable_pwr_counters)(struct adreno_device *adrneo_dev,
-				unsigned int counter);
 	unsigned int (*preemption_pre_ibsubmit)(
 				struct adreno_device *adreno_dev,
 				struct adreno_ringbuffer *rb,
@@ -1049,10 +1042,6 @@ int adreno_set_constraint(struct kgsl_device *device,
 				struct kgsl_context *context,
 				struct kgsl_device_constraint *constraint);
 
-void adreno_shadermem_regread(struct kgsl_device *device,
-						unsigned int offsetwords,
-						unsigned int *value);
-
 void adreno_snapshot(struct kgsl_device *device,
 		struct kgsl_snapshot *snapshot,
 		struct kgsl_context *context);
@@ -1464,43 +1453,6 @@ static inline void adreno_put_gpu_halt(struct adreno_device *adreno_dev)
 void adreno_reglist_write(struct adreno_device *adreno_dev,
 		const struct adreno_reglist *list, u32 count);
 
-/**
- * adreno_set_protected_registers() - Protect the specified range of registers
- * from being accessed by the GPU
- * @adreno_dev: pointer to the Adreno device
- * @index: Pointer to the index of the protect mode register to write to
- * @reg: Starting dword register to write
- * @mask_len: Size of the mask to protect (# of registers = 2 ** mask_len)
- *
- * Add the range of registers to the list of protected mode registers that will
- * cause an exception if the GPU accesses them.  There are 16 available
- * protected mode registers.  Index is used to specify which register to write
- * to - the intent is to call this function multiple times with the same index
- * pointer for each range and the registers will be magically programmed in
- * incremental fashion
- */
-static inline void adreno_set_protected_registers(
-		struct adreno_device *adreno_dev, unsigned int *index,
-		unsigned int reg, int mask_len)
-{
-	unsigned int val;
-	unsigned int base =
-		adreno_getreg(adreno_dev, ADRENO_REG_CP_PROTECT_REG_0);
-	unsigned int offset = *index;
-	unsigned int max_slots = adreno_dev->gpucore->num_protected_regs ?
-				adreno_dev->gpucore->num_protected_regs : 16;
-
-	/* Do we have a free slot? */
-	if (WARN(*index >= max_slots, "Protected register slots full: %d/%d\n",
-					*index, max_slots))
-		return;
-
-	val = 0x60000000 | ((mask_len & 0x1F) << 24) | ((reg << 2) & 0xFFFFF);
-
-	kgsl_regwrite(KGSL_DEVICE(adreno_dev), base + offset, val);
-	*index = *index + 1;
-}
-
 #ifdef CONFIG_DEBUG_FS
 void adreno_debugfs_init(struct adreno_device *adreno_dev);
 void adreno_context_debugfs_init(struct adreno_device *adreno_dev,
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 3456c14..680afa0 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -611,6 +611,9 @@ static void a3xx_platform_setup(struct adreno_device *adreno_dev)
 
 	gpudev->vbif_xin_halt_ctrl0_mask = A30X_VBIF_XIN_HALT_CTRL0_MASK;
 
+	/* Set the GPU busy counter for frequency scaling */
+	adreno_dev->perfctr_pwr_lo = A3XX_RBBM_PERFCTR_PWR_1_LO;
+
 	/* Check efuse bits for various capabilties */
 	a3xx_check_features(adreno_dev);
 }
@@ -968,8 +971,10 @@ static struct adreno_perfcount_register a3xx_perfcounters_rb[] = {
 static struct adreno_perfcount_register a3xx_perfcounters_pwr[] = {
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PWR_0_LO,
 		A3XX_RBBM_PERFCTR_PWR_0_HI, -1, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PWR_1_LO,
-		A3XX_RBBM_PERFCTR_PWR_1_HI, -1, 0 },
+	/*
+	 * A3XX_RBBM_PERFCTR_PWR_1_LO is used for frequency scaling and removed
+	 * from the pool of available counters
+	 */
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_vbif[] = {
@@ -1063,64 +1068,50 @@ static void a3xx_perfcounter_init(struct adreno_device *adreno_dev)
 		counters->groups[KGSL_PERFCOUNTER_GROUP_VBIF_PWR].regs =
 			a3xx_perfcounters_vbif2_pwr;
 	}
-
-	/*
-	 * Enable the GPU busy count counter. This is a fixed counter on
-	 * A3XX so we don't need to bother checking the return value
-	 */
-	adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_PWR, 1,
-		NULL, NULL, PERFCOUNTER_FLAG_KERNEL);
 }
 
-static void a3xx_perfcounter_close(struct adreno_device *adreno_dev)
-{
-	adreno_perfcounter_put(adreno_dev, KGSL_PERFCOUNTER_GROUP_PWR, 1,
-		PERFCOUNTER_FLAG_KERNEL);
-}
+static const struct {
+	u32 reg;
+	u32 base;
+	u32 count;
+} a3xx_protected_blocks[] = {
+	/* RBBM */
+	{ A3XX_CP_PROTECT_REG_0,      0x0018, 0 },
+	{ A3XX_CP_PROTECT_REG_0 + 1,  0x0020, 2 },
+	{ A3XX_CP_PROTECT_REG_0 + 2,  0x0033, 0 },
+	{ A3XX_CP_PROTECT_REG_0 + 3,  0x0042, 0 },
+	{ A3XX_CP_PROTECT_REG_0 + 4,  0x0050, 4 },
+	{ A3XX_CP_PROTECT_REG_0 + 5,  0x0063, 0 },
+	{ A3XX_CP_PROTECT_REG_0 + 6,  0x0100, 4 },
+	/* CP */
+	{ A3XX_CP_PROTECT_REG_0 + 7,  0x01c0, 5 },
+	{ A3XX_CP_PROTECT_REG_0 + 8,  0x01ec, 1 },
+	{ A3XX_CP_PROTECT_REG_0 + 9,  0x01f6, 1 },
+	{ A3XX_CP_PROTECT_REG_0 + 10, 0x01f8, 2 },
+	{ A3XX_CP_PROTECT_REG_0 + 11, 0x045e, 2 },
+	{ A3XX_CP_PROTECT_REG_0 + 12, 0x0460, 4 },
+	/* RB */
+	{ A3XX_CP_PROTECT_REG_0 + 13, 0x0cc0, 0 },
+	/* VBIF */
+	{ A3XX_CP_PROTECT_REG_0 + 14, 0x3000, 6 },
+	/* SMMU */
+	{ A3XX_CP_PROTECT_REG_0 + 15, 0xa000, 12 },
+	/* There are no remaining protected mode registers for a3xx */
+};
 
-/**
- * a3xx_protect_init() - Initializes register protection on a3xx
- * @adreno_dev: Pointer to the device structure
- * Performs register writes to enable protected access to sensitive
- * registers
- */
-static void a3xx_protect_init(struct adreno_device *adreno_dev)
+static void a3xx_protect_init(struct kgsl_device *device)
 {
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	int index = 0;
-	struct kgsl_protected_registers *iommu_regs;
+	int i;
 
-	/* enable access protection to privileged registers */
 	kgsl_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007);
 
-	/* RBBM registers */
-	adreno_set_protected_registers(adreno_dev, &index, 0x18, 0);
-	adreno_set_protected_registers(adreno_dev, &index, 0x20, 2);
-	adreno_set_protected_registers(adreno_dev, &index, 0x33, 0);
-	adreno_set_protected_registers(adreno_dev, &index, 0x42, 0);
-	adreno_set_protected_registers(adreno_dev, &index, 0x50, 4);
-	adreno_set_protected_registers(adreno_dev, &index, 0x63, 0);
-	adreno_set_protected_registers(adreno_dev, &index, 0x100, 4);
+	for (i = 0; i < ARRAY_SIZE(a3xx_protected_blocks); i++) {
+		u32 val = 0x60000000 |
+			(a3xx_protected_blocks[i].count << 24) |
+			(a3xx_protected_blocks[i].base << 2);
 
-	/* CP registers */
-	adreno_set_protected_registers(adreno_dev, &index, 0x1C0, 5);
-	adreno_set_protected_registers(adreno_dev, &index, 0x1EC, 1);
-	adreno_set_protected_registers(adreno_dev, &index, 0x1F6, 1);
-	adreno_set_protected_registers(adreno_dev, &index, 0x1F8, 2);
-	adreno_set_protected_registers(adreno_dev, &index, 0x45E, 2);
-	adreno_set_protected_registers(adreno_dev, &index, 0x460, 4);
-
-	/* RB registers */
-	adreno_set_protected_registers(adreno_dev, &index, 0xCC0, 0);
-
-	/* VBIF registers */
-	adreno_set_protected_registers(adreno_dev, &index, 0x3000, 6);
-
-	/* SMMU registers */
-	iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu);
-	if (iommu_regs)
-		adreno_set_protected_registers(adreno_dev, &index,
-				iommu_regs->base, ilog2(iommu_regs->range));
+		kgsl_regwrite(device, a3xx_protected_blocks[i].reg, val);
+	}
 }
 
 static void a3xx_start(struct adreno_device *adreno_dev)
@@ -1169,7 +1160,7 @@ static void a3xx_start(struct adreno_device *adreno_dev)
 	kgsl_regwrite(device, A3XX_RBBM_CLOCK_CTL, A3XX_RBBM_CLOCK_CTL_DEFAULT);
 
 	/* Turn on protection */
-	a3xx_protect_init(adreno_dev);
+	a3xx_protect_init(device);
 
 	/* Turn on performance counters */
 	kgsl_regwrite(device, A3XX_RBBM_PERFCTR_CTL, 0x01);
@@ -1514,7 +1505,6 @@ struct adreno_gpudev adreno_a3xx_gpudev = {
 	.init = a3xx_init,
 	.microcode_read = a3xx_microcode_read,
 	.perfcounter_init = a3xx_perfcounter_init,
-	.perfcounter_close = a3xx_perfcounter_close,
 	.start = a3xx_start,
 	.snapshot = a3xx_snapshot,
 	.coresight = {&a3xx_coresight},
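
Each protect table above packs one register range per slot using the same encoding the removed adreno_set_protected_registers() helper produced: 0x60000000 arms the trap, bits 28:24 carry log2 of the range size, and the low bits carry the base offset shifted left by two, so one write protects 2^count registers starting at base. The encoding as a one-liner (the function name is illustrative):

	static inline u32 cp_protect_val(u32 base, u32 count)
	{
		return 0x60000000 | ((count & 0x1f) << 24) | ((base << 2) & 0xfffff);
	}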
diff --git a/drivers/gpu/msm/adreno_a3xx_snapshot.c b/drivers/gpu/msm/adreno_a3xx_snapshot.c
index e34be4c..b636991 100644
--- a/drivers/gpu/msm/adreno_a3xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a3xx_snapshot.c
@@ -104,13 +104,9 @@ static size_t a3xx_snapshot_shader_memory(struct kgsl_device *device,
 	u8 *buf, size_t remain, void *priv)
 {
 	struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
-	unsigned int i;
-	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	void *data = buf + sizeof(*header);
 	unsigned int shader_read_len = SHADER_MEMORY_SIZE;
 
-	if (shader_read_len > (device->shader_mem_len >> 2))
-		shader_read_len = (device->shader_mem_len >> 2);
-
 	if (remain < DEBUG_SECTION_SZ(shader_read_len)) {
 		SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
 		return 0;
@@ -120,21 +116,23 @@ static size_t a3xx_snapshot_shader_memory(struct kgsl_device *device,
 	header->size = shader_read_len;
 
 	/* Map shader memory to kernel, for dumping */
-	if (device->shader_mem_virt == NULL)
-		device->shader_mem_virt = devm_ioremap(device->dev,
-					device->shader_mem_phys,
-					device->shader_mem_len);
+	if (IS_ERR_OR_NULL(device->shader_mem_virt)) {
+		struct resource *res;
 
-	if (device->shader_mem_virt == NULL) {
-		dev_err(device->dev,
-			     "Unable to map shader memory region\n");
+		res = platform_get_resource_byname(device->pdev,
+			IORESOURCE_MEM, "kgsl_3d0_shader_memory");
+
+		if (res)
+			device->shader_mem_virt =
+				devm_ioremap_resource(&device->pdev->dev, res);
+	}
+
+	if (IS_ERR_OR_NULL(device->shader_mem_virt)) {
+		dev_err(device->dev, "Unable to map the shader memory\n");
 		return 0;
 	}
 
-	/* Now, dump shader memory to snapshot */
-	for (i = 0; i < shader_read_len; i++)
-		adreno_shadermem_regread(device, i, &data[i]);
-
+	memcpy_fromio(data, device->shader_mem_virt, shader_read_len << 2);
 
 	return DEBUG_SECTION_SZ(shader_read_len);
 }
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index f392ef4..12f5c21 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -146,6 +146,9 @@ static void a5xx_platform_setup(struct adreno_device *adreno_dev)
 	adreno_dev->lm_leakage = A530_DEFAULT_LEAKAGE;
 	adreno_dev->speed_bin = 0;
 
+	/* Set the GPU busy counter to use for frequency scaling */
+	adreno_dev->perfctr_pwr_lo = A5XX_RBBM_PERFCTR_RBBM_0_LO;
+
 	/* Check efuse bits for various capabilties */
 	a5xx_check_features(adreno_dev);
 }
@@ -290,57 +293,69 @@ static void a5xx_remove(struct adreno_device *adreno_dev)
 		a5xx_critical_packet_destroy(adreno_dev);
 }
 
-/**
- * a5xx_protect_init() - Initializes register protection on a5xx
- * @device: Pointer to the device structure
- * Performs register writes to enable protected access to sensitive
- * registers
- */
+static const struct {
+	u32 reg;
+	u32 base;
+	u32 count;
+} a5xx_protected_blocks[] = {
+	/* RBBM */
+	{  A5XX_CP_PROTECT_REG_0,     0x004, 2 },
+	{  A5XX_CP_PROTECT_REG_0 + 1, 0x008, 3 },
+	{  A5XX_CP_PROTECT_REG_0 + 2, 0x010, 4 },
+	{  A5XX_CP_PROTECT_REG_0 + 3, 0x020, 5 },
+	{  A5XX_CP_PROTECT_REG_0 + 4, 0x040, 6 },
+	{  A5XX_CP_PROTECT_REG_0 + 5, 0x080, 6 },
+	/* Content protection */
+	{  A5XX_CP_PROTECT_REG_0 + 6, A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 4 },
+	{  A5XX_CP_PROTECT_REG_0 + 7, A5XX_RBBM_SECVID_TRUST_CNTL, 1 },
+	/* CP */
+	{  A5XX_CP_PROTECT_REG_0 + 8, 0x800, 6 },
+	{  A5XX_CP_PROTECT_REG_0 + 9, 0x840, 3 },
+	{  A5XX_CP_PROTECT_REG_0 + 10, 0x880, 5 },
+	{  A5XX_CP_PROTECT_REG_0 + 11, 0xaa0, 0 },
+	/* RB */
+	{  A5XX_CP_PROTECT_REG_0 + 12, 0xcc0, 0 },
+	{  A5XX_CP_PROTECT_REG_0 + 13, 0xcf0, 1 },
+	/* VPC */
+	{  A5XX_CP_PROTECT_REG_0 + 14, 0xe68, 3 },
+	{  A5XX_CP_PROTECT_REG_0 + 15, 0xe70, 4 },
+	/* UCHE */
+	{  A5XX_CP_PROTECT_REG_0 + 16, 0xe80, 4 },
+	/* A5XX_CP_PROTECT_REG_17 will be used for SMMU */
+	/* A5XX_CP_PROTECT_REG_18 - A5XX_CP_PROTECT_REG_31 are available */
+};
+
+static void _setprotectreg(struct kgsl_device *device, u32 offset,
+		u32 base, u32 count)
+{
+	kgsl_regwrite(device, offset, 0x60000000 | (count << 24) | (base << 2));
+}
+
 static void a5xx_protect_init(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	int index = 0;
-	struct kgsl_protected_registers *iommu_regs;
+	u32 reg;
+	int i;
 
 	/* enable access protection to privileged registers */
 	kgsl_regwrite(device, A5XX_CP_PROTECT_CNTL, 0x00000007);
 
-	/* RBBM registers */
-	adreno_set_protected_registers(adreno_dev, &index, 0x4, 2);
-	adreno_set_protected_registers(adreno_dev, &index, 0x8, 3);
-	adreno_set_protected_registers(adreno_dev, &index, 0x10, 4);
-	adreno_set_protected_registers(adreno_dev, &index, 0x20, 5);
-	adreno_set_protected_registers(adreno_dev, &index, 0x40, 6);
-	adreno_set_protected_registers(adreno_dev, &index, 0x80, 6);
+	for (i = 0; i < ARRAY_SIZE(a5xx_protected_blocks); i++) {
+		reg = a5xx_protected_blocks[i].reg;
 
-	/* Content protection registers */
-	adreno_set_protected_registers(adreno_dev, &index,
-		   A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 4);
-	adreno_set_protected_registers(adreno_dev, &index,
-		   A5XX_RBBM_SECVID_TRUST_CNTL, 1);
+		_setprotectreg(device, reg, a5xx_protected_blocks[i].base,
+			a5xx_protected_blocks[i].count);
+	}
 
-	/* CP registers */
-	adreno_set_protected_registers(adreno_dev, &index, 0x800, 6);
-	adreno_set_protected_registers(adreno_dev, &index, 0x840, 3);
-	adreno_set_protected_registers(adreno_dev, &index, 0x880, 5);
-	adreno_set_protected_registers(adreno_dev, &index, 0x0AA0, 0);
-
-	/* RB registers */
-	adreno_set_protected_registers(adreno_dev, &index, 0xCC0, 0);
-	adreno_set_protected_registers(adreno_dev, &index, 0xCF0, 1);
-
-	/* VPC registers */
-	adreno_set_protected_registers(adreno_dev, &index, 0xE68, 3);
-	adreno_set_protected_registers(adreno_dev, &index, 0xE70, 4);
-
-	/* UCHE registers */
-	adreno_set_protected_registers(adreno_dev, &index, 0xE80, ilog2(16));
-
-	/* SMMU registers */
-	iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu);
-	if (iommu_regs)
-		adreno_set_protected_registers(adreno_dev, &index,
-				iommu_regs->base, ilog2(iommu_regs->range));
+	/*
+	 * For a530 and a540 the SMMU region is 0x20000 bytes long; on all
+	 * other targets it is 0x10000 bytes. The base offset is 0x40000 in
+	 * both cases. Write the range to the next available slot.
+	 */
+	if (adreno_is_a530(adreno_dev) || adreno_is_a540(adreno_dev))
+		_setprotectreg(device, reg + 1, 0x40000, ilog2(0x20000));
+	else
+		_setprotectreg(device, reg + 1, 0x40000, ilog2(0x10000));
 }
 
 /*
@@ -1255,24 +1270,6 @@ static void a5xx_count_throttles(struct adreno_device *adreno_dev,
 		adreno_dev->lm_threshold_cross = adj;
 }
 
-static int a5xx_enable_pwr_counters(struct adreno_device *adreno_dev,
-		unsigned int counter)
-{
-	/*
-	 * On 5XX we have to emulate the PWR counters which are physically
-	 * missing. Program countable 6 on RBBM_PERFCTR_RBBM_0 as a substitute
-	 * for PWR:1. Don't emulate PWR:0 as nobody uses it and we don't want
-	 * to take away too many of the generic RBBM counters.
-	 */
-
-	if (counter == 0)
-		return -EINVAL;
-
-	kgsl_regwrite(KGSL_DEVICE(adreno_dev), A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
-
-	return 0;
-}
-
 /* FW driven idle 10% throttle */
 #define IDLE_10PCT 0
 /* number of cycles when clock is throttled by 50% (CRC) */
@@ -1444,6 +1441,9 @@ static void a5xx_start(struct adreno_device *adreno_dev)
 	/* Make all blocks contribute to the GPU BUSY perf counter */
 	kgsl_regwrite(device, A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
 
+	/* Program RBBM counter 0 to report GPU busy for frequency scaling */
+	kgsl_regwrite(device, A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
+
 	/*
 	 * Enable the RBBM error reporting bits.  This lets us get
 	 * useful information on failure
@@ -2093,11 +2093,11 @@ static struct adreno_perfcount_register a5xx_perfcounters_cp[] = {
 		A5XX_RBBM_PERFCTR_CP_7_HI, 7, A5XX_CP_PERFCTR_CP_SEL_7 },
 };
 
-/*
- * Note that PERFCTR_RBBM_0 is missing - it is used to emulate the PWR counters.
- * See below.
- */
 static struct adreno_perfcount_register a5xx_perfcounters_rbbm[] = {
+	/*
+	 * A5XX_RBBM_PERFCTR_RBBM_0 is used for frequency scaling and omitted
+	 * from the pool of available counters
+	 */
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RBBM_1_LO,
 		A5XX_RBBM_PERFCTR_RBBM_1_HI, 9, A5XX_RBBM_PERFCTR_RBBM_SEL_1 },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RBBM_2_LO,
@@ -2346,22 +2346,6 @@ static struct adreno_perfcount_register a5xx_perfcounters_alwayson[] = {
 		A5XX_RBBM_ALWAYSON_COUNTER_HI, -1 },
 };
 
-/*
- * 5XX targets don't really have physical PERFCTR_PWR registers - we emulate
- * them using similar performance counters from the RBBM block. The difference
- * between using this group and the RBBM group is that the RBBM counters are
- * reloaded after a power collapse which is not how the PWR counters behaved on
- * legacy hardware. In order to limit the disruption on the rest of the system
- * we go out of our way to ensure backwards compatibility. Since RBBM counters
- * are in short supply, we don't emulate PWR:0 which nobody uses - mark it as
- * broken.
- */
-static struct adreno_perfcount_register a5xx_perfcounters_pwr[] = {
-	{ KGSL_PERFCOUNTER_BROKEN, 0, 0, 0, 0, -1, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RBBM_0_LO,
-		A5XX_RBBM_PERFCTR_RBBM_0_HI, -1, 0},
-};
-
 static struct adreno_perfcount_register a5xx_pwrcounters_sp[] = {
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_SP_POWER_COUNTER_0_LO,
 		A5XX_SP_POWER_COUNTER_0_HI, -1, A5XX_SP_POWERCTR_SP_SEL_0 },
@@ -2481,8 +2465,6 @@ static struct adreno_perfcount_group a5xx_perfcounter_groups
 	A5XX_PERFCOUNTER_GROUP(SP, sp),
 	A5XX_PERFCOUNTER_GROUP(RB, rb),
 	A5XX_PERFCOUNTER_GROUP(VSC, vsc),
-	A5XX_PERFCOUNTER_GROUP_FLAGS(PWR, pwr,
-		ADRENO_PERFCOUNTER_GROUP_FIXED),
 	A5XX_PERFCOUNTER_GROUP(VBIF, vbif),
 	A5XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
 		ADRENO_PERFCOUNTER_GROUP_FIXED),
@@ -3184,7 +3166,6 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
 	.pwrlevel_change_settings = a5xx_pwrlevel_change_settings,
 	.read_throttling_counters = a5xx_read_throttling_counters,
 	.count_throttles = a5xx_count_throttles,
-	.enable_pwr_counters = a5xx_enable_pwr_counters,
 	.preemption_pre_ibsubmit = a5xx_preemption_pre_ibsubmit,
 	.preemption_yield_enable =
 				a5xx_preemption_yield_enable,
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index a1e7bcb9..1a7cdbb 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -13,126 +13,92 @@
 #include "adreno_llc.h"
 #include "adreno_pm4types.h"
 #include "adreno_trace.h"
-#include "kgsl_gmu.h"
 #include "kgsl_trace.h"
 
-static struct a6xx_protected_regs {
-	unsigned int base;
-	unsigned int count;
-	int read_protect;
-} a6xx_protected_regs_group[] = {
-	{ 0x600, 0x51, 0 },
-	{ 0xAE50, 0x2, 1 },
-	{ 0x9624, 0x13, 1 },
-	{ 0x8630, 0x8, 1 },
-	{ 0x9E70, 0x1, 1 },
-	{ 0x9E78, 0x187, 1 },
-	{ 0xF000, 0x810, 1 },
-	{ 0xFC00, 0x3, 0 },
-	{ 0x50E, 0x0, 1 },
-	{ 0x50F, 0x0, 0 },
-	{ 0x510, 0x0, 1 },
-	{ 0x0, 0x4F9, 0 },
-	{ 0x501, 0xA, 0 },
-	{ 0x511, 0x44, 0 },
-	{ 0xE00, 0x1, 1 },
-	{ 0xE03, 0xB, 1 },
-	{ 0x8E00, 0x0, 1 },
-	{ 0x8E50, 0xF, 1 },
-	{ 0xBE02, 0x0, 1 },
-	{ 0xBE20, 0x11F3, 1 },
-	{ 0x800, 0x82, 1 },
-	{ 0x8A0, 0x8, 1 },
-	{ 0x8AB, 0x19, 1 },
-	{ 0x900, 0x4D, 1 },
-	{ 0x98D, 0x76, 1 },
-	{ 0x8D0, 0x23, 0 },
-	{ 0x980, 0x4, 0 },
-	{ 0xA630, 0x0, 1 },
-};
-
 /* IFPC & Preemption static powerup restore list */
-static struct reg_list_pair {
-	uint32_t offset;
-	uint32_t val;
-} a6xx_pwrup_reglist[] = {
-	{ A6XX_VSC_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_GRAS_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_RB_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_PC_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_HLSQ_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_VFD_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_VPC_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_UCHE_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_SP_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_TPL1_ADDR_MODE_CNTL, 0x0 },
-	{ A6XX_UCHE_WRITE_RANGE_MAX_LO, 0x0 },
-	{ A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0 },
-	{ A6XX_UCHE_TRAP_BASE_LO, 0x0 },
-	{ A6XX_UCHE_TRAP_BASE_HI, 0x0 },
-	{ A6XX_UCHE_WRITE_THRU_BASE_LO, 0x0 },
-	{ A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0 },
-	{ A6XX_UCHE_GMEM_RANGE_MIN_LO, 0x0 },
-	{ A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x0 },
-	{ A6XX_UCHE_GMEM_RANGE_MAX_LO, 0x0 },
-	{ A6XX_UCHE_GMEM_RANGE_MAX_HI, 0x0 },
-	{ A6XX_UCHE_FILTER_CNTL, 0x0 },
-	{ A6XX_UCHE_CACHE_WAYS, 0x0 },
-	{ A6XX_UCHE_MODE_CNTL, 0x0 },
-	{ A6XX_RB_NC_MODE_CNTL, 0x0 },
-	{ A6XX_TPL1_NC_MODE_CNTL, 0x0 },
-	{ A6XX_SP_NC_MODE_CNTL, 0x0 },
-	{ A6XX_PC_DBG_ECO_CNTL, 0x0 },
-	{ A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x0 },
+static u32 a6xx_pwrup_reglist[] = {
+	A6XX_VSC_ADDR_MODE_CNTL,
+	A6XX_GRAS_ADDR_MODE_CNTL,
+	A6XX_RB_ADDR_MODE_CNTL,
+	A6XX_PC_ADDR_MODE_CNTL,
+	A6XX_HLSQ_ADDR_MODE_CNTL,
+	A6XX_VFD_ADDR_MODE_CNTL,
+	A6XX_VPC_ADDR_MODE_CNTL,
+	A6XX_UCHE_ADDR_MODE_CNTL,
+	A6XX_SP_ADDR_MODE_CNTL,
+	A6XX_TPL1_ADDR_MODE_CNTL,
+	A6XX_UCHE_WRITE_RANGE_MAX_LO,
+	A6XX_UCHE_WRITE_RANGE_MAX_HI,
+	A6XX_UCHE_TRAP_BASE_LO,
+	A6XX_UCHE_TRAP_BASE_HI,
+	A6XX_UCHE_WRITE_THRU_BASE_LO,
+	A6XX_UCHE_WRITE_THRU_BASE_HI,
+	A6XX_UCHE_GMEM_RANGE_MIN_LO,
+	A6XX_UCHE_GMEM_RANGE_MIN_HI,
+	A6XX_UCHE_GMEM_RANGE_MAX_LO,
+	A6XX_UCHE_GMEM_RANGE_MAX_HI,
+	A6XX_UCHE_FILTER_CNTL,
+	A6XX_UCHE_CACHE_WAYS,
+	A6XX_UCHE_MODE_CNTL,
+	A6XX_RB_NC_MODE_CNTL,
+	A6XX_TPL1_NC_MODE_CNTL,
+	A6XX_SP_NC_MODE_CNTL,
+	A6XX_PC_DBG_ECO_CNTL,
+	A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
 };
 
 /* IFPC only static powerup restore list */
-static struct reg_list_pair a6xx_ifpc_pwrup_reglist[] = {
-	{ A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x0 },
-	{ A6XX_CP_CHICKEN_DBG, 0x0 },
-	{ A6XX_CP_DBG_ECO_CNTL, 0x0 },
-	{ A6XX_CP_PROTECT_CNTL, 0x0 },
-	{ A6XX_CP_PROTECT_REG, 0x0 },
-	{ A6XX_CP_PROTECT_REG+1, 0x0 },
-	{ A6XX_CP_PROTECT_REG+2, 0x0 },
-	{ A6XX_CP_PROTECT_REG+3, 0x0 },
-	{ A6XX_CP_PROTECT_REG+4, 0x0 },
-	{ A6XX_CP_PROTECT_REG+5, 0x0 },
-	{ A6XX_CP_PROTECT_REG+6, 0x0 },
-	{ A6XX_CP_PROTECT_REG+7, 0x0 },
-	{ A6XX_CP_PROTECT_REG+8, 0x0 },
-	{ A6XX_CP_PROTECT_REG+9, 0x0 },
-	{ A6XX_CP_PROTECT_REG+10, 0x0 },
-	{ A6XX_CP_PROTECT_REG+11, 0x0 },
-	{ A6XX_CP_PROTECT_REG+12, 0x0 },
-	{ A6XX_CP_PROTECT_REG+13, 0x0 },
-	{ A6XX_CP_PROTECT_REG+14, 0x0 },
-	{ A6XX_CP_PROTECT_REG+15, 0x0 },
-	{ A6XX_CP_PROTECT_REG+16, 0x0 },
-	{ A6XX_CP_PROTECT_REG+17, 0x0 },
-	{ A6XX_CP_PROTECT_REG+18, 0x0 },
-	{ A6XX_CP_PROTECT_REG+19, 0x0 },
-	{ A6XX_CP_PROTECT_REG+20, 0x0 },
-	{ A6XX_CP_PROTECT_REG+21, 0x0 },
-	{ A6XX_CP_PROTECT_REG+22, 0x0 },
-	{ A6XX_CP_PROTECT_REG+23, 0x0 },
-	{ A6XX_CP_PROTECT_REG+24, 0x0 },
-	{ A6XX_CP_PROTECT_REG+25, 0x0 },
-	{ A6XX_CP_PROTECT_REG+26, 0x0 },
-	{ A6XX_CP_PROTECT_REG+27, 0x0 },
-	{ A6XX_CP_PROTECT_REG+28, 0x0 },
-	{ A6XX_CP_PROTECT_REG+29, 0x0 },
-	{ A6XX_CP_PROTECT_REG+30, 0x0 },
-	{ A6XX_CP_PROTECT_REG+31, 0x0 },
-	{ A6XX_CP_AHB_CNTL, 0x0 },
+static u32 a6xx_ifpc_pwrup_reglist[] = {
+	A6XX_RBBM_VBIF_CLIENT_QOS_CNTL,
+	A6XX_CP_CHICKEN_DBG,
+	A6XX_CP_DBG_ECO_CNTL,
+	A6XX_CP_PROTECT_CNTL,
+	A6XX_CP_PROTECT_REG,
+	A6XX_CP_PROTECT_REG+1,
+	A6XX_CP_PROTECT_REG+2,
+	A6XX_CP_PROTECT_REG+3,
+	A6XX_CP_PROTECT_REG+4,
+	A6XX_CP_PROTECT_REG+5,
+	A6XX_CP_PROTECT_REG+6,
+	A6XX_CP_PROTECT_REG+7,
+	A6XX_CP_PROTECT_REG+8,
+	A6XX_CP_PROTECT_REG+9,
+	A6XX_CP_PROTECT_REG+10,
+	A6XX_CP_PROTECT_REG+11,
+	A6XX_CP_PROTECT_REG+12,
+	A6XX_CP_PROTECT_REG+13,
+	A6XX_CP_PROTECT_REG+14,
+	A6XX_CP_PROTECT_REG+15,
+	A6XX_CP_PROTECT_REG+16,
+	A6XX_CP_PROTECT_REG+17,
+	A6XX_CP_PROTECT_REG+18,
+	A6XX_CP_PROTECT_REG+19,
+	A6XX_CP_PROTECT_REG+20,
+	A6XX_CP_PROTECT_REG+21,
+	A6XX_CP_PROTECT_REG+22,
+	A6XX_CP_PROTECT_REG+23,
+	A6XX_CP_PROTECT_REG+24,
+	A6XX_CP_PROTECT_REG+25,
+	A6XX_CP_PROTECT_REG+26,
+	A6XX_CP_PROTECT_REG+27,
+	A6XX_CP_PROTECT_REG+28,
+	A6XX_CP_PROTECT_REG+29,
+	A6XX_CP_PROTECT_REG+30,
+	A6XX_CP_PROTECT_REG+31,
+	A6XX_CP_AHB_CNTL,
 };
 
-static struct reg_list_pair a615_pwrup_reglist[] = {
-	{ A6XX_UCHE_GBIF_GX_CONFIG, 0x0 },
+/* a620 and a650 need to program A6XX_CP_PROTECT_REG_47 for the infinite span */
+static u32 a650_pwrup_reglist[] = {
+	A6XX_CP_PROTECT_REG + 47,
 };
 
-static struct reg_list_pair a6xx_ifpc_perfctr_reglist[] = {
-	{ A6XX_RBBM_PERFCTR_CNTL, 0x0 },
+static u32 a615_pwrup_reglist[] = {
+	A6XX_UCHE_GBIF_GX_CONFIG,
+};
+
+static u32 a612_pwrup_reglist[] = {
+	A6XX_RBBM_PERFCTR_CNTL,
 };
 
 static void _update_always_on_regs(struct adreno_device *adreno_dev)
@@ -146,21 +112,6 @@ static void _update_always_on_regs(struct adreno_device *adreno_dev)
 		A6XX_CP_ALWAYS_ON_COUNTER_HI;
 }
 
-static void a6xx_pwrup_reglist_init(struct adreno_device *adreno_dev)
-{
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
-	if (kgsl_allocate_global(device, &adreno_dev->pwrup_reglist,
-		PAGE_SIZE, 0, KGSL_MEMDESC_CONTIG | KGSL_MEMDESC_PRIVILEGED,
-		"powerup_register_list")) {
-		adreno_dev->pwrup_reglist.gpuaddr = 0;
-		return;
-	}
-
-	kgsl_sharedmem_set(device, &adreno_dev->pwrup_reglist, 0, 0,
-		PAGE_SIZE);
-}
-
 static void a6xx_init(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -186,64 +137,42 @@ static void a6xx_init(struct adreno_device *adreno_dev)
 	 * If the GMU is not enabled, rewrite the offset for the always on
 	 * counters to point to the CP always on instead of GMU always on
 	 */
-	if (!gmu_core_isenabled(KGSL_DEVICE(adreno_dev)))
+	if (!gmu_core_isenabled(device))
 		_update_always_on_regs(adreno_dev);
 
-	a6xx_pwrup_reglist_init(adreno_dev);
+	kgsl_allocate_global(device, &adreno_dev->pwrup_reglist,
+		PAGE_SIZE, 0, KGSL_MEMDESC_CONTIG | KGSL_MEMDESC_PRIVILEGED,
+		"powerup_register_list");
 }
 
-/**
- * a6xx_protect_init() - Initializes register protection on a6xx
- * @device: Pointer to the device structure
- * Performs register writes to enable protected access to sensitive
- * registers
- */
 static void a6xx_protect_init(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	struct kgsl_protected_registers *mmu_prot =
-		kgsl_mmu_get_prot_regs(&device->mmu);
-	int i, num_sets;
-	int req_sets = ARRAY_SIZE(a6xx_protected_regs_group);
-	int max_sets = adreno_dev->gpucore->num_protected_regs;
-	unsigned int mmu_base = 0, mmu_range = 0, cur_range;
+	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
+	const struct a6xx_protected_regs *regs = a6xx_core->protected_regs;
+	int i;
 
-	/* enable access protection to privileged registers */
-	kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL, 0x00000003);
+	/*
+	 * Enable access protection to privileged registers, fault on an access
+	 * protect violation and select the last span to protect from the start
+	 * address all the way to the end of the register address space
+	 */
+	kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL,
+		(1 << 0) | (1 << 1) | (1 << 3));
 
-	if (mmu_prot) {
-		mmu_base = mmu_prot->base;
-		mmu_range = mmu_prot->range;
-		req_sets += DIV_ROUND_UP(mmu_range, 0x2000);
-	}
+	/* Program each register defined by the core definition */
+	for (i = 0; regs[i].reg; i++) {
+		u32 count;
 
-	WARN(req_sets > max_sets,
-		"Size exceeds the num of protection regs available\n");
+		/*
+		 * This is the offset of the end register as counted from the
+		 * start, i.e. # of registers in the range - 1
+		 */
+		count = regs[i].end - regs[i].start;
 
-	/* Protect GPU registers */
-	num_sets = min_t(unsigned int,
-		ARRAY_SIZE(a6xx_protected_regs_group), max_sets);
-	for (i = 0; i < num_sets; i++) {
-		struct a6xx_protected_regs *regs =
-					&a6xx_protected_regs_group[i];
-
-		kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
-				regs->base | (regs->count << 18) |
-				(regs->read_protect << 31));
-	}
-
-	/* Protect MMU registers */
-	if (mmu_prot) {
-		while ((i < max_sets) && (mmu_range > 0)) {
-			cur_range = min_t(unsigned int, mmu_range,
-						0x2000);
-			kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
-				mmu_base | ((cur_range - 1) << 18) | (1 << 31));
-
-			mmu_base += cur_range;
-			mmu_range -= cur_range;
-			i++;
-		}
+		kgsl_regwrite(device, regs[i].reg,
+			regs[i].start | (count << 18) |
+			(regs[i].noaccess << 31));
 	}
 }
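
Concretely, for a hypothetical entry (values invented for illustration) protecting registers 0x600 through 0x6ff with @noaccess set, the loop above computes:

```c
/* Hypothetical span: dword registers 0x600..0x6ff, reads blocked too */
const struct a6xx_protected_regs example = {
	.reg = A6XX_CP_PROTECT_REG,	/* first protect slot */
	.start = 0x600,
	.end = 0x6ff,
	.noaccess = 1,
};

u32 count = example.end - example.start;	/* 0xff */
u32 val = example.start | (count << 18) | (example.noaccess << 31);
/* val == 0x600 | 0x03fc0000 | 0x80000000 == 0x83fc0600 */
```
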
 
@@ -375,80 +304,63 @@ static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
 		on ? __get_rbbm_clock_cntl_on(adreno_dev) : 0);
 }
 
+struct a6xx_reglist_list {
+	u32 *regs;
+	u32 count;
+};
+
+#define REGLIST(_a) \
+	 (struct a6xx_reglist_list) { .regs = _a, .count = ARRAY_SIZE(_a), }
+
 static void a6xx_patch_pwrup_reglist(struct adreno_device *adreno_dev)
 {
-	uint32_t i;
-	struct cpu_gpu_lock *lock;
-	struct reg_list_pair *r;
+	struct a6xx_reglist_list reglist[3];
+	void *ptr = adreno_dev->pwrup_reglist.hostptr;
+	struct cpu_gpu_lock *lock = ptr;
+	int items = 0, i, j;
+	u32 *dest = ptr + sizeof(*lock);
 
-	/* Set up the register values */
-	for (i = 0; i < ARRAY_SIZE(a6xx_ifpc_pwrup_reglist); i++) {
-		r = &a6xx_ifpc_pwrup_reglist[i];
-		kgsl_regread(KGSL_DEVICE(adreno_dev), r->offset, &r->val);
+	/* Static IFPC-only registers */
+	reglist[items++] = REGLIST(a6xx_ifpc_pwrup_reglist);
+
+	/* Static IFPC + preemption registers */
+	reglist[items++] = REGLIST(a6xx_pwrup_reglist);
+
+	/* Add target specific registers */
+	if (adreno_is_a612(adreno_dev))
+		reglist[items++] = REGLIST(a612_pwrup_reglist);
+	else if (adreno_is_a615_family(adreno_dev))
+		reglist[items++] = REGLIST(a615_pwrup_reglist);
+	else if (adreno_is_a650(adreno_dev) || adreno_is_a620(adreno_dev))
+		reglist[items++] = REGLIST(a650_pwrup_reglist);
+
+	/*
+	 * For each entry in each of the lists, write the offset and the current
+	 * register value into the GPU buffer
+	 */
+	for (i = 0; i < items; i++) {
+		u32 *r = reglist[i].regs;
+
+		for (j = 0; j < reglist[i].count; j++) {
+			*dest++ = r[j];
+			kgsl_regread(KGSL_DEVICE(adreno_dev), r[j], dest++);
+		}
+
+		lock->list_length += reglist[i].count * 2;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(a6xx_pwrup_reglist); i++) {
-		r = &a6xx_pwrup_reglist[i];
-		kgsl_regread(KGSL_DEVICE(adreno_dev), r->offset, &r->val);
-	}
-
-	lock = (struct cpu_gpu_lock *) adreno_dev->pwrup_reglist.hostptr;
-	lock->flag_ucode = 0;
-	lock->flag_kmd = 0;
-	lock->turn = 0;
-
 	/*
 	 * The overall register list is composed of
 	 * 1. Static IFPC-only registers
 	 * 2. Static IFPC + preemption registers
-	 * 2. Dynamic IFPC + preemption registers (ex: perfcounter selects)
+	 * 3. Dynamic IFPC + preemption registers (ex: perfcounter selects)
 	 *
 	 * The CP views the second and third entries as one dynamic list
-	 * starting from list_offset. Thus, list_length should be the sum
-	 * of all three lists above (of which the third list will start off
-	 * empty). And list_offset should be specified as the size in dwords
-	 * of the static IFPC-only register list.
+	 * starting from list_offset. list_length should be the total dwords in
+	 * all the lists and list_offset should be specified as the size in
+	 * dwords of the first entry in the list.
 	 */
-	lock->list_length = (sizeof(a6xx_ifpc_pwrup_reglist) +
-			sizeof(a6xx_pwrup_reglist)) >> 2;
-	lock->list_offset = sizeof(a6xx_ifpc_pwrup_reglist) >> 2;
-
-	memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock),
-		a6xx_ifpc_pwrup_reglist, sizeof(a6xx_ifpc_pwrup_reglist));
-	memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
-		+ sizeof(a6xx_ifpc_pwrup_reglist), a6xx_pwrup_reglist,
-		sizeof(a6xx_pwrup_reglist));
-
-	if (adreno_is_a615_family(adreno_dev)) {
-		for (i = 0; i < ARRAY_SIZE(a615_pwrup_reglist); i++) {
-			r = &a615_pwrup_reglist[i];
-			kgsl_regread(KGSL_DEVICE(adreno_dev),
-				r->offset, &r->val);
-		}
-
-		memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
-			+ sizeof(a6xx_ifpc_pwrup_reglist)
-			+ sizeof(a6xx_pwrup_reglist), a615_pwrup_reglist,
-			sizeof(a615_pwrup_reglist));
-
-		lock->list_length += sizeof(a615_pwrup_reglist) >> 2;
-	}
-
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_PERFCTRL_RETAIN)) {
-		for (i = 0; i < ARRAY_SIZE(a6xx_ifpc_perfctr_reglist); i++) {
-			r = &a6xx_ifpc_perfctr_reglist[i];
-			kgsl_regread(KGSL_DEVICE(adreno_dev),
-				r->offset, &r->val);
-		}
-
-		memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
-				+ sizeof(a6xx_ifpc_pwrup_reglist)
-				+ sizeof(a6xx_pwrup_reglist),
-				a6xx_ifpc_perfctr_reglist,
-				sizeof(a6xx_ifpc_perfctr_reglist));
-
-		lock->list_length += sizeof(a6xx_ifpc_perfctr_reglist) >> 2;
-	}
+	lock->list_offset = reglist[0].count * 2;
 }
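
After this function runs, the shared buffer looks roughly as sketched below, assuming the cpu_gpu_lock header sits at offset 0 as the pointer math above implies (Nt is the optional target-specific list):

```c
/*
 * pwrup_reglist layout after a6xx_patch_pwrup_reglist():
 *
 *   +-----------------------------+ 0
 *   | struct cpu_gpu_lock header  |  flags, turn, list_offset, list_length
 *   +-----------------------------+ sizeof(*lock)
 *   | { offset, value } x N0      |  static IFPC-only pairs
 *   +-----------------------------+  <- list_offset = N0 * 2 dwords
 *   | { offset, value } x N1      |  static IFPC + preemption pairs
 *   | { offset, value } x Nt      |  target-specific pairs (optional)
 *   +-----------------------------+
 *   | dynamic entries             |  appended by a6xx_perfcounter_update()
 *   +-----------------------------+
 *
 * list_length counts every dword after the header: (N0 + N1 + Nt) * 2.
 */
```
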
 
 /*
@@ -543,6 +455,11 @@ static void a6xx_start(struct adreno_device *adreno_dev)
 	/* Turn on performance counters */
 	kgsl_regwrite(device, A6XX_RBBM_PERFCTR_CNTL, 0x1);
 
+	/* Turn on the IFPC counter (countable 4 on XOCLK4) */
+	if (gmu_core_isenabled(device))
+		gmu_core_regrmw(device, A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1,
+			0xff, 0x4);
+
 	/* Turn on GX_MEM retention */
 	if (gmu_core_isenabled(device) && adreno_is_a612(adreno_dev)) {
 		kgsl_regwrite(device, A6XX_RBBM_BLOCK_GX_RETENTION_CNTL, 0x7FB);
@@ -600,9 +517,8 @@ static void a6xx_start(struct adreno_device *adreno_dev)
 	kgsl_regwrite(device, A6XX_UCHE_MODE_CNTL, (mal << 23) |
 		(lower_bit << 21));
 
-	/* Set hang detection threshold to 0x3FFFFF * 16 cycles */
 	kgsl_regwrite(device, A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
-					(1 << 30) | 0x3fffff);
+				(1 << 30) | a6xx_core->hang_detect_cycles);
 
 	kgsl_regwrite(device, A6XX_UCHE_CLIENT_PF, 1);
 
@@ -631,6 +547,14 @@ static void a6xx_start(struct adreno_device *adreno_dev)
 		kgsl_regwrite(device, A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
 			0x1);
 
+	/*
+	 * Enable GMU power counter 0 to count GPU busy. This is applicable to
+	 * all a6xx targets
+	 */
+	kgsl_regwrite(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
+	kgsl_regrmw(device, A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);
+	kgsl_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0x1);
+
 	a6xx_protect_init(adreno_dev);
 
 	if (!patch_reglist && (adreno_dev->pwrup_reglist.gpuaddr != 0)) {
@@ -1057,7 +981,6 @@ static int a6xx_microcode_read(struct adreno_device *adreno_dev)
 {
 	int ret;
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
 	struct adreno_firmware *sqe_fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
 	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
 
@@ -1067,16 +990,6 @@ static int a6xx_microcode_read(struct adreno_device *adreno_dev)
 			return ret;
 	}
 
-	ret = gmu_core_dev_load_firmware(device);
-	if (ret)
-		return ret;
-
-	ret = gmu_memory_probe(device);
-	if (ret)
-		return ret;
-
-	hfi_init(gmu);
-
 	return 0;
 }
 
@@ -1104,27 +1017,26 @@ static int a6xx_soft_reset(struct adreno_device *adreno_dev)
 	return 0;
 }
 
-/* Number of throttling counters for A6xx */
-#define A6XX_GMU_THROTTLE_COUNTERS 3
-
 static int64_t a6xx_read_throttling_counters(struct adreno_device *adreno_dev)
 {
-	int i;
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	int64_t adj = -1;
-	uint32_t counts[A6XX_GMU_THROTTLE_COUNTERS];
+	u32 a, b, c;
 	struct adreno_busy_data *busy = &adreno_dev->busy_data;
 
 	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
 		return 0;
 
-	for (i = 0; i < ARRAY_SIZE(counts); i++) {
-		if (!adreno_dev->gpmu_throttle_counters[i])
-			counts[i] = 0;
-		else
-			counts[i] = counter_delta(KGSL_DEVICE(adreno_dev),
-					adreno_dev->gpmu_throttle_counters[i],
-					&busy->throttle_cycles[i]);
-	}
+	/* The counters are selected in a6xx_gmu_enable_lm() */
+	a = counter_delta(device, A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L,
+		&busy->throttle_cycles[0]);
+
+	b = counter_delta(device, A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L,
+		&busy->throttle_cycles[1]);
+
+	c = counter_delta(device, A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L,
+		&busy->throttle_cycles[2]);
 
 	/*
 	 * The adjustment is the number of cycles lost to throttling, which
@@ -1134,10 +1046,10 @@ static int64_t a6xx_read_throttling_counters(struct adreno_device *adreno_dev)
 	 * to remove them to prevent appearing to be busier than
 	 * we actually are.
 	 */
-	adj *= ((counts[0] * 15) + (counts[1] * 50) + (counts[2] * 90)) / 100;
+	adj *= ((a * 15) + (b * 50) + (c * 90)) / 100;
 
-	trace_kgsl_clock_throttling(0, counts[1], counts[2],
-			counts[0], adj);
+	trace_kgsl_clock_throttling(0, b, c, a, adj);
+
 	return adj;
 }
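
A worked example of the weighting above, with invented counter deltas: a = 1000 cycles throttled at ~15%, b = 400 at ~50%, c = 100 at ~90%.

```c
u32 a = 1000, b = 400, c = 100;		/* hypothetical XOCLK deltas */
s64 adj = -1;

adj *= ((a * 15) + (b * 50) + (c * 90)) / 100;
/* (15000 + 20000 + 9000) / 100 == 440, so adj == -440 lost cycles */
```
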
 
@@ -2211,50 +2123,11 @@ static struct adreno_perfcount_register a6xx_perfcounters_gbif_pwr[] = {
 		A6XX_GBIF_PWR_CNT_HIGH2, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
 };
 
-static struct adreno_perfcount_register a6xx_perfcounters_pwr[] = {
-	{ KGSL_PERFCOUNTER_BROKEN, 0, 0, 0, 0, -1, 0 },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H, -1, 0 },
-};
-
 static struct adreno_perfcount_register a6xx_perfcounters_alwayson[] = {
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_CP_ALWAYS_ON_COUNTER_LO,
 		A6XX_CP_ALWAYS_ON_COUNTER_HI, -1 },
 };
 
-static struct adreno_perfcount_register a6xx_pwrcounters_gpmu[] = {
-	/*
-	 * A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0 is used for the GPU
-	 * busy count (see the PWR group above). Mark it as broken
-	 * so it's not re-used.
-	 */
-	{ KGSL_PERFCOUNTER_BROKEN, 0, 0,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H, -1,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H, -1,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H, -1,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H, -1,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H, -1,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H, -1,
-		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, },
-};
-
 /*
  * ADRENO_PERFCOUNTER_GROUP_RESTORE flag is enabled by default
  * because most of the perfcounter groups need to be restored
@@ -2293,11 +2166,8 @@ static struct adreno_perfcount_group a6xx_perfcounter_groups
 	A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF, vbif, 0),
 	A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
 		ADRENO_PERFCOUNTER_GROUP_FIXED),
-	A6XX_PERFCOUNTER_GROUP_FLAGS(PWR, pwr,
-		ADRENO_PERFCOUNTER_GROUP_FIXED),
 	A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
 		ADRENO_PERFCOUNTER_GROUP_FIXED),
-	A6XX_POWER_COUNTER_GROUP(GPMU, gpmu),
 };
 
 static struct adreno_perfcounters a6xx_perfcounters = {
@@ -2305,33 +2175,6 @@ static struct adreno_perfcounters a6xx_perfcounters = {
 	ARRAY_SIZE(a6xx_perfcounter_groups),
 };
 
-/* Program the GMU power counter to count GPU busy cycles */
-static int a6xx_enable_pwr_counters(struct adreno_device *adreno_dev,
-		unsigned int counter)
-{
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
-	/*
-	 * We have a limited number of power counters. Since we're not using
-	 * total GPU cycle count, return error if requested.
-	 */
-	if (counter == 0)
-		return -EINVAL;
-
-	/* We can use GPU without GMU and allow it to count GPU busy cycles */
-	if (!gmu_core_isenabled(device) &&
-			!kgsl_is_register_offset(device,
-				A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK))
-		return -ENODEV;
-
-	kgsl_regwrite(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xFF000000);
-	kgsl_regrmw(device,
-			A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xFF, 0x20);
-	kgsl_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0x1);
-
-	return 0;
-}
-
 static void a6xx_efuse_speed_bin(struct adreno_device *adreno_dev)
 {
 	unsigned int val;
@@ -2391,8 +2234,24 @@ static void a6xx_platform_setup(struct adreno_device *adreno_dev)
 		gpudev->vbif_xin_halt_ctrl0_mask =
 				A6XX_VBIF_XIN_HALT_CTRL0_MASK;
 
+	/* Set the GPU busy counter for frequency scaling */
+	adreno_dev->perfctr_pwr_lo = A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L;
+
+	/* Set the counter for IFPC */
+	if (gmu_core_isenabled(KGSL_DEVICE(adreno_dev)))
+		adreno_dev->perfctr_ifpc_lo =
+			A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L;
+
 	/* Check efuse bits for various capabilities */
 	a6xx_check_features(adreno_dev);
+
+	/*
+	 * A640 GPUs used a fuse to determine which frequency plan to
+	 * use for the GPU. For A650 GPUs enable using higher frequencies
+	 * based on the LM feature flag.
+	 */
+	if (adreno_is_a650(adreno_dev) && ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+		adreno_dev->speed_bin = 1;
 }
 
 
@@ -2557,33 +2416,20 @@ static const struct adreno_reg_offsets a6xx_reg_offsets = {
 	.offset_0 = ADRENO_REG_REGISTER_MAX,
 };
 
-static void a6xx_perfcounter_init(struct adreno_device *adreno_dev)
+static int cpu_gpu_lock(struct cpu_gpu_lock *lock)
 {
-	/*
-	 * A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4/5 is not present on A612.
-	 * Mark them as broken so that they can't be used.
-	 */
-	if (adreno_is_a612(adreno_dev)) {
-		a6xx_pwrcounters_gpmu[4].countable = KGSL_PERFCOUNTER_BROKEN;
-		a6xx_pwrcounters_gpmu[5].countable = KGSL_PERFCOUNTER_BROKEN;
-	}
-}
-
-static int a6xx_perfcounter_update(struct adreno_device *adreno_dev,
-	struct adreno_perfcount_register *reg, bool update_reg)
-{
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	struct cpu_gpu_lock *lock = adreno_dev->pwrup_reglist.hostptr;
-	struct reg_list_pair *reg_pair = (struct reg_list_pair *)(lock + 1);
-	unsigned int i;
 	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-	int ret = 0;
 
+	/* Indicate that the CPU wants the lock */
 	lock->flag_kmd = 1;
-	/* Write flag_kmd before turn */
+
+	/* Post the request */
 	wmb();
+
+	/* Concede the turn to the GPU before we spin */
 	lock->turn = 0;
-	/* Write these fields before looping */
+
+	/* Finish all memory transactions before moving on */
 	mb();
 
 	/*
@@ -2595,60 +2441,76 @@ static int a6xx_perfcounter_update(struct adreno_device *adreno_dev,
 		cpu_relax();
 		/* Get the latest updates from GPU */
 		rmb();
-		/*
-		 * Make sure we wait at least 1sec for the lock,
-		 * if we did not get it after 1sec return an error.
-		 */
-		if (time_after(jiffies, timeout) &&
-			(lock->flag_ucode == 1 && lock->turn == 0)) {
-			ret = -EBUSY;
-			goto unlock;
-		}
+
+		if (time_after(jiffies, timeout))
+			break;
 	}
 
-	/* Read flag_ucode and turn before list_length */
-	rmb();
+	if (lock->flag_ucode == 1 && lock->turn == 0)
+		return -EBUSY;
+
+	return 0;
+}
+
+static void cpu_gpu_unlock(struct cpu_gpu_lock *lock)
+{
+	/* Make sure all writes are done before releasing the lock */
+	wmb();
+	lock->flag_kmd = 0;
+}
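
Taken together, cpu_gpu_lock()/cpu_gpu_unlock() form a two-party handshake in the spirit of Peterson's algorithm: each side raises its own flag, concedes the turn, and spins only while the peer both wants the lock and holds the turn. A sketch of the shared structure as the code above uses it (field ordering is an assumption; the real definition lives elsewhere in the driver):

```c
struct cpu_gpu_lock_sketch {
	u32 flag_ucode;		/* set while the GPU microcode wants/holds it */
	u32 flag_kmd;		/* set while the kernel driver wants/holds it */
	u32 turn;		/* tie-breaker; 0 concedes the turn to the GPU */
	u32 list_length;	/* dwords of reglist data after the header */
	u32 list_offset;	/* dword offset of the dynamic portion */
};
```
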
+
+static int a6xx_perfcounter_update(struct adreno_device *adreno_dev,
+	struct adreno_perfcount_register *reg, bool update_reg)
+{
+	void *ptr = adreno_dev->pwrup_reglist.hostptr;
+	struct cpu_gpu_lock *lock = ptr;
+	u32 *data = ptr + sizeof(*lock);
+	int i, offset = 0;
+
+	if (cpu_gpu_lock(lock)) {
+		cpu_gpu_unlock(lock);
+		return -EBUSY;
+	}
+
 	/*
 	 * If the perfcounter select register is already present in reglist
 	 * update it, otherwise append the <select register, value> pair to
 	 * the end of the list.
 	 */
-	for (i = 0; i < lock->list_length >> 1; i++)
-		if (reg_pair[i].offset == reg->select)
-			break;
-	/*
-	 * If the perfcounter selct register is not present overwrite last entry
-	 * with new entry and add RBBM perf counter enable at the end.
-	 */
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_PERFCTRL_RETAIN) &&
-			(i == lock->list_length >> 1)) {
-		reg_pair[i-1].offset = reg->select;
-		reg_pair[i-1].val = reg->countable;
+	for (i = 0; i < lock->list_length >> 1; i++) {
+		if (data[offset] == reg->select) {
+			data[offset + 1] = reg->countable;
+			goto update;
+		}
 
-		/* Enable perf counter after performance counter selections */
-		reg_pair[i].offset = A6XX_RBBM_PERFCTR_CNTL;
-		reg_pair[i].val = 1;
-
-	} else {
-		/*
-		 * If perf counter select register is already present in reglist
-		 * just update list without adding the RBBM perfcontrol enable.
-		 */
-		reg_pair[i].offset = reg->select;
-		reg_pair[i].val = reg->countable;
+		offset += 2;
 	}
 
-	if (i == lock->list_length >> 1)
-		lock->list_length += 2;
+	/*
+	 * For a612 targets A6XX_RBBM_PERFCTR_CNTL needs to be the last entry,
+	 * so overwrite the existing A6XX_RBBM_PERFCTR_CNTL and add it back to
+	 * the end. All other targets just append the new counter to the end.
+	 */
+	if (adreno_is_a612(adreno_dev)) {
+		data[offset - 2] = reg->select;
+		data[offset - 1] = reg->countable;
 
+		data[offset] = A6XX_RBBM_PERFCTR_CNTL;
+		data[offset + 1] = 1;
+	} else {
+		data[offset] = reg->select;
+		data[offset + 1] = reg->countable;
+	}
+
+	lock->list_length += 2;
+
+update:
 	if (update_reg)
-		kgsl_regwrite(device, reg->select, reg->countable);
+		kgsl_regwrite(KGSL_DEVICE(adreno_dev), reg->select,
+			reg->countable);
 
-unlock:
-	/* All writes done before releasing the lock */
-	wmb();
-	lock->flag_kmd = 0;
-	return ret;
+	cpu_gpu_unlock(lock);
+	return 0;
 }
 
 struct adreno_gpudev adreno_a6xx_gpudev = {
@@ -2664,7 +2526,6 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
 	.regulator_enable = a6xx_sptprac_enable,
 	.regulator_disable = a6xx_sptprac_disable,
 	.perfcounters = &a6xx_perfcounters,
-	.enable_pwr_counters = a6xx_enable_pwr_counters,
 	.read_throttling_counters = a6xx_read_throttling_counters,
 	.microcode_read = a6xx_microcode_read,
 	.enable_64bit = a6xx_enable_64bit,
@@ -2686,7 +2547,6 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
 	.preemption_context_destroy = a6xx_preemption_context_destroy,
 	.sptprac_is_on = a6xx_sptprac_is_on,
 	.ccu_invalidate = a6xx_ccu_invalidate,
-	.perfcounter_init = a6xx_perfcounter_init,
 	.perfcounter_update = a6xx_perfcounter_update,
 	.coresight = {&a6xx_coresight, &a6xx_coresight_cx},
 };
diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h
index ae6839a..6f88028f 100644
--- a/drivers/gpu/msm/adreno_a6xx.h
+++ b/drivers/gpu/msm/adreno_a6xx.h
@@ -11,6 +11,26 @@
 #include "a6xx_reg.h"
 
 /**
+ * struct a6xx_protected_regs - container for a protect register span
+ */
+struct a6xx_protected_regs {
+	/** @reg: Physical protected mode register to write to */
+	u32 reg;
+	/** @start: Dword offset of the starting register in the range */
+	u32 start;
+	/**
+	 * @end: Dword offset of the ending register in the range
+	 * (inclusive)
+	 */
+	u32 end;
+	/**
+	 * @noaccess: 1 if the register should not be accessible from
+	 * userspace, 0 if it can be read (but not written)
+	 */
+	u32 noaccess;
+};
+
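Each target's core definition is expected to supply these as a zero-terminated array, since a6xx_protect_init() loops until it hits an entry whose @reg is 0. A hypothetical two-span table (ranges invented for illustration):

```c
/* Illustrative only: the spans below are made up, not a real target's */
static const struct a6xx_protected_regs example_protected_regs[] = {
	{ A6XX_CP_PROTECT_REG + 0, 0x600, 0x650, 0 },	/* writes blocked */
	{ A6XX_CP_PROTECT_REG + 1, 0x9e70, 0x9fff, 1 },	/* all access blocked */
	{ 0 },	/* sentinel: stops the a6xx_protect_init() loop */
};
```
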
+/**
  * struct adreno_a6xx_core - a6xx specific GPU core definitions
  */
 struct adreno_a6xx_core {
@@ -42,6 +62,10 @@ struct adreno_a6xx_core {
 	bool veto_fal10;
 	/** @pdc_in_aop: True if PDC programmed in AOP */
 	bool pdc_in_aop;
+	/** @hang_detect_cycles: Hang detect counter timeout value */
+	u32 hang_detect_cycles;
+	/** @protected_regs: Array of protected registers for the target */
+	const struct a6xx_protected_regs *protected_regs;
 };
 
 #define CP_CLUSTER_FE		0x0
diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c
index 4c1856fc..91978af 100644
--- a/drivers/gpu/msm/adreno_a6xx_gmu.c
+++ b/drivers/gpu/msm/adreno_a6xx_gmu.c
@@ -228,7 +228,7 @@ static int a6xx_load_pdc_ucode(struct kgsl_device *device)
 	_regwrite(cfg, PDC_GPU_TCS3_CMD0_MSGID + PDC_CMD_OFFSET, 0x10108);
 	_regwrite(cfg, PDC_GPU_TCS3_CMD0_ADDR + PDC_CMD_OFFSET, 0x30000);
 
-	if (adreno_is_a618(adreno_dev) || adreno_is_a650(adreno_dev))
+	if (adreno_is_a618(adreno_dev) || adreno_is_a650_family(adreno_dev))
 		_regwrite(cfg, PDC_GPU_TCS3_CMD0_DATA + PDC_CMD_OFFSET, 0x2);
 	else
 		_regwrite(cfg, PDC_GPU_TCS3_CMD0_DATA + PDC_CMD_OFFSET, 0x3);
@@ -1370,40 +1370,10 @@ static int a6xx_gmu_rpmh_gpu_pwrctrl(struct kgsl_device *device,
 	return ret;
 }
 
-static int _setup_throttling_counter(struct adreno_device *adreno_dev,
-						int countable, u32 *offset)
-{
-	if (*offset)
-		return 0;
-
-	return adreno_perfcounter_get(adreno_dev,
-			KGSL_PERFCOUNTER_GROUP_GPMU_PWR,
-			countable, offset, NULL,
-			PERFCOUNTER_FLAG_KERNEL);
-}
-
-static void _setup_throttling_counters(struct adreno_device *adreno_dev)
-{
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
-	int ret;
-
-	ret = _setup_throttling_counter(adreno_dev, 0x10,
-				&adreno_dev->gpmu_throttle_counters[0]);
-	ret |= _setup_throttling_counter(adreno_dev, 0x15,
-				&adreno_dev->gpmu_throttle_counters[1]);
-	ret |= _setup_throttling_counter(adreno_dev, 0x19,
-				&adreno_dev->gpmu_throttle_counters[2]);
-
-	if (ret)
-		dev_err_once(&gmu->pdev->dev,
-			"Could not get all the throttling counters for LM\n");
-
-}
-
 void a6xx_gmu_enable_lm(struct kgsl_device *device)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	u32 val;
 
 	memset(adreno_dev->busy_data.throttle_cycles, 0,
 		sizeof(adreno_dev->busy_data.throttle_cycles));
@@ -1412,7 +1382,20 @@ void a6xx_gmu_enable_lm(struct kgsl_device *device)
 			!test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
 		return;
 
-	_setup_throttling_counters(adreno_dev);
+	/*
+	 * For throttling, use the following counters for throttled cycles:
+	 * XOCLK1: countable 0x10
+	 * XOCLK2: countable 0x15
+	 * XOCLK3: countable 0x19
+	 *
+	 * POWER_COUNTER_SELECT_0 controls counters 0 - 3; each selector
+	 * is 8 bits wide.
+	 */
+	val = (0x10 << 8) | (0x15 << 16) | (0x19 << 24);
+
+	/* Make sure not to write over XOCLK0 */
+	gmu_core_regrmw(device, A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
+		0xffffff00, val);
 
 	gmu_core_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 1);
 }
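
Worked out, the selector word is small enough to verify by hand; the read-modify-write mask keeps byte 0, where a6xx_start() programs the XOCLK0 busy countable (0x20):

```c
u32 val = (0x10 << 8) | (0x15 << 16) | (0x19 << 24);
/* 0x00001000 | 0x00150000 | 0x19000000 == 0x19151000 */

/* mask 0xffffff00 rewrites bytes 1-3 only; byte 0 (XOCLK0) is preserved */
```
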
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 7842dd9..5b4ae58 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -114,19 +114,6 @@ static void adreno_perfcounter_write(struct adreno_device *adreno_dev,
 }
 
 /**
- * adreno_perfcounter_close() - Release counters initialized by
- * adreno_perfcounter_close
- * @adreno_dev: Pointer to an adreno_device struct
- */
-void adreno_perfcounter_close(struct adreno_device *adreno_dev)
-{
-	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
-
-	if (gpudev->perfcounter_close)
-		gpudev->perfcounter_close(adreno_dev);
-}
-
-/**
  * adreno_perfcounter_restore() - Restore performance counters
  * @adreno_dev: adreno device to configure
  *
@@ -869,7 +856,6 @@ static int adreno_perfcounter_enable(struct adreno_device *adreno_dev,
 	unsigned int group, unsigned int counter, unsigned int countable)
 {
 	struct adreno_perfcounters *counters = ADRENO_PERFCOUNTERS(adreno_dev);
-	struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);
 
 	if (counters == NULL)
 		return -EINVAL;
@@ -885,8 +871,6 @@ static int adreno_perfcounter_enable(struct adreno_device *adreno_dev,
 		/* alwayson counter is global, so init value is 0 */
 		break;
 	case KGSL_PERFCOUNTER_GROUP_PWR:
-		if (gpudev->enable_pwr_counters)
-			return gpudev->enable_pwr_counters(adreno_dev, counter);
 		return 0;
 	case KGSL_PERFCOUNTER_GROUP_VBIF:
 		if (countable > VBIF2_PERF_CNT_SEL_MASK)
@@ -948,10 +932,6 @@ static uint64_t _perfcounter_read_pwr(struct adreno_device *adreno_dev,
 
 	reg = &group->regs[counter];
 
-	/* Remember, counter 0 is not emulated on 5XX */
-	if (adreno_is_a5xx(adreno_dev) && (counter == 0))
-		return -EINVAL;
-
 	if (adreno_is_a3xx(adreno_dev)) {
 		/* On A3XX we need to freeze the counter so we can read it */
 		if (counter == 0)
diff --git a/drivers/gpu/msm/adreno_perfcounter.h b/drivers/gpu/msm/adreno_perfcounter.h
index 273c5aa..f5310d4 100644
--- a/drivers/gpu/msm/adreno_perfcounter.h
+++ b/drivers/gpu/msm/adreno_perfcounter.h
@@ -109,8 +109,6 @@ int adreno_perfcounter_query_group(struct adreno_device *adreno_dev,
 int adreno_perfcounter_read_group(struct adreno_device *adreno_dev,
 	struct kgsl_perfcounter_read_group __user *reads, unsigned int count);
 
-void adreno_perfcounter_close(struct adreno_device *adreno_dev);
-
 void adreno_perfcounter_restore(struct adreno_device *adreno_dev);
 
 void adreno_perfcounter_save(struct adreno_device *adreno_dev);
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 3b49324..60ddbfc 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -7,12 +7,15 @@
 #include <linux/slab.h>
 
 #include "a3xx_reg.h"
+#include "a5xx_reg.h"
+#include "a6xx_reg.h"
 #include "adreno.h"
 #include "adreno_pm4types.h"
 #include "adreno_ringbuffer.h"
 #include "adreno_trace.h"
 #include "kgsl_trace.h"
 
 #define RB_HOSTPTR(_rb, _pos) \
 	((unsigned int *) ((_rb)->buffer_desc.hostptr + \
 		((_pos) * sizeof(unsigned int))))
@@ -791,18 +794,21 @@ static inline int _get_alwayson_counter(struct adreno_device *adreno_dev,
 	*p++ = cp_mem_packet(adreno_dev, CP_REG_TO_MEM, 2, 1);
 
 	/*
-	 * For a4x and some a5x the alwayson_hi read through CPU
+	 * For some a5x the alwayson_hi read through CPU
 	 * will be masked. Only do 32 bit CP reads for keeping the
 	 * numbers consistent
 	 */
-	if (ADRENO_GPUREV(adreno_dev) >= 400 &&
-		ADRENO_GPUREV(adreno_dev) <= ADRENO_REV_A530)
-		*p++ = adreno_getreg(adreno_dev,
-			ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO);
-	else
-		*p++ = adreno_getreg(adreno_dev,
-			ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO) |
+	if (adreno_is_a5xx(adreno_dev)) {
+		if (ADRENO_GPUREV(adreno_dev) <= ADRENO_REV_A530)
+			*p++ = A5XX_RBBM_ALWAYSON_COUNTER_LO;
+		else
+			*p++ = A5XX_RBBM_ALWAYSON_COUNTER_LO |
+				(1 << 30) | (2 << 18);
+	} else if (adreno_is_a6xx(adreno_dev)) {
+		*p++ = A6XX_CP_ALWAYS_ON_COUNTER_LO |
 			(1 << 30) | (2 << 18);
+	}
+
 	p += cp_gpuaddr(adreno_dev, p, gpuaddr);
 
 	return (unsigned int)(p - cmds);
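
The extra bits OR'd into the source field follow the usual adreno CP_REG_TO_MEM packing (the positions below match the public freedreno register definitions, but treat them as an assumption here): the dword count lands at bit 18 and bit 30 requests a single 64-bit read.

```c
/*
 * Assumed CP_REG_TO_MEM source-dword layout (illustrative):
 *   bits  0..17  register offset to read
 *   bits 18..29  number of dwords to copy (2 == LO + HI)
 *   bit  30      do one 64-bit read instead of two 32-bit reads
 */
u32 src = A6XX_CP_ALWAYS_ON_COUNTER_LO | (2 << 18) | (1 << 30);
```
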
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index 8289f52..8d2a97e 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -738,7 +738,7 @@ static void setup_fault_process(struct kgsl_device *device,
 	if (kgsl_mmu_is_perprocess(&device->mmu)) {
 		struct kgsl_process_private *tmp;
 
-		mutex_lock(&kgsl_driver.process_mutex);
+		spin_lock(&kgsl_driver.proclist_lock);
 		list_for_each_entry(tmp, &kgsl_driver.process_list, list) {
 			u64 pt_ttbr0;
 
@@ -749,7 +749,7 @@ static void setup_fault_process(struct kgsl_device *device,
 				break;
 			}
 		}
-		mutex_unlock(&kgsl_driver.process_mutex);
+		spin_unlock(&kgsl_driver.proclist_lock);
 	}
 done:
 	snapshot->process = process;
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 1b298f9..2257294 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -921,7 +921,7 @@ struct kgsl_process_private *kgsl_process_private_find(pid_t pid)
 {
 	struct kgsl_process_private *p, *private = NULL;
 
-	mutex_lock(&kgsl_driver.process_mutex);
+	spin_lock(&kgsl_driver.proclist_lock);
 	list_for_each_entry(p, &kgsl_driver.process_list, list) {
 		if (p->pid == pid) {
 			if (kgsl_process_private_get(p))
@@ -929,7 +929,8 @@ struct kgsl_process_private *kgsl_process_private_find(pid_t pid)
 			break;
 		}
 	}
-	mutex_unlock(&kgsl_driver.process_mutex);
+	spin_unlock(&kgsl_driver.proclist_lock);
+
 	return private;
 }
 
@@ -1035,7 +1036,9 @@ static void kgsl_process_private_close(struct kgsl_device_private *dev_priv,
 		kgsl_mmu_detach_pagetable(private->pagetable);
 
 	/* Remove the process struct from the master list */
+	spin_lock(&kgsl_driver.proclist_lock);
 	list_del(&private->list);
+	spin_unlock(&kgsl_driver.proclist_lock);
 
 	/*
 	 * Unlock the mutex before releasing the memory and the debugfs
@@ -1071,7 +1074,9 @@ static struct kgsl_process_private *kgsl_process_private_open(
 		kgsl_process_init_sysfs(device, private);
 		kgsl_process_init_debugfs(private);
 
+		spin_lock(&kgsl_driver.proclist_lock);
 		list_add(&private->list, &kgsl_driver.process_list);
+		spin_unlock(&kgsl_driver.proclist_lock);
 	}
 
 done:
@@ -4870,6 +4875,7 @@ static const struct file_operations kgsl_fops = {
 
 struct kgsl_driver kgsl_driver  = {
 	.process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
+	.proclist_lock = __SPIN_LOCK_UNLOCKED(kgsl_driver.proclist_lock),
 	.ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
 	.devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
 	/*
@@ -4973,7 +4979,6 @@ int kgsl_request_irq(struct platform_device *pdev, const  char *name,
 int kgsl_device_platform_probe(struct kgsl_device *device)
 {
 	int status = -EINVAL;
-	struct resource *res;
 	int cpu;
 
 	status = _register_device(device);
@@ -4986,34 +4991,6 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
 	if (status)
 		goto error;
 
-	/*
-	 * Check if a shadermemname is defined, and then get shader memory
-	 * details including shader memory starting physical address
-	 * and shader memory length
-	 */
-	if (device->shadermemname != NULL) {
-		res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
-						device->shadermemname);
-
-		if (res == NULL) {
-			dev_warn(device->dev,
-				      "Shader memory: platform_get_resource_byname failed\n");
-		}
-
-		else {
-			device->shader_mem_phys = res->start;
-			device->shader_mem_len = resource_size(res);
-		}
-
-		if (!devm_request_mem_region(device->dev,
-					device->shader_mem_phys,
-					device->shader_mem_len,
-						device->name)) {
-			dev_warn(device->dev,
-				      "request_mem_region_failed\n");
-		}
-	}
-
 	if (!devm_request_mem_region(device->dev, device->reg_phys,
 				device->reg_len, device->name)) {
 		dev_err(device->dev, "request_mem_region failed\n");
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index d824177..347a30c 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -113,6 +113,7 @@ struct kgsl_context;
  * @pagetable_list: LIst of open pagetables
  * @ptlock: Lock for accessing the pagetable list
  * @process_mutex: Mutex for accessing the process list
+ * @proclist_lock: Lock for accessing the process list
  * @devlock: Mutex protecting the device list
  * @stats: Struct containing atomic memory statistics
  * @full_cache_threshold: the threshold that triggers a full cache flush
@@ -131,6 +132,7 @@ struct kgsl_driver {
 	struct list_head pagetable_list;
 	spinlock_t ptlock;
 	struct mutex process_mutex;
+	spinlock_t proclist_lock;
 	struct mutex devlock;
 	struct {
 		atomic_long_t vmalloc;
@@ -331,16 +333,6 @@ struct kgsl_event_group {
 };
 
 /**
- * struct kgsl_protected_registers - Protected register range
- * @base: Offset of the range to be protected
- * @range: Range (# of registers = 2 ** range)
- */
-struct kgsl_protected_registers {
-	unsigned int base;
-	int range;
-};
-
-/**
  * struct sparse_bind_object - Bind metadata
  * @node: Node for the rb tree
  * @p_memdesc: Physical memdesc bound to
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 36ad144..1a86ca5 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -247,18 +247,12 @@ struct kgsl_device {
 	/* Kernel virtual address for GPU shader memory */
 	void __iomem *shader_mem_virt;
 
-	/* Starting physical address for GPU shader memory */
-	unsigned long shader_mem_phys;
-
 	/* Starting kernel virtual address for QDSS GFX DBG register block */
 	void __iomem *qdss_gfx_virt;
 
-	/* GPU shader memory size */
-	unsigned int shader_mem_len;
 	struct kgsl_memdesc memstore;
 	struct kgsl_memdesc scratch;
 	const char *iomemname;
-	const char *shadermemname;
 
 	struct kgsl_mmu mmu;
 	struct gmu_core_device gmu_core;
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index de9e4f4..660dbb2 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -383,6 +383,9 @@ static void gmu_memory_close(struct gmu_device *gmu)
 		md = &gmu->kmem_entries[i];
 		ctx = &gmu_ctx[md->ctx_idx];
 
+		if (!ctx->domain)
+			continue;
+
 		if (md->gmuaddr && md->mem_type != GMU_ITCM &&
 				md->mem_type != GMU_DTCM)
 			iommu_unmap(ctx->domain, md->gmuaddr, md->size);
@@ -447,7 +450,7 @@ int gmu_prealloc_req(struct kgsl_device *device, struct gmu_block_header *blk)
  * to share with GMU in kernel mode.
  * @device: Pointer to KGSL device
  */
-int gmu_memory_probe(struct kgsl_device *device)
+static int gmu_memory_probe(struct kgsl_device *device)
 {
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -1562,6 +1565,24 @@ static void gmu_snapshot(struct kgsl_device *device)
 	gmu->fault_count++;
 }
 
+static int gmu_init(struct kgsl_device *device)
+{
+	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
+	struct gmu_dev_ops *ops = GMU_DEVICE_OPS(device);
+	int ret;
+
+	ret = ops->load_firmware(device);
+	if (ret)
+		return ret;
+
+	ret = gmu_memory_probe(device);
+	if (ret)
+		return ret;
+
+	hfi_init(gmu);
+
+	return 0;
+}
+
 /* To be called to power on both GPU and GMU */
 static int gmu_start(struct kgsl_device *device)
 {
@@ -1575,7 +1596,9 @@ static int gmu_start(struct kgsl_device *device)
 	case KGSL_STATE_INIT:
 		gmu_aop_send_acd_state(device, test_bit(ADRENO_ACD_CTRL,
 					&adreno_dev->pwrctrl_flag));
-
+		ret = gmu_init(device);
+		if (ret)
+			return ret;
 	case KGSL_STATE_SUSPEND:
 		WARN_ON(test_bit(GMU_CLK_ON, &device->gmu_core.flags));
 
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index f1dd0aa..040db02 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -218,7 +218,6 @@ unsigned int gmu_get_memtype_base(struct gmu_device *gmu,
 		enum gmu_mem_type type);
 
 int gmu_prealloc_req(struct kgsl_device *device, struct gmu_block_header *blk);
-int gmu_memory_probe(struct kgsl_device *device);
 int gmu_cache_finalize(struct kgsl_device *device);
 
 #endif /* __KGSL_GMU_H */
diff --git a/drivers/gpu/msm/kgsl_gmu_core.c b/drivers/gpu/msm/kgsl_gmu_core.c
index f9993c07d..8af7840 100644
--- a/drivers/gpu/msm/kgsl_gmu_core.c
+++ b/drivers/gpu/msm/kgsl_gmu_core.c
@@ -306,16 +306,6 @@ void gmu_core_dev_enable_lm(struct kgsl_device *device)
 		ops->enable_lm(device);
 }
 
-int gmu_core_dev_load_firmware(struct kgsl_device *device)
-{
-	struct gmu_dev_ops *ops = GMU_DEVICE_OPS(device);
-
-	if (ops && ops->load_firmware)
-		return ops->load_firmware(device);
-
-	return 0;
-}
-
 void gmu_core_dev_snapshot(struct kgsl_device *device,
 		struct kgsl_snapshot *snapshot)
 {
@@ -370,5 +360,5 @@ int gmu_core_dev_wait_for_active_transition(struct kgsl_device *device)
 	if (ops && ops->wait_for_active_transition)
 		return ops->wait_for_active_transition(device);
 
-	return -ETIMEDOUT;
+	return 0;
 }
diff --git a/drivers/gpu/msm/kgsl_gmu_core.h b/drivers/gpu/msm/kgsl_gmu_core.h
index ad4a9f3..22690aa 100644
--- a/drivers/gpu/msm/kgsl_gmu_core.h
+++ b/drivers/gpu/msm/kgsl_gmu_core.h
@@ -217,7 +217,6 @@ void gmu_core_dev_oob_clear(struct kgsl_device *device, enum oob_request req);
 int gmu_core_dev_hfi_start_msg(struct kgsl_device *device);
 int gmu_core_dev_wait_for_lowest_idle(struct kgsl_device *device);
 void gmu_core_dev_enable_lm(struct kgsl_device *device);
-int gmu_core_dev_load_firmware(struct kgsl_device *device);
 void gmu_core_dev_snapshot(struct kgsl_device *device,
 		struct kgsl_snapshot *snapshot);
 bool gmu_core_dev_gx_is_on(struct kgsl_device *device);
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 88177fb..d1693cc 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -635,10 +635,8 @@ static void _get_entries(struct kgsl_process_private *private,
 
 static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr,
 		struct _mem_entry *preventry, struct _mem_entry *nextentry,
-		struct kgsl_context *context)
+		struct kgsl_process_private *private)
 {
-	struct kgsl_process_private *private;
-
 	memset(preventry, 0, sizeof(*preventry));
 	memset(nextentry, 0, sizeof(*nextentry));
 
@@ -647,8 +645,7 @@ static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr,
 
 	if (ADDR_IN_GLOBAL(mmu, faultaddr)) {
 		_get_global_entries(faultaddr, preventry, nextentry);
-	} else if (context) {
-		private = context->proc_priv;
+	} else if (private) {
 		spin_lock(&private->mem_lock);
 		_get_entries(private, faultaddr, preventry, nextentry);
 		spin_unlock(&private->mem_lock);
@@ -687,6 +684,29 @@ static void _check_if_freed(struct kgsl_iommu_context *ctx,
 	}
 }
 
+static struct kgsl_process_private *kgsl_iommu_get_process(u64 ptbase)
+{
+	struct kgsl_process_private *p;
+	struct kgsl_iommu_pt *iommu_pt;
+
+	spin_lock(&kgsl_driver.proclist_lock);
+
+	list_for_each_entry(p, &kgsl_driver.process_list, list) {
+		iommu_pt = p->pagetable->priv;
+		if (iommu_pt->ttbr0 == ptbase) {
+			if (!kgsl_process_private_get(p))
+				p = NULL;
+
+			spin_unlock(&kgsl_driver.proclist_lock);
+			return p;
+		}
+	}
+
+	spin_unlock(&kgsl_driver.proclist_lock);
+
+	return NULL;
+}
+
 static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	struct device *dev, unsigned long addr, int flags, void *token)
 {
@@ -695,7 +715,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	struct kgsl_mmu *mmu = pt->mmu;
 	struct kgsl_iommu *iommu;
 	struct kgsl_iommu_context *ctx;
-	u64 ptbase, proc_ptbase;
+	u64 ptbase;
 	u32 contextidr;
 	pid_t pid = 0;
 	pid_t ptname;
@@ -705,9 +725,9 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	struct adreno_device *adreno_dev;
 	struct adreno_gpudev *gpudev;
 	unsigned int no_page_fault_log = 0;
-	unsigned int curr_context_id = 0;
-	struct kgsl_context *context;
 	char *fault_type = "unknown";
+	char *comm = "unknown";
+	struct kgsl_process_private *private;
 
 	static DEFINE_RATELIMIT_STATE(_rs,
 					DEFAULT_RATELIMIT_INTERVAL,
@@ -722,21 +742,6 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	adreno_dev = ADRENO_DEVICE(device);
 	gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 
-	if (pt->name == KGSL_MMU_SECURE_PT)
-		ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
-
-	/*
-	 * set the fault bits and stuff before any printks so that if fault
-	 * handler runs then it will know it's dealing with a pagefault.
-	 * Read the global current timestamp because we could be in middle of
-	 * RB switch and hence the cur RB may not be reliable but global
-	 * one will always be reliable
-	 */
-	kgsl_sharedmem_readl(&device->memstore, &curr_context_id,
-		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));
-
-	context = kgsl_context_get(device, curr_context_id);
-
 	write = (flags & IOMMU_FAULT_WRITE) ? 1 : 0;
 	if (flags & IOMMU_FAULT_TRANSLATION)
 		fault_type = "translation";
@@ -747,12 +752,17 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	else if (flags & IOMMU_FAULT_TRANSACTION_STALLED)
 		fault_type = "transaction stalled";
 
-	if (context != NULL) {
-		/* save pagefault timestamp for GFT */
-		set_bit(KGSL_CONTEXT_PRIV_PAGEFAULT, &context->priv);
-		pid = context->proc_priv->pid;
+	ptbase = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
+	private = kgsl_iommu_get_process(ptbase);
+
+	if (private) {
+		pid = private->pid;
+		comm = private->comm;
 	}
 
+	if (pt->name == KGSL_MMU_SECURE_PT)
+		ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
+
 	ctx->fault = 1;
 
 	if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
@@ -767,9 +777,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 		mutex_unlock(&device->mutex);
 	}
 
-	ptbase = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
 	contextidr = KGSL_IOMMU_GET_CTX_REG(ctx, CONTEXTIDR);
-
 	ptname = MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) ?
 		KGSL_MMU_GLOBAL_PT : pid;
 	/*
@@ -778,43 +786,19 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	 * search and delays the trace unnecessarily.
 	 */
 	trace_kgsl_mmu_pagefault(ctx->kgsldev, addr,
-			ptname,
-			context != NULL ? context->proc_priv->comm : "unknown",
-			write ? "write" : "read");
+			ptname, comm, write ? "write" : "read");
 
 	if (test_bit(KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE,
 		&adreno_dev->ft_pf_policy))
 		no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr);
 
 	if (!no_page_fault_log && __ratelimit(&_rs)) {
-		const char *api_str;
-
-		if (context != NULL) {
-			struct adreno_context *drawctxt =
-					ADRENO_CONTEXT(context);
-
-			api_str = get_api_type_str(drawctxt->type);
-		} else
-			api_str = "UNKNOWN";
-
 		dev_crit(ctx->kgsldev->dev,
 			"GPU PAGE FAULT: addr = %lX pid= %d name=%s\n", addr,
-			ptname,
-			context != NULL ? context->proc_priv->comm : "unknown");
-
-		if (context != NULL) {
-			proc_ptbase = kgsl_mmu_pagetable_get_ttbr0(
-					context->proc_priv->pagetable);
-
-			if (ptbase != proc_ptbase)
-				dev_crit(ctx->kgsldev->dev,
-				"Pagetable address mismatch: HW address is 0x%llx but SW expected 0x%llx\n",
-				ptbase, proc_ptbase);
-		}
-
+			ptname, comm);
 		dev_crit(ctx->kgsldev->dev,
-			"context=%s ctx_type=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n",
-			ctx->name, api_str, ptbase, contextidr,
+			"context=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n",
+			ctx->name, ptbase, contextidr,
 			write ? "write" : "read", fault_type);
 
 		if (gpudev->iommu_fault_block) {
@@ -834,7 +818,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 			dev_err(ctx->kgsldev->dev,
 				      "---- nearby memory ----\n");
 
-			_find_mem_entries(mmu, addr, &prev, &next, context);
+			_find_mem_entries(mmu, addr, &prev, &next, private);
 			if (prev.gpuaddr)
 				_print_entry(ctx->kgsldev, &prev);
 			else
@@ -877,7 +861,8 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 		adreno_dispatcher_schedule(device);
 	}
 
-	kgsl_context_put(context);
+	kgsl_process_private_put(private);
+
 	return ret;
 }
 
@@ -2167,14 +2152,6 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
 	return 0;
 }
 
-static struct kgsl_protected_registers *
-kgsl_iommu_get_prot_regs(struct kgsl_mmu *mmu)
-{
-	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
-
-	return &iommu->protect;
-}
-
 static struct kgsl_iommu_addr_entry *_find_gpuaddr(
 		struct kgsl_pagetable *pagetable, uint64_t gpuaddr)
 {
@@ -2626,15 +2603,6 @@ static int _kgsl_iommu_probe(struct kgsl_device *device,
 	iommu->regstart = reg_val[0];
 	iommu->regsize = reg_val[1];
 
-	/* Protecting the SMMU registers is mandatory */
-	if (of_property_read_u32_array(node, "qcom,protect", reg_val, 2)) {
-		dev_err(device->dev,
-			"dt: no iommu protection range specified\n");
-		return -EINVAL;
-	}
-	iommu->protect.base = reg_val[0] / sizeof(u32);
-	iommu->protect.range = reg_val[1] / sizeof(u32);
-
 	of_property_for_each_string(node, "clock-names", prop, cname) {
 		struct clk *c = devm_clk_get(&pdev->dev, cname);
 
@@ -2722,7 +2690,6 @@ struct kgsl_mmu_ops kgsl_iommu_ops = {
 	.mmu_pt_equal = kgsl_iommu_pt_equal,
 	.mmu_set_pf_policy = kgsl_iommu_set_pf_policy,
 	.mmu_pagefault_resume = kgsl_iommu_pagefault_resume,
-	.mmu_get_prot_regs = kgsl_iommu_get_prot_regs,
 	.mmu_init_pt = kgsl_iommu_init_pt,
 	.mmu_add_global = kgsl_iommu_add_global,
 	.mmu_remove_global = kgsl_iommu_remove_global,
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index b98f2c2..2e4c2ad 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -112,7 +112,6 @@ struct kgsl_iommu_context {
  * @clk_enable_count: The ref count of clock enable calls
  * @clks: Array of pointers to IOMMU clocks
  * @smmu_info: smmu info used in a5xx preemption
- * @protect: register protection settings for the iommu.
  */
 struct kgsl_iommu {
 	struct kgsl_iommu_context ctx[KGSL_IOMMU_CONTEXT_MAX];
@@ -123,7 +122,6 @@ struct kgsl_iommu {
 	atomic_t clk_enable_count;
 	struct clk *clks[KGSL_IOMMU_MAX_CLKS];
 	struct kgsl_memdesc smmu_info;
-	struct kgsl_protected_registers protect;
 };
 
 /*
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 637e57d..93012aa 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -68,8 +68,6 @@ struct kgsl_mmu_ops {
 	bool (*mmu_pt_equal)(struct kgsl_mmu *mmu,
 			struct kgsl_pagetable *pt, u64 ttbr0);
 	int (*mmu_set_pf_policy)(struct kgsl_mmu *mmu, unsigned long pf_policy);
-	struct kgsl_protected_registers *(*mmu_get_prot_regs)
-			(struct kgsl_mmu *mmu);
 	int (*mmu_init_pt)(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt);
 	void (*mmu_add_global)(struct kgsl_mmu *mmu,
 			struct kgsl_memdesc *memdesc, const char *name);
@@ -328,15 +326,6 @@ static inline void kgsl_mmu_clear_fsr(struct kgsl_mmu *mmu)
 		return mmu->mmu_ops->mmu_clear_fsr(mmu);
 }
 
-static inline struct kgsl_protected_registers *kgsl_mmu_get_prot_regs
-						(struct kgsl_mmu *mmu)
-{
-	if (MMU_OP_VALID(mmu, mmu_get_prot_regs))
-		return mmu->mmu_ops->mmu_get_prot_regs(mmu);
-
-	return NULL;
-}
-
 static inline int kgsl_mmu_is_perprocess(struct kgsl_mmu *mmu)
 {
 	return MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) ? 0 : 1;
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index c9bf42f..6786ecb4 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1991,9 +1991,9 @@ static bool _gpu_freq_supported(struct kgsl_pwrctrl *pwr, unsigned int freq)
 	return false;
 }
 
-static void kgsl_pwrctrl_disable_unused_opp(struct kgsl_device *device)
+void kgsl_pwrctrl_disable_unused_opp(struct kgsl_device *device,
+		struct device *dev)
 {
-	struct device *dev = &device->pdev->dev;
 	struct dev_pm_opp *opp;
 	unsigned long freq = 0;
 	int ret;
@@ -2083,7 +2083,7 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
 			pwr->pwrlevels[i].gpu_freq = freq;
 	}
 
-	kgsl_pwrctrl_disable_unused_opp(device);
+	kgsl_pwrctrl_disable_unused_opp(device, &pdev->dev);
 
 	kgsl_clk_set_rate(device, pwr->num_pwrlevels - 1);
 
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index f3a5648..ff3e4fe 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -267,4 +267,7 @@ void kgsl_pwrctrl_set_constraint(struct kgsl_device *device,
 void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device,
 			unsigned long timeout_us);
 void kgsl_pwrctrl_set_default_gpu_pwrlevel(struct kgsl_device *device);
+void kgsl_pwrctrl_disable_unused_opp(struct kgsl_device *device,
+		struct device *dev);
+
 #endif /* __KGSL_PWRCTRL_H */
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 89b227b..790b379 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -1041,6 +1041,12 @@ int kgsl_pwrscale_init(struct device *dev, const char *governor)
 		 * frequency.
 		 */
 		ret = dev_pm_opp_of_add_table(device->busmondev);
+		/*
+		 * Disable OPPs that are not supported per the GPU freq plan.
+		 * This is needed to ensure the freq_table specified in
+		 * bus_profile above matches the OPP table.
+		 */
+		kgsl_pwrctrl_disable_unused_opp(device, device->busmondev);
 		if (!ret)
 			bus_devfreq = devfreq_add_device(device->busmondev,
 				&pwrscale->bus_profile.profile, "gpubw_mon",
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
index 6e1a4a4..ab9da59 100644
--- a/drivers/hid/hid-holtek-kbd.c
+++ b/drivers/hid/hid-holtek-kbd.c
@@ -126,9 +126,14 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
 
 	/* Locate the boot interface, to receive the LED change events */
 	struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0);
+	struct hid_device *boot_hid;
+	struct hid_input *boot_hid_input;
 
-	struct hid_device *boot_hid = usb_get_intfdata(boot_interface);
-	struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs,
+	if (unlikely(boot_interface == NULL))
+		return -ENODEV;
+
+	boot_hid = usb_get_intfdata(boot_interface);
+	boot_hid_input = list_first_entry(&boot_hid->inputs,
 		struct hid_input, list);
 
 	return boot_hid_input->input->event(boot_hid_input->input, type, code,
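
The holtek hunk guards against usb_ifnum_to_if() returning NULL, but usb_get_intfdata() on the boot interface can also come back NULL if no driver has bound to it yet. A fuller validation might look like the sketch below; the function name and flow are hypothetical, not the driver's actual code.

    #include <linux/errno.h>
    #include <linux/hid.h>
    #include <linux/usb.h>

    /* Sketch: validate the boot interface and its drvdata before use. */
    static int example_get_boot_hid(struct usb_device *usb_dev,
                                    struct hid_device **out)
    {
            struct usb_interface *boot_intf = usb_ifnum_to_if(usb_dev, 0);
            struct hid_device *boot_hid;

            if (!boot_intf)                 /* interface 0 not present */
                    return -ENODEV;

            boot_hid = usb_get_intfdata(boot_intf);
            if (!boot_hid)                  /* nothing bound yet */
                    return -ENODEV;

            *out = boot_hid;
            return 0;
    }
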
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 9671a4b..31f1023 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -587,10 +587,14 @@ static void sony_set_leds(struct sony_sc *sc);
 static inline void sony_schedule_work(struct sony_sc *sc,
 				      enum sony_worker which)
 {
+	unsigned long flags;
+
 	switch (which) {
 	case SONY_WORKER_STATE:
-		if (!sc->defer_initialization)
+		spin_lock_irqsave(&sc->lock, flags);
+		if (!sc->defer_initialization && sc->state_worker_initialized)
 			schedule_work(&sc->state_worker);
+		spin_unlock_irqrestore(&sc->lock, flags);
 		break;
 	case SONY_WORKER_HOTPLUG:
 		if (sc->hotplug_worker_initialized)
@@ -2553,13 +2557,18 @@ static inline void sony_init_output_report(struct sony_sc *sc,
 
 static inline void sony_cancel_work_sync(struct sony_sc *sc)
 {
+	unsigned long flags;
+
 	if (sc->hotplug_worker_initialized)
 		cancel_work_sync(&sc->hotplug_worker);
-	if (sc->state_worker_initialized)
+	if (sc->state_worker_initialized) {
+		spin_lock_irqsave(&sc->lock, flags);
+		sc->state_worker_initialized = 0;
+		spin_unlock_irqrestore(&sc->lock, flags);
 		cancel_work_sync(&sc->state_worker);
+	}
 }
 
-
 static int sony_input_configured(struct hid_device *hdev,
 					struct hid_input *hidinput)
 {
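
The hid-sony change closes a queue/teardown race: sony_cancel_work_sync() now clears state_worker_initialized under sc->lock before calling cancel_work_sync(), and sony_schedule_work() tests the flag under the same lock, so no new work can be queued once cancellation has begun. The pattern, reduced to a sketch with hypothetical names:

    #include <linux/spinlock.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct example_ctx {
            spinlock_t lock;
            bool worker_live;
            struct work_struct work;
    };

    /* Producer side: only queue work while the worker is marked live. */
    static void example_kick(struct example_ctx *ctx)
    {
            unsigned long flags;

            spin_lock_irqsave(&ctx->lock, flags);
            if (ctx->worker_live)
                    schedule_work(&ctx->work);
            spin_unlock_irqrestore(&ctx->lock, flags);
    }

    /* Teardown: mark dead first, then wait out any running instance. */
    static void example_teardown(struct example_ctx *ctx)
    {
            unsigned long flags;

            spin_lock_irqsave(&ctx->lock, flags);
            ctx->worker_live = false;
            spin_unlock_irqrestore(&ctx->lock, flags);
            cancel_work_sync(&ctx->work);
    }
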
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index a746017..5a949ca 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -297,6 +297,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
 	spin_unlock_irq(&list->hiddev->list_lock);
 
 	mutex_lock(&hiddev->existancelock);
+	/*
+	 * Recheck ->exist with existancelock held to
+	 * avoid opening a disconnected device.
+	 */
+	if (!list->hiddev->exist) {
+		res = -ENODEV;
+		goto bail_unlock;
+	}
 	if (!list->hiddev->open++)
 		if (list->hiddev->exist) {
 			struct hid_device *hid = hiddev->hid;
@@ -313,6 +321,10 @@ static int hiddev_open(struct inode *inode, struct file *file)
 	hid_hw_power(hid, PM_HINT_NORMAL);
 bail_unlock:
 	mutex_unlock(&hiddev->existancelock);
+
+	spin_lock_irq(&list->hiddev->list_lock);
+	list_del(&list->node);
+	spin_unlock_irq(&list->hiddev->list_lock);
 bail:
 	file->private_data = NULL;
 	vfree(list);
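
The hiddev change is the classic recheck-under-lock pattern: ->exist was previously consulted only after bumping the open count, so a disconnect racing with open() could slip through; testing it again once existancelock is held serializes the two paths, and the added list_del() keeps the error path from leaking the list node. Stripped to its essentials (hypothetical names):

    #include <linux/errno.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    struct example_dev {
            struct mutex lock;      /* serializes open vs. disconnect */
            bool exist;
            int open_count;
    };

    static int example_open(struct example_dev *dev)
    {
            int ret = 0;

            mutex_lock(&dev->lock);
            /* Recheck under the lock: disconnect may already have run. */
            if (!dev->exist) {
                    ret = -ENODEV;
                    goto out;
            }
            dev->open_count++;
    out:
            mutex_unlock(&dev->lock);
            return ret;
    }
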
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 78603b7..eba692c 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -818,7 +818,7 @@ static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 };
 static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 };
 static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 };
 static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a };
-static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c };
+static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b };
 static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c };
 static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d };
 
@@ -3673,6 +3673,7 @@ static int nct6775_probe(struct platform_device *pdev)
 		data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME;
 		data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME;
 		data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME;
+		data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H;
 		data->REG_PWM[0] = NCT6106_REG_PWM;
 		data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT;
 		data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT;
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 2876c18..38ffbdb 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -768,7 +768,7 @@ static struct attribute *nct7802_in_attrs[] = {
 	&sensor_dev_attr_in3_alarm.dev_attr.attr,
 	&sensor_dev_attr_in3_beep.dev_attr.attr,
 
-	&sensor_dev_attr_in4_input.dev_attr.attr,	/* 17 */
+	&sensor_dev_attr_in4_input.dev_attr.attr,	/* 16 */
 	&sensor_dev_attr_in4_min.dev_attr.attr,
 	&sensor_dev_attr_in4_max.dev_attr.attr,
 	&sensor_dev_attr_in4_alarm.dev_attr.attr,
@@ -794,9 +794,9 @@ static umode_t nct7802_in_is_visible(struct kobject *kobj,
 
 	if (index >= 6 && index < 11 && (reg & 0x03) != 0x03)	/* VSEN1 */
 		return 0;
-	if (index >= 11 && index < 17 && (reg & 0x0c) != 0x0c)	/* VSEN2 */
+	if (index >= 11 && index < 16 && (reg & 0x0c) != 0x0c)	/* VSEN2 */
 		return 0;
-	if (index >= 17 && (reg & 0x30) != 0x30)		/* VSEN3 */
+	if (index >= 16 && (reg & 0x30) != 0x30)		/* VSEN3 */
 		return 0;
 
 	return attr->mode;
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index cda385a..dfb5638 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -123,6 +123,17 @@
 	  hardware component to another. It can also be used to pass
 	  software generated events.
 
+config CORESIGHT_CTI_SAVE_DISABLE
+	bool "Turn off CTI save and restore"
+	depends on CORESIGHT_CTI
+	help
+	  Turns off CoreSight CTI save and restore support for CPU CTIs. This
+	  avoids voting for the clocks during probe as well as the associated
+	  save and restore latency, at the cost of breaking CPU CTI support on
+	  targets where CPU CTIs must be preserved across power collapse.
+
+	  If unsure, say 'N' here to avoid breaking CPU CTI support.
+
 config CORESIGHT_OST
 	bool "CoreSight OST framework"
 	depends on CORESIGHT_STM
diff --git a/drivers/hwtracing/coresight/coresight-byte-cntr.c b/drivers/hwtracing/coresight/coresight-byte-cntr.c
index 8a0439d..6216417 100644
--- a/drivers/hwtracing/coresight/coresight-byte-cntr.c
+++ b/drivers/hwtracing/coresight/coresight-byte-cntr.c
@@ -17,6 +17,7 @@
 #include "coresight-tmc.h"
 
 #define USB_BLK_SIZE 65536
+#define USB_SG_NUM (USB_BLK_SIZE / PAGE_SIZE)
 #define USB_BUF_NUM 255
 
 static struct tmc_drvdata *tmcdrvdata;
@@ -315,10 +316,11 @@ static int byte_cntr_register_chardev(struct byte_cntr *byte_cntr_data)
 
 static void usb_read_work_fn(struct work_struct *work)
 {
-	int ret, seq = 0;
+	int ret, i, seq = 0;
 	struct qdss_request *usb_req = NULL;
 	struct etr_buf *etr_buf = tmcdrvdata->etr_buf;
 	size_t actual, req_size;
+	char *buf;
 	struct byte_cntr *drvdata =
 		container_of(work, struct byte_cntr, read_work);
 
@@ -337,50 +339,65 @@ static void usb_read_work_fn(struct work_struct *work)
 		}
 
 		req_size = USB_BLK_SIZE;
-		while (req_size > 0) {
-			seq++;
-			usb_req = kzalloc(sizeof(*usb_req), GFP_KERNEL);
-			if (!usb_req)
-				return;
+		seq++;
+		usb_req = devm_kzalloc(tmcdrvdata->dev, sizeof(*usb_req),
+					GFP_KERNEL);
+		if (!usb_req)
+			return;
+		usb_req->sg = devm_kzalloc(tmcdrvdata->dev,
+			sizeof(*(usb_req->sg)) * USB_SG_NUM, GFP_KERNEL);
+		if (!usb_req->sg) {
+			devm_kfree(tmcdrvdata->dev, usb_req);
+			return;
+		}
+		usb_req->length = USB_BLK_SIZE;
+		drvdata->usb_req = usb_req;
+		for (i = 0; i < USB_SG_NUM; i++) {
 			actual = tmc_etr_buf_get_data(etr_buf, drvdata->offset,
-					req_size, &usb_req->buf);
+					PAGE_SIZE, &buf);
 			if (actual <= 0) {
-				kfree(usb_req);
+				devm_kfree(tmcdrvdata->dev, usb_req->sg);
+				devm_kfree(tmcdrvdata->dev, usb_req);
 				usb_req = NULL;
 				dev_err(tmcdrvdata->dev, "No data in ETR\n");
-				break;
+				return;
 			}
-			usb_req->length = actual;
-			drvdata->usb_req = usb_req;
+			sg_set_buf(&usb_req->sg[i], buf, actual);
+			if (i == 0)
+				usb_req->buf = buf;
 			req_size -= actual;
-			if ((drvdata->offset + usb_req->length)
-					>= tmcdrvdata->size)
+			if ((drvdata->offset + actual) >= tmcdrvdata->size)
 				drvdata->offset = 0;
 			else
-				drvdata->offset += usb_req->length;
-			if (atomic_read(&drvdata->usb_free_buf) > 0) {
-				ret = usb_qdss_write(tmcdrvdata->usbch,
-						drvdata->usb_req);
-				if (ret) {
-					kfree(usb_req);
-					usb_req = NULL;
-					drvdata->usb_req = NULL;
-					dev_err(tmcdrvdata->dev,
-						"Write data failed:%d\n", ret);
-					if (ret == -EAGAIN)
-						continue;
-					return;
-				}
-				atomic_dec(&drvdata->usb_free_buf);
-
-			} else {
-				dev_dbg(tmcdrvdata->dev,
-				"Drop data, offset = %d, seq = %d, irq = %d\n",
-					drvdata->offset, seq,
-					atomic_read(&drvdata->irq_cnt));
-				kfree(usb_req);
+				drvdata->offset += actual;
+			if (i == USB_SG_NUM - 1)
+				sg_mark_end(&usb_req->sg[i]);
+		}
+		usb_req->num_sgs = i;
+		if (atomic_read(&drvdata->usb_free_buf) > 0) {
+			ret = usb_qdss_write(tmcdrvdata->usbch,
+					drvdata->usb_req);
+			if (ret) {
+				devm_kfree(tmcdrvdata->dev, usb_req->sg);
+				devm_kfree(tmcdrvdata->dev, usb_req);
+				usb_req = NULL;
 				drvdata->usb_req = NULL;
+				dev_err(tmcdrvdata->dev,
+					"Write data failed:%d\n", ret);
+				if (ret == -EAGAIN)
+					continue;
+				return;
 			}
+			atomic_dec(&drvdata->usb_free_buf);
+
+		} else {
+			dev_dbg(tmcdrvdata->dev,
+			"Drop data, offset = %d, seq = %d, irq = %d\n",
+				drvdata->offset, seq,
+				atomic_read(&drvdata->irq_cnt));
+			devm_kfree(tmcdrvdata->dev, usb_req->sg);
+			devm_kfree(tmcdrvdata->dev, usb_req);
+			drvdata->usb_req = NULL;
 		}
 		if (atomic_read(&drvdata->irq_cnt) > 0)
 			atomic_dec(&drvdata->irq_cnt);
@@ -394,7 +411,8 @@ static void usb_write_done(struct byte_cntr *drvdata,
 	atomic_inc(&drvdata->usb_free_buf);
 	if (d_req->status)
 		pr_err_ratelimited("USB write failed err:%d\n", d_req->status);
-	kfree(d_req);
+	devm_kfree(tmcdrvdata->dev, d_req->sg);
+	devm_kfree(tmcdrvdata->dev, d_req);
 }
 
 void usb_bypass_notifier(void *priv, unsigned int event,
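
The reworked usb_read_work_fn() submits one qdss_request per 64 KB block, described as a scatter-gather list of PAGE_SIZE chunks, instead of looping over many small writes. A minimal sketch of that sg construction, assuming a hypothetical get_chunk() helper that returns the kernel address of each chunk:

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/scatterlist.h>

    #define EX_BLK_SIZE     65536
    #define EX_SG_NUM       (EX_BLK_SIZE / PAGE_SIZE)

    /* Sketch: describe one 64 KB block as EX_SG_NUM page-sized entries. */
    static int example_fill_sg(struct scatterlist *sg,
                               char *(*get_chunk)(int idx))
    {
            int i;

            /* sg_init_table() zeroes the array and marks the last entry. */
            sg_init_table(sg, EX_SG_NUM);
            for (i = 0; i < EX_SG_NUM; i++) {
                    char *buf = get_chunk(i);

                    if (!buf)
                            return -ENODATA;
                    sg_set_buf(&sg[i], buf, PAGE_SIZE);
            }
            return 0;
    }

One design note: because sg_init_table() already terminates the list, the explicit sg_mark_end() in the hunk above is only required when the table is assembled by hand, as it is there.
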
diff --git a/drivers/hwtracing/coresight/coresight-common.h b/drivers/hwtracing/coresight/coresight-common.h
index b49a588..b6db835 100644
--- a/drivers/hwtracing/coresight/coresight-common.h
+++ b/drivers/hwtracing/coresight/coresight-common.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CORESIGHT_COMMON_H
@@ -16,6 +16,7 @@ struct coresight_csr {
 
 #ifdef CONFIG_CORESIGHT_CSR
 extern void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr);
+extern void msm_qdss_csr_enable_flush(struct coresight_csr *csr);
 extern void msm_qdss_csr_disable_bam_to_usb(struct coresight_csr *csr);
 extern void msm_qdss_csr_disable_flush(struct coresight_csr *csr);
 extern int coresight_csr_hwctrl_set(struct coresight_csr *csr, uint64_t addr,
diff --git a/drivers/hwtracing/coresight/coresight-csr.c b/drivers/hwtracing/coresight/coresight-csr.c
index 309e5a0..e0d023719 100644
--- a/drivers/hwtracing/coresight/coresight-csr.c
+++ b/drivers/hwtracing/coresight/coresight-csr.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2013, 2015-2-17 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, 2015-2017, 2019 The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -66,12 +66,15 @@ do {									\
 #define BLKSIZE_1024		2
 #define BLKSIZE_2048		3
 
+#define FLUSHPERIOD_2048	0x800
+
 struct csr_drvdata {
 	void __iomem		*base;
 	phys_addr_t		pbase;
 	struct device		*dev;
 	struct coresight_device	*csdev;
 	uint32_t		blksize;
+	uint32_t		flushperiod;
 	struct coresight_csr		csr;
 	struct clk		*clk;
 	spinlock_t		spin_lock;
@@ -79,6 +82,7 @@ struct csr_drvdata {
 	bool			hwctrl_set_support;
 	bool			set_byte_cntr_support;
 	bool			timestamp_support;
+	bool			enable_flush;
 };
 
 static LIST_HEAD(csr_list);
@@ -86,10 +90,23 @@ static DEFINE_MUTEX(csr_lock);
 
 #define to_csr_drvdata(c) container_of(c, struct csr_drvdata, csr)
 
+static void msm_qdss_csr_config_flush_period(struct csr_drvdata *drvdata)
+{
+	uint32_t usbflshctrl;
+
+	CSR_UNLOCK(drvdata);
+
+	usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
+	usbflshctrl = (usbflshctrl & ~0x3FFFC) | (drvdata->flushperiod << 2);
+	csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
+
+	CSR_LOCK(drvdata);
+}
+
 void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr)
 {
 	struct csr_drvdata *drvdata;
-	uint32_t usbbamctrl, usbflshctrl;
+	uint32_t usbbamctrl;
 	unsigned long flags;
 
 	if (csr == NULL)
@@ -106,12 +123,6 @@ void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr)
 	usbbamctrl = (usbbamctrl & ~0x3) | drvdata->blksize;
 	csr_writel(drvdata, usbbamctrl, CSR_USBBAMCTRL);
 
-	usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
-	usbflshctrl = (usbflshctrl & ~0x3FFFC) | (0xFFFF << 2);
-	csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
-	usbflshctrl |= 0x2;
-	csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
-
 	usbbamctrl |= 0x4;
 	csr_writel(drvdata, usbbamctrl, CSR_USBBAMCTRL);
 
@@ -120,6 +131,35 @@
 }
 EXPORT_SYMBOL(msm_qdss_csr_enable_bam_to_usb);
 
+void msm_qdss_csr_enable_flush(struct coresight_csr *csr)
+{
+	struct csr_drvdata *drvdata;
+	uint32_t usbflshctrl;
+	unsigned long flags;
+
+	if (csr == NULL)
+		return;
+
+	drvdata = to_csr_drvdata(csr);
+	if (IS_ERR_OR_NULL(drvdata) || !drvdata->usb_bam_support)
+		return;
+
+	spin_lock_irqsave(&drvdata->spin_lock, flags);
+
+	msm_qdss_csr_config_flush_period(drvdata);
+
+	CSR_UNLOCK(drvdata);
+
+	usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
+	usbflshctrl |= 0x2;
+	csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
+
+	CSR_LOCK(drvdata);
+	drvdata->enable_flush = true;
+	spin_unlock_irqrestore(&drvdata->spin_lock, flags);
+}
+EXPORT_SYMBOL(msm_qdss_csr_enable_flush);
+
 void msm_qdss_csr_disable_bam_to_usb(struct coresight_csr *csr)
 {
 	struct csr_drvdata *drvdata;
@@ -166,6 +207,7 @@ void msm_qdss_csr_disable_flush(struct coresight_csr *csr)
 	csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
 
 	CSR_LOCK(drvdata);
+	drvdata->enable_flush = false;
 	spin_unlock_irqrestore(&drvdata->spin_lock, flags);
 }
 EXPORT_SYMBOL(msm_qdss_csr_disable_flush);
@@ -295,14 +337,66 @@ static ssize_t timestamp_show(struct device *dev,
 
 static DEVICE_ATTR_RO(timestamp);
 
+static ssize_t flushperiod_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct csr_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (IS_ERR_OR_NULL(drvdata) || !drvdata->usb_bam_support) {
+		dev_err(dev, "Invalid param\n");
+		return -EINVAL;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%#x\n", drvdata->flushperiod);
+}
+
+static ssize_t flushperiod_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf,
+				size_t size)
+{
+	unsigned long flags;
+	unsigned long val;
+	struct csr_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (IS_ERR_OR_NULL(drvdata) || !drvdata->usb_bam_support) {
+		dev_err(dev, "Invalid param\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&drvdata->spin_lock, flags);
+
+	if (kstrtoul(buf, 0, &val) || val > 0xffff) {
+		spin_unlock_irqrestore(&drvdata->spin_lock, flags);
+		return -EINVAL;
+	}
+
+	if (drvdata->flushperiod == val)
+		goto out;
+
+	drvdata->flushperiod = val;
+
+	if (drvdata->enable_flush)
+		msm_qdss_csr_config_flush_period(drvdata);
+
+out:
+	spin_unlock_irqrestore(&drvdata->spin_lock, flags);
+	return size;
+}
+
+static DEVICE_ATTR_RW(flushperiod);
+
 static struct attribute *csr_attrs[] = {
 	&dev_attr_timestamp.attr,
+	&dev_attr_flushperiod.attr,
 	NULL,
 };
 
 static struct attribute_group csr_attr_grp = {
 	.attrs = csr_attrs,
 };
+
 static const struct attribute_group *csr_attr_grps[] = {
 	&csr_attr_grp,
 	NULL,
@@ -374,14 +468,16 @@ static int csr_probe(struct platform_device *pdev)
 	else
 		dev_dbg(dev, "timestamp_support operation supported\n");
 
+	if (drvdata->usb_bam_support)
+		drvdata->flushperiod = FLUSHPERIOD_2048;
+
 	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
 	if (!desc)
 		return -ENOMEM;
 	desc->type = CORESIGHT_DEV_TYPE_NONE;
 	desc->pdata = pdev->dev.platform_data;
 	desc->dev = &pdev->dev;
-	if (drvdata->timestamp_support)
-		desc->groups = csr_attr_grps;
+	desc->groups = csr_attr_grps;
 
 	drvdata->csdev = coresight_register(desc);
 	if (IS_ERR(drvdata->csdev))
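
The new flushperiod attribute follows the standard DEVICE_ATTR_RW shape: parse, bound-check against the 16-bit FLUSHPERIOD hardware field, store, and re-program the register only when the flush path is currently enabled. A condensed sketch of that store path with a hypothetical driver-data layout; parsing before taking the lock reads slightly cleaner than the hunk above, though kstrtoul() itself is safe under a spinlock:

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct example_drvdata {
            spinlock_t lock;
            u32 period;
            bool enabled;
    };

    void example_program_period(struct example_drvdata *d);

    static ssize_t period_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t size)
    {
            struct example_drvdata *d = dev_get_drvdata(dev->parent);
            unsigned long val;

            /* Reject values that don't fit the 16-bit hardware field. */
            if (kstrtoul(buf, 0, &val) || val > 0xffff)
                    return -EINVAL;

            spin_lock(&d->lock);
            if (d->period != val) {
                    d->period = val;
                    if (d->enabled)         /* apply live only when active */
                            example_program_period(d);
            }
            spin_unlock(&d->lock);
            return size;
    }
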
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 4217742..f98abee 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -1015,11 +1015,12 @@ static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
 	tmc_sync_etr_buf(drvdata);
 }
 
-void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata, bool flush)
 {
 	CS_UNLOCK(drvdata->base);
 
-	tmc_flush_and_stop(drvdata);
+	if (flush)
+		tmc_flush_and_stop(drvdata);
 	/*
 	 * When operating in sysFS mode the content of the buffer needs to be
 	 * read before the TMC is disabled.
@@ -1114,6 +1115,7 @@ static void __tmc_etr_enable_to_bam(struct tmc_drvdata *drvdata)
 
 	CS_LOCK(drvdata->base);
 
+	msm_qdss_csr_enable_flush(drvdata->csr);
 	drvdata->enable_to_bam = true;
 }
 
@@ -1442,7 +1444,7 @@ static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
 	return -EINVAL;
 }
 
-static void tmc_disable_etr_sink(struct coresight_device *csdev)
+static void _tmc_disable_etr_sink(struct coresight_device *csdev, bool flush)
 {
 	unsigned long flags;
 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -1468,10 +1470,10 @@ static void tmc_disable_etr_sink(struct coresight_device *csdev)
 				goto out;
 			} else {
 				usb_qdss_close(drvdata->usbch);
-				tmc_etr_disable_hw(drvdata);
+				tmc_etr_disable_hw(drvdata, flush);
 			}
 		} else {
-			tmc_etr_disable_hw(drvdata);
+			tmc_etr_disable_hw(drvdata, flush);
 		}
 		drvdata->mode = CS_MODE_DISABLED;
 	}
@@ -1506,6 +1508,11 @@ static void tmc_disable_etr_sink(struct coresight_device *csdev)
 	dev_info(drvdata->dev, "TMC-ETR disabled\n");
 }
 
+static void tmc_disable_etr_sink(struct coresight_device *csdev)
+{
+	_tmc_disable_etr_sink(csdev, true);
+}
+
 int tmc_etr_switch_mode(struct tmc_drvdata *drvdata, const char *out_mode)
 {
 	enum tmc_etr_out_mode new_mode, old_mode;
@@ -1525,7 +1532,7 @@ int tmc_etr_switch_mode(struct tmc_drvdata *drvdata, const char *out_mode)
 		return 0;
 	}
 
-	tmc_disable_etr_sink(drvdata->csdev);
+	_tmc_disable_etr_sink(drvdata->csdev, false);
 	old_mode = drvdata->out_mode;
 	drvdata->out_mode = new_mode;
 	if (tmc_enable_etr_sink_sysfs(drvdata->csdev)) {
@@ -1587,7 +1594,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
 
 	/* Disable the TMC if need be */
 	if (drvdata->mode == CS_MODE_SYSFS)
-		tmc_etr_disable_hw(drvdata);
+		tmc_etr_disable_hw(drvdata, true);
 
 	drvdata->reading = true;
 out:
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 994339a..a30e360 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -312,7 +312,7 @@ void tmc_free_etr_buf(struct etr_buf *etr_buf);
 void __tmc_etr_disable_to_bam(struct tmc_drvdata *drvdata);
 void tmc_etr_bam_disable(struct tmc_drvdata *drvdata);
 void tmc_etr_enable_hw(struct tmc_drvdata *drvdata);
-void tmc_etr_disable_hw(struct tmc_drvdata *drvdata);
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata, bool flush);
 void usb_notifier(void *priv, unsigned int event, struct qdss_request *d_req,
 		  struct usb_qdss_ch *ch);
 int tmc_etr_bam_init(struct amba_device *adev,
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
index 063e89e..c776a35 100644
--- a/drivers/iio/accel/cros_ec_accel_legacy.c
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -328,7 +328,6 @@ static const struct iio_chan_spec_ext_info cros_ec_accel_legacy_ext_info[] = {
 		.modified = 1,					        \
 		.info_mask_separate =					\
 			BIT(IIO_CHAN_INFO_RAW) |			\
-			BIT(IIO_CHAN_INFO_SCALE) |			\
 			BIT(IIO_CHAN_INFO_CALIBBIAS),			\
 		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE),	\
 		.ext_info = cros_ec_accel_legacy_ext_info,		\
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index 0538ff8..49c1956 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -86,7 +86,7 @@
 #define MAX9611_TEMP_MAX_POS		0x7f80
 #define MAX9611_TEMP_MAX_NEG		0xff80
 #define MAX9611_TEMP_MIN_NEG		0xd980
-#define MAX9611_TEMP_MASK		GENMASK(7, 15)
+#define MAX9611_TEMP_MASK		GENMASK(15, 7)
 #define MAX9611_TEMP_SHIFT		0x07
 #define MAX9611_TEMP_RAW(_r)		((_r) >> MAX9611_TEMP_SHIFT)
 #define MAX9611_TEMP_SCALE_NUM		1000000
@@ -483,7 +483,7 @@ static int max9611_init(struct max9611_dev *max9611)
 	if (ret)
 		return ret;
 
-	regval = ret & MAX9611_TEMP_MASK;
+	regval &= MAX9611_TEMP_MASK;
 
 	if ((regval > MAX9611_TEMP_MAX_POS &&
 	     regval < MAX9611_TEMP_MIN_NEG) ||
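
The max9611 fix is purely about argument order: the macro is GENMASK(high, low), so GENMASK(7, 15) produces an empty mask while GENMASK(15, 7) yields the intended 0xff80 covering bits 15..7. A tiny userspace check of the arithmetic, with the kernel's definition inlined for the demo:

    #include <assert.h>
    #include <stdio.h>

    /* The kernel's GENMASK for unsigned long, inlined for this demo. */
    #define BITS_PER_LONG   (8 * sizeof(unsigned long))
    #define GENMASK(h, l) \
            (((~0UL) - (1UL << (l)) + 1) & \
             (~0UL >> (BITS_PER_LONG - 1 - (h))))

    int main(void)
    {
            assert(GENMASK(15, 7) == 0xff80UL);     /* bits 15..7 set */
            printf("GENMASK(15, 7) = %#lx\n", GENMASK(15, 7));
            return 0;
    }

The neighbouring regval hunk is the other half of the fix: `regval = ret & MASK` masked the function's return code (zero on success) instead of the value already read into regval, so `regval &= MASK` is what was actually intended.
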
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index ef459f2..7586c1d 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -3182,18 +3182,18 @@ static int ib_mad_port_open(struct ib_device *device,
 	if (has_smi)
 		cq_size *= 2;
 
+	port_priv->pd = ib_alloc_pd(device, 0);
+	if (IS_ERR(port_priv->pd)) {
+		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
+		ret = PTR_ERR(port_priv->pd);
+		goto error3;
+	}
+
 	port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
 			IB_POLL_WORKQUEUE);
 	if (IS_ERR(port_priv->cq)) {
 		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
 		ret = PTR_ERR(port_priv->cq);
-		goto error3;
-	}
-
-	port_priv->pd = ib_alloc_pd(device, 0);
-	if (IS_ERR(port_priv->pd)) {
-		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
-		ret = PTR_ERR(port_priv->pd);
 		goto error4;
 	}
 
@@ -3236,11 +3236,11 @@ static int ib_mad_port_open(struct ib_device *device,
 error7:
 	destroy_mad_qp(&port_priv->qp_info[0]);
 error6:
-	ib_dealloc_pd(port_priv->pd);
-error4:
 	ib_free_cq(port_priv->cq);
 	cleanup_recv_queue(&port_priv->qp_info[1]);
 	cleanup_recv_queue(&port_priv->qp_info[0]);
+error4:
+	ib_dealloc_pd(port_priv->pd);
 error3:
 	kfree(port_priv);
 
@@ -3270,8 +3270,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
 	destroy_workqueue(port_priv->wq);
 	destroy_mad_qp(&port_priv->qp_info[1]);
 	destroy_mad_qp(&port_priv->qp_info[0]);
-	ib_dealloc_pd(port_priv->pd);
 	ib_free_cq(port_priv->cq);
+	ib_dealloc_pd(port_priv->pd);
 	cleanup_recv_queue(&port_priv->qp_info[1]);
 	cleanup_recv_queue(&port_priv->qp_info[0]);
 	/* XXX: Handle deallocation of MAD registration tables */
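
The mad.c reorder restores a long-standing convention: tear down in the reverse order of setup, with each goto label undoing exactly the steps that succeeded before it; since the PD is now allocated before the CQ, it must be released after it. The skeleton of the idiom, with hypothetical alloc/free pairs:

    struct example;
    int alloc_a(struct example *e);
    int alloc_b(struct example *e);
    int alloc_c(struct example *e);
    void free_a(struct example *e);
    void free_b(struct example *e);

    static int example_setup(struct example *e)
    {
            int ret;

            ret = alloc_a(e);               /* step 1 */
            if (ret)
                    return ret;

            ret = alloc_b(e);               /* step 2 */
            if (ret)
                    goto err_free_a;

            ret = alloc_c(e);               /* step 3 */
            if (ret)
                    goto err_free_b;

            return 0;

    err_free_b:
            free_b(e);                      /* undo step 2 */
    err_free_a:
            free_a(e);                      /* undo step 1 */
            return ret;
    }
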
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index c34a685..a18f3f8 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -49,6 +49,7 @@
 #include <linux/sched.h>
 #include <linux/semaphore.h>
 #include <linux/slab.h>
+#include <linux/nospec.h>
 
 #include <linux/uaccess.h>
 
@@ -868,11 +869,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
 
 	if (get_user(id, arg))
 		return -EFAULT;
+	if (id >= IB_UMAD_MAX_AGENTS)
+		return -EINVAL;
 
 	mutex_lock(&file->port->file_mutex);
 	mutex_lock(&file->mutex);
 
-	if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
+	id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
+	if (!__get_agent(file, id)) {
 		ret = -EINVAL;
 		goto out;
 	}
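
The user_mad change is the standard Spectre-v1 mitigation: the architectural bounds check alone does not constrain speculative execution, so array_index_nospec() additionally clamps the index on the speculative path before it is used to address the agent table. Reduced to its essentials:

    #include <linux/nospec.h>
    #include <linux/types.h>

    #define EX_MAX_AGENTS 32        /* stand-in for IB_UMAD_MAX_AGENTS */

    static void *example_lookup(void *table[], u32 id)
    {
            if (id >= EX_MAX_AGENTS)
                    return NULL;    /* architectural bounds check */

            /* Clamp id for the speculative path as well. */
            id = array_index_nospec(id, EX_MAX_AGENTS);
            return table[id];
    }
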
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 9bab4fb..bd1fdad 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
-{
-	return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
-}
 
 static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
 {
 	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
 }
 
-static bool use_umr(struct mlx5_ib_dev *dev, int order)
-{
-	return order <= mr_cache_max_order(dev) &&
-		umr_can_modify_entity_size(dev);
-}
-
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
 	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
@@ -1305,7 +1295,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct mlx5_ib_mr *mr = NULL;
-	bool populate_mtts = false;
+	bool use_umr;
 	struct ib_umem *umem;
 	int page_shift;
 	int npages;
@@ -1338,29 +1328,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (err < 0)
 		return ERR_PTR(err);
 
-	if (use_umr(dev, order)) {
+	use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
+		  (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
+		   !MLX5_CAP_GEN(dev->mdev, atomic));
+
+	if (order <= mr_cache_max_order(dev) && use_umr) {
 		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
 					 page_shift, order, access_flags);
 		if (PTR_ERR(mr) == -EAGAIN) {
 			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
 			mr = NULL;
 		}
-		populate_mtts = false;
 	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
 		if (access_flags & IB_ACCESS_ON_DEMAND) {
 			err = -EINVAL;
 			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
 			goto error;
 		}
-		populate_mtts = true;
+		use_umr = false;
 	}
 
 	if (!mr) {
-		if (!umr_can_modify_entity_size(dev))
-			populate_mtts = true;
 		mutex_lock(&dev->slow_path_mutex);
 		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
-				page_shift, access_flags, populate_mtts);
+				page_shift, access_flags, !use_umr);
 		mutex_unlock(&dev->slow_path_mutex);
 	}
 
@@ -1378,7 +1369,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	update_odp_mr(mr);
 #endif
 
-	if (!populate_mtts) {
+	if (use_umr) {
 		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
 
 		if (access_flags & IB_ACCESS_ON_DEMAND)
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index 7807325..c431df7 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -141,7 +141,12 @@ static int iforce_usb_probe(struct usb_interface *intf,
 		return -ENODEV;
 
 	epirq = &interface->endpoint[0].desc;
+	if (!usb_endpoint_is_int_in(epirq))
+		return -ENODEV;
+
 	epout = &interface->endpoint[1].desc;
+	if (!usb_endpoint_is_int_out(epout))
+		return -ENODEV;
 
 	if (!(iforce = kzalloc(sizeof(struct iforce) + 32, GFP_KERNEL)))
 		goto fail;
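
Both iforce checks enforce the same rule: never trust a device's descriptor layout; before wiring up URBs, confirm that the endpoint at each expected index really has the direction and transfer type the driver assumes. A probe-time sketch (hypothetical driver, assuming two endpoints):

    #include <linux/errno.h>
    #include <linux/usb.h>

    static int example_check_endpoints(struct usb_interface *intf)
    {
            struct usb_host_interface *alt = intf->cur_altsetting;

            if (alt->desc.bNumEndpoints < 2)
                    return -ENODEV;

            /* ep[0] must be interrupt-IN, ep[1] interrupt-OUT. */
            if (!usb_endpoint_is_int_in(&alt->endpoint[0].desc) ||
                !usb_endpoint_is_int_out(&alt->endpoint[1].desc))
                    return -ENODEV;

            return 0;
    }

The kbtab hunk further down applies the same check and additionally moves the endpoint lookup ahead of any allocation, so a malformed device is rejected before resources are committed.
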
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 530142b..eb9b9de 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1810,6 +1810,30 @@ static int elantech_create_smbus(struct psmouse *psmouse,
 				  leave_breadcrumbs);
 }
 
+static bool elantech_use_host_notify(struct psmouse *psmouse,
+				     struct elantech_device_info *info)
+{
+	if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
+		return true;
+
+	switch (info->bus) {
+	case ETP_BUS_PS2_ONLY:
+		/* expected case */
+		break;
+	case ETP_BUS_SMB_HST_NTFY_ONLY:
+	case ETP_BUS_PS2_SMB_HST_NTFY:
+		/* SMBus implementation has been stable since 2018 */
+		if (dmi_get_bios_year() >= 2018)
+			return true;
+	default:
+		psmouse_dbg(psmouse,
+			    "Ignoring SMBus bus provider %d\n", info->bus);
+		break;
+	}
+
+	return false;
+}
+
 /**
  * elantech_setup_smbus - called once the PS/2 devices are enumerated
  * and decides to instantiate a SMBus InterTouch device.
@@ -1829,7 +1853,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
 		 * i2c_blacklist_pnp_ids.
 		 * Old ICs are up to the user to decide.
 		 */
-		if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
+		if (!elantech_use_host_notify(psmouse, info) ||
 		    psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
 			return -ENXIO;
 	}
@@ -1849,34 +1873,6 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
 	return 0;
 }
 
-static bool elantech_use_host_notify(struct psmouse *psmouse,
-				     struct elantech_device_info *info)
-{
-	if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
-		return true;
-
-	switch (info->bus) {
-	case ETP_BUS_PS2_ONLY:
-		/* expected case */
-		break;
-	case ETP_BUS_SMB_ALERT_ONLY:
-		/* fall-through  */
-	case ETP_BUS_PS2_SMB_ALERT:
-		psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
-		break;
-	case ETP_BUS_SMB_HST_NTFY_ONLY:
-		/* fall-through  */
-	case ETP_BUS_PS2_SMB_HST_NTFY:
-		return true;
-	default:
-		psmouse_dbg(psmouse,
-			    "Ignoring SMBus bus provider %d.\n",
-			    info->bus);
-	}
-
-	return false;
-}
-
 int elantech_init_smbus(struct psmouse *psmouse)
 {
 	struct elantech_device_info info;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index af7d484..06cebde 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -185,6 +185,7 @@ static const char * const smbus_pnp_ids[] = {
 	"LEN2055", /* E580 */
 	"SYN3052", /* HP EliteBook 840 G4 */
 	"SYN3221", /* HP 15-ay000 */
+	"SYN323d", /* HP Spectre X360 13-w013dx */
 	NULL
 };
 
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 10a0391..538986e 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -161,7 +161,8 @@ struct trackpoint_data {
 #ifdef CONFIG_MOUSE_PS2_TRACKPOINT
 int trackpoint_detect(struct psmouse *psmouse, bool set_properties);
 #else
-inline int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
+static inline int trackpoint_detect(struct psmouse *psmouse,
+				    bool set_properties)
 {
 	return -ENOSYS;
 }
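
The trackpoint.h one-liner matters because a bare `inline` in a header need not emit an out-of-line definition, so non-inlined calls (for example at -O0) can fail to link, and it carries external linkage semantics that can clash across translation units. Config-gated stubs should always be `static inline` so each translation unit gets its own copy. The usual header shape, with a hypothetical API:

    /* example.h -- config-gated API */
    #include <linux/errno.h>

    struct example_dev;

    #ifdef CONFIG_EXAMPLE
    int example_detect(struct example_dev *dev);    /* real symbol in example.c */
    #else
    static inline int example_detect(struct example_dev *dev)
    {
            return -ENOSYS;                         /* compiled-out stub */
    }
    #endif
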
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 75b5006..b1cf0c9 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -116,6 +116,10 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
 	if (intf->cur_altsetting->desc.bNumEndpoints < 1)
 		return -ENODEV;
 
+	endpoint = &intf->cur_altsetting->endpoint[0].desc;
+	if (!usb_endpoint_is_int_in(endpoint))
+		return -ENODEV;
+
 	kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
 	input_dev = input_allocate_device();
 	if (!kbtab || !input_dev)
@@ -154,8 +158,6 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
 	input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0);
 	input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0);
 
-	endpoint = &intf->cur_altsetting->endpoint[0].desc;
-
 	usb_fill_int_urb(kbtab->irq, dev,
 			 usb_rcvintpipe(dev, endpoint->bEndpointAddress),
 			 kbtab->data, 8,
diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c
index 6a3953d..be3f1ae 100644
--- a/drivers/input/touchscreen/st/fts.c
+++ b/drivers/input/touchscreen/st/fts.c
@@ -4537,7 +4537,36 @@ static int check_dt(struct device_node *np)
 	return -ENODEV;
 }
 
-static int fts_probe(struct i2c_client *client,
+static int check_default_tp(struct device_node *dt, const char *prop)
+{
+	const char *active_tp;
+	const char *compatible;
+	char *start;
+	int ret;
+
+	ret = of_property_read_string(dt->parent, prop, &active_tp);
+	if (ret) {
+		pr_err("%s: failed to read %s, %d\n", __func__, prop, ret);
+		return -ENODEV;
+	}
+
+	ret = of_property_read_string(dt, "compatible", &compatible);
+	if (ret < 0) {
+		pr_err("%s: failed to read %s, %d\n", __func__, "compatible", ret);
+		return -ENODEV;
+	}
+
+	start = strnstr(active_tp, compatible, strlen(active_tp));
+	if (start == NULL) {
+		pr_err("%s: no matching compatible: %s vs %s\n",
+			__func__, compatible, active_tp);
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
+static int fts_probe_internal(struct i2c_client *client,
 		const struct i2c_device_id *idp)
 {
 	struct fts_ts_info *info = NULL;
@@ -4548,10 +4577,7 @@ static int fts_probe(struct i2c_client *client,
 
 	logError(0, "%s %s: driver probe begin!\n", tag, __func__);
 
-	error = check_dt(dp);
-
-	if (error != OK ||
-		!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
 		logError(1, "%s Unsupported I2C functionality\n", tag);
 		error = -EIO;
 		goto ProbeErrorExit_0;
@@ -4928,6 +4954,23 @@ static int fts_probe(struct i2c_client *client,
 	return error;
 }
 
+static int fts_probe(struct i2c_client *client, const struct i2c_device_id *idp)
+{
+	int error = 0;
+	struct device_node *dp = client->dev.of_node;
+
+	if (check_dt(dp)) {
+		if (!check_default_tp(dp, "qcom,i2c-touch-active"))
+			error = -EPROBE_DEFER;
+		else
+			error = -ENODEV;
+
+		return error;
+	}
+
+	return fts_probe_internal(client, idp);
+}
+
 static int fts_remove(struct i2c_client *client)
 {
 	struct fts_ts_info *info = i2c_get_clientdata(client);
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
old mode 100755
new mode 100644
index 900fc8e..00ea777
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
@@ -451,7 +451,7 @@ static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data,
 	return retval;
 }
 
-int check_dt(struct device_node *np)
+static int check_dt(struct device_node *np)
 {
 	int i;
 	int count;
@@ -475,6 +475,35 @@ int check_dt(struct device_node *np)
 	return -ENODEV;
 }
 
+static int check_default_tp(struct device_node *dt, const char *prop)
+{
+	const char *active_tp;
+	const char *compatible;
+	char *start;
+	int ret;
+
+	ret = of_property_read_string(dt->parent, prop, &active_tp);
+	if (ret) {
+		pr_err("%s: failed to read %s, %d\n", __func__, prop, ret);
+		return -ENODEV;
+	}
+
+	ret = of_property_read_string(dt, "compatible", &compatible);
+	if (ret < 0) {
+		pr_err("%s: failed to read %s, %d\n", __func__, "compatible", ret);
+		return -ENODEV;
+	}
+
+	start = strnstr(active_tp, compatible, strlen(active_tp));
+	if (start == NULL) {
+		pr_err("%s: no matching compatible: %s vs %s\n",
+			__func__, compatible, active_tp);
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
 static struct synaptics_dsx_bus_access bus_access = {
 	.type = BUS_I2C,
 	.read = synaptics_rmi4_i2c_read,
@@ -490,9 +519,16 @@ static int synaptics_rmi4_i2c_probe(struct i2c_client *client,
 		const struct i2c_device_id *dev_id)
 {
 	int retval;
+	struct device_node *dp = client->dev.of_node;
 
-	if (check_dt(client->dev.of_node))
-		return -ENODEV;
+	if (check_dt(dp)) {
+		if (!check_default_tp(dp, "qcom,i2c-touch-active"))
+			retval = -EPROBE_DEFER;
+		else
+			retval = -ENODEV;
+
+		return retval;
+	}
 
 	if (!i2c_check_functionality(client->adapter,
 			I2C_FUNC_SMBUS_BYTE_DATA)) {
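
Both touch drivers now distinguish two failure modes when the device-tree match fails: if the qcom,i2c-touch-active property still names this controller's compatible, the active-panel selection simply has not resolved yet, and -EPROBE_DEFER asks the driver core to retry the probe later; otherwise -ENODEV gives up for good. The decision, sketched with hypothetical helpers standing in for check_dt()/check_default_tp():

    #include <linux/errno.h>
    #include <linux/i2c.h>
    #include <linux/of.h>

    bool example_is_active_panel(struct device_node *np);
    bool example_is_default_panel(struct device_node *np);
    int example_do_probe(struct i2c_client *client);

    static int example_probe(struct i2c_client *client)
    {
            struct device_node *np = client->dev.of_node;

            if (!example_is_active_panel(np)) {
                    /*
                     * Still named as the default panel: selection is not
                     * resolved yet, so ask the core to retry us later
                     * rather than failing permanently.
                     */
                    if (example_is_default_panel(np))
                            return -EPROBE_DEFER;
                    return -ENODEV;
            }

            return example_do_probe(client);
    }
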
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index d61570d..48304e2 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -1672,6 +1672,8 @@ static int usbtouch_probe(struct usb_interface *intf,
 	if (!usbtouch || !input_dev)
 		goto out_free;
 
+	mutex_init(&usbtouch->pm_mutex);
+
 	type = &usbtouch_dev_info[id->driver_info];
 	usbtouch->type = type;
 	if (!type->process_pkt)
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 3a1d303..66b4800 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1710,7 +1710,7 @@ static const struct attribute_group *amd_iommu_groups[] = {
 	NULL,
 };
 
-static int iommu_init_pci(struct amd_iommu *iommu)
+static int __init iommu_init_pci(struct amd_iommu *iommu)
 {
 	int cap_ptr = iommu->cap_ptr;
 	u32 range, misc, low, high;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 8ced186..fb793de 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1147,21 +1147,44 @@ static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
 	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
 }
 
+static void arm_smmu_tlb_inv_context_s1(void *cookie);
+
 static void arm_smmu_tlb_sync_context(void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct device *dev = smmu_domain->dev;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
 	unsigned long flags;
+	int ret;
+	bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
+	ktime_t cur = ktime_get();
+
+	ret = arm_smmu_domain_power_on(&smmu_domain->domain,
+				       smmu_domain->smmu);
+	if (ret)
+		return;
+
+	trace_tlbi_start(dev, 0);
+
+	if (!use_tlbiall)
+		writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
+	else
+		writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
 
 	spin_lock_irqsave(&smmu_domain->sync_lock, flags);
 	if (__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
-					base + ARM_SMMU_CB_TLBSTATUS))
+				base + ARM_SMMU_CB_TLBSTATUS))
 		dev_err_ratelimited(smmu->dev,
-				"TLB sync on cb%d failed for device %s\n",
-				smmu_domain->cfg.cbndx,
-				dev_name(smmu_domain->dev));
+				    "TLB sync on cb%d failed for device %s\n",
+				    smmu_domain->cfg.cbndx,
+				    dev_name(smmu_domain->dev));
 	spin_unlock_irqrestore(&smmu_domain->sync_lock, flags);
+
+	trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
+
+	arm_smmu_domain_power_off(&smmu_domain->domain, smmu_domain->smmu);
 }
 
 static void arm_smmu_tlb_sync_vmid(void *cookie)
@@ -1173,23 +1196,7 @@ static void arm_smmu_tlb_sync_vmid(void *cookie)
 
 static void arm_smmu_tlb_inv_context_s1(void *cookie)
 {
-	struct arm_smmu_domain *smmu_domain = cookie;
-	struct device *dev = smmu_domain->dev;
-	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
-	bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
-	ktime_t cur = ktime_get();
-
-	trace_tlbi_start(dev, 0);
-
-	if (!use_tlbiall)
-		writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
-	else
-		writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
-
-	arm_smmu_tlb_sync_context(cookie);
-	trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
+	/* Invalidation is deferred to arm_smmu_tlb_sync_context(). */
 }
 
 static void arm_smmu_tlb_inv_context_s2(void *cookie)
@@ -1483,6 +1490,7 @@ static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
 
 	phys = arm_smmu_iova_to_phys_hard(domain, iova);
 	smmu_domain->pgtbl_cfg.tlb->tlb_flush_all(smmu_domain);
+	smmu_domain->pgtbl_cfg.tlb->tlb_sync(smmu_domain);
 	phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
 
 	if (phys != phys_post_tlbiall) {
@@ -2539,6 +2547,7 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
 
 	/* Ensure there are no stale mappings for this context bank */
 	tlb->tlb_flush_all(smmu_domain);
+	tlb->tlb_sync(smmu_domain);
 }
 
 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
@@ -3025,17 +3034,12 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 	if (arm_smmu_is_slave_side_secure(smmu_domain))
 		return msm_secure_smmu_unmap(domain, iova, size);
 
-	ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
-	if (ret)
-		return ret;
-
 	arm_smmu_secure_domain_lock(smmu_domain);
 
 	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
 	ret = ops->unmap(ops, iova, size);
 	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 
-	arm_smmu_domain_power_off(domain, smmu_domain->smmu);
 	/*
 	 * While splitting up block mappings, we might allocate page table
 	 * memory during unmap, so the vmids needs to be assigned to the
@@ -3194,6 +3198,14 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
 	return ret;
 }
 
+static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+	if (smmu_domain->tlb_ops)
+		smmu_domain->tlb_ops->tlb_sync(smmu_domain);
+}
+
 /*
  * This function can sleep, and cannot be called from atomic context. Will
  * power on register block if required. This restriction does not apply to the
@@ -3960,6 +3972,8 @@ static struct iommu_ops arm_smmu_ops = {
 	.map			= arm_smmu_map,
 	.unmap			= arm_smmu_unmap,
 	.map_sg			= arm_smmu_map_sg,
+	.flush_iotlb_all	= arm_smmu_iotlb_sync,
+	.iotlb_sync		= arm_smmu_iotlb_sync,
 	.iova_to_phys		= arm_smmu_iova_to_phys,
 	.iova_to_phys_hard	= arm_smmu_iova_to_phys_hard,
 	.add_device		= arm_smmu_add_device,
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 899d7c3e..ec88a51 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -153,6 +153,7 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
 		bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
 
 		iommu_tlbiall(mapping->domain);
+		iommu_tlb_sync(mapping->domain);
 		mapping->have_stale_tlbs = false;
 		av8l_fast_clear_stale_ptes(mapping->pgtbl_ops, skip_sync);
 	}
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index 541abb2..688e037 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -86,6 +86,7 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops)
 
 	iop = container_of(ops, struct io_pgtable, ops);
 	io_pgtable_tlb_flush_all(iop);
+	io_pgtable_tlb_sync(iop);
 	io_pgtable_init_table[iop->fmt]->free(iop);
 }
 
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index ee30e89..9ba73e1 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2883,7 +2883,7 @@ static int its_vpe_init(struct its_vpe *vpe)
 
 	if (!its_alloc_vpe_table(vpe_id)) {
 		its_vpe_id_free(vpe_id);
-		its_free_pending_table(vpe->vpt_page);
+		its_free_pending_table(vpt_page);
 		return -ENOMEM;
 	}
 
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 4760307..cef8f5e 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
 	.irq_unmask		= imx_gpcv2_irq_unmask,
 	.irq_set_wake		= imx_gpcv2_irq_set_wake,
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_set_type		= irq_chip_set_type_parent,
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= irq_chip_set_affinity_parent,
 #endif
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index f6fd115..b3cb7fe 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -132,6 +132,7 @@ static const struct of_device_id qcom_apcs_ipc_of_match[] = {
 	{ .compatible = "qcom,sm8150-apcs-hmss-global", .data = (void *) 12 },
 	{ .compatible = "qcom,sm8150-spcs-global", .data = (void *)0 },
 	{ .compatible = "qcom,kona-spcs-global", .data = (void *)0 },
+	{ .compatible = "qcom,bengal-apcs-hmss-global", .data = (void *)8 },
 	{}
 };
 MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 7d480c9..7e426e4 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -130,6 +130,7 @@ struct mapped_device {
 };
 
 int md_in_flight(struct mapped_device *md);
+void disable_discard(struct mapped_device *md);
 void disable_write_same(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);
 
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 6e547b8..264b84e 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -295,11 +295,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
 	}
 
 	if (unlikely(error == BLK_STS_TARGET)) {
-		if (req_op(clone) == REQ_OP_WRITE_SAME &&
-		    !clone->q->limits.max_write_same_sectors)
+		if (req_op(clone) == REQ_OP_DISCARD &&
+		    !clone->q->limits.max_discard_sectors)
+			disable_discard(tio->md);
+		else if (req_op(clone) == REQ_OP_WRITE_SAME &&
+			 !clone->q->limits.max_write_same_sectors)
 			disable_write_same(tio->md);
-		if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
-		    !clone->q->limits.max_write_zeroes_sectors)
+		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+			 !clone->q->limits.max_write_zeroes_sectors)
 			disable_write_zeroes(tio->md);
 	}
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 42768fe..c9860e3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -910,6 +910,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
 	}
 }
 
+void disable_discard(struct mapped_device *md)
+{
+	struct queue_limits *limits = dm_get_queue_limits(md);
+
+	/* device doesn't really support DISCARD, disable it */
+	limits->max_discard_sectors = 0;
+	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
+}
+
 void disable_write_same(struct mapped_device *md)
 {
 	struct queue_limits *limits = dm_get_queue_limits(md);
@@ -935,11 +944,14 @@ static void clone_endio(struct bio *bio)
 	dm_endio_fn endio = tio->ti->type->end_io;
 
 	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
-		if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-		    !bio->bi_disk->queue->limits.max_write_same_sectors)
+		if (bio_op(bio) == REQ_OP_DISCARD &&
+		    !bio->bi_disk->queue->limits.max_discard_sectors)
+			disable_discard(md);
+		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
+			 !bio->bi_disk->queue->limits.max_write_same_sectors)
 			disable_write_same(md);
-		if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-		    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+			 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
 			disable_write_zeroes(md);
 	}
 
diff --git a/drivers/media/platform/msm/cvp/cvp_core_hfi.h b/drivers/media/platform/msm/cvp/cvp_core_hfi.h
index 59b67b8..5481ea2 100644
--- a/drivers/media/platform/msm/cvp/cvp_core_hfi.h
+++ b/drivers/media/platform/msm/cvp/cvp_core_hfi.h
@@ -266,6 +266,7 @@ struct iris_hfi_device {
 	unsigned int skip_pc_count;
 	struct msm_cvp_capability *sys_init_capabilities;
 	struct iris_hfi_vpu_ops *vpu_ops;
+	struct delayed_work dsp_init_work;
 };
 
 void cvp_iris_hfi_delete_device(void *device);
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi.c b/drivers/media/platform/msm/cvp/cvp_hfi.c
index 4700cae..24b6296 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi.c
+++ b/drivers/media/platform/msm/cvp/cvp_hfi.c
@@ -266,6 +266,7 @@ const int cvp_max_packets = 32;
 
 static void iris_hfi_pm_handler(struct work_struct *work);
 static DECLARE_DELAYED_WORK(iris_hfi_pm_work, iris_hfi_pm_handler);
+static void dsp_init_work_handler(struct work_struct *work);
 static inline int __resume(struct iris_hfi_device *device);
 static inline int __suspend(struct iris_hfi_device *device);
 static int __disable_regulators(struct iris_hfi_device *device);
@@ -506,7 +507,7 @@ static int __dsp_send_hfi_queue(struct iris_hfi_device *device)
 		(phys_addr_t *)device->dsp_iface_q_table.mem_data.dma_handle,
 		device->dsp_iface_q_table.mem_data.size);
 	if (rc) {
-		dprintk(CVP_ERR, "%s: dsp init failed\n", __func__);
+		dprintk(CVP_ERR, "%s: dsp hfi queue init failed\n", __func__);
 		return rc;
 	}
 
@@ -1386,6 +1387,8 @@ static void cvp_dump_csr(struct iris_hfi_device *dev)
 
 	if (!dev)
 		return;
+	if (!dev->power_enabled)
+		return;
 	reg = __read_register(dev, CVP_WRAPPER_CPU_STATUS);
 	dprintk(CVP_ERR, "CVP_WRAPPER_CPU_STATUS: %x\n", reg);
 	reg = __read_register(dev, CVP_CPU_CS_SCIACMDARG0);
@@ -2059,7 +2062,8 @@ static int __interface_queues_init(struct iris_hfi_device *dev)
 	}
 
 	vsfr = (struct cvp_hfi_sfr_struct *) dev->sfr.align_virtual_addr;
-	vsfr->bufSize = ALIGNED_SFR_SIZE;
+	if (vsfr)
+		vsfr->bufSize = ALIGNED_SFR_SIZE;
 
 	rc = __interface_dsp_queues_init(dev);
 	if (rc) {
@@ -2154,6 +2158,43 @@ static int __sys_set_power_control(struct iris_hfi_device *device,
 	return 0;
 }
 
+static void dsp_init_work_handler(struct work_struct *work)
+{
+	int rc = 0;
+	static int retry_count;
+	struct iris_hfi_device *device;
+
+	if (!work) {
+		dprintk(CVP_ERR, "%s: NULL device\n", __func__);
+		return;
+	}
+
+	device = container_of(work, struct iris_hfi_device, dsp_init_work.work);
+	if (!device) {
+		dprintk(CVP_ERR, "%s: NULL device\n", __func__);
+		return;
+	}
+
+	dprintk(CVP_PROF, "Entering %s\n", __func__);
+
+	mutex_lock(&device->lock);
+	rc = __dsp_send_hfi_queue(device);
+	mutex_unlock(&device->lock);
+
+	if (rc) {
+		if (retry_count > MAX_DSP_INIT_ATTEMPTS) {
+			dprintk(CVP_ERR, "%s: max retries exceeded\n", __func__);
+			return;
+		}
+		dprintk(CVP_PROF, "%s: Attempt to init DSP %d\n",
+			__func__, retry_count);
+
+		schedule_delayed_work(&device->dsp_init_work,
+				msecs_to_jiffies(CVP_MAX_WAIT_TIME));
+		++retry_count;
+	}
+}
+
 static int iris_hfi_core_init(void *device)
 {
 	int rc = 0;
@@ -2231,7 +2272,6 @@ static int iris_hfi_core_init(void *device)
 
 	__enable_subcaches(device);
 	__set_subcaches(device);
-	__dsp_send_hfi_queue(device);
 
 	__set_ubwc_config(device);
 	__sys_set_idle_indicator(device, true);
@@ -2244,9 +2284,15 @@ static int iris_hfi_core_init(void *device)
 		pm_qos_add_request(&dev->qos, PM_QOS_CPU_DMA_LATENCY,
 				dev->res->pm_qos_latency_us);
 	}
+
+	rc = __dsp_send_hfi_queue(device);
+	if (rc)
+		schedule_delayed_work(&dev->dsp_init_work,
+				msecs_to_jiffies(CVP_MAX_WAIT_TIME));
+
 	dprintk(CVP_DBG, "Core inited successfully\n");
 	mutex_unlock(&dev->lock);
-	return rc;
+	return 0;
 err_core_init:
 	__set_state(dev, IRIS_STATE_DEINIT);
 	__unload_fw(dev);
@@ -4626,7 +4672,7 @@ static void __unload_fw(struct iris_hfi_device *device)
 	device->resources.fw.cookie = NULL;
 	__deinit_resources(device);
 
-	dprintk(CVP_DBG, "Firmware unloaded successfully\n");
+	dprintk(CVP_WARN, "Firmware unloaded\n");
 }
 
 static int iris_hfi_get_fw_info(void *dev, struct cvp_hal_fw_info *fw_info)
@@ -4852,6 +4898,8 @@ static struct iris_hfi_device *__add_device(u32 device_id,
 	mutex_init(&hdevice->lock);
 	INIT_LIST_HEAD(&hdevice->sess_head);
 
+	INIT_DELAYED_WORK(&hdevice->dsp_init_work, dsp_init_work_handler);
+
 	return hdevice;
 
 err_cleanup:
diff --git a/drivers/media/platform/msm/cvp/hfi_response_handler.c b/drivers/media/platform/msm/cvp/hfi_response_handler.c
index 393ff3c..00f2d7c 100644
--- a/drivers/media/platform/msm/cvp/hfi_response_handler.c
+++ b/drivers/media/platform/msm/cvp/hfi_response_handler.c
@@ -480,10 +480,18 @@ static int hfi_process_session_cvp_msg(u32 device_id,
 		if (pkt->packet_type == HFI_MSG_SESSION_CVP_DFS
 			|| pkt->packet_type == HFI_MSG_SESSION_CVP_DME
 			|| pkt->packet_type == HFI_MSG_SESSION_CVP_ICA
-			|| pkt->packet_type == HFI_MSG_SESSION_CVP_FD)
+			|| pkt->packet_type == HFI_MSG_SESSION_CVP_FD) {
+			u64 ktid;
+			u32 kdata1, kdata2;
+
+			kdata1 = pkt->client_data.kdata1;
+			kdata2 = pkt->client_data.kdata2;
+			ktid = ((u64)kdata2 << 32) | kdata1;
+			msm_cvp_unmap_buf_cpu(inst, ktid);
+
 			return _deprecated_hfi_msg_process(device_id,
 				pkt, info, inst);
-
+		}
 		dprintk(CVP_ERR, "Invalid deprecate_bitmask %#x\n",
 					inst->deprecate_bitmask);
 	}
diff --git a/drivers/media/platform/msm/cvp/msm_cvp.c b/drivers/media/platform/msm/cvp/msm_cvp.c
index 44aa779..8cb781d 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp.c
@@ -369,6 +369,75 @@ static void __msm_cvp_cache_operations(struct msm_cvp_internal_buffer *cbuf)
 				cbuf->buf.offset, cbuf->buf.size);
 }
 
+static int msm_cvp_map_buf_user_persist(struct msm_cvp_inst *inst,
+					struct cvp_buf_type *in_buf,
+					u32 *iova)
+{
+	int rc = 0;
+	struct cvp_internal_buf *cbuf;
+	struct dma_buf *dma_buf;
+
+	if (!inst || !iova) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (in_buf->fd > 0) {
+		dma_buf = msm_cvp_smem_get_dma_buf(in_buf->fd);
+		if (!dma_buf) {
+			dprintk(CVP_ERR, "%s: Invalid fd=%d", __func__,
+				in_buf->fd);
+			return -EINVAL;
+		}
+		in_buf->dbuf = dma_buf;
+		msm_cvp_smem_put_dma_buf(dma_buf);
+	}
+
+	rc = msm_cvp_session_get_iova_addr(inst, in_buf, iova);
+	if (!rc && *iova != 0)
+		return 0;
+	cbuf = kzalloc(sizeof(*cbuf), GFP_KERNEL);
+	if (!cbuf)
+		return -ENOMEM;
+
+	cbuf->smem.buffer_type = in_buf->flags;
+	cbuf->smem.fd = in_buf->fd;
+	cbuf->smem.size = in_buf->size;
+	cbuf->smem.flags = 0;
+	cbuf->smem.offset = 0;
+	cbuf->smem.dma_buf = in_buf->dbuf;
+	cbuf->buffer_ownership = CLIENT;
+
+	rc = msm_cvp_smem_map_dma_buf(inst, &cbuf->smem);
+	if (rc) {
+		dprintk(CVP_ERR,
+		"%s: %x : fd %d %s size %d\n",
+		"map persist failed", hash32_ptr(inst->session), cbuf->smem.fd,
+		cbuf->smem.dma_buf->name, cbuf->smem.size);
+		goto exit;
+	}
+
+	/* Assign mapped dma_buf back because it could be zero previously */
+	in_buf->dbuf = cbuf->smem.dma_buf;
+
+	mutex_lock(&inst->persistbufs.lock);
+	list_add_tail(&cbuf->list, &inst->persistbufs.list);
+	mutex_unlock(&inst->persistbufs.lock);
+
+	*iova = cbuf->smem.device_addr;
+
+	dprintk(CVP_DBG,
+	"%s: %x : fd %d %s size %d\n", "map persist", hash32_ptr(inst->session),
+	cbuf->smem.fd, cbuf->smem.dma_buf->name, cbuf->smem.size);
+	return rc;
+
+exit:
+	kfree(cbuf);
+	cbuf = NULL;
+
+	return rc;
+}
+
 static int msm_cvp_map_buf_cpu(struct msm_cvp_inst *inst,
 				struct cvp_buf_type *in_buf,
 				u32 *iova,
@@ -625,6 +694,56 @@ static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
 	return rc;
 }
 
+static int msm_cvp_map_user_persist(struct msm_cvp_inst *inst,
+	struct cvp_kmd_hfi_packet *in_pkt,
+	unsigned int offset, unsigned int buf_num)
+{
+	struct cvp_buf_desc *buf_ptr;
+	struct cvp_buf_type *new_buf;
+	int i, rc = 0;
+	unsigned int iova;
+
+	if (!offset || !buf_num)
+		return 0;
+
+	for (i = 0; i < buf_num; i++) {
+		buf_ptr = (struct cvp_buf_desc *)
+				&in_pkt->pkt_data[offset];
+
+		offset += sizeof(*new_buf) >> 2;
+		new_buf = (struct cvp_buf_type *)buf_ptr;
+
+		/*
+		 * Make sure fd or dma_buf field doesn't have any
+		 * garbage value.
+		 */
+		if (inst->session_type == MSM_CVP_USER) {
+			new_buf->dbuf = 0;
+		} else if (inst->session_type == MSM_CVP_KERNEL) {
+			new_buf->fd = -1;
+		} else if (inst->session_type >= MSM_CVP_UNKNOWN) {
+			dprintk(CVP_ERR,
+				"%s: unknown session type %d\n",
+				__func__, inst->session_type);
+			return -EINVAL;
+		}
+
+		if (new_buf->fd <= 0 && !new_buf->dbuf)
+			continue;
+
+		rc = msm_cvp_map_buf_user_persist(inst, new_buf, &iova);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s: buf %d register failed.\n",
+				__func__, i);
+
+			return rc;
+		}
+		new_buf->fd = iova;
+	}
+	return rc;
+}
+
 static int msm_cvp_map_buf(struct msm_cvp_inst *inst,
 	struct cvp_kmd_hfi_packet *in_pkt,
 	unsigned int offset, unsigned int buf_num)
@@ -818,7 +937,11 @@ static int msm_cvp_session_process_hfi(
 		buf_num = in_buf_num;
 	}
 
-	rc = msm_cvp_map_buf(inst, in_pkt, offset, buf_num);
+	if (in_pkt->pkt_data[1] == HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS)
+		rc = msm_cvp_map_user_persist(inst, in_pkt, offset, buf_num);
+	else
+		rc = msm_cvp_map_buf(inst, in_pkt, offset, buf_num);
+
 	if (rc)
 		goto exit;
 
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_common.c b/drivers/media/platform/msm/cvp/msm_cvp_common.c
index e2bd3f8..2fb9850 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_common.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_common.c
@@ -638,15 +638,17 @@ static void handle_sys_error(enum hal_command_response cmd, void *data)
 	call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
 	list_for_each_entry(inst, &core->instances, list) {
 		dprintk(CVP_WARN,
-			"%s: sys error for inst %#x kref %x, cmd %x\n",
+			"%s: sys error inst %#x kref %x, cmd %x state %x\n",
 				__func__, inst, kref_read(&inst->kref),
-				inst->cur_cmd_type);
-		change_cvp_inst_state(inst, MSM_CVP_CORE_INVALID);
-
-		spin_lock_irqsave(&inst->event_handler.lock, flags);
-		inst->event_handler.event = CVP_SSR_EVENT;
-		spin_unlock_irqrestore(&inst->event_handler.lock, flags);
-		wake_up_all(&inst->event_handler.wq);
+				inst->cur_cmd_type, inst->state);
+		if (inst->state != MSM_CVP_CORE_INVALID) {
+			change_cvp_inst_state(inst, MSM_CVP_CORE_INVALID);
+			spin_lock_irqsave(&inst->event_handler.lock, flags);
+			inst->event_handler.event = CVP_SSR_EVENT;
+			spin_unlock_irqrestore(
+				&inst->event_handler.lock, flags);
+			wake_up_all(&inst->event_handler.wq);
+		}
 
 		if (!core->trigger_ssr)
 			msm_cvp_comm_print_inst_info(inst);
@@ -1477,7 +1479,7 @@ int msm_cvp_comm_kill_session(struct msm_cvp_inst *inst)
 		}
 	}
 
-	if (inst->state == MSM_CVP_CORE_UNINIT) {
+	if (inst->state >= MSM_CVP_CORE_UNINIT) {
 		spin_lock_irqsave(&inst->event_handler.lock, flags);
 		inst->event_handler.event = CVP_SSR_EVENT;
 		spin_unlock_irqrestore(&inst->event_handler.lock, flags);
@@ -1655,6 +1657,7 @@ static int allocate_and_set_internal_bufs(struct msm_cvp_inst *inst,
 	}
 
 	binfo->buffer_type = HFI_BUFFER_INTERNAL_PERSIST_1;
+	binfo->buffer_ownership = DRIVER;
 
 	rc = set_internal_buf_on_fw(inst, &binfo->smem, false);
 	if (rc)
@@ -1711,6 +1714,7 @@ int cvp_comm_release_persist_buffers(struct msm_cvp_inst *inst)
 	int rc = 0;
 	struct msm_cvp_core *core;
 	struct cvp_hfi_device *hdev;
+	int all_released;
 
 	if (!inst) {
 		dprintk(CVP_ERR, "Invalid instance pointer = %pK\n", inst);
@@ -1729,6 +1733,8 @@ int cvp_comm_release_persist_buffers(struct msm_cvp_inst *inst)
 	}
 
 	dprintk(CVP_DBG, "release persist buffer!\n");
+	all_released = 0;
+
 	mutex_lock(&inst->persistbufs.lock);
 	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
 		buf = list_entry(ptr, struct cvp_internal_buf, list);
@@ -1738,36 +1744,49 @@ int cvp_comm_release_persist_buffers(struct msm_cvp_inst *inst)
 			mutex_unlock(&inst->persistbufs.lock);
 			return -EINVAL;
 		}
-		if (inst->state > MSM_CVP_CLOSE_DONE) {
-			list_del(&buf->list);
-			msm_cvp_smem_free(handle);
-			kfree(buf);
-			continue;
-		}
-		buffer_info.buffer_size = handle->size;
-		buffer_info.buffer_type = buf->buffer_type;
-		buffer_info.num_buffers = 1;
-		buffer_info.align_device_addr = handle->device_addr;
-		buffer_info.response_required = true;
-		rc = call_hfi_op(hdev, session_release_buffers,
-				(void *)inst->session, &buffer_info);
-		if (!rc) {
-			mutex_unlock(&inst->persistbufs.lock);
-			rc = wait_for_sess_signal_receipt(inst,
+
+		/* Workaround for FW: release buffer means release all */
+		if (inst->state <= MSM_CVP_CLOSE_DONE && !all_released) {
+			buffer_info.buffer_size = handle->size;
+			buffer_info.buffer_type = buf->buffer_type;
+			buffer_info.num_buffers = 1;
+			buffer_info.align_device_addr = handle->device_addr;
+			buffer_info.response_required = true;
+			rc = call_hfi_op(hdev, session_release_buffers,
+					(void *)inst->session, &buffer_info);
+			if (!rc) {
+				mutex_unlock(&inst->persistbufs.lock);
+				rc = wait_for_sess_signal_receipt(inst,
 					HAL_SESSION_RELEASE_BUFFER_DONE);
-			if (rc)
-				dprintk(CVP_WARN,
+				if (rc)
+					dprintk(CVP_WARN,
 					"%s: wait for signal failed, rc %d\n",
-						__func__, rc);
-			mutex_lock(&inst->persistbufs.lock);
-		} else {
-			dprintk(CVP_WARN,
-					"Rel prst buf fail:%x, %d\n",
-					buffer_info.align_device_addr,
-					buffer_info.buffer_size);
+					__func__, rc);
+				mutex_lock(&inst->persistbufs.lock);
+			} else {
+				dprintk(CVP_WARN,
+						"Rel prst buf fail:%x, %d\n",
+						buffer_info.align_device_addr,
+						buffer_info.buffer_size);
+			}
+			all_released = 1;
 		}
 		list_del(&buf->list);
-		msm_cvp_smem_free(handle);
+
+		if (buf->buffer_ownership == DRIVER) {
+			dprintk(CVP_DBG,
+			"%s: %x : fd %d %s size %d",
+			"free arp", hash32_ptr(inst->session), buf->smem.fd,
+			buf->smem.dma_buf->name, buf->smem.size);
+			msm_cvp_smem_free(handle);
+		} else if (buf->buffer_ownership == CLIENT) {
+			dprintk(CVP_DBG,
+			"%s: %x : fd %d %s size %d",
+			"unmap persist", hash32_ptr(inst->session),
+			buf->smem.fd, buf->smem.dma_buf->name, buf->smem.size);
+			msm_cvp_smem_unmap_dma_buf(inst, &buf->smem);
+		}
+
 		kfree(buf);
 	}
 	mutex_unlock(&inst->persistbufs.lock);
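
The rework above splits teardown by buffer_ownership: DRIVER-owned ARP buffers were allocated by the kernel and are freed outright, while CLIENT-owned persist buffers were only mapped and just need unmapping. A condensed sketch of that split (the one-shot firmware release command from the hunk is elided):

	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
		buf = list_entry(ptr, struct cvp_internal_buf, list);
		list_del(&buf->list);
		if (buf->buffer_ownership == DRIVER)
			msm_cvp_smem_free(&buf->smem);	/* kernel allocated it */
		else if (buf->buffer_ownership == CLIENT)
			msm_cvp_smem_unmap_dma_buf(inst, &buf->smem); /* client owns it */
		kfree(buf);
	}
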
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_common.h b/drivers/media/platform/msm/cvp/msm_cvp_common.h
index aac667d..99dd3fd 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_common.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_common.h
@@ -45,4 +45,5 @@ int cvp_comm_set_arp_buffers(struct msm_cvp_inst *inst);
 int cvp_comm_release_persist_buffers(struct msm_cvp_inst *inst);
 void print_client_buffer(u32 tag, const char *str,
 		struct msm_cvp_inst *inst, struct cvp_kmd_buffer *cbuf);
+void msm_cvp_unmap_buf_cpu(struct msm_cvp_inst *inst, u64 ktid);
 #endif
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_core.c b/drivers/media/platform/msm/cvp/msm_cvp_core.c
index 365f0a8..cde1eaa 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_core.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_core.c
@@ -213,8 +213,9 @@ static bool msm_cvp_check_for_inst_overload(struct msm_cvp_core *core)
 
 	/* Instance count includes current instance as well. */
 
-	if ((instance_count > core->resources.max_inst_count) ||
-		(secure_instance_count > core->resources.max_secure_inst_count))
+	if ((instance_count >= core->resources.max_inst_count) ||
+		(secure_instance_count >=
+			core->resources.max_secure_inst_count))
 		overload = true;
 	return overload;
 }
@@ -273,6 +274,19 @@ void *msm_cvp_open(int core_id, int session_type)
 		goto err_invalid_core;
 	}
 
+	core->resources.max_inst_count = MAX_SUPPORTED_INSTANCES;
+	if (msm_cvp_check_for_inst_overload(core)) {
+		dprintk(CVP_ERR, "Instance num reached Max, rejecting session");
+		mutex_lock(&core->lock);
+		list_for_each_entry(inst, &core->instances, list)
+			dprintk(CVP_ERR, "inst %pK, cmd %d id %d\n",
+				inst, inst->cur_cmd_type,
+				hash32_ptr(inst->session));
+		mutex_unlock(&core->lock);
+
+		return NULL;
+	}
+
 	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
 	if (!inst) {
 		dprintk(CVP_ERR, "Failed to allocate memory\n");
@@ -331,19 +345,6 @@ void *msm_cvp_open(int core_id, int session_type)
 		goto fail_init;
 	}
 
-	core->resources.max_inst_count = MAX_SUPPORTED_INSTANCES;
-	if (msm_cvp_check_for_inst_overload(core)) {
-		dprintk(CVP_ERR, "Instance num reached Max, rejecting session");
-		mutex_lock(&core->lock);
-		list_for_each_entry(inst, &core->instances, list)
-			dprintk(CVP_ERR, "inst %pK, cmd %d id %d\n",
-				inst, inst->cur_cmd_type,
-				hash32_ptr(inst->session));
-		mutex_unlock(&core->lock);
-
-		goto fail_init;
-	}
-
 	inst->debugfs_root =
 		msm_cvp_debugfs_init_inst(inst, core->debugfs_root);
 
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_dsp.c b/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
index 8dabdae..8218282 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
@@ -78,6 +78,7 @@ static int cvp_dsp_send_cmd(void *msg, uint32_t len)
 	int err = 0;
 
 	if (IS_ERR_OR_NULL(me->chan)) {
+		dprintk(CVP_ERR, "%s: DSP GLink is not ready\n", __func__);
 		err = -EINVAL;
 		goto bail;
 	}
@@ -211,6 +212,8 @@ int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
 		dprintk(CVP_ERR,
 			"%s: Incorrect DDR type value %d\n",
 			__func__, local_cmd_msg.ddr_type);
+		err = -EINVAL;
+		goto exit;
 	}
 
 	mutex_lock(&me->smd_mutex);
@@ -219,7 +222,7 @@ int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
 	mutex_unlock(&me->smd_mutex);
 
 	dprintk(CVP_DBG,
-		"%s :: address of buffer, PA=0x%pK  size_buff=%d ddr_type=%d\n",
+		"%s: address of buffer, PA=0x%pK  size_buff=%d ddr_type=%d\n",
 		__func__, phys_addr, size_in_bytes, local_cmd_msg.ddr_type);
 
 	err = hyp_assign_phys((uint64_t)local_cmd_msg.msg_ptr,
@@ -229,33 +232,34 @@ int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
 		dprintk(CVP_ERR,
 			"%s: Failed in hyp_assign. err=%d\n",
 			__func__, err);
-		return err;
+		goto exit;
 	}
 
 	err = cvp_dsp_send_cmd
 			 (&local_cmd_msg, sizeof(struct cvp_dsp_cmd_msg));
-	if (err != 0)
+	if (err) {
 		dprintk(CVP_ERR,
-			"%s: cvp_dsp_send_cmd failed with err=%d\n",
+			"%s: cvp_dsp_send_cmd failed with err=%d\n",
 			__func__, err);
-	else {
-		core = list_first_entry(&cvp_driver->cores,
-				struct msm_cvp_core, list);
-		timeout = msecs_to_jiffies(
-				core->resources.msm_cvp_dsp_rsp_timeout);
-		err = wait_for_completion_timeout(
-				&me->cmdqueue_send_work, timeout);
-		if (!err) {
-			dprintk(CVP_ERR, "failed to send cmdqueue\n");
-			return -ETIMEDOUT;
-		}
-
-		mutex_lock(&me->smd_mutex);
-		me->cvp_shutdown = STATUS_OK;
-		me->cdsp_state = STATUS_OK;
-		mutex_unlock(&me->smd_mutex);
+		goto exit;
 	}
 
+	core = list_first_entry(&cvp_driver->cores,
+			struct msm_cvp_core, list);
+	timeout = msecs_to_jiffies(
+			core->resources.msm_cvp_dsp_rsp_timeout);
+	if (!wait_for_completion_timeout(&me->cmdqueue_send_work, timeout)) {
+		dprintk(CVP_ERR, "failed to send cmdqueue\n");
+		err = -ETIMEDOUT;
+		goto exit;
+	}
+
+	mutex_lock(&me->smd_mutex);
+	me->cvp_shutdown = STATUS_OK;
+	me->cdsp_state = STATUS_OK;
+	mutex_unlock(&me->smd_mutex);
+
+exit:
 	return err;
 }
 
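Note the idiom the rework adopts: wait_for_completion_timeout() returns the remaining jiffies, or 0 on timeout, so testing the return value inline keeps err reserved for a real error code instead of overloading it with a jiffies count. In isolation:

	timeout = msecs_to_jiffies(core->resources.msm_cvp_dsp_rsp_timeout);
	if (!wait_for_completion_timeout(&me->cmdqueue_send_work, timeout))
		return -ETIMEDOUT;	/* 0 jiffies left means we timed out */
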
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_internal.h b/drivers/media/platform/msm/cvp/msm_cvp_internal.h
index 0d5b18c..139b322 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_internal.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_internal.h
@@ -28,7 +28,7 @@
 #define MAX_SUPPORTED_INSTANCES 16
 #define MAX_NAME_LENGTH 64
 #define MAX_DEBUGFS_NAME 50
-#define DCVS_FTB_WINDOW 16
+#define MAX_DSP_INIT_ATTEMPTS 16
 
 #define SYS_MSG_START HAL_SYS_INIT_DONE
 #define SYS_MSG_END HAL_SYS_ERROR
diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h
index 6bea45e..bafab3f 100644
--- a/drivers/media/platform/msm/npu/npu_common.h
+++ b/drivers/media/platform/msm/npu/npu_common.h
@@ -45,6 +45,7 @@
 #define NPU_MAX_DT_NAME_LEN	    21
 #define NPU_MAX_PWRLEVELS		8
 #define NPU_MAX_STATS_BUF_SIZE 16384
+#define NPU_MAX_BW_DEVS			4
 
 enum npu_power_level {
 	NPU_PWRLEVEL_MINSVS = 0,
@@ -174,7 +175,9 @@ struct npu_pwrctrl {
 	uint32_t min_pwrlevel;
 	uint32_t num_pwrlevels;
 
-	struct device *devbw;
+	struct device *devbw[NPU_MAX_BW_DEVS];
+	uint32_t devbw_num;
+	uint32_t bwmon_enabled;
 	uint32_t uc_pwrlevel;
 	uint32_t cdsprm_pwrlevel;
 	uint32_t fmax_pwrlevel;
diff --git a/drivers/media/platform/msm/npu/npu_dbg.c b/drivers/media/platform/msm/npu/npu_dbg.c
index f69efe5..988d177 100644
--- a/drivers/media/platform/msm/npu/npu_dbg.c
+++ b/drivers/media/platform/msm/npu/npu_dbg.c
@@ -17,18 +17,6 @@
  * Function Definitions - Debug
  * -------------------------------------------------------------------------
  */
-static void npu_dump_debug_timeout_stats(struct npu_device *npu_dev)
-{
-	uint32_t reg_val;
-
-	reg_val = REGR(npu_dev, REG_FW_JOB_CNT_START);
-	NPU_INFO("fw jobs execute started count = %d\n", reg_val);
-	reg_val = REGR(npu_dev, REG_FW_JOB_CNT_END);
-	NPU_INFO("fw jobs execute finished count = %d\n", reg_val);
-	reg_val = REGR(npu_dev, REG_NPU_FW_DEBUG_DATA);
-	NPU_INFO("fw jobs aco parser debug = %d\n", reg_val);
-}
-
 void npu_dump_ipc_packet(struct npu_device *npu_dev, void *cmd_ptr)
 {
 	int32_t *ptr = (int32_t *)cmd_ptr;
@@ -50,7 +38,7 @@ static void npu_dump_ipc_queue(struct npu_device *npu_dev, uint32_t target_que)
 		target_que * sizeof(struct hfi_queue_header);
 	int32_t *ptr = (int32_t *)&queue;
 	size_t content_off;
-	uint32_t *content;
+	uint32_t *content, content_size;
 	int i;
 
 	MEMR(npu_dev, (void *)((size_t)offset), (uint8_t *)&queue,
@@ -58,21 +46,42 @@ static void npu_dump_ipc_queue(struct npu_device *npu_dev, uint32_t target_que)
 
 	NPU_ERR("DUMP IPC queue %d:\n", target_que);
 	NPU_ERR("Header size %d:\n", HFI_QUEUE_HEADER_SIZE);
-	NPU_ERR("Content size %d:\n", queue.qhdr_q_size);
 	NPU_ERR("============QUEUE HEADER=============\n");
 	for (i = 0; i < HFI_QUEUE_HEADER_SIZE/4; i++)
 		NPU_ERR("%x\n", ptr[i]);
 
-	content_off = (size_t)IPC_ADDR + queue.qhdr_start_offset;
-	content = kzalloc(queue.qhdr_q_size, GFP_KERNEL);
+	content_off = (size_t)(IPC_ADDR + queue.qhdr_start_offset +
+		queue.qhdr_read_idx);
+	if (queue.qhdr_write_idx >= queue.qhdr_read_idx)
+		content_size = queue.qhdr_write_idx - queue.qhdr_read_idx;
+	else
+		content_size = queue.qhdr_q_size - queue.qhdr_read_idx +
+			queue.qhdr_write_idx;
+
+	NPU_ERR("Content size %d:\n", content_size);
+	if (content_size == 0)
+		return;
+
+	content = kzalloc(content_size, GFP_KERNEL);
 	if (!content) {
 		NPU_ERR("failed to allocate IPC queue content buffer\n");
 		return;
 	}
 
-	MEMR(npu_dev, (void *)content_off, content, queue.qhdr_q_size);
+	if (queue.qhdr_write_idx >= queue.qhdr_read_idx) {
+		MEMR(npu_dev, (void *)content_off, content, content_size);
+	} else {
+		MEMR(npu_dev, (void *)content_off, content,
+			queue.qhdr_q_size - queue.qhdr_read_idx);
+
+		MEMR(npu_dev, (void *)((size_t)IPC_ADDR +
+			queue.qhdr_start_offset),
+			(void *)((size_t)content + queue.qhdr_q_size -
+			queue.qhdr_read_idx), queue.qhdr_write_idx);
+	}
+
 	NPU_ERR("============QUEUE CONTENT=============\n");
-	for (i = 0; i < queue.qhdr_q_size/4; i++)
+	for (i = 0; i < content_size/4; i++)
 		NPU_ERR("%x\n", content[i]);
 
 	NPU_ERR("DUMP IPC queue %d END\n", target_que);
@@ -103,7 +112,13 @@ static void npu_dump_all_ipc_queue(struct npu_device *npu_dev)
 
 void npu_dump_debug_info(struct npu_device *npu_dev)
 {
-	npu_dump_debug_timeout_stats(npu_dev);
+	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+
+	if (host_ctx->fw_state != FW_ENABLED) {
+		NPU_WARN("NPU is disabled\n");
+		return;
+	}
+
 	npu_dump_dbg_registers(npu_dev);
 	npu_dump_all_ipc_queue(npu_dev);
 }
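
The dump now reads only the occupied span of the ring: when the write index has wrapped behind the read index, the live content is the tail of the queue plus the head. The size computation above, pulled out as a standalone helper for clarity:

	static inline u32 ipc_queue_used(u32 rd, u32 wr, u32 qsize)
	{
		/* bytes currently occupied in a circular buffer of qsize bytes */
		return (wr >= rd) ? (wr - rd) : (qsize - rd + wr);
	}
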
diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c
index 732a9df..42f13e9 100644
--- a/drivers/media/platform/msm/npu/npu_dev.c
+++ b/drivers/media/platform/msm/npu/npu_dev.c
@@ -16,6 +16,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/thermal.h>
 #include <linux/soc/qcom/llcc-qcom.h>
+#include <soc/qcom/devfreq_devbw.h>
 
 #include "npu_common.h"
 #include "npu_hw.h"
@@ -60,6 +61,8 @@ static ssize_t perf_mode_override_store(struct device *dev,
 static ssize_t boot_store(struct device *dev,
 					  struct device_attribute *attr,
 					  const char *buf, size_t count);
+static void npu_suspend_devbw(struct npu_device *npu_dev);
+static void npu_resume_devbw(struct npu_device *npu_dev);
 static bool npu_is_post_clock(const char *clk_name);
 static bool npu_is_exclude_rate_clock(const char *clk_name);
 static int npu_get_max_state(struct thermal_cooling_device *cdev,
@@ -74,14 +77,10 @@ static int npu_get_info(struct npu_client *client, unsigned long arg);
 static int npu_map_buf(struct npu_client *client, unsigned long arg);
 static int npu_unmap_buf(struct npu_client *client,
 	unsigned long arg);
-static int npu_load_network(struct npu_client *client,
-	unsigned long arg);
 static int npu_load_network_v2(struct npu_client *client,
 	unsigned long arg);
 static int npu_unload_network(struct npu_client *client,
 	unsigned long arg);
-static int npu_exec_network(struct npu_client *client,
-	unsigned long arg);
 static int npu_exec_network_v2(struct npu_client *client,
 	unsigned long arg);
 static int npu_receive_event(struct npu_client *client,
@@ -126,8 +125,6 @@ static const char * const npu_exclude_rate_clocks[] = {
 	"axi_clk",
 	"ahb_clk",
 	"dma_clk",
-	"llm_temp_clk",
-	"llm_curr_clk",
 	"atb_clk",
 	"s2p_clk",
 };
@@ -341,26 +338,30 @@ int npu_enable_core_power(struct npu_device *npu_dev)
 	struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
 	int ret = 0;
 
+	mutex_lock(&npu_dev->dev_lock);
+	NPU_DBG("Enable core power %d\n", pwr->pwr_vote_num);
 	if (!pwr->pwr_vote_num) {
 		ret = npu_enable_regulators(npu_dev);
 		if (ret)
-			return ret;
+			goto fail;
 
 		ret = npu_set_bw(npu_dev, 100, 100);
 		if (ret) {
 			npu_disable_regulators(npu_dev);
-			return ret;
+			goto fail;
 		}
 
 		ret = npu_enable_core_clocks(npu_dev);
 		if (ret) {
 			npu_set_bw(npu_dev, 0, 0);
 			npu_disable_regulators(npu_dev);
-			pwr->pwr_vote_num = 0;
-			return ret;
+			goto fail;
 		}
+		npu_resume_devbw(npu_dev);
 	}
 	pwr->pwr_vote_num++;
+fail:
+	mutex_unlock(&npu_dev->dev_lock);
 
 	return ret;
 }
@@ -369,10 +370,16 @@ void npu_disable_core_power(struct npu_device *npu_dev)
 {
 	struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
 
-	if (!pwr->pwr_vote_num)
+	mutex_lock(&npu_dev->dev_lock);
+	NPU_DBG("Disable core power %d\n", pwr->pwr_vote_num);
+	if (!pwr->pwr_vote_num) {
+		mutex_unlock(&npu_dev->dev_lock);
 		return;
+	}
+
 	pwr->pwr_vote_num--;
 	if (!pwr->pwr_vote_num) {
+		npu_suspend_devbw(npu_dev);
 		npu_disable_core_clocks(npu_dev);
 		npu_set_bw(npu_dev, 0, 0);
 		npu_disable_regulators(npu_dev);
@@ -382,6 +389,7 @@ void npu_disable_core_power(struct npu_device *npu_dev)
 		NPU_DBG("setting back to power level=%d\n",
 			pwr->active_pwrlevel);
 	}
+	mutex_unlock(&npu_dev->dev_lock);
 }
 
 static int npu_enable_core_clocks(struct npu_device *npu_dev)
@@ -565,6 +573,42 @@ int npu_set_uc_power_level(struct npu_device *npu_dev,
 }
 
 /* -------------------------------------------------------------------------
+ * Bandwidth Monitor Related
+ * -------------------------------------------------------------------------
+ */
+static void npu_suspend_devbw(struct npu_device *npu_dev)
+{
+	struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
+	int ret, i;
+
+	if (pwr->bwmon_enabled && (pwr->devbw_num > 0)) {
+		for (i = 0; i < pwr->devbw_num; i++) {
+			ret = devfreq_suspend_devbw(pwr->devbw[i]);
+			if (ret)
+				NPU_ERR("devfreq_suspend_devbw failed rc:%d\n",
+					ret);
+		}
+		pwr->bwmon_enabled = 0;
+	}
+}
+
+static void npu_resume_devbw(struct npu_device *npu_dev)
+{
+	struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
+	int ret, i;
+
+	if (!pwr->bwmon_enabled && (pwr->devbw_num > 0)) {
+		for (i = 0; i < pwr->devbw_num; i++) {
+			ret = devfreq_resume_devbw(pwr->devbw[i]);
+			if (ret)
+				NPU_ERR("devfreq_resume_devbw failed rc:%d\n",
+					ret);
+		}
+		pwr->bwmon_enabled = 1;
+	}
+}
+
+/* -------------------------------------------------------------------------
  * Clocks Related
  * -------------------------------------------------------------------------
  */
@@ -631,10 +675,7 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
 				continue;
 		}
 
-		NPU_DBG("enabling clock %s\n", core_clks[i].clk_name);
-
 		if (core_clks[i].reset) {
-			NPU_DBG("Deassert %s\n", core_clks[i].clk_name);
 			rc = reset_control_deassert(core_clks[i].reset);
 			if (rc)
 				NPU_WARN("deassert %s reset failed\n",
@@ -651,9 +692,6 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
 		if (npu_is_exclude_rate_clock(core_clks[i].clk_name))
 			continue;
 
-		NPU_DBG("setting rate of clock %s to %ld\n",
-			core_clks[i].clk_name, pwrlevel->clk_freq[i]);
-
 		rc = clk_set_rate(core_clks[i].clk,
 			pwrlevel->clk_freq[i]);
 		/* not fatal error, keep using previous clk rate */
@@ -674,11 +712,9 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
 				if (npu_is_post_clock(core_clks[i].clk_name))
 					continue;
 			}
-			NPU_DBG("disabling clock %s\n", core_clks[i].clk_name);
 			clk_disable_unprepare(core_clks[i].clk);
 
 			if (core_clks[i].reset) {
-				NPU_DBG("Assert %s\n", core_clks[i].clk_name);
 				rc = reset_control_assert(core_clks[i].reset);
 				if (rc)
 					NPU_WARN("assert %s reset failed\n",
@@ -706,9 +742,6 @@ static void npu_disable_clocks(struct npu_device *npu_dev, bool post_pil)
 
 		/* set clock rate to 0 before disabling it */
 		if (!npu_is_exclude_rate_clock(core_clks[i].clk_name)) {
-			NPU_DBG("setting rate of clock %s to 0\n",
-				core_clks[i].clk_name);
-
 			rc = clk_set_rate(core_clks[i].clk, 0);
 			if (rc) {
 				NPU_ERR("clk_set_rate %s to 0 failed\n",
@@ -716,11 +749,9 @@ static void npu_disable_clocks(struct npu_device *npu_dev, bool post_pil)
 			}
 		}
 
-		NPU_DBG("disabling clock %s\n", core_clks[i].clk_name);
 		clk_disable_unprepare(core_clks[i].clk);
 
 		if (core_clks[i].reset) {
-			NPU_DBG("Assert %s\n", core_clks[i].clk_name);
 			rc = reset_control_assert(core_clks[i].reset);
 			if (rc)
 				NPU_WARN("assert %s reset failed\n",
@@ -794,11 +825,15 @@ static int npu_enable_regulators(struct npu_device *npu_dev)
 					regulators[i].regulator_name);
 				break;
 			}
-			NPU_DBG("regulator %s enabled\n",
-				regulators[i].regulator_name);
 		}
 	}
-	host_ctx->power_vote_num++;
+
+	if (rc) {
+		for (i--; i >= 0; i--)
+			regulator_disable(regulators[i].regulator);
+	} else {
+		host_ctx->power_vote_num++;
+	}
 	return rc;
 }
 
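npu_enable_regulators() now unwinds partial progress instead of bumping the vote count unconditionally: only regulators that were actually enabled get disabled on failure. The pattern, reduced to its core (regs and n stand in for the driver's regulator array and count):

	for (i = 0; i < n; i++) {
		rc = regulator_enable(regs[i]);
		if (rc)
			break;
	}
	if (rc)
		while (--i >= 0)	/* roll back only what succeeded */
			regulator_disable(regs[i]);
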
@@ -809,11 +844,9 @@ static void npu_disable_regulators(struct npu_device *npu_dev)
 	struct npu_regulator *regulators = npu_dev->regulators;
 
 	if (host_ctx->power_vote_num > 0) {
-		for (i = 0; i < npu_dev->regulator_num; i++) {
+		for (i = 0; i < npu_dev->regulator_num; i++)
 			regulator_disable(regulators[i].regulator);
-			NPU_DBG("regulator %s disabled\n",
-				regulators[i].regulator_name);
-		}
+
 		host_ctx->power_vote_num--;
 	}
 }
@@ -845,13 +878,12 @@ int npu_enable_irq(struct npu_device *npu_dev)
 	reg_val |= RSC_SHUTDOWN_REQ_IRQ_ENABLE | RSC_BRINGUP_REQ_IRQ_ENABLE;
 	npu_cc_reg_write(npu_dev, NPU_CC_NPU_MASTERn_GENERAL_IRQ_ENABLE(0),
 		reg_val);
-	for (i = 0; i < NPU_MAX_IRQ; i++) {
-		if (npu_dev->irq[i].irq != 0) {
+	for (i = 0; i < NPU_MAX_IRQ; i++)
+		if (npu_dev->irq[i].irq != 0)
 			enable_irq(npu_dev->irq[i].irq);
-			NPU_DBG("enable irq %d\n", npu_dev->irq[i].irq);
-		}
-	}
+
 	npu_dev->irq_enabled = true;
+	NPU_DBG("irq enabled\n");
 
 	return 0;
 }
@@ -866,12 +898,9 @@ void npu_disable_irq(struct npu_device *npu_dev)
 		return;
 	}
 
-	for (i = 0; i < NPU_MAX_IRQ; i++) {
-		if (npu_dev->irq[i].irq != 0) {
+	for (i = 0; i < NPU_MAX_IRQ; i++)
+		if (npu_dev->irq[i].irq != 0)
 			disable_irq(npu_dev->irq[i].irq);
-			NPU_DBG("disable irq %d\n", npu_dev->irq[i].irq);
-		}
-	}
 
 	reg_val = npu_cc_reg_read(npu_dev,
 		NPU_CC_NPU_MASTERn_GENERAL_IRQ_OWNER(0));
@@ -886,6 +915,7 @@ void npu_disable_irq(struct npu_device *npu_dev)
 	npu_cc_reg_write(npu_dev, NPU_CC_NPU_MASTERn_GENERAL_IRQ_CLEAR(0),
 		RSC_SHUTDOWN_REQ_IRQ_ENABLE | RSC_BRINGUP_REQ_IRQ_ENABLE);
 	npu_dev->irq_enabled = false;
+	NPU_DBG("irq disabled\n");
 }
 
 /* -------------------------------------------------------------------------
@@ -931,12 +961,13 @@ int npu_enable_sys_cache(struct npu_device *npu_dev)
 		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(3), reg_val);
 		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(4), reg_val);
 
-		NPU_DBG("prior to activate sys cache\n");
 		rc = llcc_slice_activate(npu_dev->sys_cache);
-		if (rc)
+		if (rc) {
 			NPU_ERR("failed to activate sys cache\n");
-		else
-			NPU_DBG("sys cache activated\n");
+			llcc_slice_putd(npu_dev->sys_cache);
+			npu_dev->sys_cache = NULL;
+			rc = 0;
+		}
 	}
 
 	return rc;
@@ -1095,38 +1126,6 @@ static int npu_unmap_buf(struct npu_client *client, unsigned long arg)
 	return 0;
 }
 
-static int npu_load_network(struct npu_client *client,
-	unsigned long arg)
-{
-	struct msm_npu_load_network_ioctl req;
-	struct msm_npu_unload_network_ioctl unload_req;
-	void __user *argp = (void __user *)arg;
-	int ret = 0;
-
-	ret = copy_from_user(&req, argp, sizeof(req));
-
-	if (ret) {
-		NPU_ERR("fail to copy from user\n");
-		return -EFAULT;
-	}
-
-	NPU_DBG("network load with perf request %d\n", req.perf_mode);
-
-	ret = npu_host_load_network(client, &req);
-	if (ret) {
-		NPU_ERR("npu_host_load_network failed %d\n", ret);
-		return ret;
-	}
-
-	ret = copy_to_user(argp, &req, sizeof(req));
-	if (ret) {
-		NPU_ERR("fail to copy to user\n");
-		ret = -EFAULT;
-		unload_req.network_hdl = req.network_hdl;
-		npu_host_unload_network(client, &unload_req);
-	}
-	return ret;
-}
 
 static int npu_load_network_v2(struct npu_client *client,
 	unsigned long arg)
@@ -1216,44 +1215,6 @@ static int npu_unload_network(struct npu_client *client,
 	return 0;
 }
 
-static int npu_exec_network(struct npu_client *client,
-	unsigned long arg)
-{
-	struct msm_npu_exec_network_ioctl req;
-	void __user *argp = (void __user *)arg;
-	int ret = 0;
-
-	ret = copy_from_user(&req, argp, sizeof(req));
-
-	if (ret) {
-		NPU_ERR("fail to copy from user\n");
-		return -EFAULT;
-	}
-
-	if ((req.input_layer_num > MSM_NPU_MAX_INPUT_LAYER_NUM) ||
-		(req.output_layer_num > MSM_NPU_MAX_OUTPUT_LAYER_NUM)) {
-		NPU_ERR("Invalid input/out layer num %d[max:%d] %d[max:%d]\n",
-			req.input_layer_num, MSM_NPU_MAX_INPUT_LAYER_NUM,
-			req.output_layer_num, MSM_NPU_MAX_OUTPUT_LAYER_NUM);
-		return -EINVAL;
-	}
-
-	ret = npu_host_exec_network(client, &req);
-
-	if (ret) {
-		NPU_ERR("npu_host_exec_network failed %d\n", ret);
-		return ret;
-	}
-
-	ret = copy_to_user(argp, &req, sizeof(req));
-
-	if (ret) {
-		NPU_ERR("fail to copy to user\n");
-		return -EFAULT;
-	}
-	return 0;
-}
-
 static int npu_exec_network_v2(struct npu_client *client,
 	unsigned long arg)
 {
@@ -1446,7 +1407,8 @@ static long npu_ioctl(struct file *file, unsigned int cmd,
 		ret = npu_unmap_buf(client, arg);
 		break;
 	case MSM_NPU_LOAD_NETWORK:
-		ret = npu_load_network(client, arg);
+		NPU_ERR("npu_load_network_v1 is no longer supported\n");
+		ret = -ENOTTY;
 		break;
 	case MSM_NPU_LOAD_NETWORK_V2:
 		ret = npu_load_network_v2(client, arg);
@@ -1455,7 +1417,8 @@ static long npu_ioctl(struct file *file, unsigned int cmd,
 		ret = npu_unload_network(client, arg);
 		break;
 	case MSM_NPU_EXEC_NETWORK:
-		ret = npu_exec_network(client, arg);
+		NPU_ERR("npu_exec_network_v1 is no longer supported\n");
+		ret = -ENOTTY;
 		break;
 	case MSM_NPU_EXEC_NETWORK_V2:
 		ret = npu_exec_network_v2(client, arg);
@@ -1658,8 +1621,6 @@ int npu_set_bw(struct npu_device *npu_dev, int new_ib, int new_ab)
 	bwctrl->bw_levels[i].vectors[1].ib = new_ib * MBYTE;
 	bwctrl->bw_levels[i].vectors[1].ab = new_ab / bwctrl->num_paths * MBYTE;
 
-	NPU_INFO("BW MBps: AB: %d IB: %d\n", new_ab, new_ib);
-
 	ret = msm_bus_scale_client_update_request(bwctrl->bus_client, i);
 	if (ret) {
 		NPU_ERR("bandwidth request failed (%d)\n", ret);
@@ -1790,7 +1751,9 @@ static int npu_pwrctrl_init(struct npu_device *npu_dev)
 {
 	struct platform_device *pdev = npu_dev->pdev;
 	struct device_node *node;
-	int ret = 0;
+	int ret = 0, i;
+	struct platform_device *p2dev;
+	struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
 
 	/* Power levels */
 	node = of_find_node_by_name(pdev->dev.of_node, "qcom,npu-pwrlevels");
@@ -1804,6 +1767,47 @@ static int npu_pwrctrl_init(struct npu_device *npu_dev)
 	if (ret)
 		return ret;
 
+	/* Parse Bandwidth Monitor */
+	pwr->devbw_num = of_property_count_strings(pdev->dev.of_node,
+			"qcom,npubw-dev-names");
+	if (pwr->devbw_num <= 0) {
+		NPU_INFO("npubw-dev-names are not defined\n");
+		return 0;
+	} else if (pwr->devbw_num > NPU_MAX_BW_DEVS) {
+		NPU_ERR("number of devbw %d exceeds limit\n", pwr->devbw_num);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < pwr->devbw_num; i++) {
+		node = of_parse_phandle(pdev->dev.of_node,
+				"qcom,npubw-devs", i);
+
+		if (node) {
+			p2dev = of_find_device_by_node(node);
+			of_node_put(node);
+			if (p2dev) {
+				pwr->devbw[i] = &p2dev->dev;
+			} else {
+				NPU_ERR("can't find devbw%d\n", i);
+				ret = -EINVAL;
+				break;
+			}
+		} else {
+			NPU_ERR("can't find devbw node\n");
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	if (ret) {
+		/* Allow npu work without bwmon */
+		pwr->devbw_num = 0;
+		ret = 0;
+	} else {
+		/* Set to 1 initially - we assume bwmon is on */
+		pwr->bwmon_enabled = 1;
+	}
+
 	return ret;
 }
 
diff --git a/drivers/media/platform/msm/npu/npu_firmware.h b/drivers/media/platform/msm/npu/npu_firmware.h
index 8c0385d..3d8537b 100644
--- a/drivers/media/platform/msm/npu/npu_firmware.h
+++ b/drivers/media/platform/msm/npu/npu_firmware.h
@@ -29,11 +29,6 @@
 /* Data value for debug */
 #define REG_NPU_FW_DEBUG_DATA       NPU_GPR13
 
-/* Started job count */
-#define REG_FW_JOB_CNT_START        NPU_GPR14
-/* Finished job count */
-#define REG_FW_JOB_CNT_END          NPU_GPR15
-
 /* NPU FW Control/Status Register */
 /* bit fields definitions in CTRL STATUS REG */
 #define FW_CTRL_STATUS_IPC_READY_BIT            0
diff --git a/drivers/media/platform/msm/npu/npu_host_ipc.c b/drivers/media/platform/msm/npu/npu_host_ipc.c
index 85e8187..62feb8c 100644
--- a/drivers/media/platform/msm/npu/npu_host_ipc.c
+++ b/drivers/media/platform/msm/npu/npu_host_ipc.c
@@ -166,9 +166,7 @@ static int npu_host_ipc_send_cmd_hfi(struct npu_device *npu_dev,
 			status = INTERRUPT_RAISE_NPU(npu_dev);
 	}
 
-	if (status == 0)
-		NPU_DBG("Cmd Msg put on Command Queue - SUCCESSS\n");
-	else
+	if (status)
 		NPU_ERR("Cmd Msg put on Command Queue - FAILURE\n");
 
 	return status;
@@ -238,6 +236,13 @@ static int ipc_queue_read(struct npu_device *npu_dev,
 		status = -EPERM;
 		goto exit;
 	}
+
+	if (packet_size > NPU_IPC_BUF_LENGTH) {
+		NPU_ERR("Invalid packet size %d\n", packet_size);
+		status = -EINVAL;
+		goto exit;
+	}
+
 	new_read_idx = queue.qhdr_read_idx + packet_size;
 
 	if (new_read_idx < (queue.qhdr_q_size)) {
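
The added check rejects a corrupt queue-header packet size before it is used to advance qhdr_read_idx; a firmware-supplied size larger than the host message buffer would otherwise overrun the copy. As a sketch (the zero-size test here is an extra illustration, not part of the hunk):

	if (packet_size == 0 || packet_size > NPU_IPC_BUF_LENGTH) {
		/* never trust a length field read back from shared memory */
		status = -EINVAL;
		goto exit;
	}
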
diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c
index 243d7cd..b738a8b 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.c
+++ b/drivers/media/platform/msm/npu/npu_mgr.c
@@ -99,7 +99,6 @@ static int load_fw_nolock(struct npu_device *npu_dev, bool enable)
 	}
 
 	/* Keep reading ctrl status until NPU is ready */
-	NPU_DBG("waiting for status ready from fw\n");
 	if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
 		FW_CTRL_STATUS_MAIN_THREAD_READY_VAL)) {
 		ret = -EPERM;
@@ -194,7 +193,6 @@ int load_fw(struct npu_device *npu_dev)
 int unload_fw(struct npu_device *npu_dev)
 {
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
-	int ret = 0;
 
 	if (host_ctx->auto_pil_disable) {
 		NPU_WARN("auto pil is disabled\n");
@@ -212,16 +210,7 @@ int unload_fw(struct npu_device *npu_dev)
 		return -EBUSY;
 	}
 
-	/* vote minimum bandwidth before unload npu fw via PIL */
-	ret = npu_set_bw(npu_dev, 100, 100);
-	if (ret) {
-		NPU_ERR("Can't update bandwidth\n");
-		mutex_unlock(&host_ctx->lock);
-		return ret;
-	}
-
 	subsystem_put_local(host_ctx->subsystem_handle);
-	npu_set_bw(npu_dev, 0, 0);
 	host_ctx->fw_state = FW_UNLOADED;
 	NPU_DBG("fw is unloaded\n");
 	mutex_unlock(&host_ctx->lock);
@@ -531,9 +520,18 @@ static int npu_notifier_cb(struct notifier_block *this, unsigned long code,
 			npu_disable_core_power(npu_dev);
 			npu_notify_aop(npu_dev, false);
 		}
+
+		/* vote minimum bandwidth before unloading npu fw via PIL */
+		ret = npu_set_bw(npu_dev, 100, 100);
+		if (ret)
+			NPU_WARN("Can't update bandwidth\n");
+
 		break;
 	}
 	case SUBSYS_AFTER_SHUTDOWN:
+		ret = npu_set_bw(npu_dev, 0, 0);
+		if (ret)
+			NPU_WARN("Can't update bandwidth\n");
 		break;
 	default:
 		NPU_DBG("Ignoring event\n");
@@ -592,12 +590,14 @@ int npu_host_init(struct npu_device *npu_dev)
 	if (IS_ERR(host_ctx->notif_hdle)) {
 		NPU_ERR("register event notification failed\n");
 		sts = PTR_ERR(host_ctx->notif_hdle);
-		return sts;
+		host_ctx->notif_hdle = NULL;
+		goto fail;
 	}
 
 	host_ctx->wq = create_workqueue("npu_irq_hdl");
 	if (!host_ctx->wq) {
 		sts = -EPERM;
+		goto fail;
 	} else {
 		INIT_WORK(&host_ctx->ipc_irq_work, npu_ipc_irq_work);
 		INIT_WORK(&host_ctx->wdg_err_irq_work, npu_wdg_err_irq_work);
@@ -608,16 +608,33 @@ int npu_host_init(struct npu_device *npu_dev)
 			npu_disable_fw_work);
 	}
 
+	host_ctx->ipc_msg_buf = kzalloc(NPU_IPC_BUF_LENGTH, GFP_KERNEL);
+	if (!host_ctx->ipc_msg_buf) {
+		NPU_ERR("Failed to allocate ipc buffer\n");
+		sts = -ENOMEM;
+		goto fail;
+	}
+
 	host_ctx->auto_pil_disable = false;
 
 	return sts;
+fail:
+	if (host_ctx->wq)
+		destroy_workqueue(host_ctx->wq);
+	if (host_ctx->notif_hdle)
+		subsys_notif_unregister_notifier(host_ctx->notif_hdle,
+			&host_ctx->nb);
+	mutex_destroy(&host_ctx->lock);
+	return sts;
 }
 
 void npu_host_deinit(struct npu_device *npu_dev)
 {
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 
+	kfree(host_ctx->ipc_msg_buf);
 	destroy_workqueue(host_ctx->wq);
+	subsys_notif_unregister_notifier(host_ctx->notif_hdle, &host_ctx->nb);
 	mutex_destroy(&host_ctx->lock);
 }
 
@@ -630,7 +647,6 @@ irqreturn_t npu_ipc_intr_hdlr(int irq, void *ptr)
 	struct npu_device *npu_dev = (struct npu_device *)ptr;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 
-	NPU_DBG("NPU ipc irq %d\n", irq);
 	INTERRUPT_ACK(npu_dev, irq);
 
 	/* Check that the event thread currently is running */
@@ -646,23 +662,17 @@ irqreturn_t npu_general_intr_hdlr(int irq, void *ptr)
 	struct npu_device *npu_dev = (struct npu_device *)ptr;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 
-	NPU_DBG("NPU general irq %d\n", irq);
-
 	reg_val = npu_cc_reg_read(npu_dev,
 		NPU_CC_NPU_MASTERn_GENERAL_IRQ_STATUS(0));
 	NPU_DBG("GENERAL_IRQ_STATUS %x\n", reg_val);
 	reg_val &= (RSC_SHUTDOWN_REQ_IRQ_STATUS | RSC_BRINGUP_REQ_IRQ_STATUS);
 	ack_val = npu_cc_reg_read(npu_dev, NPU_CC_NPU_CPC_RSC_CTRL);
 
-	if (reg_val & RSC_SHUTDOWN_REQ_IRQ_STATUS) {
-		NPU_DBG("Send SHUTDOWN ACK\n");
+	if (reg_val & RSC_SHUTDOWN_REQ_IRQ_STATUS)
 		ack_val |= Q6SS_RSC_SHUTDOWN_ACK_EN;
-	}
 
-	if (reg_val & RSC_BRINGUP_REQ_IRQ_STATUS) {
-		NPU_DBG("Send BRINGUP ACK\n");
+	if (reg_val & RSC_BRINGUP_REQ_IRQ_STATUS)
 		ack_val |= Q6SS_RSC_BRINGUP_ACK_EN;
-	}
 
 	npu_cc_reg_write(npu_dev, NPU_CC_NPU_CPC_RSC_CTRL, ack_val);
 	npu_cc_reg_write(npu_dev,
@@ -732,6 +742,7 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
 
 	if (host_ctx->wdg_irq_sts) {
 		NPU_INFO("watchdog irq triggered\n");
+		npu_dump_debug_info(npu_dev);
 		fw_alive = false;
 	}
 
@@ -773,7 +784,6 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
 	}
 
 	/* Keep reading ctrl status until NPU is ready */
-	NPU_DBG("waiting for status ready from fw\n");
 	if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
 		FW_CTRL_STATUS_MAIN_THREAD_READY_VAL)) {
 		NPU_ERR("wait for fw status ready timedout\n");
@@ -899,6 +909,12 @@ static void npu_bridge_mbox_work(struct work_struct *work)
 		return;
 	}
 
+	if ((host_ctx->wdg_irq_sts != 0) || (host_ctx->err_irq_sts != 0)) {
+		NPU_WARN("SSR is triggered, skip this time\n");
+		mutex_unlock(&host_ctx->lock);
+		return;
+	}
+
 	/* queue or modify delayed work to disable fw */
 	mod_delayed_work(host_ctx->wq, &host_ctx->disable_fw_work,
 		NPU_MBOX_IDLE_TIMEOUT);
@@ -1174,14 +1190,6 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
 		NPU_DBG("NPU_IPC_MSG_EXECUTE_DONE status: %d\n",
 			exe_rsp_pkt->header.status);
 		NPU_DBG("trans_id : %d\n", exe_rsp_pkt->header.trans_id);
-		NPU_DBG("e2e_IPC_time: %d (in tick count)\n",
-			exe_rsp_pkt->stats.e2e_ipc_tick_count);
-		NPU_DBG("aco_load_time: %d (in tick count)\n",
-			exe_rsp_pkt->stats.aco_load_tick_count);
-		NPU_DBG("aco_execute_time: %d (in tick count)\n",
-			exe_rsp_pkt->stats.aco_execution_tick_count);
-		NPU_DBG("total_num_layers: %d\n",
-			exe_rsp_pkt->stats.exe_stats.total_num_layers);
 
 		network = get_network_by_hdl(host_ctx, NULL,
 			exe_rsp_pkt->network_hdl);
@@ -1503,13 +1511,14 @@ static int npu_send_network_cmd(struct npu_device *npu_dev,
 		NPU_ERR("Another cmd is pending\n");
 		ret = -EBUSY;
 	} else {
-		NPU_DBG("Send cmd %d network id %lld\n",
-			((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type,
-			network->id);
 		network->cmd_async = async;
 		network->cmd_ret_status = 0;
 		network->cmd_pending = true;
 		network->trans_id = atomic_read(&host_ctx->ipc_trans_id);
+		reinit_completion(&network->cmd_done);
+		NPU_DBG("Send cmd %d network id %llx trans id %d\n",
+			((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type,
+			network->id, network->trans_id);
 		ret = npu_host_ipc_send_cmd(npu_dev,
 			IPC_QUEUE_APPS_EXEC, cmd_ptr);
 		if (ret)
@@ -1546,28 +1555,6 @@ static int npu_send_misc_cmd(struct npu_device *npu_dev, uint32_t q_idx,
 	return ret;
 }
 
-static void host_copy_patch_data(struct npu_patch_tuple *param, uint32_t value,
-		struct msm_npu_layer *layer_info)
-{
-	param->value = value;
-	param->chunk_id = layer_info->patch_info.chunk_id;
-	param->loc_offset = layer_info->patch_info.loc_offset;
-	param->instruction_size_in_bytes =
-		layer_info->patch_info.instruction_size_in_bytes;
-	param->shift_value_in_bits =
-		layer_info->patch_info.shift_value_in_bits;
-	param->variable_size_in_bits =
-		layer_info->patch_info.variable_size_in_bits;
-
-	NPU_DBG("copy_patch_data: %x %d %x %x %x %x\n",
-		param->value,
-		param->chunk_id,
-		param->loc_offset,
-		param->instruction_size_in_bytes,
-		param->shift_value_in_bits,
-		param->variable_size_in_bits);
-}
-
 static void host_copy_patch_data_v2(struct npu_patch_tuple_v2 *param,
 	struct msm_npu_patch_info_v2 *patch_info)
 {
@@ -1607,7 +1594,7 @@ static uint32_t find_networks_perf_mode(struct npu_host_ctx *host_ctx)
 			network++;
 		}
 	}
-	pr_debug("max perf mode for networks: %d\n", max_perf_mode);
+	NPU_DBG("max perf mode for networks: %d\n", max_perf_mode);
 
 	return max_perf_mode;
 }
@@ -1627,110 +1614,6 @@ static int set_perf_mode(struct npu_device *npu_dev)
 	return ret;
 }
 
-int32_t npu_host_load_network(struct npu_client *client,
-			struct msm_npu_load_network_ioctl *load_ioctl)
-{
-	int ret = 0;
-	struct npu_device *npu_dev = client->npu_dev;
-	struct npu_network *network;
-	struct ipc_cmd_load_pkt load_packet;
-	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
-
-	ret = enable_fw(npu_dev);
-	if (ret)
-		return ret;
-
-	mutex_lock(&host_ctx->lock);
-	network = alloc_network(host_ctx, client);
-	if (!network) {
-		ret = -ENOMEM;
-		goto err_deinit_fw;
-	}
-
-	network_get(network);
-	network->buf_hdl = load_ioctl->buf_ion_hdl;
-	network->size = load_ioctl->buf_size;
-	network->phy_add = load_ioctl->buf_phys_addr;
-	network->first_block_size = load_ioctl->first_block_size;
-	network->priority = load_ioctl->priority;
-	network->perf_mode = load_ioctl->perf_mode;
-
-	/* verify mapped physical address */
-	if (!npu_mem_verify_addr(client, network->phy_add)) {
-		ret = -EINVAL;
-		goto error_free_network;
-	}
-
-	ret = set_perf_mode(npu_dev);
-	if (ret) {
-		NPU_ERR("set_perf_mode failed\n");
-		goto error_free_network;
-	}
-
-	load_packet.header.cmd_type = NPU_IPC_CMD_LOAD;
-	load_packet.header.size = sizeof(struct ipc_cmd_load_pkt);
-	load_packet.header.trans_id =
-		atomic_add_return(1, &host_ctx->ipc_trans_id);
-	load_packet.header.flags = load_ioctl->flags;
-
-	/* ACO Buffer. Use the npu mapped aco address */
-	load_packet.buf_pkt.address = (uint64_t)network->phy_add;
-	load_packet.buf_pkt.buf_size = network->first_block_size;
-	load_packet.buf_pkt.network_id = network->id;
-
-	/* NPU_IPC_CMD_LOAD will go onto IPC_QUEUE_APPS_EXEC */
-	reinit_completion(&network->cmd_done);
-	ret = npu_send_network_cmd(npu_dev, network, &load_packet, false,
-		false);
-	if (ret) {
-		NPU_ERR("NPU_IPC_CMD_LOAD sent failed: %d\n", ret);
-		goto error_free_network;
-	}
-
-	mutex_unlock(&host_ctx->lock);
-
-	ret = wait_for_completion_interruptible_timeout(
-		&network->cmd_done,
-		(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
-		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
-
-	mutex_lock(&host_ctx->lock);
-	if (!ret) {
-		NPU_ERR("NPU_IPC_CMD_LOAD time out\n");
-		npu_dump_debug_info(npu_dev);
-		ret = -ETIMEDOUT;
-		goto error_free_network;
-	} else if (ret < 0) {
-		NPU_ERR("NPU_IPC_CMD_LOAD is interrupted by signal\n");
-		goto error_free_network;
-	}
-
-	if (network->fw_error) {
-		ret = -EIO;
-		NPU_ERR("fw is in error state during load network\n");
-		goto error_free_network;
-	}
-
-	ret = network->cmd_ret_status;
-	if (ret)
-		goto error_free_network;
-
-	load_ioctl->network_hdl = network->network_hdl;
-	network->is_active = true;
-	network_put(network);
-	mutex_unlock(&host_ctx->lock);
-
-	return ret;
-
-error_free_network:
-	network_put(network);
-	free_network(host_ctx, client, network->id);
-err_deinit_fw:
-	mutex_unlock(&host_ctx->lock);
-	disable_fw(npu_dev);
-	return ret;
-}
-
 int32_t npu_host_load_network_v2(struct npu_client *client,
 			struct msm_npu_load_network_ioctl_v2 *load_ioctl,
 			struct msm_npu_patch_info_v2 *patch_info)
@@ -1784,8 +1667,6 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 		goto error_free_network;
 	}
 
-	NPU_DBG("network address %llx\n", network->phy_add);
-
 	ret = set_perf_mode(npu_dev);
 	if (ret) {
 		NPU_ERR("set_perf_mode failed\n");
@@ -1805,11 +1686,9 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 	load_packet->buf_pkt.num_layers = network->num_layers;
 	load_packet->num_patch_params = num_patch_params;
 
-	/* NPU_IPC_CMD_LOAD_V2 will go onto IPC_QUEUE_APPS_EXEC */
-	reinit_completion(&network->cmd_done);
 	ret = npu_send_network_cmd(npu_dev, network, load_packet, false, false);
 	if (ret) {
-		NPU_DBG("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret);
+		NPU_ERR("NPU_IPC_CMD_LOAD_V2 send failed: %d\n", ret);
 		goto error_free_network;
 	}
 
@@ -1822,19 +1701,20 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 
 	mutex_lock(&host_ctx->lock);
 
-	if (!ret) {
-		NPU_ERR("npu: NPU_IPC_CMD_LOAD time out\n");
-		npu_dump_debug_info(npu_dev);
-		ret = -ETIMEDOUT;
-		goto error_load_network;
-	}
-
 	if (network->fw_error) {
 		ret = -EIO;
 		NPU_ERR("fw is in error state during load_v2 network\n");
 		goto error_free_network;
 	}
 
+	if (!ret) {
+		NPU_ERR("npu: NPU_IPC_CMD_LOAD time out %lld:%d\n",
+			network->id, network->trans_id);
+		npu_dump_debug_info(npu_dev);
+		ret = -ETIMEDOUT;
+		goto error_load_network;
+	}
+
 	ret = network->cmd_ret_status;
 	if (ret)
 		goto error_free_network;
@@ -1908,8 +1788,6 @@ int32_t npu_host_unload_network(struct npu_client *client,
 	unload_packet.header.flags = 0;
 	unload_packet.network_hdl = (uint32_t)network->network_hdl;
 
-	/* NPU_IPC_CMD_UNLOAD will go onto IPC_QUEUE_APPS_EXEC */
-	reinit_completion(&network->cmd_done);
 	ret = npu_send_network_cmd(npu_dev, network, &unload_packet, false,
 		false);
 
@@ -1937,21 +1815,23 @@ int32_t npu_host_unload_network(struct npu_client *client,
 
 	mutex_lock(&host_ctx->lock);
 
+	if (network->fw_error) {
+		ret = -EIO;
+		NPU_ERR("fw is in error state during unload network\n");
+		goto free_network;
+	}
+
 	if (!ret) {
-		NPU_ERR("npu: NPU_IPC_CMD_UNLOAD time out\n");
+		NPU_ERR("npu: NPU_IPC_CMD_UNLOAD time out %llx:%d\n",
+			network->id, network->trans_id);
 		npu_dump_debug_info(npu_dev);
 		network->cmd_pending = false;
 		ret = -ETIMEDOUT;
 		goto free_network;
 	}
 
-	if (network->fw_error) {
-		ret = -EIO;
-		NPU_ERR("fw is in error state during unload network\n");
-	} else {
-		ret = network->cmd_ret_status;
-		NPU_DBG("unload network status %d\n", ret);
-	}
+	ret = network->cmd_ret_status;
+	NPU_DBG("unload network status %d\n", ret);
 
 free_network:
 	/*
@@ -1971,131 +1851,6 @@ int32_t npu_host_unload_network(struct npu_client *client,
 	return ret;
 }
 
-int32_t npu_host_exec_network(struct npu_client *client,
-			struct msm_npu_exec_network_ioctl *exec_ioctl)
-{
-	struct npu_device *npu_dev = client->npu_dev;
-	struct ipc_cmd_execute_pkt exec_packet;
-	/* npu mapped addr */
-	uint64_t input_off, output_off;
-	int32_t ret;
-	struct npu_network *network;
-	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
-	bool async_ioctl = !!exec_ioctl->async;
-
-	mutex_lock(&host_ctx->lock);
-	network = get_network_by_hdl(host_ctx, client,
-		exec_ioctl->network_hdl);
-
-	if (!network) {
-		mutex_unlock(&host_ctx->lock);
-		return -EINVAL;
-	}
-
-	if (!network->is_active) {
-		NPU_ERR("network is not active\n");
-		ret = -EINVAL;
-		goto exec_done;
-	}
-
-	if (network->fw_error) {
-		NPU_ERR("fw is in error state\n");
-		ret = -EIO;
-		goto exec_done;
-	}
-
-	NPU_DBG("execute network %lld\n", network->id);
-	memset(&exec_packet, 0, sizeof(exec_packet));
-	if (exec_ioctl->patching_required) {
-		if ((exec_ioctl->input_layer_num != 1) ||
-			(exec_ioctl->output_layer_num != 1)) {
-			NPU_ERR("Invalid input/output layer num\n");
-			ret = -EINVAL;
-			goto exec_done;
-		}
-
-		input_off = exec_ioctl->input_layers[0].buf_phys_addr;
-		output_off = exec_ioctl->output_layers[0].buf_phys_addr;
-		/* verify mapped physical address */
-		if (!npu_mem_verify_addr(client, input_off) ||
-			!npu_mem_verify_addr(client, output_off)) {
-			NPU_ERR("Invalid patch buf address\n");
-			ret = -EINVAL;
-			goto exec_done;
-		}
-
-		exec_packet.patch_params.num_params = 2;
-		host_copy_patch_data(&exec_packet.patch_params.param[0],
-			(uint32_t)input_off, &exec_ioctl->input_layers[0]);
-		host_copy_patch_data(&exec_packet.patch_params.param[1],
-			(uint32_t)output_off, &exec_ioctl->output_layers[0]);
-	} else {
-		exec_packet.patch_params.num_params = 0;
-	}
-
-	exec_packet.header.cmd_type = NPU_IPC_CMD_EXECUTE;
-	exec_packet.header.size = sizeof(struct ipc_cmd_execute_pkt);
-	exec_packet.header.trans_id =
-		atomic_add_return(1, &host_ctx->ipc_trans_id);
-	exec_packet.header.flags = 0xF;
-	exec_packet.network_hdl = network->network_hdl;
-
-	/* Send it on the high priority queue */
-	reinit_completion(&network->cmd_done);
-	ret = npu_send_network_cmd(npu_dev, network, &exec_packet, async_ioctl,
-		false);
-
-	if (ret) {
-		NPU_ERR("NPU_IPC_CMD_EXECUTE sent failed: %d\n", ret);
-		goto exec_done;
-	}
-
-	if (async_ioctl) {
-		NPU_DBG("Async ioctl, return now\n");
-		goto exec_done;
-	}
-
-	mutex_unlock(&host_ctx->lock);
-
-	ret = wait_for_completion_timeout(
-		&network->cmd_done,
-		(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
-		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
-
-	mutex_lock(&host_ctx->lock);
-	if (!ret) {
-		NPU_ERR("npu: %x NPU_IPC_CMD_EXECUTE time out\n",
-			network->id);
-		npu_dump_debug_info(npu_dev);
-		network->cmd_pending = false;
-		ret = -ETIMEDOUT;
-		goto exec_done;
-	}
-
-	if (network->fw_error) {
-		ret = -EIO;
-		NPU_ERR("fw is in error state during execute network\n");
-	} else {
-		ret = network->cmd_ret_status;
-		NPU_DBG("execution status %d\n", ret);
-	}
-
-exec_done:
-	network_put(network);
-	mutex_unlock(&host_ctx->lock);
-
-	/*
-	 * treat network execution timed out as error in order to
-	 * force npu fw to stop execution
-	 */
-	if (ret == -ETIMEDOUT) {
-		NPU_ERR("Error handling after execution failure\n");
-		host_error_hdlr(npu_dev, true);
-	}
-
-	return ret;
-}
-
 int32_t npu_host_exec_network_v2(struct npu_client *client,
 	struct msm_npu_exec_network_ioctl_v2 *exec_ioctl,
 	struct msm_npu_patch_buf_info *patch_buf_info)
@@ -2174,8 +1929,6 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 	NPU_DBG("Execute_v2 flags %x stats_buf_size %d\n",
 		exec_packet->header.flags, exec_ioctl->stats_buf_size);
 
-	/* Send it on the high priority queue */
-	reinit_completion(&network->cmd_done);
 	ret = npu_send_network_cmd(npu_dev, network, exec_packet, async_ioctl,
 		false);
 
@@ -2197,21 +1950,21 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
 
 	mutex_lock(&host_ctx->lock);
-	if (!ret) {
-		NPU_ERR("npu: %x NPU_IPC_CMD_EXECUTE_V2 time out\n",
-			network->id);
-		npu_dump_debug_info(npu_dev);
-		network->cmd_pending = false;
-		ret = -ETIMEDOUT;
-		goto free_exec_packet;
-	}
-
 	if (network->fw_error) {
 		ret = -EIO;
 		NPU_ERR("fw is in error state during execute_v2 network\n");
 		goto free_exec_packet;
 	}
 
+	if (!ret) {
+		NPU_ERR("npu: %llx:%d NPU_IPC_CMD_EXECUTE_V2 time out\n",
+			network->id, network->trans_id);
+		npu_dump_debug_info(npu_dev);
+		network->cmd_pending = false;
+		ret = -ETIMEDOUT;
+		goto free_exec_packet;
+	}
+
 	ret = network->cmd_ret_status;
 	if (!ret) {
 		exec_ioctl->stats_buf_size = network->stats_buf_size;
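
Two changes recur throughout npu_mgr.c: reinit_completion() moves inside npu_send_network_cmd() so the completion is re-armed exactly once per send, and the fw_error test now precedes the timeout test, because an SSR also wakes the waiter and should surface as -EIO rather than a misleading -ETIMEDOUT. The resulting wait pattern, condensed (error labels elided):

	ret = npu_send_network_cmd(npu_dev, network, pkt, false, false);
	if (ret)
		goto out;
	ret = wait_for_completion_timeout(&network->cmd_done, timeout);
	if (network->fw_error)
		ret = -EIO;		/* SSR wakes waiters too, so check first */
	else if (!ret)
		ret = -ETIMEDOUT;	/* genuine firmware timeout */
	else
		ret = network->cmd_ret_status;
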
diff --git a/drivers/media/platform/msm/npu/npu_mgr.h b/drivers/media/platform/msm/npu/npu_mgr.h
index 36bcc08..72976cb 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.h
+++ b/drivers/media/platform/msm/npu/npu_mgr.h
@@ -26,7 +26,7 @@
 #define NPU_MBOX_IDLE_TIMEOUT msecs_to_jiffies(NPU_MBOX_IDLE_TIMEOUT_MS)
 #define FIRMWARE_VERSION 0x00001000
 #define MAX_LOADED_NETWORK 32
-#define NPU_IPC_BUF_LENGTH 512
+#define NPU_IPC_BUF_LENGTH 4096
 
 #define FW_DBG_MODE_PAUSE        (1 << 0)
 #define FW_DBG_MODE_INC_TIMEOUT  (1 << 1)
@@ -105,6 +105,7 @@ struct npu_host_ctx {
 	void *notif_hdle;
 	spinlock_t bridge_mbox_lock;
 	bool bridge_mbox_pwr_on;
+	void *ipc_msg_buf;
 };
 
 struct npu_device;
@@ -131,15 +132,11 @@ int32_t npu_host_map_buf(struct npu_client *client,
 	struct msm_npu_map_buf_ioctl *map_ioctl);
 int32_t npu_host_unmap_buf(struct npu_client *client,
 	struct msm_npu_unmap_buf_ioctl *unmap_ioctl);
-int32_t npu_host_load_network(struct npu_client *client,
-	struct msm_npu_load_network_ioctl *load_ioctl);
 int32_t npu_host_load_network_v2(struct npu_client *client,
 	struct msm_npu_load_network_ioctl_v2 *load_ioctl,
 	struct msm_npu_patch_info_v2 *patch_info);
 int32_t npu_host_unload_network(struct npu_client *client,
 	struct msm_npu_unload_network_ioctl *unload);
-int32_t npu_host_exec_network(struct npu_client *client,
-	struct msm_npu_exec_network_ioctl *exec_ioctl);
 int32_t npu_host_exec_network_v2(struct npu_client *client,
 	struct msm_npu_exec_network_ioctl_v2 *exec_ioctl,
 	struct msm_npu_patch_buf_info *patch_buf_info);
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index cd8db2c..1b55096 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1778,20 +1778,17 @@ static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
 	else
 		qclk = &qseecom.ce_drv;
 
-	if (qclk->clk_access_cnt > 2) {
+	if (qclk->clk_access_cnt > 0) {
+		qclk->clk_access_cnt--;
+	} else {
 		pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
 		ret = -EINVAL;
-		goto err_dec_ref_cnt;
 	}
-	if (qclk->clk_access_cnt == 2)
-		qclk->clk_access_cnt--;
 
-err_dec_ref_cnt:
 	mutex_unlock(&clk_access_lock);
 	return ret;
 }
 
-
 static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
 {
 	int32_t ret = 0;
@@ -7750,6 +7747,13 @@ static long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
+		if ((data->type != QSEECOM_GENERIC) &&
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("app loaded query req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
 		data->type = QSEECOM_CLIENT_APP;
 		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index ed5cefb..89deb45 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -374,6 +374,7 @@ static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
 {
 	data->bytes_xfered = data->blocks * data->blksz;
 	data->error = 0;
+	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
 	return 1;
 }
 
@@ -1046,7 +1047,8 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
 		mmc->max_segs = 1;
 
 	/* DMA size field can address up to 8 MB */
-	mmc->max_seg_size = 8 * 1024 * 1024;
+	mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
+				  dma_get_max_seg_size(host->dev));
 	mmc->max_req_size = mmc->max_seg_size;
 	/* External DMA is in 512 byte blocks */
 	mmc->max_blk_size = 512;
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 7fdac27..9c77bfe 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -788,7 +788,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
 
 	ret = mmc_of_parse(host->mmc);
 	if (ret) {
-		dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
 		goto unreg_clk;
 	}
 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index be0b785..8f14f85 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1102,6 +1102,8 @@ static void bond_compute_features(struct bonding *bond)
 done:
 	bond_dev->vlan_features = vlan_features;
 	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+				    NETIF_F_HW_VLAN_CTAG_TX |
+				    NETIF_F_HW_VLAN_STAG_TX |
 				    NETIF_F_GSO_UDP_L4;
 	bond_dev->gso_max_segs = gso_max_segs;
 	netif_set_gso_max_size(bond_dev, gso_max_size);
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 602c19e2..786d852 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1512,10 +1512,11 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
 
 	/* All packets processed */
 	if (num_pkts < quota) {
-		napi_complete_done(napi, num_pkts);
-		/* Enable Rx FIFO interrupts */
-		rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
-				   RCANFD_RFCC_RFIE);
+		if (napi_complete_done(napi, num_pkts)) {
+			/* Enable Rx FIFO interrupts */
+			rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+					   RCANFD_RFCC_RFIE);
+		}
 	}
 	return num_pkts;
 }
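
The rcar_canfd fix re-enables Rx FIFO interrupts only when napi_complete_done() actually took the context off the poll list; a false return means NAPI was rescheduled concurrently and the interrupt source must stay masked. The canonical shape:

	if (num_pkts < quota && napi_complete_done(napi, num_pkts)) {
		/* really left polling mode, so unmasking Rx IRQs is safe */
		rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
				   RCANFD_RFCC_RFIE);
	}
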
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 611f9d3..740ef47 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -576,16 +576,16 @@ static int peak_usb_ndo_stop(struct net_device *netdev)
 	dev->state &= ~PCAN_USB_STATE_STARTED;
 	netif_stop_queue(netdev);
 
+	close_candev(netdev);
+
+	dev->can.state = CAN_STATE_STOPPED;
+
 	/* unlink all pending urbs and free used memory */
 	peak_usb_unlink_all_urbs(dev);
 
 	if (dev->adapter->dev_stop)
 		dev->adapter->dev_stop(dev);
 
-	close_candev(netdev);
-
-	dev->can.state = CAN_STATE_STOPPED;
-
 	/* can set bus off now */
 	if (dev->adapter->dev_set_bus) {
 		int err = dev->adapter->dev_set_bus(dev, 0);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index dd161c5..4198835 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -849,7 +849,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
 			goto err_out;
 
 		/* allocate command buffer once for all for the interface */
-		pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE,
+		pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE,
 						GFP_KERNEL);
 		if (!pdev->cmd_buffer_addr)
 			goto err_out_1;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index d516def..b304198 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -502,7 +502,7 @@ static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
 	u8 *buffer;
 	int err;
 
-	buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
+	buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
 	if (!buffer)
 		return -ENOMEM;
 
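Both peak_usb hunks switch command buffers from kmalloc() to kzalloc(). A command buffer handed to the USB core may be transmitted with padding or unwritten trailing bytes, so zero-initializing it keeps stale kernel heap contents off the bus:

	buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;	/* same allocation cost, but contents start zeroed */
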
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 33baa17..cf01e73 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3058,12 +3058,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 	/* if VF indicate to PF this function is going down (PF will delete sp
 	 * elements and clear initializations
 	 */
-	if (IS_VF(bp))
+	if (IS_VF(bp)) {
+		bnx2x_clear_vlan_info(bp);
 		bnx2x_vfpf_close_vf(bp);
-	else if (unload_mode != UNLOAD_RECOVERY)
+	} else if (unload_mode != UNLOAD_RECOVERY) {
 		/* if this is a normal/close unload need to clean up chip*/
 		bnx2x_chip_cleanup(bp, unload_mode, keep_link);
-	else {
+	} else {
 		/* Send the UNLOAD_REQUEST to the MCP */
 		bnx2x_send_unload_req(bp, unload_mode);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 0e508e5..ee5159e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp);
 void bnx2x_disable_close_the_gate(struct bnx2x *bp);
 int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
 
+void bnx2x_clear_vlan_info(struct bnx2x *bp);
+
 /**
  * bnx2x_sp_event - handle ramrods completion.
  *
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2c9af0f..68c62e3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8488,11 +8488,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
 	return rc;
 }
 
+void bnx2x_clear_vlan_info(struct bnx2x *bp)
+{
+	struct bnx2x_vlan_entry *vlan;
+
+	/* Mark that hw forgot all entries */
+	list_for_each_entry(vlan, &bp->vlan_reg, link)
+		vlan->hw = false;
+
+	bp->vlan_cnt = 0;
+}
+
 static int bnx2x_del_all_vlans(struct bnx2x *bp)
 {
 	struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
 	unsigned long ramrod_flags = 0, vlan_flags = 0;
-	struct bnx2x_vlan_entry *vlan;
 	int rc;
 
 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8501,10 +8511,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp)
 	if (rc)
 		return rc;
 
-	/* Mark that hw forgot all entries */
-	list_for_each_entry(vlan, &bp->vlan_reg, link)
-		vlan->hw = false;
-	bp->vlan_cnt = 0;
+	bnx2x_clear_vlan_info(bp);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index f2aba5b..d45c435 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -67,7 +67,8 @@ static struct ch_tc_pedit_fields pedits[] = {
 static struct ch_tc_flower_entry *allocate_flower_entry(void)
 {
 	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
-	spin_lock_init(&new->lock);
+	if (new)
+		spin_lock_init(&new->lock);
 	return new;
 }
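
The guard added above matters because kzalloc() can return NULL under memory pressure: spin_lock_init() would then write through a NULL pointer and oops before the caller's own NULL check ever runs. The general alloc-then-init shape, as a small sketch (the struct is a hypothetical stand-in for the flower entry):

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct entry {			/* hypothetical stand-in */
		spinlock_t lock;
	};

	static struct entry *entry_alloc(void)
	{
		struct entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

		/* Touch members only once the allocation is known good. */
		if (e)
			spin_lock_init(&e->lock);
		return e;
	}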
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index f5cd953..45d9a5f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1190,7 +1190,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
 	if (err) {
 		en_err(priv, "Failed to allocate RSS indirection QP\n");
-		goto rss_err;
+		goto qp_alloc_err;
 	}
 
 	rss_map->indir_qp->event = mlx4_en_sqp_event;
@@ -1244,6 +1244,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 		       MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
 	mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
 	mlx4_qp_free(mdev->dev, rss_map->indir_qp);
+qp_alloc_err:
 	kfree(rss_map->indir_qp);
 	rss_map->indir_qp = NULL;
 rss_err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 45cdde6..a4be04d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
 	return &arfs_t->rules_hash[bucket_idx];
 }
 
-static u8 arfs_get_ip_proto(const struct sk_buff *skb)
-{
-	return (skb->protocol == htons(ETH_P_IP)) ?
-		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
-}
-
 static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
 					 u8 ip_proto, __be16 etype)
 {
@@ -599,31 +593,9 @@ static void arfs_handle_work(struct work_struct *work)
 	arfs_may_expire_flow(priv);
 }
 
-/* return L4 destination port from ip4/6 packets */
-static __be16 arfs_get_dst_port(const struct sk_buff *skb)
-{
-	char *transport_header;
-
-	transport_header = skb_transport_header(skb);
-	if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
-		return ((struct tcphdr *)transport_header)->dest;
-	return ((struct udphdr *)transport_header)->dest;
-}
-
-/* return L4 source port from ip4/6 packets */
-static __be16 arfs_get_src_port(const struct sk_buff *skb)
-{
-	char *transport_header;
-
-	transport_header = skb_transport_header(skb);
-	if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
-		return ((struct tcphdr *)transport_header)->source;
-	return ((struct udphdr *)transport_header)->source;
-}
-
 static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
 					 struct arfs_table *arfs_t,
-					 const struct sk_buff *skb,
+					 const struct flow_keys *fk,
 					 u16 rxq, u32 flow_id)
 {
 	struct arfs_rule *rule;
@@ -638,19 +610,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
 	INIT_WORK(&rule->arfs_work, arfs_handle_work);
 
 	tuple = &rule->tuple;
-	tuple->etype = skb->protocol;
+	tuple->etype = fk->basic.n_proto;
+	tuple->ip_proto = fk->basic.ip_proto;
 	if (tuple->etype == htons(ETH_P_IP)) {
-		tuple->src_ipv4 = ip_hdr(skb)->saddr;
-		tuple->dst_ipv4 = ip_hdr(skb)->daddr;
+		tuple->src_ipv4 = fk->addrs.v4addrs.src;
+		tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
 	} else {
-		memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+		memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
 		       sizeof(struct in6_addr));
-		memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+		memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
 		       sizeof(struct in6_addr));
 	}
-	tuple->ip_proto = arfs_get_ip_proto(skb);
-	tuple->src_port = arfs_get_src_port(skb);
-	tuple->dst_port = arfs_get_dst_port(skb);
+	tuple->src_port = fk->ports.src;
+	tuple->dst_port = fk->ports.dst;
 
 	rule->flow_id = flow_id;
 	rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
@@ -661,37 +633,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
 	return rule;
 }
 
-static bool arfs_cmp_ips(struct arfs_tuple *tuple,
-			 const struct sk_buff *skb)
+static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
 {
-	if (tuple->etype == htons(ETH_P_IP) &&
-	    tuple->src_ipv4 == ip_hdr(skb)->saddr &&
-	    tuple->dst_ipv4 == ip_hdr(skb)->daddr)
-		return true;
-	if (tuple->etype == htons(ETH_P_IPV6) &&
-	    (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
-		     sizeof(struct in6_addr))) &&
-	    (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
-		     sizeof(struct in6_addr))))
-		return true;
+	if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
+		return false;
+	if (tuple->etype != fk->basic.n_proto)
+		return false;
+	if (tuple->etype == htons(ETH_P_IP))
+		return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
+		       tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
+	if (tuple->etype == htons(ETH_P_IPV6))
+		return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
+			       sizeof(struct in6_addr)) &&
+		       !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
+			       sizeof(struct in6_addr));
 	return false;
 }
 
 static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
-					const struct sk_buff *skb)
+					const struct flow_keys *fk)
 {
 	struct arfs_rule *arfs_rule;
 	struct hlist_head *head;
-	__be16 src_port = arfs_get_src_port(skb);
-	__be16 dst_port = arfs_get_dst_port(skb);
 
-	head = arfs_hash_bucket(arfs_t, src_port, dst_port);
+	head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
 	hlist_for_each_entry(arfs_rule, head, hlist) {
-		if (arfs_rule->tuple.src_port == src_port &&
-		    arfs_rule->tuple.dst_port == dst_port &&
-		    arfs_cmp_ips(&arfs_rule->tuple, skb)) {
+		if (arfs_cmp(&arfs_rule->tuple, fk))
 			return arfs_rule;
-		}
 	}
 
 	return NULL;
@@ -704,20 +672,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
 	struct arfs_table *arfs_t;
 	struct arfs_rule *arfs_rule;
+	struct flow_keys fk;
 
-	if (skb->protocol != htons(ETH_P_IP) &&
-	    skb->protocol != htons(ETH_P_IPV6))
+	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+		return -EPROTONOSUPPORT;
+
+	if (fk.basic.n_proto != htons(ETH_P_IP) &&
+	    fk.basic.n_proto != htons(ETH_P_IPV6))
 		return -EPROTONOSUPPORT;
 
 	if (skb->encapsulation)
 		return -EPROTONOSUPPORT;
 
-	arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
+	arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
 	if (!arfs_t)
 		return -EPROTONOSUPPORT;
 
 	spin_lock_bh(&arfs->arfs_lock);
-	arfs_rule = arfs_find_rule(arfs_t, skb);
+	arfs_rule = arfs_find_rule(arfs_t, &fk);
 	if (arfs_rule) {
 		if (arfs_rule->rxq == rxq_index) {
 			spin_unlock_bh(&arfs->arfs_lock);
@@ -725,8 +697,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 		}
 		arfs_rule->rxq = rxq_index;
 	} else {
-		arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
-					    rxq_index, flow_id);
+		arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
 		if (!arfs_rule) {
 			spin_unlock_bh(&arfs->arfs_lock);
 			return -ENOMEM;
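
Moving mlx5 aRFS onto skb_flow_dissect_flow_keys() replaces the hand-rolled header accessors with one pass of the core flow dissector, which also bounds-checks the headers it reads. A hedged sketch of the consumer side (function name and debug output are illustrative):

	#include <linux/if_ether.h>
	#include <linux/skbuff.h>
	#include <net/flow_dissector.h>

	static int example_classify(const struct sk_buff *skb)
	{
		struct flow_keys fk;

		/* One call extracts n_proto, ip_proto, addresses and ports. */
		if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
			return -EPROTONOSUPPORT;

		if (fk.basic.n_proto == htons(ETH_P_IP))
			pr_debug("v4 %pI4:%u -> %pI4:%u proto %u\n",
				 &fk.addrs.v4addrs.src, ntohs(fk.ports.src),
				 &fk.addrs.v4addrs.dst, ntohs(fk.ports.dst),
				 fk.basic.ip_proto);
		return 0;
	}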
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 792bb8b..2b9350f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1083,6 +1083,9 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int err;
 
+	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+		return -EOPNOTSUPP;
+
 	if (pauseparam->autoneg)
 		return -EINVAL;
 
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index aaad5e4..72f3e4b 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -389,14 +389,13 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
 	}
 
 	if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
-		void *agg_params;
-		unsigned long irq_flags;
+		struct rmnet_egress_agg_params *agg_params;
 
 		agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
-		spin_lock_irqsave(&port->agg_lock, irq_flags);
-		memcpy(&port->egress_agg_params, agg_params,
-		       sizeof(port->egress_agg_params));
-		spin_unlock_irqrestore(&port->agg_lock, irq_flags);
+		rmnet_map_update_ul_agg_config(port, agg_params->agg_size,
+					       agg_params->agg_count,
+					       agg_params->agg_features,
+					       agg_params->agg_time);
 	}
 
 	return 0;
@@ -700,6 +699,16 @@ int rmnet_get_powersave_notif(void *port)
 	return ((struct rmnet_port *)port)->data_format & RMNET_FORMAT_PS_NOTIF;
 }
 EXPORT_SYMBOL(rmnet_get_powersave_notif);
+
+struct net_device *rmnet_get_real_dev(void *port)
+{
+	if (port)
+		return ((struct rmnet_port *)port)->dev;
+
+	return NULL;
+}
+EXPORT_SYMBOL(rmnet_get_real_dev);
+
 #endif
 
 /* Startup/Shutdown */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index 07b1154..2359401 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -20,6 +20,11 @@ struct rmnet_endpoint {
 	struct hlist_node hlnode;
 };
 
+struct rmnet_agg_stats {
+	u64 ul_agg_reuse;
+	u64 ul_agg_alloc;
+};
+
 struct rmnet_port_priv_stats {
 	u64 dl_hdr_last_qmap_vers;
 	u64 dl_hdr_last_ep_id;
@@ -33,14 +38,21 @@ struct rmnet_port_priv_stats {
 	u64 dl_hdr_total_pkts;
 	u64 dl_trl_last_seq;
 	u64 dl_trl_count;
+	struct rmnet_agg_stats agg;
 };
 
 struct rmnet_egress_agg_params {
 	u16 agg_size;
-	u16 agg_count;
+	u8 agg_count;
+	u8 agg_features;
 	u32 agg_time;
 };
 
+struct rmnet_agg_page {
+	struct list_head list;
+	struct page *page;
+};
+
 /* One instance of this structure is instantiated for each real_dev associated
  * with rmnet.
  */
@@ -65,6 +77,9 @@ struct rmnet_port {
 	struct timespec agg_last;
 	struct hrtimer hrtimer;
 	struct work_struct agg_wq;
+	u8 agg_size_order;
+	struct list_head agg_list;
+	struct rmnet_agg_page *agg_head;
 
 	void *qmi_info;
 
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c
index a2252c5..40238e6 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c
@@ -375,12 +375,28 @@ static void rmnet_frag_gso_stamp(struct sk_buff *skb,
 				 struct rmnet_frag_descriptor *frag_desc)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+	if (frag_desc->trans_proto == IPPROTO_TCP)
+		shinfo->gso_type = (frag_desc->ip_proto == 4) ?
+				   SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+	else
+		shinfo->gso_type = SKB_GSO_UDP_L4;
+
+	shinfo->gso_size = frag_desc->gso_size;
+	shinfo->gso_segs = frag_desc->gso_segs;
+}
+
+/* Set the partial checksum information. Sets the transport checksum to the
+ * pseudoheader checksum and sets the offload metadata.
+ */
+static void rmnet_frag_partial_csum(struct sk_buff *skb,
+				    struct rmnet_frag_descriptor *frag_desc)
+{
 	struct iphdr *iph = (struct iphdr *)skb->data;
 	__sum16 pseudo;
 	u16 pkt_len = skb->len - frag_desc->ip_len;
-	bool ipv4 = frag_desc->ip_proto == 4;
 
-	if (ipv4) {
+	if (frag_desc->ip_proto == 4) {
 		iph->tot_len = htons(skb->len);
 		iph->check = 0;
 		iph->check = ip_fast_csum(iph, iph->ihl);
@@ -401,7 +417,6 @@ static void rmnet_frag_gso_stamp(struct sk_buff *skb,
 				    ((u8 *)iph + frag_desc->ip_len);
 
 		tp->check = pseudo;
-		shinfo->gso_type = (ipv4) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
 		skb->csum_offset = offsetof(struct tcphdr, check);
 	} else {
 		struct udphdr *up = (struct udphdr *)
@@ -409,14 +424,11 @@ static void rmnet_frag_gso_stamp(struct sk_buff *skb,
 
 		up->len = htons(pkt_len);
 		up->check = pseudo;
-		shinfo->gso_type = SKB_GSO_UDP_L4;
 		skb->csum_offset = offsetof(struct udphdr, check);
 	}
 
 	skb->ip_summed = CHECKSUM_PARTIAL;
 	skb->csum_start = (u8 *)iph + frag_desc->ip_len - skb->head;
-	shinfo->gso_size = frag_desc->gso_size;
-	shinfo->gso_segs = frag_desc->gso_segs;
 }
 
 /* Allocate and populate an skb to contain the packet represented by the
@@ -542,7 +554,8 @@ static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
 
 	/* Handle csum offloading */
 	if (frag_desc->csum_valid) {
-		head_skb->ip_summed = CHECKSUM_UNNECESSARY;
+		/* Set the partial checksum information */
+		rmnet_frag_partial_csum(head_skb, frag_desc);
 	} else if (frag_desc->hdrs_valid &&
 		   (frag_desc->trans_proto == IPPROTO_TCP ||
 		    frag_desc->trans_proto == IPPROTO_UDP)) {
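
Splitting rmnet_frag_partial_csum() out of the GSO stamping means coalesced frames now advertise CHECKSUM_PARTIAL with a precomputed pseudo-header sum rather than claiming CHECKSUM_UNNECESSARY. A minimal sketch of that stamping for an IPv4/TCP frame (assuming skb->data points at the IP header):

	#include <linux/ip.h>
	#include <linux/skbuff.h>
	#include <linux/stddef.h>
	#include <linux/tcp.h>
	#include <net/checksum.h>

	static void stamp_partial_csum(struct sk_buff *skb)
	{
		struct iphdr *iph = (struct iphdr *)skb->data;
		unsigned int ip_len = iph->ihl * 4;
		struct tcphdr *tp = (struct tcphdr *)(skb->data + ip_len);

		/* Seed the checksum field with the pseudo-header sum; the
		 * stack (or hardware) completes it over the TCP payload.
		 */
		tp->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					       skb->len - ip_len,
					       IPPROTO_TCP, 0);
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = (u8 *)tp - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	}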
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index f575096..04048f6 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -261,6 +261,8 @@ int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
 void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port);
 void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
 void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
+void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u16 size,
+				    u8 count, u8 features, u32 time);
 void rmnet_map_dl_hdr_notify(struct rmnet_port *port,
 			     struct rmnet_map_dl_ind_hdr *dl_hdr);
 void rmnet_map_dl_hdr_notify_v2(struct rmnet_port *port,
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index fcb1d2d..694e596 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -656,12 +656,28 @@ static void rmnet_map_gso_stamp(struct sk_buff *skb,
 				struct rmnet_map_coal_metadata *coal_meta)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+	if (coal_meta->trans_proto == IPPROTO_TCP)
+		shinfo->gso_type = (coal_meta->ip_proto == 4) ?
+				   SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+	else
+		shinfo->gso_type = SKB_GSO_UDP_L4;
+
+	shinfo->gso_size = coal_meta->data_len;
+	shinfo->gso_segs = coal_meta->pkt_count;
+}
+
+/* Handles setting up the partial checksum in the skb. Sets the transport
+ * checksum to the pseudoheader checksum and sets the csum offload metadata.
+ */
+static void rmnet_map_partial_csum(struct sk_buff *skb,
+				   struct rmnet_map_coal_metadata *coal_meta)
+{
 	unsigned char *data = skb->data;
 	__sum16 pseudo;
 	u16 pkt_len = skb->len - coal_meta->ip_len;
-	bool ipv4 = coal_meta->ip_proto == 4;
 
-	if (ipv4) {
+	if (coal_meta->ip_proto == 4) {
 		struct iphdr *iph = (struct iphdr *)data;
 
 		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
@@ -678,20 +694,16 @@ static void rmnet_map_gso_stamp(struct sk_buff *skb,
 		struct tcphdr *tp = (struct tcphdr *)(data + coal_meta->ip_len);
 
 		tp->check = pseudo;
-		shinfo->gso_type = (ipv4) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
 		skb->csum_offset = offsetof(struct tcphdr, check);
 	} else {
 		struct udphdr *up = (struct udphdr *)(data + coal_meta->ip_len);
 
 		up->check = pseudo;
-		shinfo->gso_type = SKB_GSO_UDP_L4;
 		skb->csum_offset = offsetof(struct udphdr, check);
 	}
 
 	skb->ip_summed = CHECKSUM_PARTIAL;
 	skb->csum_start = skb->data + coal_meta->ip_len - skb->head;
-	shinfo->gso_size = coal_meta->data_len;
-	shinfo->gso_segs = coal_meta->pkt_count;
 }
 
 static void
@@ -756,7 +768,8 @@ __rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
 
 	/* Handle checksum status */
 	if (likely(csum_valid)) {
-		skbn->ip_summed = CHECKSUM_UNNECESSARY;
+		/* Set the partial checksum information */
+		rmnet_map_partial_csum(skbn, coal_meta);
 	} else if (check) {
 		/* Unfortunately, we have to fake a bad checksum here, since
 		 * the original bad value is lost by the hardware. The only
@@ -938,8 +951,10 @@ static void rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
 		coal_meta.data_len = ntohs(coal_hdr->nl_pairs[0].pkt_len);
 		coal_meta.data_len -= coal_meta.ip_len + coal_meta.trans_len;
 		coal_meta.pkt_count = coal_hdr->nl_pairs[0].num_packets;
-		if (coal_meta.pkt_count > 1)
+		if (coal_meta.pkt_count > 1) {
+			rmnet_map_partial_csum(coal_skb, &coal_meta);
 			rmnet_map_gso_stamp(coal_skb, &coal_meta);
+		}
 
 		__skb_queue_tail(list, coal_skb);
 		return;
@@ -1250,6 +1265,113 @@ static void rmnet_map_linearize_copy(struct sk_buff *dst, struct sk_buff *src)
 	}
 }
 
+static void rmnet_free_agg_pages(struct rmnet_port *port)
+{
+	struct rmnet_agg_page *agg_page, *idx;
+
+	list_for_each_entry_safe(agg_page, idx, &port->agg_list, list) {
+		put_page(agg_page->page);
+		kfree(agg_page);
+	}
+
+	port->agg_head = NULL;
+}
+
+static struct page *rmnet_get_agg_pages(struct rmnet_port *port)
+{
+	struct rmnet_agg_page *agg_page;
+	struct page *page = NULL;
+	int i = 0;
+
+	if (!(port->egress_agg_params.agg_features & RMNET_PAGE_RECYCLE))
+		goto alloc;
+
+	do {
+		agg_page = port->agg_head;
+		if (unlikely(!agg_page))
+			break;
+
+		if (page_ref_count(agg_page->page) == 1) {
+			page = agg_page->page;
+			page_ref_inc(agg_page->page);
+
+			port->stats.agg.ul_agg_reuse++;
+			port->agg_head = list_next_entry(agg_page, list);
+			break;
+		}
+
+		port->agg_head = list_next_entry(agg_page, list);
+		i++;
+	} while (i <= 5);
+
+alloc:
+	if (!page) {
+		page = __dev_alloc_pages(GFP_ATOMIC, port->agg_size_order);
+		port->stats.agg.ul_agg_alloc++;
+	}
+
+	return page;
+}
+
+static struct rmnet_agg_page *__rmnet_alloc_agg_pages(struct rmnet_port *port)
+{
+	struct rmnet_agg_page *agg_page;
+	struct page *page;
+
+	agg_page = kzalloc(sizeof(*agg_page), GFP_ATOMIC);
+	if (!agg_page)
+		return NULL;
+
+	page = __dev_alloc_pages(GFP_ATOMIC, port->agg_size_order);
+	if (!page) {
+		kfree(agg_page);
+		return NULL;
+	}
+
+	agg_page->page = page;
+
+	return agg_page;
+}
+
+static void rmnet_alloc_agg_pages(struct rmnet_port *port)
+{
+	struct rmnet_agg_page *agg_page = NULL;
+	int i = 0;
+
+	for (i = 0; i < 512; i++) {
+		agg_page = __rmnet_alloc_agg_pages(port);
+
+		if (agg_page)
+			list_add_tail(&agg_page->list, &port->agg_list);
+	}
+
+	port->agg_head = list_first_entry_or_null(&port->agg_list,
+						  struct rmnet_agg_page, list);
+}
+
+static struct sk_buff *rmnet_map_build_skb(struct rmnet_port *port)
+{
+	struct sk_buff *skb;
+	unsigned int size;
+	struct page *page;
+	void *vaddr;
+
+	page = rmnet_get_agg_pages(port);
+	if (!page)
+		return NULL;
+
+	vaddr = page_address(page);
+	size = PAGE_SIZE << port->agg_size_order;
+
+	skb = build_skb(vaddr, size);
+	if (!skb) {
+		put_page(page);
+		return NULL;
+	}
+
+	return skb;
+}
+
 void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
 {
 	struct timespec diff, last;
@@ -1277,8 +1399,7 @@ void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
 			return;
 		}
 
-		port->agg_skb = alloc_skb(port->egress_agg_params.agg_size,
-					  GFP_ATOMIC);
+		port->agg_skb = rmnet_map_build_skb(port);
 		if (!port->agg_skb) {
 			port->agg_skb = 0;
 			port->agg_count = 0;
@@ -1328,14 +1449,51 @@ void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
 	spin_unlock_irqrestore(&port->agg_lock, flags);
 }
 
+void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u16 size,
+				    u8 count, u8 features, u32 time)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&port->agg_lock, irq_flags);
+	port->egress_agg_params.agg_count = count;
+	port->egress_agg_params.agg_time = time;
+	port->egress_agg_params.agg_size = size;
+	port->egress_agg_params.agg_features = features;
+
+	rmnet_free_agg_pages(port);
+
+	/* This effectively disables recycling in case the UL aggregation
+	 * size is less than PAGE_SIZE.
+	 */
+	if (size < PAGE_SIZE)
+		goto done;
+
+	port->agg_size_order = get_order(size);
+
+	size = PAGE_SIZE << port->agg_size_order;
+	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	port->egress_agg_params.agg_size = size;
+
+	if (port->egress_agg_params.agg_features == RMNET_PAGE_RECYCLE)
+		rmnet_alloc_agg_pages(port);
+
+done:
+	spin_unlock_irqrestore(&port->agg_lock, irq_flags);
+}
+
 void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
 {
 	hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
-	port->egress_agg_params.agg_size = 8192;
-	port->egress_agg_params.agg_count = 20;
-	port->egress_agg_params.agg_time = 3000000;
 	spin_lock_init(&port->agg_lock);
+	INIT_LIST_HEAD(&port->agg_list);
+
+	/* Since PAGE_SIZE - 1 is specified here, no pages are pre-allocated.
+	 * This is done to reduce memory usage in cases where
+	 * UL aggregation is disabled.
+	 * Additionally, the features flag is set to 0.
+	 */
+	rmnet_map_update_ul_agg_config(port, PAGE_SIZE - 1, 20, 0, 3000000);
 
 	INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
 }
@@ -1359,6 +1517,7 @@ void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
 		port->agg_state = 0;
 	}
 
+	rmnet_free_agg_pages(port);
 	spin_unlock_irqrestore(&port->agg_lock, flags);
 }
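
rmnet_map_build_skb() above wraps a driver-owned page in an skb without any copy, which is also why rmnet_map_update_ul_agg_config() shrinks agg_size by SKB_DATA_ALIGN(sizeof(struct skb_shared_info)): build_skb() reserves room for the shared info at the tail of the buffer it is given, so that region must not be counted as payload. A condensed sketch of the wrap (recycling elided, allocation parameters illustrative):

	#include <linux/gfp.h>
	#include <linux/skbuff.h>

	static struct sk_buff *wrap_page(unsigned int order)
	{
		struct page *page = __dev_alloc_pages(GFP_ATOMIC, order);
		struct sk_buff *skb;

		if (!page)
			return NULL;

		/* build_skb() reuses the page memory directly and keeps
		 * struct skb_shared_info at the end of the given size.
		 */
		skb = build_skb(page_address(page), PAGE_SIZE << order);
		if (!skb) {
			put_page(page);	/* skb never took ownership */
			return NULL;
		}

		return skb;
	}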
 
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
index e5c530c..2ce29bf 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
@@ -22,6 +22,9 @@ RMNET_INGRESS_FORMAT_DL_MARKER_V2)
 #define RMNET_INGRESS_FORMAT_PS                 BIT(27)
 #define RMNET_FORMAT_PS_NOTIF                   BIT(26)
 
+/* UL Aggregation parameters */
+#define RMNET_PAGE_RECYCLE                      BIT(0)
+
 /* Replace skb->dev to a virtual rmnet device and pass up the stack */
 #define RMNET_EPMODE_VND (1)
 /* Pass the frame directly to another device with dev_queue_xmit() */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 1edf9e7..f00f1ce 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -226,6 +226,8 @@ static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = {
 	"DL header total pkts received",
 	"DL trailer last seen sequence",
 	"DL trailer pkts received",
+	"UL agg reuse",
+	"UL agg alloc",
 };
 
 static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -276,6 +278,7 @@ static int rmnet_stats_reset(struct net_device *dev)
 {
 	struct rmnet_priv *priv = netdev_priv(dev);
 	struct rmnet_port_priv_stats *stp;
+	struct rmnet_priv_stats *st;
 	struct rmnet_port *port;
 
 	port = rmnet_get_port(priv->real_dev);
@@ -285,6 +288,11 @@ static int rmnet_stats_reset(struct net_device *dev)
 	stp = &port->stats;
 
 	memset(stp, 0, sizeof(*stp));
+
+	st = &priv->stats;
+
+	memset(st, 0, sizeof(*st));
+
 	return 0;
 }
 
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index dc30f11..3feb49b 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1011,6 +1011,8 @@ static void __team_compute_features(struct team *team)
 
 	team->dev->vlan_features = vlan_features;
 	team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+				     NETIF_F_HW_VLAN_CTAG_TX |
+				     NETIF_F_HW_VLAN_STAG_TX |
 				     NETIF_F_GSO_UDP_L4;
 	team->dev->hard_header_len = max_hard_header_len;
 
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index f4247b2..b7a0df9 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -285,7 +285,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
 static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
 {
 	int i;
-	__u8 tmp;
+	__u8 tmp = 0;
 	__le16 retdatai;
 	int ret;
 
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index ba15afd..f928487 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -2049,7 +2049,8 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
 	if (pbss)
 		wmi_nettype = WMI_NETTYPE_P2P;
 
-	wil_dbg_misc(wil, "start_ap: mid=%d, is_go=%d\n", vif->mid, is_go);
+	wil_dbg_misc(wil, "start_ap: mid=%d, is_go=%d ap_ps=%d\n", vif->mid,
+		     is_go, wil->ap_ps);
 	if (is_go && !pbss) {
 		wil_err(wil, "P2P GO must be in PBSS\n");
 		return -ENOTSUPP;
@@ -2139,6 +2140,14 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
 	if (rc)
 		goto err_bcast;
 
+	if (test_bit(WMI_FW_CAPABILITY_AP_POWER_MANAGEMENT,
+		     wil->fw_capabilities)) {
+		enum wmi_ps_profile_type ps_profile = wil->ap_ps ?
+			wil->ps_profile : WMI_PS_PROFILE_TYPE_PS_DISABLED;
+
+		wil_ps_update(wil, ps_profile);
+	}
+
 	goto out; /* success */
 
 err_bcast:
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 7a33dab..36ca937 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -2639,6 +2639,7 @@ static const struct dbg_off dbg_wil_off[] = {
 	WIL_FIELD(rx_buff_id_count, 0644,	doff_u32),
 	WIL_FIELD(amsdu_en, 0644,	doff_u8),
 	WIL_FIELD(force_edmg_channel, 0644,	doff_u8),
+	WIL_FIELD(ap_ps, 0644, doff_u8),
 	{},
 };
 
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 1c2227c..e5aca30 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1307,6 +1307,8 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
 	}
 
 	update_supported_bands(wil);
+
+	wil->ap_ps = test_bit(WIL_PLATFORM_CAPA_AP_PS, wil->platform_capa);
 }
 
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 97d5933..7913124 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -299,6 +299,7 @@ static void wil_vif_init(struct wil6210_vif *vif)
 
 	INIT_WORK(&vif->probe_client_worker, wil_probe_client_worker);
 	INIT_WORK(&vif->disconnect_worker, wil_disconnect_worker);
+	INIT_WORK(&vif->p2p.discovery_expired_work, wil_p2p_listen_expired);
 	INIT_WORK(&vif->p2p.delayed_listen_work, wil_p2p_delayed_listen_work);
 
 	INIT_LIST_HEAD(&vif->probe_client_pending);
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 5120b46..08d88f1 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1041,6 +1041,7 @@ struct wil6210_priv {
 	void *platform_handle;
 	struct wil_platform_ops platform_ops;
 	bool keep_radio_on_during_sleep;
+	u8 ap_ps; /* AP mode power save enabled */
 
 	struct pmc_ctx pmc;
 
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index d381649..e6730af 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -32,6 +32,7 @@ enum wil_platform_capa {
 	WIL_PLATFORM_CAPA_T_PWR_ON_0 = 1,
 	WIL_PLATFORM_CAPA_EXT_CLK = 2,
 	WIL_PLATFORM_CAPA_SMMU = 3,
+	WIL_PLATFORM_CAPA_AP_PS = 4,
 	WIL_PLATFORM_CAPA_MAX,
 };
 
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 3a10bd8..96dff3d 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1447,6 +1447,10 @@ wmi_evt_sched_scan_result(struct wil6210_vif *vif, int id, void *d, int len)
 	__le16 fc;
 	u32 d_len;
 	struct cfg80211_bss *bss;
+	struct cfg80211_inform_bss bss_data = {
+		.scan_width = NL80211_BSS_CHAN_WIDTH_20,
+		.boottime_ns = ktime_to_ns(ktime_get_boottime()),
+	};
 
 	if (flen < 0) {
 		wil_err(wil, "sched scan result event too short, len %d\n",
@@ -1489,8 +1493,10 @@ wmi_evt_sched_scan_result(struct wil6210_vif *vif, int id, void *d, int len)
 		return;
 	}
 
-	bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
-					d_len, signal, GFP_KERNEL);
+	bss_data.signal = signal;
+	bss_data.chan = channel;
+	bss = cfg80211_inform_bss_frame_data(wiphy, &bss_data, rx_mgmt_frame,
+					     d_len, GFP_KERNEL);
 	if (bss) {
 		wil_dbg_wmi(wil, "Added BSS %pM\n", rx_mgmt_frame->bssid);
 		cfg80211_put_bss(wiphy, bss);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index ceb0c5b..fc28f4b 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -99,6 +99,7 @@ enum wmi_fw_capability {
 	WMI_FW_CAPABILITY_CHANNEL_4			= 26,
 	WMI_FW_CAPABILITY_IPA				= 27,
 	WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF		= 30,
+	WMI_FW_CAPABILITY_AP_POWER_MANAGEMENT		= 32,
 	WMI_FW_CAPABILITY_MAX,
 };
 
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index b80d523..0215f59 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -29,6 +29,7 @@
 #define FW_READY_TIMEOUT		20000
 #define FW_ASSERT_TIMEOUT		5000
 #define CNSS_EVENT_PENDING		2989
+#define COLD_BOOT_CAL_SHUTDOWN_DELAY_MS	50
 
 #define CNSS_QUIRKS_DEFAULT		0
 #ifdef CONFIG_CNSS_EMULATION
@@ -1254,6 +1255,7 @@ static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv,
 	cnss_wlfw_wlan_mode_send_sync(plat_priv, CNSS_OFF);
 	cnss_release_antenna_sharing(plat_priv);
 	cnss_bus_dev_shutdown(plat_priv);
+	msleep(COLD_BOOT_CAL_SHUTDOWN_DELAY_MS);
 	complete(&plat_priv->cal_complete);
 	clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
 
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 01b20ec..841a4aa 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -16,6 +16,7 @@
 #include "bus.h"
 #include "debug.h"
 #include "pci.h"
+#include "reg.h"
 
 #define PCI_LINK_UP			1
 #define PCI_LINK_DOWN			0
@@ -57,79 +58,10 @@ static DEFINE_SPINLOCK(pci_reg_window_lock);
 
 #define MHI_TIMEOUT_OVERWRITE_MS	(plat_priv->ctrl_params.mhi_timeout)
 
-#define QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET	0x310C
-
-#define QCA6390_CE_SRC_RING_REG_BASE		0xA00000
-#define QCA6390_CE_DST_RING_REG_BASE		0xA01000
-#define QCA6390_CE_COMMON_REG_BASE		0xA18000
-
-#define QCA6390_CE_SRC_RING_BASE_LSB_OFFSET	0x0
-#define QCA6390_CE_SRC_RING_BASE_MSB_OFFSET	0x4
-#define QCA6390_CE_SRC_RING_ID_OFFSET		0x8
-#define QCA6390_CE_SRC_RING_MISC_OFFSET		0x10
-#define QCA6390_CE_SRC_CTRL_OFFSET		0x58
-#define QCA6390_CE_SRC_R0_CE_CH_SRC_IS_OFFSET	0x5C
-#define QCA6390_CE_SRC_RING_HP_OFFSET		0x400
-#define QCA6390_CE_SRC_RING_TP_OFFSET		0x404
-
-#define QCA6390_CE_DEST_RING_BASE_LSB_OFFSET	0x0
-#define QCA6390_CE_DEST_RING_BASE_MSB_OFFSET	0x4
-#define QCA6390_CE_DEST_RING_ID_OFFSET		0x8
-#define QCA6390_CE_DEST_RING_MISC_OFFSET	0x10
-#define QCA6390_CE_DEST_CTRL_OFFSET		0xB0
-#define QCA6390_CE_CH_DST_IS_OFFSET		0xB4
-#define QCA6390_CE_CH_DEST_CTRL2_OFFSET		0xB8
-#define QCA6390_CE_DEST_RING_HP_OFFSET		0x400
-#define QCA6390_CE_DEST_RING_TP_OFFSET		0x404
-
-#define QCA6390_CE_STATUS_RING_BASE_LSB_OFFSET	0x58
-#define QCA6390_CE_STATUS_RING_BASE_MSB_OFFSET	0x5C
-#define QCA6390_CE_STATUS_RING_ID_OFFSET	0x60
-#define QCA6390_CE_STATUS_RING_MISC_OFFSET	0x68
-#define QCA6390_CE_STATUS_RING_HP_OFFSET	0x408
-#define QCA6390_CE_STATUS_RING_TP_OFFSET	0x40C
-
-#define QCA6390_CE_COMMON_GXI_ERR_INTS		0x14
-#define QCA6390_CE_COMMON_GXI_ERR_STATS		0x18
-#define QCA6390_CE_COMMON_GXI_WDOG_STATUS	0x2C
-#define QCA6390_CE_COMMON_TARGET_IE_0		0x48
-#define QCA6390_CE_COMMON_TARGET_IE_1		0x4C
-
-#define QCA6390_CE_REG_INTERVAL			0x2000
-
-#define SHADOW_REG_COUNT			36
-#define QCA6390_PCIE_SHADOW_REG_VALUE_0		0x8FC
-#define QCA6390_PCIE_SHADOW_REG_VALUE_34	0x984
-#define QCA6390_PCIE_SHADOW_REG_VALUE_35	0x988
-#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL3	0x1F80118
-#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL4	0x1F8011C
-#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL5	0x1F80120
-
-#define SHADOW_REG_INTER_COUNT			43
-#define QCA6390_PCIE_SHADOW_REG_INTER_0		0x1E05000
-#define QCA6390_PCIE_SHADOW_REG_HUNG		0x1E050A8
-
-#define QDSS_APB_DEC_CSR_BASE			0x1C01000
-
-#define QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET	0x6C
-#define QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET	0x70
-#define QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET	0x74
-#define QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET	0x78
-
-#define MAX_UNWINDOWED_ADDRESS			0x80000
-#define WINDOW_ENABLE_BIT			0x40000000
-#define WINDOW_SHIFT				19
-#define WINDOW_VALUE_MASK			0x3F
-#define WINDOW_START				MAX_UNWINDOWED_ADDRESS
-#define WINDOW_RANGE_MASK			0x7FFFF
-
 #define FORCE_WAKE_DELAY_MIN_US			4000
 #define FORCE_WAKE_DELAY_MAX_US			6000
 #define FORCE_WAKE_DELAY_TIMEOUT_US		60000
 
-#define QCA6390_TIME_SYNC_ENABLE		0x80000000
-#define QCA6390_TIME_SYNC_CLEAR			0x0
-
 static struct cnss_pci_reg ce_src[] = {
 	{ "SRC_RING_BASE_LSB", QCA6390_CE_SRC_RING_BASE_LSB_OFFSET },
 	{ "SRC_RING_BASE_MSB", QCA6390_CE_SRC_RING_BASE_MSB_OFFSET },
@@ -178,6 +110,240 @@ static struct cnss_pci_reg qdss_csr[] = {
 	{ NULL },
 };
 
+static struct cnss_misc_reg wcss_reg_access_seq[] = {
+	{0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
+	{1, QCA6390_GCC_DEBUG_CLK_CTL, 0x802},
+	{0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
+	{0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_PLL_MODE, 0},
+	{1, QCA6390_GCC_DEBUG_CLK_CTL, 0x805},
+	{0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
+	{0, QCA6390_WCSS_WFSS_PMM_WFSS_PMM_R0_PMM_CTRL, 0},
+	{0, QCA6390_WCSS_PMM_TOP_PMU_CX_CSR, 0},
+	{0, QCA6390_WCSS_PMM_TOP_AON_INT_RAW_STAT, 0},
+	{0, QCA6390_WCSS_PMM_TOP_AON_INT_EN, 0},
+	{0, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_STS, 0},
+	{1, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_CTL, 0xD},
+	{0, QCA6390_WCSS_PMM_TOP_TESTBUS_STS, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
+	{1, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
+	{1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x8},
+	{0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_STS, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_CTL, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_0, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_9, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS0, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS1, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS2, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS3, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS4, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS5, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS6, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE0, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE1, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE2, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE3, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE4, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE5, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE6, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING0, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING1, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING2, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING3, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING4, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING5, 0},
+	{0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING6, 0},
+	{1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30040},
+	{0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
+	{0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+	{0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+	{0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+	{1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30105},
+	{0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
+	{0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+	{0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+	{0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+	{0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+	{0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+	{0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+	{0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+	{0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
+	{0, QCA6390_WCSS_CC_WCSS_UMAC_NOC_CBCR, 0},
+	{0, QCA6390_WCSS_CC_WCSS_UMAC_AHB_CBCR, 0},
+	{0, QCA6390_WCSS_CC_WCSS_UMAC_GDSCR, 0},
+	{0, QCA6390_WCSS_CC_WCSS_WLAN1_GDSCR, 0},
+	{0, QCA6390_WCSS_CC_WCSS_WLAN2_GDSCR, 0},
+	{0, QCA6390_WCSS_PMM_TOP_PMM_INT_CLR, 0},
+	{0, QCA6390_WCSS_PMM_TOP_AON_INT_STICKY_EN, 0},
+};
+
+static struct cnss_misc_reg pcie_reg_access_seq[] = {
+	{0, QCA6390_PCIE_PCIE_WCSS_STATUS_FOR_DEBUG_LOW_PCIE_LOCAL_REG, 0},
+	{0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
+	{1, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0x18},
+	{0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
+	{0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
+	{0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_STATUS_SOC_PCIE_REG, 0},
+	{0, QCA6390_PCIE_SOC_COMMIT_REPLAY_SOC_PCIE_REG, 0},
+	{0, QCA6390_TLMM_GPIO_IN_OUT57, 0},
+	{0, QCA6390_TLMM_GPIO_INTR_CFG57, 0},
+	{0, QCA6390_TLMM_GPIO_INTR_STATUS57, 0},
+	{0, QCA6390_TLMM_GPIO_IN_OUT59, 0},
+	{0, QCA6390_TLMM_GPIO_INTR_CFG59, 0},
+	{0, QCA6390_TLMM_GPIO_INTR_STATUS59, 0},
+	{0, QCA6390_PCIE_PCIE_PARF_LTSSM, 0},
+	{0, QCA6390_PCIE_PCIE_PARF_PM_STTS, 0},
+	{0, QCA6390_PCIE_PCIE_PARF_PM_STTS_1, 0},
+	{0, QCA6390_PCIE_PCIE_PARF_INT_STATUS, 0},
+	{0, QCA6390_PCIE_PCIE_INT_ALL_STATUS, 0},
+	{0, QCA6390_PCIE_PCIE_INT_ALL_MASK, 0},
+	{0, QCA6390_PCIE_PCIE_PARF_BDF_TO_SID_CFG, 0},
+	{0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
+	{0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_3, 0},
+	{0, QCA6390_PCIE_PCIE_PARF_MHI_CLOCK_RESET_CTRL, 0},
+	{0, QCA6390_PCIE_PCIE_PARF_MHI_BASE_ADDR_LOWER, 0},
+	{0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_STATUS, 0},
+	{0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_CFG, 0},
+	{0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
+	{0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1SUB, 0},
+	{0, QCA6390_PCIE_PCIE_CORE_CONFIG, 0},
+	{0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
+	{0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L2, 0},
+	{0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1, 0},
+	{0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1, 0},
+	{0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
+	{0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_HIGH, 0},
+	{0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_LOW, 0},
+	{0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_HIGH, 0},
+	{0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_LOW, 0},
+	{0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_STATUS_REG2, 0},
+	{0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_STATUS_REG2, 0},
+	{0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN2_CFG_REG1, 0},
+	{0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN1_CFG_REG1, 0},
+	{0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_APS_STATUS_REG1, 0},
+	{0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_APS_STATUS_REG1, 0},
+};
+
+static struct cnss_misc_reg wlaon_reg_access_seq[] = {
+	{0, QCA6390_WLAON_WCSSAON_PCIE_SR_STATUS_HI_REG, 0},
+	{0, QCA6390_WLAON_SOC_POWER_CTRL, 0},
+	{0, QCA6390_WLAON_PCIE_PWR_CTRL_REG, 0},
+	{0, QCA6390_WLAON_WCSSAON_PCIE_SR_STATUS_LO_REG, 0},
+	{0, QCA6390_WLAON_WCSS_TCSR_PMM_SR_STATUS_HI_REG, 0},
+	{0, QCA6390_WLAON_WCSS_TCSR_PMM_SR_STATUS_LO_REG, 0},
+	{0, QCA6390_WLAON_SOC_POWER_CTRL, 0},
+	{0, QCA6390_WLAON_SOC_PWR_WDG_BARK_THRSHD, 0},
+	{0, QCA6390_WLAON_SOC_PWR_WDG_BITE_THRSHD, 0},
+	{0, QCA6390_WLAON_SW_COLD_RESET, 0},
+	{0, QCA6390_WLAON_RFA_MEM_SLP_NRET_N_OVERRIDE, 0},
+	{0, QCA6390_WLAON_GDSC_DELAY_SETTING, 0},
+	{0, QCA6390_WLAON_GDSC_DELAY_SETTING2, 0},
+	{0, QCA6390_WLAON_WL_PWR_STATUS_REG, 0},
+	{0, QCA6390_WLAON_WL_AON_DBG_CFG_REG, 0},
+	{0, QCA6390_WLAON_GLOBAL_COUNTER_CTRL1, 0},
+	{0, QCA6390_WLAON_GLOBAL_COUNTER_CTRL6, 0},
+	{0, QCA6390_WLAON_GLOBAL_COUNTER_CTRL7, 0},
+	{0, QCA6390_WLAON_GLOBAL_COUNTER_CTRL3, 0},
+	{0, QCA6390_WLAON_GLOBAL_COUNTER_CTRL4, 0},
+	{0, QCA6390_WLAON_GLOBAL_COUNTER_CTRL5, 0},
+	{0, QCA6390_WLAON_GLOBAL_COUNTER_CTRL8, 0},
+	{0, QCA6390_WLAON_GLOBAL_COUNTER_CTRL2, 0},
+	{0, QCA6390_WLAON_GLOBAL_COUNTER_CTRL9, 0},
+	{0, QCA6390_WLAON_RTC_CLK_CAL_CTRL1, 0},
+	{0, QCA6390_WLAON_RTC_CLK_CAL_CTRL2, 0},
+	{0, QCA6390_WLAON_RTC_CLK_CAL_CTRL3, 0},
+	{0, QCA6390_WLAON_RTC_CLK_CAL_CTRL4, 0},
+	{0, QCA6390_WLAON_RTC_CLK_CAL_CTRL5, 0},
+	{0, QCA6390_WLAON_RTC_CLK_CAL_CTRL6, 0},
+	{0, QCA6390_WLAON_RTC_CLK_CAL_CTRL7, 0},
+	{0, QCA6390_WLAON_RTC_CLK_CAL_CTRL8, 0},
+	{0, QCA6390_WLAON_RTC_CLK_CAL_CTRL9, 0},
+	{0, QCA6390_WLAON_WCSSAON_CONFIG_REG, 0},
+	{0, QCA6390_WLAON_WLAN_OEM_DEBUG_REG, 0},
+	{0, QCA6390_WLAON_WLAN_RAM_DUMP_REG, 0},
+	{0, QCA6390_WLAON_QDSS_WCSS_REG, 0},
+	{0, QCA6390_WLAON_QDSS_WCSS_ACK, 0},
+	{0, QCA6390_WLAON_WL_CLK_CNTL_KDF_REG, 0},
+	{0, QCA6390_WLAON_WL_CLK_CNTL_PMU_HFRC_REG, 0},
+	{0, QCA6390_WLAON_QFPROM_PWR_CTRL_REG, 0},
+	{0, QCA6390_WLAON_DLY_CONFIG, 0},
+	{0, QCA6390_WLAON_WLAON_Q6_IRQ_REG, 0},
+	{0, QCA6390_WLAON_PCIE_INTF_SW_CFG_REG, 0},
+	{0, QCA6390_WLAON_PCIE_INTF_STICKY_SW_CFG_REG, 0},
+	{0, QCA6390_WLAON_PCIE_INTF_PHY_SW_CFG_REG, 0},
+	{0, QCA6390_WLAON_PCIE_INTF_PHY_NOCSR_SW_CFG_REG, 0},
+	{0, QCA6390_WLAON_Q6_COOKIE_BIT, 0},
+	{0, QCA6390_WLAON_WARM_SW_ENTRY, 0},
+	{0, QCA6390_WLAON_RESET_DBG_SW_ENTRY, 0},
+	{0, QCA6390_WLAON_WL_PMUNOC_CFG_REG, 0},
+	{0, QCA6390_WLAON_RESET_CAUSE_CFG_REG, 0},
+	{0, QCA6390_WLAON_SOC_WCSSAON_WAKEUP_IRQ_7_EN_REG, 0},
+	{0, QCA6390_WLAON_DEBUG, 0},
+	{0, QCA6390_WLAON_SOC_PARAMETERS, 0},
+	{0, QCA6390_WLAON_WLPM_SIGNAL, 0},
+	{0, QCA6390_WLAON_SOC_RESET_CAUSE_REG, 0},
+	{0, QCA6390_WLAON_WAKEUP_PCIE_SOC_REG, 0},
+	{0, QCA6390_WLAON_PBL_STACK_CANARY, 0},
+	{0, QCA6390_WLAON_MEM_TOT_NUM_GRP_REG, 0},
+	{0, QCA6390_WLAON_MEM_TOT_BANKS_IN_GRP0_REG, 0},
+	{0, QCA6390_WLAON_MEM_TOT_BANKS_IN_GRP1_REG, 0},
+	{0, QCA6390_WLAON_MEM_TOT_BANKS_IN_GRP2_REG, 0},
+	{0, QCA6390_WLAON_MEM_TOT_BANKS_IN_GRP3_REG, 0},
+	{0, QCA6390_WLAON_MEM_TOT_SIZE_IN_GRP0_REG, 0},
+	{0, QCA6390_WLAON_MEM_TOT_SIZE_IN_GRP1_REG, 0},
+	{0, QCA6390_WLAON_MEM_TOT_SIZE_IN_GRP2_REG, 0},
+	{0, QCA6390_WLAON_MEM_TOT_SIZE_IN_GRP3_REG, 0},
+	{0, QCA6390_WLAON_MEM_SLP_NRET_OVERRIDE_GRP0_REG, 0},
+	{0, QCA6390_WLAON_MEM_SLP_NRET_OVERRIDE_GRP1_REG, 0},
+	{0, QCA6390_WLAON_MEM_SLP_NRET_OVERRIDE_GRP2_REG, 0},
+	{0, QCA6390_WLAON_MEM_SLP_NRET_OVERRIDE_GRP3_REG, 0},
+	{0, QCA6390_WLAON_MEM_SLP_RET_OVERRIDE_GRP0_REG, 0},
+	{0, QCA6390_WLAON_MEM_SLP_RET_OVERRIDE_GRP1_REG, 0},
+	{0, QCA6390_WLAON_MEM_SLP_RET_OVERRIDE_GRP2_REG, 0},
+	{0, QCA6390_WLAON_MEM_SLP_RET_OVERRIDE_GRP3_REG, 0},
+	{0, QCA6390_WLAON_MEM_CNT_SEL_REG, 0},
+	{0, QCA6390_WLAON_MEM_NO_EXTBHS_REG, 0},
+	{0, QCA6390_WLAON_MEM_DEBUG_REG, 0},
+	{0, QCA6390_WLAON_MEM_DEBUG_BUS_REG, 0},
+	{0, QCA6390_WLAON_MEM_REDUN_CFG_REG, 0},
+	{0, QCA6390_WLAON_WL_AON_SPARE2, 0},
+	{0, QCA6390_WLAON_VSEL_CFG_FOR_WL_RET_DISABLE_REG, 0},
+	{0, QCA6390_WLAON_BTFM_WLAN_IPC_STATUS_REG, 0},
+	{0, QCA6390_WLAON_MPM_COUNTER_CHICKEN_BITS, 0},
+	{0, QCA6390_WLAON_WLPM_CHICKEN_BITS, 0},
+	{0, QCA6390_WLAON_PCIE_PHY_PWR_REG, 0},
+	{0, QCA6390_WLAON_WL_CLK_CNTL_PMU_LPO2M_REG, 0},
+	{0, QCA6390_WLAON_WL_SS_ROOT_CLK_SWITCH_REG, 0},
+	{0, QCA6390_WLAON_POWERCTRL_PMU_REG, 0},
+	{0, QCA6390_WLAON_POWERCTRL_MEM_REG, 0},
+	{0, QCA6390_WLAON_PCIE_PWR_CTRL_REG, 0},
+	{0, QCA6390_WLAON_SOC_PWR_PROFILE_REG, 0},
+	{0, QCA6390_WLAON_WCSSAON_PCIE_SR_STATUS_HI_REG, 0},
+	{0, QCA6390_WLAON_WCSSAON_PCIE_SR_STATUS_LO_REG, 0},
+	{0, QCA6390_WLAON_WCSS_TCSR_PMM_SR_STATUS_HI_REG, 0},
+	{0, QCA6390_WLAON_WCSS_TCSR_PMM_SR_STATUS_LO_REG, 0},
+	{0, QCA6390_WLAON_MEM_SVS_CFG_REG, 0},
+	{0, QCA6390_WLAON_CMN_AON_MISC_REG, 0},
+	{0, QCA6390_WLAON_INTR_STATUS, 0},
+	{0, QCA6390_SYSPM_SYSPM_PWR_STATUS, 0},
+	{0, QCA6390_SYSPM_DBG_BTFM_AON_REG, 0},
+	{0, QCA6390_SYSPM_DBG_BUS_SEL_REG, 0},
+	{0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+	{0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+	{0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+	{0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+	{0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+	{0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+	{0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+	{0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+	{0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+	{0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+};
+
+#define WCSS_REG_SIZE ARRAY_SIZE(wcss_reg_access_seq)
+#define PCIE_REG_SIZE ARRAY_SIZE(pcie_reg_access_seq)
+#define WLAON_REG_SIZE ARRAY_SIZE(wlaon_reg_access_seq)
+
 static int cnss_pci_check_link_status(struct cnss_pci_data *pci_priv)
 {
 	u16 device_id;
@@ -1090,6 +1256,74 @@ int cnss_pci_update_status(struct cnss_pci_data *pci_priv,
 	return 0;
 }
 
+static void cnss_pci_misc_reg_dump(struct cnss_pci_data *pci_priv,
+				   struct cnss_misc_reg *misc_reg,
+				   u32 misc_reg_size,
+				   char *reg_name)
+{
+	int i;
+
+	if (!misc_reg)
+		return;
+
+	if (in_interrupt() || irqs_disabled())
+		return;
+
+	if (cnss_pci_check_link_status(pci_priv))
+		return;
+
+	cnss_pci_force_wake_get(pci_priv);
+
+	cnss_pr_dbg("start to dump %s registers\n", reg_name);
+
+	for (i = 0; i < misc_reg_size; i++) {
+		if (misc_reg[i].wr) {
+			if (misc_reg[i].offset ==
+			    QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG &&
+			    i >= 1)
+				misc_reg[i].val =
+				QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG_MSK |
+				misc_reg[i - 1].val;
+			if (cnss_pci_reg_write(pci_priv,
+					       misc_reg[i].offset,
+					       misc_reg[i].val))
+				goto force_wake_put;
+			cnss_pr_vdbg("Write 0x%X to 0x%X\n",
+				     misc_reg[i].val,
+				     misc_reg[i].offset);
+
+		} else {
+			if (cnss_pci_reg_read(pci_priv,
+					      misc_reg[i].offset,
+					      &misc_reg[i].val))
+				goto force_wake_put;
+			cnss_pr_vdbg("Read 0x%X from 0x%X\n",
+				     misc_reg[i].val,
+				     misc_reg[i].offset);
+		}
+	}
+
+force_wake_put:
+	cnss_pci_force_wake_put(pci_priv);
+}
+
+static void cnss_pci_dump_misc_reg(struct cnss_pci_data *pci_priv)
+{
+	if (in_interrupt() || irqs_disabled())
+		return;
+
+	if (cnss_pci_check_link_status(pci_priv))
+		return;
+
+	mhi_debug_reg_dump(pci_priv->mhi_ctrl);
+	cnss_pci_misc_reg_dump(pci_priv, pci_priv->wcss_reg,
+			       pci_priv->wcss_reg_size, "wcss");
+	cnss_pci_misc_reg_dump(pci_priv, pci_priv->pcie_reg,
+			       pci_priv->pcie_reg_size, "pcie");
+	cnss_pci_misc_reg_dump(pci_priv, pci_priv->wlaon_reg,
+			       pci_priv->wlaon_reg_size, "wlaon");
+}
+
 static void cnss_pci_dump_shadow_reg(struct cnss_pci_data *pci_priv)
 {
 	int i, j = 0, array_size = SHADOW_REG_COUNT + SHADOW_REG_INTER_COUNT;
@@ -1626,9 +1860,16 @@ int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv)
 static bool cnss_pci_is_drv_supported(struct cnss_pci_data *pci_priv)
 {
 	struct pci_dev *root_port = pci_find_pcie_root_port(pci_priv->pci_dev);
-	struct device_node *root_of_node = root_port->dev.of_node;
+	struct device_node *root_of_node;
 	bool drv_supported = false;
 
+	if (!root_port) {
+		cnss_pr_err("PCIe DRV is not supported as root port is null\n");
+		return drv_supported;
+	}
+
+	root_of_node = root_port->dev.of_node;
+
 	if (root_of_node->parent)
 		drv_supported = of_property_read_bool(root_of_node->parent,
 						      "qcom,drv-supported");
@@ -1691,6 +1932,8 @@ static void cnss_pci_event_cb(struct msm_pcie_notify *notify)
 		break;
 	case MSM_PCIE_EVENT_DRV_DISCONNECT:
 		cnss_pr_dbg("DRV subsystem is disconnected\n");
+		if (cnss_pci_get_auto_suspended(pci_priv))
+			cnss_pci_pm_request_resume(pci_priv);
 		cnss_pci_set_drv_connected(pci_priv, 0);
 		break;
 	default:
@@ -1864,11 +2107,17 @@ static int cnss_pci_suspend(struct device *dev)
 	if (!cnss_is_device_powered_on(plat_priv))
 		goto out;
 
-	set_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
-
-	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks))
+	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks)) {
 		pci_priv->drv_connected_last =
 			cnss_pci_get_drv_connected(pci_priv);
+		if (!pci_priv->drv_connected_last) {
+			cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
+			ret = -EAGAIN;
+			goto out;
+		}
+	}
+
+	set_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
 
 	ret = cnss_pci_suspend_driver(pci_priv);
 	if (ret)
@@ -1988,11 +2237,16 @@ static int cnss_pci_runtime_suspend(struct device *dev)
 		return -EAGAIN;
 	}
 
-	cnss_pr_vdbg("Runtime suspend start\n");
-
-	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks))
+	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks)) {
 		pci_priv->drv_connected_last =
 			cnss_pci_get_drv_connected(pci_priv);
+		if (!pci_priv->drv_connected_last) {
+			cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
+			return -EAGAIN;
+		}
+	}
+
+	cnss_pr_vdbg("Runtime suspend start\n");
 
 	driver_ops = pci_priv->driver_ops;
 	if (driver_ops && driver_ops->runtime_ops &&
@@ -2091,46 +2345,74 @@ void cnss_pci_pm_runtime_show_usage_count(struct cnss_pci_data *pci_priv)
 
 int cnss_pci_pm_request_resume(struct cnss_pci_data *pci_priv)
 {
-	struct pci_dev *pci_dev;
+	struct device *dev;
+	enum rpm_status status;
 
 	if (!pci_priv)
 		return -ENODEV;
 
-	pci_dev = pci_priv->pci_dev;
-	if (!pci_dev)
-		return -ENODEV;
+	dev = &pci_priv->pci_dev->dev;
 
-	return pm_request_resume(&pci_dev->dev);
+	status = dev->power.runtime_status;
+	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
+		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
+			     (void *)_RET_IP_);
+
+	return pm_request_resume(dev);
 }
 
 int cnss_pci_pm_runtime_resume(struct cnss_pci_data *pci_priv)
 {
-	struct pci_dev *pci_dev;
+	struct device *dev;
+	enum rpm_status status;
 
 	if (!pci_priv)
 		return -ENODEV;
 
-	pci_dev = pci_priv->pci_dev;
-	if (!pci_dev)
-		return -ENODEV;
+	dev = &pci_priv->pci_dev->dev;
 
-	return pm_runtime_resume(&pci_dev->dev);
+	status = dev->power.runtime_status;
+	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
+		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
+			     (void *)_RET_IP_);
+
+	return pm_runtime_resume(dev);
 }
 
 int cnss_pci_pm_runtime_get(struct cnss_pci_data *pci_priv)
 {
+	struct device *dev;
+	enum rpm_status status;
+
 	if (!pci_priv)
 		return -ENODEV;
 
-	return pm_runtime_get(&pci_priv->pci_dev->dev);
+	dev = &pci_priv->pci_dev->dev;
+
+	status = dev->power.runtime_status;
+	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
+		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
+			     (void *)_RET_IP_);
+
+	return pm_runtime_get(dev);
 }
 
 int cnss_pci_pm_runtime_get_sync(struct cnss_pci_data *pci_priv)
 {
+	struct device *dev;
+	enum rpm_status status;
+
 	if (!pci_priv)
 		return -ENODEV;
 
-	return pm_runtime_get_sync(&pci_priv->pci_dev->dev);
+	dev = &pci_priv->pci_dev->dev;
+
+	status = dev->power.runtime_status;
+	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
+		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
+			     (void *)_RET_IP_);
+
+	return pm_runtime_get_sync(dev);
 }
 
 void cnss_pci_pm_runtime_get_noresume(struct cnss_pci_data *pci_priv)
@@ -2138,7 +2420,7 @@ void cnss_pci_pm_runtime_get_noresume(struct cnss_pci_data *pci_priv)
 	if (!pci_priv)
 		return;
 
-	return pm_runtime_get_noresume(&pci_priv->pci_dev->dev);
+	pm_runtime_get_noresume(&pci_priv->pci_dev->dev);
 }
 
 int cnss_pci_pm_runtime_put_autosuspend(struct cnss_pci_data *pci_priv)
@@ -2496,6 +2778,7 @@ int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv)
 		return -ENODEV;
 
 	cnss_auto_resume(&pci_priv->pci_dev->dev);
+	cnss_pci_dump_misc_reg(pci_priv);
 	cnss_pci_dump_shadow_reg(pci_priv);
 
 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
@@ -2622,6 +2905,10 @@ int cnss_smmu_map(struct device *dev,
 	unsigned long iova;
 	size_t len;
 	int ret = 0;
+	int flag = IOMMU_READ | IOMMU_WRITE;
+	struct pci_dev *root_port;
+	struct device_node *root_of_node;
+	bool dma_coherent = false;
 
 	if (!pci_priv)
 		return -ENODEV;
@@ -2644,9 +2931,19 @@ int cnss_smmu_map(struct device *dev,
 		return -ENOMEM;
 	}
 
+	root_port = pci_find_pcie_root_port(pci_priv->pci_dev);
+	root_of_node = root_port->dev.of_node;
+	if (root_of_node->parent) {
+		dma_coherent = of_property_read_bool(root_of_node->parent,
+						     "dma-coherent");
+		cnss_pr_dbg("dma-coherent is %s\n",
+			    dma_coherent ? "enabled" : "disabled");
+		if (dma_coherent)
+			flag |= IOMMU_CACHE;
+	}
+
 	ret = iommu_map(pci_priv->iommu_domain, iova,
-			rounddown(paddr, PAGE_SIZE), len,
-			IOMMU_READ | IOMMU_WRITE);
+			rounddown(paddr, PAGE_SIZE), len, flag);
 	if (ret) {
 		cnss_pr_err("PA to IOVA mapping failed, ret %d\n", ret);
 		return ret;
@@ -3032,6 +3329,7 @@ void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
 	if (cnss_pci_check_link_status(pci_priv))
 		return;
 
+	cnss_pci_dump_misc_reg(pci_priv);
 	cnss_pci_dump_qdss_reg(pci_priv);
 
 	ret = mhi_download_rddm_img(pci_priv->mhi_ctrl, in_panic);
@@ -3351,6 +3649,31 @@ static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv)
 	kfree(mhi_ctrl->irq);
 }
 
+static void cnss_pci_config_regs(struct cnss_pci_data *pci_priv)
+{
+	switch (pci_priv->device_id) {
+	case QCA6390_DEVICE_ID:
+		pci_priv->wcss_reg = wcss_reg_access_seq;
+		pci_priv->wcss_reg_size = WCSS_REG_SIZE;
+		pci_priv->pcie_reg = pcie_reg_access_seq;
+		pci_priv->pcie_reg_size = PCIE_REG_SIZE;
+		pci_priv->wlaon_reg = wlaon_reg_access_seq;
+		pci_priv->wlaon_reg_size = WLAON_REG_SIZE;
+
+		/* Configure WDOG register with specific value so that we can
+		 * know if HW is in the process of WDOG reset recovery or not
+		 * when reading the registers.
+		 */
+		cnss_pci_reg_write(pci_priv,
+		QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG,
+		QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG_VAL);
+		break;
+	default:
+		return;
+	}
+}
+
 static int cnss_pci_probe(struct pci_dev *pci_dev,
 			  const struct pci_device_id *id)
 {
@@ -3432,6 +3755,9 @@ static int cnss_pci_probe(struct pci_dev *pci_dev,
 			goto disable_bus;
 		}
 		cnss_pci_get_link_status(pci_priv);
+
+		cnss_pci_config_regs(pci_priv);
+
 		if (EMULATION_HW)
 			break;
 		ret = cnss_suspend_pci_link(pci_priv);
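
The cnss_smmu_map() change above keys the IOMMU protection flags off the root port's "dma-coherent" DT property: on a cache-coherent PCIe path, IOMMU_CACHE marks the mapping snoopable so neither CPU nor device needs explicit cache maintenance. A reduced sketch of that decision (taking the DT node as a parameter rather than walking up from the PCI device):

	#include <linux/iommu.h>
	#include <linux/of.h>

	static int map_coherent_aware(struct iommu_domain *domain,
				      struct device_node *np,
				      unsigned long iova, phys_addr_t paddr,
				      size_t len)
	{
		int prot = IOMMU_READ | IOMMU_WRITE;

		/* Snoopable mapping only when the bus is cache-coherent. */
		if (of_property_read_bool(np, "dma-coherent"))
			prot |= IOMMU_CACHE;

		return iommu_map(domain, iova, paddr, len, prot);
	}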
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index 0858b27..8dcb14a6 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -53,6 +53,12 @@ struct cnss_pci_debug_reg {
 	u32 val;
 };
 
+struct cnss_misc_reg {
+	u8 wr;
+	u32 offset;
+	u32 val;
+};
+
 struct cnss_pci_data {
 	struct pci_dev *pci_dev;
 	struct cnss_plat_data *plat_priv;
@@ -87,6 +93,12 @@ struct cnss_pci_data {
 	struct delayed_work time_sync_work;
 	u8 disable_pc;
 	struct cnss_pci_debug_reg *debug_reg;
+	struct cnss_misc_reg *wcss_reg;
+	u32 wcss_reg_size;
+	struct cnss_misc_reg *pcie_reg;
+	u32 pcie_reg_size;
+	struct cnss_misc_reg *wlaon_reg;
+	u32 wlaon_reg_size;
 };
 
 static inline void cnss_set_pci_priv(struct pci_dev *pci_dev, void *data)
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index 3513d15..bb2bffc 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -775,12 +775,12 @@ int cnss_wlfw_wlan_cfg_send_sync(struct cnss_plat_data *plat_priv,
 	u32 i;
 	int ret = 0;
 
-	cnss_pr_dbg("Sending WLAN config message, state: 0x%lx\n",
-		    plat_priv->driver_state);
-
 	if (!plat_priv)
 		return -ENODEV;
 
+	cnss_pr_dbg("Sending WLAN config message, state: 0x%lx\n",
+		    plat_priv->driver_state);
+
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (!req)
 		return -ENOMEM;
diff --git a/drivers/net/wireless/cnss2/reg.h b/drivers/net/wireless/cnss2/reg.h
new file mode 100644
index 0000000..4052de4
--- /dev/null
+++ b/drivers/net/wireless/cnss2/reg.h
@@ -0,0 +1,267 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved. */
+
+#ifndef _CNSS_REG_H
+#define _CNSS_REG_H
+
+#define QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET	0x310C
+
+#define QCA6390_CE_SRC_RING_REG_BASE		0xA00000
+#define QCA6390_CE_DST_RING_REG_BASE		0xA01000
+#define QCA6390_CE_COMMON_REG_BASE		0xA18000
+
+#define QCA6390_CE_SRC_RING_BASE_LSB_OFFSET	0x0
+#define QCA6390_CE_SRC_RING_BASE_MSB_OFFSET	0x4
+#define QCA6390_CE_SRC_RING_ID_OFFSET		0x8
+#define QCA6390_CE_SRC_RING_MISC_OFFSET		0x10
+#define QCA6390_CE_SRC_CTRL_OFFSET		0x58
+#define QCA6390_CE_SRC_R0_CE_CH_SRC_IS_OFFSET	0x5C
+#define QCA6390_CE_SRC_RING_HP_OFFSET		0x400
+#define QCA6390_CE_SRC_RING_TP_OFFSET		0x404
+
+#define QCA6390_CE_DEST_RING_BASE_LSB_OFFSET	0x0
+#define QCA6390_CE_DEST_RING_BASE_MSB_OFFSET	0x4
+#define QCA6390_CE_DEST_RING_ID_OFFSET		0x8
+#define QCA6390_CE_DEST_RING_MISC_OFFSET	0x10
+#define QCA6390_CE_DEST_CTRL_OFFSET		0xB0
+#define QCA6390_CE_CH_DST_IS_OFFSET		0xB4
+#define QCA6390_CE_CH_DEST_CTRL2_OFFSET		0xB8
+#define QCA6390_CE_DEST_RING_HP_OFFSET		0x400
+#define QCA6390_CE_DEST_RING_TP_OFFSET		0x404
+
+#define QCA6390_CE_STATUS_RING_BASE_LSB_OFFSET	0x58
+#define QCA6390_CE_STATUS_RING_BASE_MSB_OFFSET	0x5C
+#define QCA6390_CE_STATUS_RING_ID_OFFSET	0x60
+#define QCA6390_CE_STATUS_RING_MISC_OFFSET	0x68
+#define QCA6390_CE_STATUS_RING_HP_OFFSET	0x408
+#define QCA6390_CE_STATUS_RING_TP_OFFSET	0x40C
+
+#define QCA6390_CE_COMMON_GXI_ERR_INTS		0x14
+#define QCA6390_CE_COMMON_GXI_ERR_STATS		0x18
+#define QCA6390_CE_COMMON_GXI_WDOG_STATUS	0x2C
+#define QCA6390_CE_COMMON_TARGET_IE_0		0x48
+#define QCA6390_CE_COMMON_TARGET_IE_1		0x4C
+
+#define QCA6390_CE_REG_INTERVAL			0x2000
+
+#define SHADOW_REG_COUNT			36
+#define QCA6390_PCIE_SHADOW_REG_VALUE_0		0x8FC
+#define QCA6390_PCIE_SHADOW_REG_VALUE_34	0x984
+#define QCA6390_PCIE_SHADOW_REG_VALUE_35	0x988
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL3	0x1F80118
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL4	0x1F8011C
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL5	0x1F80120
+
+#define SHADOW_REG_INTER_COUNT			43
+#define QCA6390_PCIE_SHADOW_REG_INTER_0		0x1E05000
+#define QCA6390_PCIE_SHADOW_REG_HUNG		0x1E050A8
+
+#define QDSS_APB_DEC_CSR_BASE			0x1C01000
+
+#define QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET	0x6C
+#define QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET	0x70
+#define QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET	0x74
+#define QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET	0x78
+
+#define MAX_UNWINDOWED_ADDRESS			0x80000
+#define WINDOW_ENABLE_BIT			0x40000000
+#define WINDOW_SHIFT				19
+#define WINDOW_VALUE_MASK			0x3F
+#define WINDOW_START				MAX_UNWINDOWED_ADDRESS
+#define WINDOW_RANGE_MASK			0x7FFFF
+
+#define QCA6390_TIME_SYNC_ENABLE		0x80000000
+#define QCA6390_TIME_SYNC_CLEAR			0x0
+
+#define QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG 0x01E04234
+#define QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG_VAL 0xDEAD1234
+#define QCA6390_PCIE_PCIE_WCSS_STATUS_FOR_DEBUG_LOW_PCIE_LOCAL_REG 0x01E03140
+#define QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG 0x1E04054
+#define QCA6390_PCIE_SOC_PCIE_WRAP_INTR_STATUS_SOC_PCIE_REG 0x1E04058
+#define QCA6390_PCIE_SOC_COMMIT_REPLAY_SOC_PCIE_REG 0x1E05090
+#define QCA6390_PCIE_PCIE_PARF_LTSSM 0x01E081B0
+#define QCA6390_PCIE_PCIE_PARF_PM_STTS 0x01E08024
+#define QCA6390_PCIE_PCIE_PARF_PM_STTS_1 0x01E08028
+#define QCA6390_PCIE_PCIE_PARF_INT_STATUS 0x01E08220
+#define QCA6390_PCIE_PCIE_INT_ALL_STATUS 0x01E08224
+#define QCA6390_PCIE_PCIE_INT_ALL_MASK 0x01E0822C
+#define QCA6390_PCIE_PCIE_PARF_BDF_TO_SID_CFG 0x01E0AC00
+#define QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4 0x01E08530
+#define QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_3 0x01E0852c
+#define QCA6390_PCIE_PCIE_PARF_MHI_CLOCK_RESET_CTRL 0x01E08174
+#define QCA6390_PCIE_PCIE_PARF_MHI_BASE_ADDR_LOWER 0x01E08178
+#define QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_STATUS 0x01E084D0
+#define QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_CFG 0x01E084d4
+#define QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0x01E0ec88
+#define QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1SUB 0x01E0ec08
+#define QCA6390_PCIE_PCIE_CORE_CONFIG 0x01E08640
+#define QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L2 0x01E0EC04
+#define QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1 0x01E0EC0C
+#define QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1 0x01E0EC84
+#define QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_HIGH 0x01E030C8
+#define QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_LOW 0x01E030CC
+#define QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_HIGH 0x01E0313C
+#define QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_LOW 0x01E03140
+
+#define QCA6390_GCC_DEBUG_CLK_CTL 0x001E4025C
+
+#define QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_PLL_MODE 0x00D00200
+#define QCA6390_WCSS_WFSS_PMM_WFSS_PMM_R0_PMM_CTRL 0x00B60164
+#define QCA6390_WCSS_PMM_TOP_PMU_CX_CSR 0x00B70080
+#define QCA6390_WCSS_PMM_TOP_AON_INT_RAW_STAT 0x00B700E0
+#define QCA6390_WCSS_PMM_TOP_AON_INT_EN 0x00B700D0
+#define QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_STS 0x00B70020
+#define QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_CTL 0x00B7001C
+#define QCA6390_WCSS_PMM_TOP_TESTBUS_STS 0x00B70028
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG 0x00DB0008
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG_MSK 0x20
+#define QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL 0x00D02000
+#define QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE 0x00D02004
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_STS 0x00DB000C
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_CTL 0x00DB0030
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_0 0x00DB0400
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_9 0x00DB0424
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS0 0x00D90380
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS1 0x00D90384
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS2 0x00D90388
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS3 0x00D9038C
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS4 0x00D90390
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS5 0x00D90394
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS6 0x00D90398
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE0 0x00D90100
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE1 0x00D90104
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE2 0x00D90108
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE3 0x00D9010C
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE4 0x00D90110
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE5 0x00D90114
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE6 0x00D90118
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING0 0x00D90500
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING1 0x00D90504
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING2 0x00D90508
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING3 0x00D9050C
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING4 0x00D90510
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING5 0x00D90514
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING6 0x00D90518
+#define QCA6390_WCSS_CC_WCSS_UMAC_NOC_CBCR 0x00C3029C
+#define QCA6390_WCSS_CC_WCSS_UMAC_AHB_CBCR 0x00C302BC
+#define QCA6390_WCSS_CC_WCSS_UMAC_GDSCR 0x00C30298
+#define QCA6390_WCSS_CC_WCSS_WLAN1_GDSCR 0x00C300C4
+#define QCA6390_WCSS_CC_WCSS_WLAN2_GDSCR 0x00C30138
+#define QCA6390_WCSS_PMM_TOP_PMM_INT_CLR 0x00B70168
+#define QCA6390_WCSS_PMM_TOP_AON_INT_STICKY_EN 0x00B700D8
+
+#define QCA6390_TLMM_GPIO_IN_OUT57 0x01839004
+#define QCA6390_TLMM_GPIO_INTR_CFG57 0x01839008
+#define QCA6390_TLMM_GPIO_INTR_STATUS57 0x0183900C
+#define QCA6390_TLMM_GPIO_IN_OUT59 0x0183b004
+#define QCA6390_TLMM_GPIO_INTR_CFG59 0x0183b008
+#define QCA6390_TLMM_GPIO_INTR_STATUS59 0x0183b00C
+
+#define QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_STATUS_REG2 0x00B6017C
+#define QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_STATUS_REG2 0x00B60190
+#define QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN2_CFG_REG1 0x00B6018C
+#define QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN1_CFG_REG1 0x00B60178
+#define QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_APS_STATUS_REG1 0x00B600B0
+#define QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_APS_STATUS_REG1 0x00B60044
+
+#define QCA6390_WLAON_WCSSAON_PCIE_SR_STATUS_HI_REG 0x01F806C4
+#define QCA6390_WLAON_SOC_POWER_CTRL 0x01F80000
+#define QCA6390_WLAON_PCIE_PWR_CTRL_REG 0x01F806BC
+#define QCA6390_WLAON_WCSSAON_PCIE_SR_STATUS_LO_REG 0x1F806C8
+#define QCA6390_WLAON_WCSS_TCSR_PMM_SR_STATUS_HI_REG 0x1F806CC
+#define QCA6390_WLAON_WCSS_TCSR_PMM_SR_STATUS_LO_REG 0x1F806D0
+#define QCA6390_WLAON_SOC_PWR_WDG_BARK_THRSHD 0x1F80004
+#define QCA6390_WLAON_SOC_PWR_WDG_BITE_THRSHD 0x1F80008
+#define QCA6390_WLAON_SW_COLD_RESET 0x1F8000C
+#define QCA6390_WLAON_RFA_MEM_SLP_NRET_N_OVERRIDE 0x1F8001C
+#define QCA6390_WLAON_GDSC_DELAY_SETTING 0x1F80024
+#define QCA6390_WLAON_GDSC_DELAY_SETTING2 0x1F80028
+#define QCA6390_WLAON_WL_PWR_STATUS_REG 0x1F8002C
+#define QCA6390_WLAON_WL_AON_DBG_CFG_REG 0x1F80030
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL1 0x1F80100
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL6 0x1F80108
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL7 0x1F8010C
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL3 0x1F80118
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL4 0x1F8011C
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL5 0x1F80120
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL8 0x1F801F0
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL2 0x1F801F4
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL9 0x1F801F8
+#define QCA6390_WLAON_RTC_CLK_CAL_CTRL1 0x1F80200
+#define QCA6390_WLAON_RTC_CLK_CAL_CTRL2 0x1F80204
+#define QCA6390_WLAON_RTC_CLK_CAL_CTRL3 0x1F80208
+#define QCA6390_WLAON_RTC_CLK_CAL_CTRL4 0x1F8020C
+#define QCA6390_WLAON_RTC_CLK_CAL_CTRL5 0x1F80210
+#define QCA6390_WLAON_RTC_CLK_CAL_CTRL6 0x1F80214
+#define QCA6390_WLAON_RTC_CLK_CAL_CTRL7 0x1F80218
+#define QCA6390_WLAON_RTC_CLK_CAL_CTRL8 0x1F8021C
+#define QCA6390_WLAON_RTC_CLK_CAL_CTRL9 0x1F80220
+#define QCA6390_WLAON_WCSSAON_CONFIG_REG 0x1F80300
+#define QCA6390_WLAON_WLAN_OEM_DEBUG_REG 0x1F80304
+#define QCA6390_WLAON_WLAN_RAM_DUMP_REG 0x1F80308
+#define QCA6390_WLAON_QDSS_WCSS_REG 0x1F8030C
+#define QCA6390_WLAON_QDSS_WCSS_ACK 0x1F80310
+#define QCA6390_WLAON_WL_CLK_CNTL_KDF_REG 0x1F80314
+#define QCA6390_WLAON_WL_CLK_CNTL_PMU_HFRC_REG 0x1F80318
+#define QCA6390_WLAON_QFPROM_PWR_CTRL_REG 0x1F8031C
+#define QCA6390_WLAON_DLY_CONFIG 0x1F80400
+#define QCA6390_WLAON_WLAON_Q6_IRQ_REG 0x1F80404
+#define QCA6390_WLAON_PCIE_INTF_SW_CFG_REG 0x1F80408
+#define QCA6390_WLAON_PCIE_INTF_STICKY_SW_CFG_REG 0x1F8040C
+#define QCA6390_WLAON_PCIE_INTF_PHY_SW_CFG_REG 0x1F80410
+#define QCA6390_WLAON_PCIE_INTF_PHY_NOCSR_SW_CFG_REG 0x1F80414
+#define QCA6390_WLAON_Q6_COOKIE_BIT 0x1F80500
+#define QCA6390_WLAON_WARM_SW_ENTRY 0x1F80504
+#define QCA6390_WLAON_RESET_DBG_SW_ENTRY 0x1F80508
+#define QCA6390_WLAON_WL_PMUNOC_CFG_REG 0x1F8050C
+#define QCA6390_WLAON_RESET_CAUSE_CFG_REG 0x1F80510
+#define QCA6390_WLAON_SOC_WCSSAON_WAKEUP_IRQ_7_EN_REG 0x1F80514
+#define QCA6390_WLAON_DEBUG 0x1F80600
+#define QCA6390_WLAON_SOC_PARAMETERS 0x1F80604
+#define QCA6390_WLAON_WLPM_SIGNAL 0x1F80608
+#define QCA6390_WLAON_SOC_RESET_CAUSE_REG 0x1F8060C
+#define QCA6390_WLAON_WAKEUP_PCIE_SOC_REG 0x1F80610
+#define QCA6390_WLAON_PBL_STACK_CANARY 0x1F80614
+#define QCA6390_WLAON_MEM_TOT_NUM_GRP_REG 0x1F80618
+#define QCA6390_WLAON_MEM_TOT_BANKS_IN_GRP0_REG 0x1F8061C
+#define QCA6390_WLAON_MEM_TOT_BANKS_IN_GRP1_REG 0x1F80620
+#define QCA6390_WLAON_MEM_TOT_BANKS_IN_GRP2_REG 0x1F80624
+#define QCA6390_WLAON_MEM_TOT_BANKS_IN_GRP3_REG 0x1F80628
+#define QCA6390_WLAON_MEM_TOT_SIZE_IN_GRP0_REG 0x1F8062C
+#define QCA6390_WLAON_MEM_TOT_SIZE_IN_GRP1_REG 0x1F80630
+#define QCA6390_WLAON_MEM_TOT_SIZE_IN_GRP2_REG 0x1F80634
+#define QCA6390_WLAON_MEM_TOT_SIZE_IN_GRP3_REG 0x1F80638
+#define QCA6390_WLAON_MEM_SLP_NRET_OVERRIDE_GRP0_REG 0x1F8063C
+#define QCA6390_WLAON_MEM_SLP_NRET_OVERRIDE_GRP1_REG 0x1F80640
+#define QCA6390_WLAON_MEM_SLP_NRET_OVERRIDE_GRP2_REG 0x1F80644
+#define QCA6390_WLAON_MEM_SLP_NRET_OVERRIDE_GRP3_REG 0x1F80648
+#define QCA6390_WLAON_MEM_SLP_RET_OVERRIDE_GRP0_REG 0x1F8064C
+#define QCA6390_WLAON_MEM_SLP_RET_OVERRIDE_GRP1_REG 0x1F80650
+#define QCA6390_WLAON_MEM_SLP_RET_OVERRIDE_GRP2_REG 0x1F80654
+#define QCA6390_WLAON_MEM_SLP_RET_OVERRIDE_GRP3_REG 0x1F80658
+#define QCA6390_WLAON_MEM_CNT_SEL_REG 0x1F8065C
+#define QCA6390_WLAON_MEM_NO_EXTBHS_REG 0x1F80660
+#define QCA6390_WLAON_MEM_DEBUG_REG 0x1F80664
+#define QCA6390_WLAON_MEM_DEBUG_BUS_REG 0x1F80668
+#define QCA6390_WLAON_MEM_REDUN_CFG_REG 0x1F8066C
+#define QCA6390_WLAON_WL_AON_SPARE2 0x1F80670
+#define QCA6390_WLAON_VSEL_CFG_FOR_WL_RET_DISABLE_REG 0x1F80680
+#define QCA6390_WLAON_BTFM_WLAN_IPC_STATUS_REG 0x1F80690
+#define QCA6390_WLAON_MPM_COUNTER_CHICKEN_BITS 0x1F806A0
+#define QCA6390_WLAON_WLPM_CHICKEN_BITS 0x1F806A4
+#define QCA6390_WLAON_PCIE_PHY_PWR_REG 0x1F806A8
+#define QCA6390_WLAON_WL_CLK_CNTL_PMU_LPO2M_REG 0x1F806AC
+#define QCA6390_WLAON_WL_SS_ROOT_CLK_SWITCH_REG 0x1F806B0
+#define QCA6390_WLAON_POWERCTRL_PMU_REG 0x1F806B4
+#define QCA6390_WLAON_POWERCTRL_MEM_REG 0x1F806B8
+#define QCA6390_WLAON_SOC_PWR_PROFILE_REG 0x1F806C0
+#define QCA6390_WLAON_MEM_SVS_CFG_REG 0x1F806D4
+#define QCA6390_WLAON_CMN_AON_MISC_REG 0x1F806D8
+#define QCA6390_WLAON_INTR_STATUS 0x1F80700
+
+#define QCA6390_SYSPM_SYSPM_PWR_STATUS 0x1F82000
+#define QCA6390_SYSPM_DBG_BTFM_AON_REG 0x1F82004
+#define QCA6390_SYSPM_DBG_BUS_SEL_REG 0x1F82008
+#define QCA6390_SYSPM_WCSSAON_SR_STATUS 0x1F8200C
+
+#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 8b7d70e..3fe7605 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -724,7 +724,7 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
 
 	for (i = 0; i < n_profiles; i++) {
 		/* the tables start at element 3 */
-		static int pos = 3;
+		int pos = 3;
 
 		/* The EWRD profiles officially go from 2 to 4, but we
 		 * save them in sar_profiles[1-3] (because we don't
@@ -836,6 +836,22 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
 	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
 }
 
+static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
+{
+	/*
+	 * The GEO_TX_POWER_LIMIT command is not supported on earlier
+	 * firmware versions.  Unfortunately, we don't have a TLV API
+	 * flag to rely on, so rely on the major version which is in
+	 * the first byte of ucode_ver.  This was implemented
+	 * initially on version 38 and then backported to 36, 29 and
+	 * 17.
+	 */
+	return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
+	       IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
+	       IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
+	       IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
+}
+
 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
 {
 	struct iwl_geo_tx_power_profiles_resp *resp;
@@ -851,6 +867,9 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
 		.data = { &geo_cmd },
 	};
 
+	if (!iwl_mvm_sar_geo_support(mvm))
+		return -EOPNOTSUPP;
+
 	ret = iwl_mvm_send_cmd(mvm, &cmd);
 	if (ret) {
 		IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
@@ -876,13 +895,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
 	int ret, i, j;
 	u16 cmd_wide_id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
 
-	/*
-	 * This command is not supported on earlier firmware versions.
-	 * Unfortunately, we don't have a TLV API flag to rely on, so
-	 * rely on the major version which is in the first byte of
-	 * ucode_ver.
-	 */
-	if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
+	if (!iwl_mvm_sar_geo_support(mvm))
 		return 0;
 
 	ret = iwl_mvm_sar_get_wgds_table(mvm);
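
Centralizing the firmware check in iwl_mvm_sar_geo_support() keeps the two call paths consistent while preserving their different failure semantics: iwl_mvm_sar_geo_init() treats unsupported firmware as a silent no-op (returns 0), whereas iwl_mvm_get_sar_geo_profile() now reports -EOPNOTSUPP so its caller can tell unsupported firmware from a real command failure. A sketch of a caller making that distinction; the function below is illustrative, not from the driver:

    /* Illustrative caller of iwl_mvm_get_sar_geo_profile(). */
    static void example_geo_query(struct iwl_mvm *mvm)
    {
    	int ret = iwl_mvm_get_sar_geo_profile(mvm);

    	if (ret == -EOPNOTSUPP)
    		return;		/* firmware too old, not an error */
    	if (ret < 0)
    		IWL_ERR(mvm, "geo profile query failed %d\n", ret);
    }
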
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 93f0d38..42fdb79 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -403,6 +403,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 					 DMA_TO_DEVICE);
 	}
 
+	meta->tbs = 0;
+
 	if (trans->cfg->use_tfh) {
 		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
 
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index b025ba1..e39bb5c 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -124,6 +124,7 @@ enum {
 
 #define MWIFIEX_MAX_TOTAL_SCAN_TIME	(MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
 
+#define WPA_GTK_OUI_OFFSET				2
 #define RSN_GTK_OUI_OFFSET				2
 
 #define MWIFIEX_OUI_NOT_PRESENT			0
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 6dd771c..ed27147 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -181,7 +181,8 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
 	u8 ret = MWIFIEX_OUI_NOT_PRESENT;
 
 	if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) {
-		iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
+		iebody = (struct ie_body *)((u8 *)bss_desc->bcn_wpa_ie->data +
+					    WPA_GTK_OUI_OFFSET);
 		oui = &mwifiex_wpa_oui[cipher][0];
 		ret = mwifiex_search_oui_in_ie(iebody, oui);
 		if (ret)
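
The mwifiex fix mirrors the existing RSN handling: judging by the offset it applies, bcn_wpa_ie->data points at the two-byte version field of the WPA IE body, and the cipher-OUI search must start past it, at the group cipher suite, just as RSN_GTK_OUI_OFFSET already arranged for RSN IEs. A sketch of the layout the offset skips; the struct is illustrative, not a mwifiex type:

    /* WPA vendor IE body after the OUI/type header (per the WPA spec). */
    struct wpa_ie_body {
    	__le16 version;		/* the 2 bytes WPA_GTK_OUI_OFFSET skips */
    	u8 group_cipher[4];	/* OUI + cipher type: the search target */
    	__le16 pairwise_count;	/* followed by pairwise suites ... */
    } __packed;
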
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index d5081ff..1c84910 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -925,6 +925,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
 			nskb = xenvif_alloc_skb(0);
 			if (unlikely(nskb == NULL)) {
+				skb_shinfo(skb)->nr_frags = 0;
 				kfree_skb(skb);
 				xenvif_tx_err(queue, &txreq, extra_count, idx);
 				if (net_ratelimit())
@@ -940,6 +941,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
 			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
 				/* Failure in xenvif_set_skb_gso is fatal. */
+				skb_shinfo(skb)->nr_frags = 0;
 				kfree_skb(skb);
 				kfree_skb(nskb);
 				break;
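
Both error paths patched above run after skb_shinfo(skb)->nr_frags was speculatively set to MAX_SKB_FRAGS, before the frag array was populated. Freeing the skb in that state lets skb_release_data() walk MAX_SKB_FRAGS uninitialized frag entries and put garbage pages; zeroing the count first keeps the free safe. The shape of the fix, as a sketch with a hypothetical helper:

    #include <linux/skbuff.h>

    /* Free an skb whose frag count was bumped before frags[] was
     * filled in (hypothetical helper mirroring the hunks above).
     */
    static void drop_half_built_skb(struct sk_buff *skb)
    {
    	/* frags[] was never populated; the count must not be
    	 * trusted during release, so clear it before freeing.
    	 */
    	skb_shinfo(skb)->nr_frags = 0;
    	kfree_skb(skb);
    }
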
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index 2c8f425..7581eb8 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -808,7 +808,7 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
 	}
 
 	nci_reset_ntf = kzalloc(NCI_RESET_NTF_LEN + 1,  GFP_DMA | GFP_KERNEL);
-	if (!nci_reset_rsp) {
+	if (!nci_reset_ntf) {
 		ret = -ENOMEM;
 		goto done;
 	}
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 260248fb..a11e210 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -20,11 +20,6 @@ module_param(multipath, bool, 0444);
 MODULE_PARM_DESC(multipath,
 	"turn on native support for multiple controllers per subsystem");
 
-inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
-{
-	return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3));
-}
-
 /*
  * If multipathing is enabled we need to always use the subsystem instance
  * number for numbering our devices to avoid conflicts between subsystems that
@@ -516,7 +511,8 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 {
 	int error;
 
-	if (!nvme_ctrl_use_ana(ctrl))
+	/* check if multipath is enabled and we have the capability */
+	if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & (1 << 3)))
 		return 0;
 
 	ctrl->anacap = id->anacap;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index e82cdae..d5e29b5 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -464,7 +464,11 @@ extern const struct attribute_group nvme_ns_id_attr_group;
 extern const struct block_device_operations nvme_ns_head_ops;
 
 #ifdef CONFIG_NVME_MULTIPATH
-bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl);
+static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
+{
+	return ctrl->ana_log_buf != NULL;
+}
+
 void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
 			struct nvme_ctrl *ctrl, int *flags);
 void nvme_failover_req(struct request *req);
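
Basing nvme_ctrl_use_ana() on ctrl->ana_log_buf rather than re-testing the CMIC ANA bit means the predicate is true only when nvme_mpath_init() actually set ANA state up; if init bailed (multipath off, no subsystem, or no ANA capability), the buffer stays NULL and every ANA path is skipped instead of dereferencing it. A sketch of the guarantee the new inline gives its callers; the function and its second argument are illustrative:

    #include <linux/string.h>

    /* Illustrative caller: ANA work is gated on the buffer existing. */
    static void example_copy_ana_log(struct nvme_ctrl *ctrl, void *dst)
    {
    	if (!nvme_ctrl_use_ana(ctrl))
    		return;		/* ana_log_buf == NULL: ANA never set up */

    	/* safe: the predicate guarantees the log buffer is allocated
    	 * (ana_log_buf/ana_log_size per the 4.19 nvme_ctrl fields)
    	 */
    	memcpy(dst, ctrl->ana_log_buf, ctrl->ana_log_size);
    }
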
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h
index 88253ff..de03667 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h
@@ -79,6 +79,7 @@
 #define UFS_PHY_RX_HSGEAR_CAPABILITY		PHY_OFF(0xB4)
 #define UFS_PHY_RX_MIN_HIBERN8_TIME		PHY_OFF(0x150)
 #define UFS_PHY_BIST_FIXED_PAT_CTRL		PHY_OFF(0x60)
+#define UFS_PHY_RX_SIGDET_CTRL1			PHY_OFF(0x154)
 
 /* UFS PHY TX registers */
 #define QSERDES_TX0_PWM_GEAR_1_DIVIDER_BAND0_1	TX_OFF(0, 0x168)
@@ -270,6 +271,7 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_no_g4[] = {
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_HS_GEAR_BAND, 0x06),
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_HSGEAR_CAPABILITY, 0x03),
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_HSGEAR_CAPABILITY, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL1, 0x0E),
 };
 
 static struct ufs_qcom_phy_calibration phy_cal_table_2nd_lane_no_g4[] = {
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index 0e3fc66..cac0034 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -486,12 +486,12 @@ static uint16_t gsi_get_complete_num(struct gsi_ring_ctx *ctx, uint64_t addr1,
 	GSIDBG_LOW("gsi base addr 0x%llx end addr 0x%llx\n",
 		ctx->base, ctx->end);
 
-	if (addr1 < ctx->base || addr1 >= ctx->end) {
+	if (unlikely(addr1 < ctx->base || addr1 >= ctx->end)) {
 		GSIERR("address = 0x%llx not in range\n", addr1);
 		GSI_ASSERT();
 	}
 
-	if (addr2 < ctx->base || addr2 >= ctx->end) {
+	if (unlikely(addr2 < ctx->base || addr2 >= ctx->end)) {
 		GSIERR("address = 0x%llx not in range\n", addr2);
 		GSI_ASSERT();
 	}
@@ -512,14 +512,14 @@ static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
 	uint64_t rp;
 
 	ch_id = evt->chid;
-	if (WARN_ON(ch_id >= gsi_ctx->max_ch)) {
+	if (unlikely(WARN_ON(ch_id >= gsi_ctx->max_ch))) {
 		GSIERR("Unexpected ch %d\n", ch_id);
 		return;
 	}
 
 	ch_ctx = &gsi_ctx->chan[ch_id];
-	if (WARN_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI &&
-		ch_ctx->props.prot != GSI_CHAN_PROT_GCI))
+	if (unlikely(WARN_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI &&
+		ch_ctx->props.prot != GSI_CHAN_PROT_GCI)))
 		return;
 
 	if (evt->type != GSI_XFER_COMPL_TYPE_GCI) {
@@ -555,7 +555,7 @@ static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
 	notify->bytes_xfered = evt->len;
 
 	if (callback) {
-		if (atomic_read(&ch_ctx->poll_mode)) {
+		if (unlikely(atomic_read(&ch_ctx->poll_mode))) {
 			GSIERR("Calling client callback in polling mode\n");
 			WARN_ON(1);
 		}
@@ -633,7 +633,8 @@ static void gsi_handle_ieob(int ee)
 
 	for (i = 0; i < GSI_STTS_REG_BITS; i++) {
 		if ((1 << i) & ch & msk) {
-			if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) {
+			if (unlikely(i >= gsi_ctx->max_ev
+				|| i >= GSI_EVT_RING_MAX)) {
 				GSIERR("invalid event %d\n", i);
 				break;
 			}
@@ -646,7 +647,8 @@ static void gsi_handle_ieob(int ee)
 			if (ctx->props.intr == GSI_INTR_MSI)
 				continue;
 
-			if (ctx->props.intf != GSI_EVT_CHTYPE_GPI_EV) {
+			if (unlikely(ctx->props.intf !=
+				GSI_EVT_CHTYPE_GPI_EV)) {
 				GSIERR("Unexpected irq intf %d\n",
 					ctx->props.intf);
 				GSI_ASSERT();
@@ -780,7 +782,7 @@ static void gsi_handle_irq(void)
 		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK)
 			gsi_handle_general(ee);
 
-		if (++cnt > GSI_ISR_MAX_ITER) {
+		if (unlikely(++cnt > GSI_ISR_MAX_ITER)) {
 			/*
 			 * Max number of spurious interrupts from hardware.
 			 * Unexpected hardware state.
@@ -942,17 +944,17 @@ int gsi_complete_clk_grant(unsigned long dev_hdl)
 {
 	unsigned long flags;
 
-	if (!gsi_ctx) {
+	if (unlikely(!gsi_ctx)) {
 		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
 		return -GSI_STATUS_NODEV;
 	}
 
-	if (!gsi_ctx->per_registered) {
+	if (unlikely(!gsi_ctx->per_registered)) {
 		GSIERR("no client registered\n");
 		return -GSI_STATUS_INVALID_PARAMS;
 	}
 
-	if (dev_hdl != (uintptr_t)gsi_ctx) {
+	if (unlikely(dev_hdl != (uintptr_t)gsi_ctx)) {
 		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
 				gsi_ctx);
 		return -GSI_STATUS_INVALID_PARAMS;
@@ -1871,19 +1873,19 @@ int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value)
 {
 	struct gsi_evt_ctx *ctx;
 
-	if (!gsi_ctx) {
+	if (unlikely(!gsi_ctx)) {
 		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
 		return -GSI_STATUS_NODEV;
 	}
 
-	if (evt_ring_hdl >= gsi_ctx->max_ev) {
+	if (unlikely(evt_ring_hdl >= gsi_ctx->max_ev)) {
 		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
 		return -GSI_STATUS_INVALID_PARAMS;
 	}
 
 	ctx = &gsi_ctx->evtr[evt_ring_hdl];
 
-	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
+	if (unlikely(ctx->state != GSI_EVT_RING_STATE_ALLOCATED)) {
 		GSIERR("bad state %d\n",
 				gsi_ctx->evtr[evt_ring_hdl].state);
 		return -GSI_STATUS_UNSUPPORTED_OP;
@@ -1901,19 +1903,19 @@ int gsi_ring_ch_ring_db(unsigned long chan_hdl, uint64_t value)
 	struct gsi_chan_ctx *ctx;
 	uint32_t val;
 
-	if (!gsi_ctx) {
+	if (unlikely(!gsi_ctx)) {
 		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
 		return -GSI_STATUS_NODEV;
 	}
 
-	if (chan_hdl >= gsi_ctx->max_ch) {
+	if (unlikely(chan_hdl >= gsi_ctx->max_ch)) {
 		GSIERR("bad chan_hdl=%lu\n", chan_hdl);
 		return -GSI_STATUS_INVALID_PARAMS;
 	}
 
 	ctx = &gsi_ctx->chan[chan_hdl];
 
-	if (ctx->state != GSI_CHAN_STATE_STARTED) {
+	if (unlikely(ctx->state != GSI_CHAN_STATE_STARTED)) {
 		GSIERR("bad state %d\n", ctx->state);
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
@@ -3384,8 +3386,18 @@ int __gsi_get_gci_cookie(struct gsi_chan_ctx *ctx, uint16_t idx)
 		}
 	}
 
-	/* TODO: Increase escape buffer size if we hit this */
-	GSIERR("user_data is full\n");
+	/* Go over original userdata when escape buffer is full (costly) */
+	GSIDBG("escape buffer is full\n");
+	for (i = 0; i < end; i++) {
+		if (!ctx->user_data[i].valid) {
+			ctx->user_data[i].valid = true;
+			return i;
+		}
+	}
+
+	/* Everything is full (possibly a stall) */
+	GSIERR("both userdata array and escape buffer are full\n");
+	BUG();
 	return 0xFFFF;
 }
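
For context on the fallback added above: GCI cookies index ctx->user_data[], which (per the bound checked in __gsi_populate_gci_tre() below) spans max_num_elem ring slots plus a GSI_VEID_MAX escape region. The old code gave up once the escape region filled; the new code also scans the primary region and only BUG()s when every slot is taken. A simplified model of the allocation order, with an illustrative helper rather than the driver's function:

    /* Simplified cookie allocator: escape region first, then any
     * free primary slot (bool per <linux/types.h>).
     */
    static int alloc_cookie(bool *valid, unsigned int prim, unsigned int esc)
    {
    	unsigned int i;

    	for (i = prim; i < prim + esc; i++) {	/* escape region */
    		if (!valid[i]) {
    			valid[i] = true;
    			return i;
    		}
    	}
    	for (i = 0; i < prim; i++) {		/* new: primary region */
    		if (!valid[i]) {
    			valid[i] = true;
    			return i;
    		}
    	}
    	return -1;	/* all slots busy: the driver BUG()s here */
    }
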
 
@@ -3397,13 +3409,13 @@ int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx,
 	uint16_t idx;
 
 	memset(&gci_tre, 0, sizeof(gci_tre));
-	if (xfer->addr & 0xFFFFFF0000000000) {
+	if (unlikely(xfer->addr & 0xFFFFFF0000000000)) {
 		GSIERR("chan_hdl=%u add too large=%llx\n",
 			ctx->props.ch_id, xfer->addr);
 		return -EINVAL;
 	}
 
-	if (xfer->type != GSI_XFER_ELEM_DATA) {
+	if (unlikely(xfer->type != GSI_XFER_ELEM_DATA)) {
 		GSIERR("chan_hdl=%u bad RE type=%u\n", ctx->props.ch_id,
 			xfer->type);
 		return -EINVAL;
@@ -3417,12 +3429,12 @@ int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx,
 	gci_tre.buf_len = xfer->len;
 	gci_tre.re_type = GSI_RE_COAL;
 	gci_tre.cookie = __gsi_get_gci_cookie(ctx, idx);
-	if (gci_tre.cookie > (ctx->ring.max_num_elem + GSI_VEID_MAX))
+	if (unlikely(gci_tre.cookie > (ctx->ring.max_num_elem + GSI_VEID_MAX)))
 		return -EPERM;
 
 	/* write the TRE to ring */
 	*tre_gci_ptr = gci_tre;
-	ctx->user_data[idx].p = xfer->xfer_user_data;
+	ctx->user_data[gci_tre.cookie].p = xfer->xfer_user_data;
 
 	return 0;
 }
@@ -3476,21 +3488,29 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
 	spinlock_t *slock;
 	unsigned long flags;
 
-	if (!gsi_ctx) {
+	if (unlikely(!gsi_ctx)) {
 		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
 		return -GSI_STATUS_NODEV;
 	}
 
-	if (chan_hdl >= gsi_ctx->max_ch || (num_xfers && !xfer)) {
+	if (unlikely(chan_hdl >= gsi_ctx->max_ch || (num_xfers && !xfer))) {
 		GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%pK\n",
 				chan_hdl, num_xfers, xfer);
 		return -GSI_STATUS_INVALID_PARAMS;
 	}
 
+	if (unlikely(gsi_ctx->chan[chan_hdl].state ==
+		     GSI_CHAN_STATE_NOT_ALLOCATED)) {
+		GSIERR("bad state %d\n", gsi_ctx->chan[chan_hdl].state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
 	ctx = &gsi_ctx->chan[chan_hdl];
 
-	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
-			ctx->props.prot != GSI_CHAN_PROT_GCI) {
+	if (unlikely(ctx->props.prot != GSI_CHAN_PROT_GPI &&
+			ctx->props.prot != GSI_CHAN_PROT_GCI)) {
 		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
@@ -3512,7 +3532,7 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
 	 */
 	if (ctx->props.prot != GSI_CHAN_PROT_GCI) {
 		__gsi_query_channel_free_re(ctx, &free);
-		if (num_xfers > free) {
+		if (unlikely(num_xfers > free)) {
 			GSIERR("chan_hdl=%lu num_xfers=%u free=%u\n",
 				chan_hdl, num_xfers, free);
 			spin_unlock_irqrestore(slock, flags);
@@ -3532,7 +3552,7 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
 		gsi_incr_ring_wp(&ctx->ring);
 	}
 
-	if (i != num_xfers) {
+	if (unlikely(i != num_xfers)) {
 		/* reject all the xfers */
 		ctx->ring.wp_local = wp_rollback;
 		spin_unlock_irqrestore(slock, flags);
@@ -3609,13 +3629,13 @@ int gsi_poll_n_channel(unsigned long chan_hdl,
 	int i;
 	unsigned long flags;
 
-	if (!gsi_ctx) {
+	if (unlikely(!gsi_ctx)) {
 		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
 		return -GSI_STATUS_NODEV;
 	}
 
-	if (chan_hdl >= gsi_ctx->max_ch || !notify ||
-	    !actual_num || expected_num <= 0) {
+	if (unlikely(chan_hdl >= gsi_ctx->max_ch || !notify ||
+	    !actual_num || expected_num <= 0)) {
 		GSIERR("bad params chan_hdl=%lu notify=%pK\n",
 			chan_hdl, notify);
 		GSIERR("actual_num=%pK expected_num=%d\n",
@@ -3626,13 +3646,13 @@ int gsi_poll_n_channel(unsigned long chan_hdl,
 	ctx = &gsi_ctx->chan[chan_hdl];
 	ee = gsi_ctx->per.ee;
 
-	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
-		ctx->props.prot != GSI_CHAN_PROT_GCI) {
+	if (unlikely(ctx->props.prot != GSI_CHAN_PROT_GPI &&
+		ctx->props.prot != GSI_CHAN_PROT_GCI)) {
 		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
 
-	if (!ctx->evtr) {
+	if (unlikely(!ctx->evtr)) {
 		GSIERR("no event ring associated chan_hdl=%lu\n", chan_hdl);
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
@@ -3690,25 +3710,25 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 	unsigned long flags;
 	enum gsi_chan_mode chan_mode;
 
-	if (!gsi_ctx) {
+	if (unlikely(!gsi_ctx)) {
 		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
 		return -GSI_STATUS_NODEV;
 	}
 
-	if (chan_hdl >= gsi_ctx->max_ch) {
+	if (unlikely(chan_hdl >= gsi_ctx->max_ch)) {
 		GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode);
 		return -GSI_STATUS_INVALID_PARAMS;
 	}
 
 	ctx = &gsi_ctx->chan[chan_hdl];
 
-	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
-		ctx->props.prot != GSI_CHAN_PROT_GCI) {
+	if (unlikely(ctx->props.prot != GSI_CHAN_PROT_GPI &&
+		ctx->props.prot != GSI_CHAN_PROT_GCI)) {
 		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
 
-	if (!ctx->evtr || !ctx->evtr->props.exclusive) {
+	if (unlikely(!ctx->evtr || !ctx->evtr->props.exclusive)) {
 		GSIERR("cannot configure mode on chan_hdl=%lu\n",
 				chan_hdl);
 		return -GSI_STATUS_UNSUPPORTED_OP;
@@ -3719,8 +3739,8 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 	else
 		curr = GSI_CHAN_MODE_CALLBACK;
 
-	if (mode == curr) {
-		GSIERR("already in requested mode %u chan_hdl=%lu\n",
+	if (unlikely(mode == curr)) {
+		GSIDBG("already in requested mode %u chan_hdl=%lu\n",
 				curr, chan_hdl);
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
@@ -3731,7 +3751,7 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 		gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
 			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(gsi_ctx->per.ee));
 		atomic_set(&ctx->poll_mode, mode);
-		if (ctx->props.prot == GSI_CHAN_PROT_GCI)
+		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan)
 			atomic_set(&ctx->evtr->chan->poll_mode, mode);
 		GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
 			ctx->evtr->id, mode);
@@ -3741,7 +3761,7 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 	if (curr == GSI_CHAN_MODE_POLL &&
 			mode == GSI_CHAN_MODE_CALLBACK) {
 		atomic_set(&ctx->poll_mode, mode);
-		if (ctx->props.prot == GSI_CHAN_PROT_GCI)
+		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan)
 			atomic_set(&ctx->evtr->chan->poll_mode, mode);
 		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0);
 		GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
diff --git a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
index ff70d64..f6fc8c7 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
@@ -584,7 +584,7 @@ static netdev_tx_t ecm_ipa_start_xmit
 	}
 
 	ret = ipa_pm_activate(ecm_ipa_ctx->pm_hdl);
-	if (ret) {
+	if (unlikely(ret)) {
 		ECM_IPA_DEBUG("Failed to activate PM client\n");
 		netif_stop_queue(net);
 		goto fail_pm_activate;
@@ -607,7 +607,7 @@ static netdev_tx_t ecm_ipa_start_xmit
 				, skb->protocol);
 
 	ret = ipa_tx_dp(ecm_ipa_ctx->ipa_to_usb_client, skb, NULL);
-	if (ret) {
+	if (unlikely(ret)) {
 		ECM_IPA_ERROR("ipa transmit failed (%d)\n", ret);
 		goto fail_tx_packet;
 	}
@@ -642,7 +642,7 @@ static void ecm_ipa_packet_receive_notify
 	int result;
 	unsigned int packet_len;
 
-	if (!skb) {
+	if (unlikely(!skb)) {
 		ECM_IPA_ERROR("Bad SKB received from IPA driver\n");
 		return;
 	}
@@ -655,7 +655,7 @@ static void ecm_ipa_packet_receive_notify
 		return;
 	}
 
-	if (evt != IPA_RECEIVE)	{
+	if (unlikely(evt != IPA_RECEIVE)) {
 		ECM_IPA_ERROR("A none IPA_RECEIVE event in ecm_ipa_receive\n");
 		return;
 	}
@@ -664,7 +664,7 @@ static void ecm_ipa_packet_receive_notify
 	skb->protocol = eth_type_trans(skb, ecm_ipa_ctx->net);
 
 	result = netif_rx(skb);
-	if (result)
+	if (unlikely(result))
 		ECM_IPA_ERROR("fail on netif_rx\n");
 	ecm_ipa_ctx->net->stats.rx_packets++;
 	ecm_ipa_ctx->net->stats.rx_bytes += packet_len;
@@ -1129,12 +1129,12 @@ static void ecm_ipa_tx_complete_notify
 	struct sk_buff *skb = (struct sk_buff *)data;
 	struct ecm_ipa_dev *ecm_ipa_ctx = priv;
 
-	if (!skb) {
+	if (unlikely(!skb)) {
 		ECM_IPA_ERROR("Bad SKB received from IPA driver\n");
 		return;
 	}
 
-	if (!ecm_ipa_ctx) {
+	if (unlikely(!ecm_ipa_ctx)) {
 		ECM_IPA_ERROR("ecm_ipa_ctx is NULL pointer\n");
 		return;
 	}
@@ -1144,7 +1144,7 @@ static void ecm_ipa_tx_complete_notify
 		skb->len, skb->protocol,
 		atomic_read(&ecm_ipa_ctx->outstanding_pkts));
 
-	if (evt != IPA_WRITE_DONE) {
+	if (unlikely(evt != IPA_WRITE_DONE)) {
 		ECM_IPA_ERROR("unsupported event on Tx callback\n");
 		return;
 	}
diff --git a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
index efdf97b..c74bbe1 100644
--- a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
@@ -800,7 +800,7 @@ int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata)
 	case ODU_BRIDGE_MODE_ROUTER:
 		/* Router mode - pass skb to IPA */
 		res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
-		if (res) {
+		if (unlikely(res)) {
 			ODU_BRIDGE_DBG("tx dp failed %d\n", res);
 			goto out;
 		}
@@ -813,7 +813,7 @@ int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata)
 		    ODU_BRIDGE_IS_QMI_ADDR(ipv6hdr->daddr)) {
 			ODU_BRIDGE_DBG_LOW("QMI packet\n");
 			skb_copied = skb_clone(skb, GFP_KERNEL);
-			if (!skb_copied) {
+			if (unlikely(!skb_copied)) {
 				ODU_BRIDGE_ERR("No memory\n");
 				return -ENOMEM;
 			}
@@ -834,13 +834,13 @@ int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata)
 			ODU_BRIDGE_DBG_LOW(
 				"Multicast pkt, send to APPS and IPA\n");
 			skb_copied = skb_clone(skb, GFP_KERNEL);
-			if (!skb_copied) {
+			if (unlikely(!skb_copied)) {
 				ODU_BRIDGE_ERR("No memory\n");
 				return -ENOMEM;
 			}
 
 			res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
-			if (res) {
+			if (unlikely(res)) {
 				ODU_BRIDGE_DBG("tx dp failed %d\n", res);
 				dev_kfree_skb(skb_copied);
 				goto out;
@@ -855,7 +855,7 @@ int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata)
 		}
 
 		res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
-		if (res) {
+		if (unlikely(res)) {
 			ODU_BRIDGE_DBG("tx dp failed %d\n", res);
 			goto out;
 		}
diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
index fd2eab5..3c2f3acf 100644
--- a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
@@ -940,7 +940,7 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb,
 	}
 
 	ret = ipa_pm_activate(rndis_ipa_ctx->pm_hdl);
-	if (ret) {
+	if (unlikely(ret)) {
 		RNDIS_IPA_DEBUG("Failed activate PM client\n");
 		netif_stop_queue(net);
 		goto fail_pm_activate;
@@ -959,7 +959,7 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb,
 	skb = rndis_encapsulate_skb(skb, rndis_ipa_ctx);
 	trace_rndis_tx_dp(skb->protocol);
 	ret = ipa_tx_dp(IPA_TO_USB_CLIENT, skb, NULL);
-	if (ret) {
+	if (unlikely(ret)) {
 		RNDIS_IPA_ERROR("ipa transmit failed (%d)\n", ret);
 		goto fail_tx_packet;
 	}
@@ -1006,7 +1006,7 @@ static void rndis_ipa_tx_complete_notify(
 
 	ret = 0;
 	NULL_CHECK_RETVAL(private);
-	if (ret)
+	if (unlikely(ret))
 		return;
 
 	trace_rndis_status_rcvd(skb->protocol);
@@ -1120,7 +1120,7 @@ static void rndis_ipa_packet_receive_notify(
 		return;
 	}
 
-	if (evt != IPA_RECEIVE)	{
+	if (unlikely(evt != IPA_RECEIVE)) {
 		RNDIS_IPA_ERROR("a none IPA_RECEIVE event in driver RX\n");
 		return;
 	}
@@ -1140,7 +1140,7 @@ static void rndis_ipa_packet_receive_notify(
 
 	trace_rndis_netif_ni(skb->protocol);
 	result = netif_rx_ni(skb);
-	if (result)
+	if (unlikely(result))
 		RNDIS_IPA_ERROR("fail on netif_rx_ni\n");
 	rndis_ipa_ctx->net->stats.rx_packets++;
 	rndis_ipa_ctx->net->stats.rx_bytes += packet_len;
@@ -1817,7 +1817,7 @@ static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb,
 	if (unlikely(skb_headroom(skb) < sizeof(rndis_template_hdr))) {
 		struct sk_buff *new_skb = skb_copy_expand(skb,
 			sizeof(rndis_template_hdr), 0, GFP_ATOMIC);
-		if (!new_skb) {
+		if (unlikely(!new_skb)) {
 			RNDIS_IPA_ERROR("no memory for skb expand\n");
 			return skb;
 		}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 77c0f2a..7bc80ac 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -55,6 +55,8 @@
 #include "ipa_trace.h"
 #include "ipa_odl.h"
 
+#define IPA_SUSPEND_BUSY_TIMEOUT (msecs_to_jiffies(10))
+
 /*
  * The following for adding code (ie. for EMULATION) not found on x86.
  */
@@ -118,7 +120,7 @@ static void ipa3_load_ipa_fw(struct work_struct *work);
 static DECLARE_WORK(ipa3_fw_loading_work, ipa3_load_ipa_fw);
 
 static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work);
-static DECLARE_WORK(ipa_dec_clients_disable_clks_on_wq_work,
+static DECLARE_DELAYED_WORK(ipa_dec_clients_disable_clks_on_wq_work,
 	ipa_dec_clients_disable_clks_on_wq);
 
 static int ipa3_ioctl_add_rt_rule_v2(unsigned long arg);
@@ -4811,8 +4813,16 @@ static void __ipa3_dec_client_disable_clks(void)
 	ret = atomic_sub_return(1, &ipa3_ctx->ipa3_active_clients.cnt);
 	if (ret > 0)
 		goto unlock_mutex;
-	ipa3_suspend_apps_pipes(true);
-	ipa3_disable_clks();
+	ret = ipa3_suspend_apps_pipes(true);
+	if (ret) {
+		/* HW is busy, retry after some time */
+		atomic_inc(&ipa3_ctx->ipa3_active_clients.cnt);
+		queue_delayed_work(ipa3_ctx->power_mgmt_wq,
+			&ipa_dec_clients_disable_clks_on_wq_work,
+			IPA_SUSPEND_BUSY_TIMEOUT);
+	} else {
+		ipa3_disable_clks();
+	}
 
 unlock_mutex:
 	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
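
The conversion to DECLARE_DELAYED_WORK is what makes the retry above possible: when ipa3_suspend_apps_pipes() reports the HW busy, the client count is re-incremented and the same work is re-queued IPA_SUSPEND_BUSY_TIMEOUT (10 ms) later, rather than disabling clocks under active pipes. The self-rescheduling pattern in isolation, with hypothetical names throughout:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    extern bool hw_is_busy(void);		/* stand-in for the suspend call */
    extern void disable_clocks(void);	/* stand-in for ipa3_disable_clks() */
    static struct workqueue_struct *my_wq;	/* hypothetical queue */

    static void my_shutdown_fn(struct work_struct *work);
    static DECLARE_DELAYED_WORK(my_shutdown_work, my_shutdown_fn);

    static void my_shutdown_fn(struct work_struct *work)
    {
    	if (hw_is_busy()) {
    		/* back off and retry instead of yanking clocks */
    		queue_delayed_work(my_wq, &my_shutdown_work,
    				   msecs_to_jiffies(10));
    		return;
    	}
    	disable_clocks();
    }
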
@@ -4865,8 +4875,8 @@ void ipa3_dec_client_disable_clks_no_block(
 	}
 
 	/* seems like this is the only client holding the clocks */
-	queue_work(ipa3_ctx->power_mgmt_wq,
-		&ipa_dec_clients_disable_clks_on_wq_work);
+	queue_delayed_work(ipa3_ctx->power_mgmt_wq,
+		&ipa_dec_clients_disable_clks_on_wq_work, 0);
 }
 
 /**
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index bd789b4..d0ee749 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -236,6 +236,10 @@ static void ipa3_send_nop_desc(struct work_struct *work)
 	struct ipa3_tx_pkt_wrapper *tx_pkt;
 
 	IPADBG_LOW("gsi send NOP for ch: %lu\n", sys->ep->gsi_chan_hdl);
+
+	if (atomic_read(&sys->workqueue_flushed))
+		return;
+
 	tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, GFP_KERNEL);
 	if (!tx_pkt) {
 		queue_work(sys->wq, &sys->work);
@@ -344,7 +348,7 @@ int ipa3_send(struct ipa3_sys_context *sys,
 	for (i = 0; i < num_desc; i++) {
 		tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache,
 					   GFP_ATOMIC);
-		if (!tx_pkt) {
+		if (unlikely(!tx_pkt)) {
 			IPAERR("failed to alloc tx wrapper\n");
 			result = -ENOMEM;
 			goto failure;
@@ -358,8 +362,8 @@ int ipa3_send(struct ipa3_sys_context *sys,
 
 		/* populate tag field */
 		if (desc[i].is_tag_status) {
-			if (ipa_populate_tag_field(&desc[i], tx_pkt,
-				&tag_pyld_ret)) {
+			if (unlikely(ipa_populate_tag_field(&desc[i], tx_pkt,
+				&tag_pyld_ret))) {
 				IPAERR("Failed to populate tag field\n");
 				result = -EFAULT;
 				goto failure_dma_map;
@@ -399,7 +403,8 @@ int ipa3_send(struct ipa3_sys_context *sys,
 				tx_pkt->no_unmap_dma = true;
 			}
 		}
-		if (dma_mapping_error(ipa3_ctx->pdev, tx_pkt->mem.phys_base)) {
+		if (unlikely(dma_mapping_error(ipa3_ctx->pdev,
+			tx_pkt->mem.phys_base))) {
 			IPAERR("failed to do dma map.\n");
 			result = -EFAULT;
 			goto failure_dma_map;
@@ -450,7 +455,7 @@ int ipa3_send(struct ipa3_sys_context *sys,
 	IPADBG_LOW("ch:%lu queue xfer\n", sys->ep->gsi_chan_hdl);
 	result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, num_desc,
 			gsi_xfer, true);
-	if (result != GSI_STATUS_SUCCESS) {
+	if (unlikely(result != GSI_STATUS_SUCCESS)) {
 		IPAERR_RL("GSI xfer failed.\n");
 		result = -EFAULT;
 		goto failure;
@@ -466,6 +471,7 @@ int ipa3_send(struct ipa3_sys_context *sys,
 
 	/* set the timer for sending the NOP descriptor */
 	if (send_nop) {
 		ktime_t time = ktime_set(0, IPA_TX_SEND_COMPL_NOP_DELAY_NS);
 
 		IPADBG_LOW("scheduling timer for ch %lu\n",
@@ -1268,6 +1274,8 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl)
 	if (IPA_CLIENT_IS_CONS(ep->client))
 		cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
 	flush_workqueue(ep->sys->wq);
+	if (IPA_CLIENT_IS_PROD(ep->client))
+		atomic_set(&ep->sys->workqueue_flushed, 1);
 
 	/* tear down the default pipe before we reset the channel*/
 	if (ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
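
The workqueue_flushed flag closes a teardown hazard: ipa3_send_nop_desc() re-queues itself on allocation failure (the queue_work() call in the hunk further up), so an instance scheduled after flush_workqueue() returns could still run against a pipe being torn down. With the flag set after the flush, any straggler bails at entry. The quiesce pattern in isolation, with hypothetical names:

    #include <linux/workqueue.h>
    #include <linux/atomic.h>

    extern bool resources_unavailable(void);	/* stand-in condition */
    static atomic_t flushed;			/* mirrors workqueue_flushed */

    static void nop_work_fn(struct work_struct *work)
    {
    	if (atomic_read(&flushed))
    		return;				/* teardown in progress */
    	if (resources_unavailable())
    		queue_work(system_wq, work);	/* self re-queue */
    }

    /* teardown side:
     *   flush_workqueue(wq);      drain instances queued so far
     *   atomic_set(&flushed, 1);  neuter anything queued afterwards
     */
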
@@ -1490,7 +1498,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
 		return -EINVAL;
 	}
 
-	if (skb->len == 0) {
+	if (unlikely(skb->len == 0)) {
 		IPAERR("packet size is 0\n");
 		return -EINVAL;
 	}
@@ -1506,7 +1514,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
 	 */
 	if (IPA_CLIENT_IS_CONS(dst)) {
 		src_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_PROD);
-		if (-1 == src_ep_idx) {
+		if (unlikely(-1 == src_ep_idx)) {
 			IPAERR("Client %u is not mapped\n",
 				IPA_CLIENT_APPS_LAN_PROD);
 			goto fail_gen;
@@ -1514,7 +1522,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
 		dst_ep_idx = ipa3_get_ep_mapping(dst);
 	} else {
 		src_ep_idx = ipa3_get_ep_mapping(dst);
-		if (-1 == src_ep_idx) {
+		if (unlikely(-1 == src_ep_idx)) {
 			IPAERR("Client %u is not mapped\n", dst);
 			goto fail_gen;
 		}
@@ -1526,7 +1534,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
 
 	sys = ipa3_ctx->ep[src_ep_idx].sys;
 
-	if (!sys || !sys->ep->valid) {
+	if (unlikely(!sys || !sys->ep->valid)) {
 		IPAERR_RL("pipe %d not valid\n", src_ep_idx);
 		goto fail_pipe_not_valid;
 	}
@@ -1547,7 +1555,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
 		gsi_ep->prefetch_mode == GSI_FREE_PRE_FETCH)
 		max_desc -= gsi_ep->prefetch_threshold;
 	if (num_frags + 3 > max_desc) {
-		if (skb_linearize(skb)) {
+		if (unlikely(skb_linearize(skb))) {
 			IPAERR("Failed to linear skb with %d frags\n",
 				num_frags);
 			goto fail_gen;
@@ -1561,7 +1569,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
 		 * 1 desc for each frag
 		 */
 		desc = kzalloc(sizeof(*desc) * (num_frags + 3), GFP_ATOMIC);
-		if (!desc) {
+		if (unlikely(!desc)) {
 			IPAERR("failed to alloc desc array\n");
 			goto fail_gen;
 		}
@@ -1623,7 +1631,8 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
 			desc[skb_idx].callback = NULL;
 		}
 
-		if (ipa3_send(sys, num_frags + data_idx, desc, true)) {
+		if (unlikely(ipa3_send(sys, num_frags + data_idx,
+		    desc, true))) {
 			IPAERR_RL("fail to send skb %pK num_frags %u SWP\n",
 				skb, num_frags);
 			goto fail_send;
@@ -1654,7 +1663,8 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
 			desc[data_idx].dma_address = meta->dma_address;
 		}
 		if (num_frags == 0) {
-			if (ipa3_send(sys, data_idx + 1, desc, true)) {
+			if (unlikely(ipa3_send(sys, data_idx + 1,
+				 desc, true))) {
 				IPAERR("fail to send skb %pK HWP\n", skb);
 				goto fail_mem;
 			}
@@ -1673,8 +1683,8 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
 			desc[data_idx+f].user2 = desc[data_idx].user2;
 			desc[data_idx].callback = NULL;
 
-			if (ipa3_send(sys, num_frags + data_idx + 1,
-				desc, true)) {
+			if (unlikely(ipa3_send(sys, num_frags + data_idx + 1,
+			    desc, true))) {
 				IPAERR("fail to send skb %pK num_frags %u\n",
 					skb, num_frags);
 				goto fail_mem;
@@ -1729,26 +1739,27 @@ static void ipa3_wq_repl_rx(struct work_struct *work)
 begin:
 	while (1) {
 		next = (curr + 1) % sys->repl->capacity;
-		if (next == atomic_read(&sys->repl->head_idx))
+		if (unlikely(next == atomic_read(&sys->repl->head_idx)))
 			goto fail_kmem_cache_alloc;
 
 		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
 					   flag);
-		if (!rx_pkt)
+		if (unlikely(!rx_pkt))
 			goto fail_kmem_cache_alloc;
 
 		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
 		rx_pkt->sys = sys;
 
 		rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
-		if (rx_pkt->data.skb == NULL)
+		if (unlikely(rx_pkt->data.skb == NULL))
 			goto fail_skb_alloc;
 
 		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
 		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
 						     sys->rx_buff_sz,
 						     DMA_FROM_DEVICE);
-		if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
+		if (unlikely(dma_mapping_error(ipa3_ctx->pdev,
+		    rx_pkt->data.dma_addr))) {
 			pr_err_ratelimited("%s dma map fail %pK for %pK sys=%pK\n",
 			       __func__, (void *)rx_pkt->data.dma_addr,
 			       ptr, sys);
@@ -1802,8 +1813,8 @@ static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
 	rx_pkt->page_data.dma_addr = dma_map_page(ipa3_ctx->pdev,
 			rx_pkt->page_data.page, 0,
 			rx_pkt->len, DMA_FROM_DEVICE);
-	if (dma_mapping_error(ipa3_ctx->pdev,
-		rx_pkt->page_data.dma_addr)) {
+	if (unlikely(dma_mapping_error(ipa3_ctx->pdev,
+		rx_pkt->page_data.dma_addr))) {
 		pr_err_ratelimited("%s dma map fail %pK for %pK\n",
 			__func__, (void *)rx_pkt->page_data.dma_addr,
 			rx_pkt->page_data.page);
@@ -1829,7 +1840,7 @@ static void ipa3_replenish_rx_page_cache(struct ipa3_sys_context *sys)
 
 	for (curr = 0; curr < sys->repl->capacity; curr++) {
 		rx_pkt = ipa3_alloc_rx_pkt_page(GFP_KERNEL, false);
-		if (!rx_pkt) {
+		if (unlikely(!rx_pkt)) {
 			IPAERR("ipa3_alloc_rx_pkt_page fails\n");
 			ipa_assert();
 			break;
@@ -1877,7 +1888,7 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
 			rx_pkt = ipa3_alloc_rx_pkt_page(flag, true);
 			if (!rx_pkt && flag == GFP_ATOMIC)
 				break;
-			else if (!rx_pkt)
+			else if (unlikely(!rx_pkt))
 				goto fail_kmem_cache_alloc;
 			rx_pkt->sys = sys;
 		}
@@ -1901,7 +1912,7 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
 		if (idx == IPA_REPL_XFER_MAX) {
 			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
 				gsi_xfer_elem_array, false);
-			if (ret != GSI_STATUS_SUCCESS) {
+			if (unlikely(ret != GSI_STATUS_SUCCESS)) {
 				/* we don't expect this will happen */
 				IPAERR("failed to provide buffer: %d\n", ret);
 				ipa_assert();
@@ -1913,7 +1924,7 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
 	/* only ring doorbell once here */
 	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
 			gsi_xfer_elem_array, true);
-	if (ret == GSI_STATUS_SUCCESS) {
+	if (likely(ret == GSI_STATUS_SUCCESS)) {
 		/* ensure write is done before setting head index */
 		mb();
 		atomic_set(&sys->repl->head_idx, curr);
@@ -1971,7 +1982,7 @@ static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys)
 			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
 				&gsi_xfer_elem_one, true);
 
-			if (ret) {
+			if (unlikely(ret)) {
 				IPAERR("failed to provide buffer: %d\n", ret);
 				goto fail_provide_rx_buffer;
 			}
@@ -3134,14 +3145,14 @@ static struct sk_buff *handle_skb_completion(struct gsi_chan_xfer_notify
 		rx_skb->truesize = rx_pkt->len + sizeof(struct sk_buff);
 	}
 
-	if (notify->veid >= GSI_VEID_MAX) {
+	if (unlikely(notify->veid >= GSI_VEID_MAX)) {
 		WARN_ON(1);
 		return NULL;
 	}
 
 	/* Assert when WAN consumer channel receives EOB event */
-	if (notify->evt_id == GSI_CHAN_EVT_EOB &&
-		sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) {
+	if (unlikely(notify->evt_id == GSI_CHAN_EVT_EOB &&
+		sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)) {
 		IPAERR("EOB event received on WAN consumer channel\n");
 		ipa_assert();
 	}
@@ -3265,13 +3276,13 @@ static void ipa3_wq_rx_common(struct ipa3_sys_context *sys,
 	struct ipa3_sys_context *coal_sys;
 	int ipa_ep_idx;
 
-	if (!notify) {
+	if (unlikely(!notify)) {
 		IPAERR_RL("gsi_chan_xfer_notify is null\n");
 		return;
 	}
 	rx_skb = handle_skb_completion(notify, true);
 
-	if (rx_skb) {
+	if (likely(rx_skb)) {
 		sys->pyld_hdlr(rx_skb, sys);
 
 		/* For coalescing, we have 2 transfer rings to replenish */
@@ -3279,7 +3290,7 @@ static void ipa3_wq_rx_common(struct ipa3_sys_context *sys,
 			ipa_ep_idx = ipa3_get_ep_mapping(
 					IPA_CLIENT_APPS_WAN_CONS);
 
-			if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+			if (unlikely(ipa_ep_idx == IPA_EP_NOT_ALLOCATED)) {
 				IPAERR("Invalid client.\n");
 				return;
 			}
@@ -3339,8 +3350,8 @@ static void ipa3_rx_napi_chain(struct ipa3_sys_context *sys,
 					 */
 					ipa_ep_idx = ipa3_get_ep_mapping(
 						IPA_CLIENT_APPS_WAN_CONS);
-					if (ipa_ep_idx ==
-						IPA_EP_NOT_ALLOCATED) {
+					if (unlikely(ipa_ep_idx ==
+						IPA_EP_NOT_ALLOCATED)) {
 						IPAERR("Invalid client.\n");
 						return;
 					}
@@ -3376,8 +3387,8 @@ static void ipa3_rx_napi_chain(struct ipa3_sys_context *sys,
 				 */
 				ipa_ep_idx = ipa3_get_ep_mapping(
 						IPA_CLIENT_APPS_WAN_CONS);
-				if (ipa_ep_idx ==
-					IPA_EP_NOT_ALLOCATED) {
+				if (unlikely(ipa_ep_idx ==
+					IPA_EP_NOT_ALLOCATED)) {
 					IPAERR("Invalid client.\n");
 					return;
 				}
@@ -3514,6 +3525,7 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 		sys->policy = IPA_POLICY_INTR_MODE;
 		sys->use_comm_evt_ring = true;
 		INIT_WORK(&sys->work, ipa3_send_nop_desc);
+		atomic_set(&sys->workqueue_flushed, 0);
 
 		/*
 		 * enable source notification status for exception packets
@@ -3543,6 +3555,7 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 			sys->policy = IPA_POLICY_INTR_MODE;
 			sys->use_comm_evt_ring = true;
 			INIT_WORK(&sys->work, ipa3_send_nop_desc);
+			atomic_set(&sys->workqueue_flushed, 0);
 		}
 	} else {
 		if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
@@ -4165,7 +4178,7 @@ static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
 {
 	struct ipa3_sys_context *sys;
 
-	if (!notify) {
+	if (unlikely(!notify)) {
 		IPAERR("gsi notify is NULL.\n");
 		return;
 	}
@@ -4197,7 +4210,7 @@ static void ipa_dma_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
 {
 	struct ipa3_sys_context *sys;
 
-	if (!notify) {
+	if (unlikely(!notify)) {
 		IPAERR("gsi notify is NULL.\n");
 		return;
 	}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 7bfe159..58c924f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -975,6 +975,7 @@ struct ipa3_repl_ctx {
 struct ipa3_sys_context {
 	u32 len;
 	atomic_t curr_polling_state;
+	atomic_t workqueue_flushed;
 	struct delayed_work switch_to_intr_work;
 	enum ipa3_sys_pipe_policy policy;
 	bool use_comm_evt_ring;
@@ -2914,7 +2915,7 @@ int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
 int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple);
 int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple);
 void ipa3_set_resorce_groups_min_max_limits(void);
-void ipa3_suspend_apps_pipes(bool suspend);
+int ipa3_suspend_apps_pipes(bool suspend);
 int ipa3_flt_read_tbl_from_hw(u32 pipe_idx,
 	enum ipa_ip_type ip_type,
 	bool hashable,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
index 80a3e2a..e5896ec 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
@@ -70,7 +70,7 @@
 #define IPA_POLL_FOR_EMPTINESS_SLEEP_USEC 20
 #define IPA_CHANNEL_STOP_IN_PROC_TO_MSEC 5
 #define IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC 200
-
+#define IPA_MHIP_HOLB_TMO 31 /* value to match granularity on ipa HW 4.5 */
 enum mhip_re_type {
 	MHIP_RE_XFER = 0x2,
 	MHIP_RE_NOP = 0x4,
@@ -424,8 +424,6 @@ static void ipa_mpm_change_teth_state(int probe_id,
 static void ipa_mpm_change_gsi_state(int probe_id,
 	enum ipa_mpm_mhip_chan mhip_chan,
 	enum ipa_mpm_gsi_state next_state);
-static int ipa_mpm_start_stop_ul_mhip_data_path(int probe_id,
-	enum ipa_mpm_start_stop_type start);
 static int ipa_mpm_probe(struct platform_device *pdev);
 static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
 	int probe_id);
@@ -435,6 +433,7 @@ static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
 	enum ipa_mpm_mhip_chan mhip_chan,
 	int probe_id,
 	enum ipa_mpm_start_stop_type start_stop);
+static int ipa_mpm_start_mhip_holb_tmo(u32 clnt_hdl);
 
 static struct mhi_driver mhi_driver = {
 	.id_table = mhi_driver_match_table,
@@ -500,6 +499,17 @@ static int ipa_mpm_set_dma_mode(enum ipa_client_type src_pipe,
 	return result;
 }
 
+static int ipa_mpm_start_mhip_holb_tmo(u32 clnt_hdl)
+{
+	struct ipa_ep_cfg_holb holb_cfg;
+
+	memset(&holb_cfg, 0, sizeof(holb_cfg));
+	holb_cfg.en = IPA_HOLB_TMR_EN;
+	/* 31 ms timer, which is less than tag timeout */
+	holb_cfg.tmr_val = IPA_MHIP_HOLB_TMO;
+	return ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
+}
+
 /**
  * ipa_mpm_smmu_map() - SMMU maps ring and the buffer pointer.
  * @va_addr: virtual address that needs to be mapped
@@ -532,7 +542,7 @@ static dma_addr_t ipa_mpm_smmu_map(void *va_addr,
 
 	/* check cache coherent */
 	if (ipa_mpm_ctx->dev_info.is_cache_coherent)  {
-		IPA_MPM_DBG(" enable cache coherent\n");
+		IPA_MPM_DBG_LOW(" enable cache coherent\n");
 		prot |= IOMMU_CACHE;
 	}
 
@@ -1034,6 +1044,21 @@ static int ipa_mpm_connect_mhip_gsi_pipe(enum ipa_client_type mhip_client,
 		goto fail_alloc_channel;
 	}
 
+	if (IPA_CLIENT_IS_CONS(mhip_client)) {
+		/*
+		 * Enable the HOLB timer once after bootup/SSR.
+		 * The HOLB timeout drops packets on MHIP if a stall
+		 * on the MHIP TX pipe exceeds the configured timeout.
+		 */
+		result = ipa_mpm_start_mhip_holb_tmo(ipa_ep_idx);
+		if (result) {
+			IPA_MPM_ERR("HOLB config failed for %d, fail = %d\n",
+				ipa_ep_idx, result);
+			goto fail_alloc_channel;
+		}
+	}
+
 	if (IPA_CLIENT_IS_PROD(mhip_client))
 		ipa_mpm_change_gsi_state(mhi_idx,
 			IPA_MPM_MHIP_CHAN_DL,
@@ -1326,11 +1351,9 @@ static void ipa_mpm_mhip_shutdown(int mhip_idx)
 
 	get_ipa3_client(mhip_idx, &ul_prod_chan, &dl_cons_chan);
 
-	if (mhip_idx != IPA_MPM_MHIP_CH_ID_2) {
+	if (mhip_idx != IPA_MPM_MHIP_CH_ID_2)
 		/* For DPL, stop only DL channel */
-		ipa_mpm_start_stop_ul_mhip_data_path(mhip_idx, MPM_MHIP_STOP);
 		ipa_mpm_clean_mhip_chan(mhip_idx, ul_prod_chan);
-	}
 
 	ipa_mpm_clean_mhip_chan(mhip_idx, dl_cons_chan);
 
@@ -1713,13 +1736,6 @@ int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
 		case MHIP_STATUS_SUCCESS:
 			ipa_mpm_ctx->md[probe_id].teth_state =
 						IPA_MPM_TETH_CONNECTED;
-			ret = ipa_mpm_start_stop_ul_mhip_data_path(
-						probe_id, MPM_MHIP_START);
-			if (ret) {
-				IPA_MPM_ERR("UL chan start err = %d\n", ret);
-				ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
-				return ret;
-			}
 			break;
 		case MHIP_STATUS_EP_NOT_READY:
 		case MHIP_STATUS_NO_OP:
@@ -1767,8 +1783,6 @@ int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
 		switch (status) {
 		case MHIP_STATUS_SUCCESS:
 			ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
-			ipa_mpm_start_stop_ul_mhip_data_path(probe_id,
-							MPM_MHIP_STOP);
 			break;
 		case MHIP_STATUS_NO_OP:
 		case MHIP_STATUS_EP_NOT_READY:
@@ -1881,64 +1895,6 @@ static void ipa_mpm_read_channel(enum ipa_client_type chan)
 		IPA_MPM_ERR("Reading of channel failed for ep %d\n", ep);
 }
 
-static int ipa_mpm_start_stop_ul_mhip_data_path(int probe_id,
-	enum ipa_mpm_start_stop_type start)
-{
-	int ipa_ep_idx;
-	int res = 0;
-	enum ipa_client_type ul_chan, dl_chan;
-
-	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
-		IPA_MPM_ERR("Unknown probe_id\n");
-		return 0;
-	}
-	get_ipa3_client(probe_id, &ul_chan, &dl_chan);
-	IPA_MPM_DBG("Start/Stop Data Path ? = %d\n", start);
-
-
-	/* MHIP Start Data path:
-	 * IPA MHIP Producer: remove HOLB
-	 * IPA MHIP Consumer : no op as there is no delay on these pipes.
-	 */
-	if (start) {
-		IPA_MPM_DBG("Enabling data path\n");
-		if (ul_chan != IPA_CLIENT_MAX) {
-			/* Remove HOLB on the producer pipe */
-			IPA_MPM_DBG("Removing HOLB on ep = %s\n",
-				__stringify(ul_chan));
-			ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
-
-			if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
-				IPAERR("failed to get idx");
-				return ipa_ep_idx;
-			}
-
-			res = ipa3_enable_data_path(ipa_ep_idx);
-			if (res)
-				IPA_MPM_ERR("Enable data path failed res=%d\n",
-					res);
-		}
-	} else {
-		IPA_MPM_DBG("Disabling data path\n");
-		if (ul_chan != IPA_CLIENT_MAX) {
-			/* Set HOLB on the producer pipe */
-			ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
-
-			if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
-				IPAERR("failed to get idx");
-				return ipa_ep_idx;
-			}
-
-			res = ipa3_disable_data_path(ipa_ep_idx);
-			if (res)
-				IPA_MPM_ERR("disable data path failed res=%d\n",
-					res);
-		}
-	}
-
-	return res;
-}
-
 /* ipa_mpm_mhi_probe_cb is received for each MHI/MHIP channel
  * Currently we have 4 MHI channels.
  */
@@ -2294,12 +2250,6 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 			/* No teth started yet, disable UL channel */
 			ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
 						probe_id, MPM_MHIP_STOP);
-			/* Disable data path */
-			if (ipa_mpm_start_stop_ul_mhip_data_path(probe_id,
-				MPM_MHIP_STOP)) {
-				IPA_MPM_ERR("MHIP Enable data path failed\n");
-				goto fail_start_channel;
-			}
 		}
 		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
 		break;
@@ -2308,14 +2258,6 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 		IPA_MPM_DBG("UL channel is already started, continue\n");
 		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
 
-		/* Enable data path */
-		if (ul_prod != IPA_CLIENT_MAX) {
-			if (ipa_mpm_start_stop_ul_mhip_data_path(probe_id,
-				MPM_MHIP_START)) {
-				IPA_MPM_ERR("MHIP Enable data path failed\n");
-				goto fail_start_channel;
-			}
-		}
 		/* Lift the delay for rmnet USB prod pipe */
 		if (probe_id == IPA_MPM_MHIP_CH_ID_1) {
 			pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
@@ -2477,8 +2419,8 @@ static void ipa_mpm_mhi_status_cb(struct mhi_device *mhi_dev,
 	case MHI_CB_PENDING_DATA:
 	case MHI_CB_SYS_ERROR:
 	case MHI_CB_FATAL_ERROR:
-	case MHI_CB_BW_REQ:
 	case MHI_CB_EE_MISSION_MODE:
+	case MHI_CB_DTR_SIGNAL:
 		IPA_MPM_ERR("unexpected event %d\n", mhi_cb);
 		break;
 	}
@@ -2596,8 +2538,6 @@ int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot xdci_teth_prot)
 	case MHIP_STATUS_SUCCESS:
 	case MHIP_STATUS_NO_OP:
 		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
-		ipa_mpm_start_stop_ul_mhip_data_path(probe_id,
-						MPM_MHIP_START);
 
 		pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
 
@@ -2717,7 +2657,6 @@ int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
 	case MHIP_STATUS_NO_OP:
 	case MHIP_STATUS_EP_NOT_READY:
 		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
-		ipa_mpm_start_stop_ul_mhip_data_path(probe_id, MPM_MHIP_STOP);
 		break;
 	case MHIP_STATUS_FAIL:
 	case MHIP_STATUS_BAD_STATE:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
index c73f32c..f35abd0 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
@@ -971,12 +971,13 @@ static int ipa_pm_activate_helper(struct ipa_pm_client *client, bool sync)
  */
 int ipa_pm_activate(u32 hdl)
 {
-	if (ipa_pm_ctx == NULL) {
+	if (unlikely(ipa_pm_ctx == NULL)) {
 		IPA_PM_ERR("PM_ctx is null\n");
 		return -EINVAL;
 	}
 
-	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
+	if (unlikely(hdl >= IPA_PM_MAX_CLIENTS ||
+		ipa_pm_ctx->clients[hdl] == NULL)) {
 		IPA_PM_ERR("Invalid Param\n");
 		return -EINVAL;
 	}
@@ -993,12 +994,13 @@ int ipa_pm_activate(u32 hdl)
  */
 int ipa_pm_activate_sync(u32 hdl)
 {
-	if (ipa_pm_ctx == NULL) {
+	if (unlikely(ipa_pm_ctx == NULL)) {
 		IPA_PM_ERR("PM_ctx is null\n");
 		return -EINVAL;
 	}
 
-	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
+	if (unlikely(hdl >= IPA_PM_MAX_CLIENTS ||
+		ipa_pm_ctx->clients[hdl] == NULL)) {
 		IPA_PM_ERR("Invalid Param\n");
 		return -EINVAL;
 	}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 983ff16..d6a057b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -3050,7 +3050,7 @@ int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
 	}
 
 	for (i = 0; i < num_buffers; i++) {
-		IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
+		IPADBG_LOW("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
 			&info[i].pa, info[i].iova, info[i].size);
 		info[i].result = ipa3_iommu_map(cb->iommu_domain,
 			rounddown(info[i].iova, PAGE_SIZE),
@@ -3080,7 +3080,7 @@ int ipa3_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
 	}
 
 	for (i = 0; i < num_buffers; i++) {
-		IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
+		IPADBG_LOW("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
 			&info[i].pa, info[i].iova, info[i].size);
 		info[i].result = iommu_unmap(cb->iommu_domain,
 			rounddown(info[i].iova, PAGE_SIZE),
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 197e99b..1eea72d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -39,7 +39,7 @@
 #define IPA_V4_0_CLK_RATE_NOMINAL (220 * 1000 * 1000UL)
 #define IPA_V4_0_CLK_RATE_TURBO (250 * 1000 * 1000UL)
 
-#define IPA_V3_0_MAX_HOLB_TMR_VAL (4294967296 - 1)
+#define IPA_MAX_HOLB_TMR_VAL (4294967296 - 1)
 
 #define IPA_V3_0_BW_THRESHOLD_TURBO_MBPS (1000)
 #define IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS (600)
@@ -3898,8 +3898,8 @@ static void ipa_cfg_qtime(void)
 
 	/* Configure timestamp resolution */
 	memset(&ts_cfg, 0, sizeof(ts_cfg));
-	ts_cfg.dpl_timestamp_lsb = 0;
-	ts_cfg.dpl_timestamp_sel = false; /* DPL: use legacy 1ms resolution */
+	ts_cfg.dpl_timestamp_lsb = IPA_TAG_TIMER_TIMESTAMP_SHFT;
+	ts_cfg.dpl_timestamp_sel = true;
 	ts_cfg.tag_timestamp_lsb = IPA_TAG_TIMER_TIMESTAMP_SHFT;
 	ts_cfg.nat_timestamp_lsb = IPA_NAT_TIMER_TIMESTAMP_SHFT;
 	val = ipahal_read_reg(IPA_QTIME_TIMESTAMP_CFG);
@@ -4037,18 +4037,18 @@ int ipa3_get_ep_mapping(enum ipa_client_type client)
 	int ipa_ep_idx;
 	u8 hw_idx = ipa3_get_hw_type_index();
 
-	if (client >= IPA_CLIENT_MAX || client < 0) {
+	if (unlikely(client >= IPA_CLIENT_MAX || client < 0)) {
 		IPAERR_RL("Bad client number! client =%d\n", client);
 		return IPA_EP_NOT_ALLOCATED;
 	}
 
-	if (!ipa3_ep_mapping[hw_idx][client].valid)
+	if (unlikely(!ipa3_ep_mapping[hw_idx][client].valid))
 		return IPA_EP_NOT_ALLOCATED;
 
 	ipa_ep_idx =
 		ipa3_ep_mapping[hw_idx][client].ipa_gsi_ep_info.ipa_ep_num;
-	if (ipa_ep_idx < 0 || (ipa_ep_idx >= IPA3_MAX_NUM_PIPES
-		&& client != IPA_CLIENT_DUMMY_CONS))
+	if (unlikely(ipa_ep_idx < 0 || (ipa_ep_idx >= IPA3_MAX_NUM_PIPES
+		&& client != IPA_CLIENT_DUMMY_CONS)))
 		return IPA_EP_NOT_ALLOCATED;
 
 	return ipa_ep_idx;
@@ -4066,7 +4066,7 @@ const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info
 	int ep_idx;
 
 	ep_idx = ipa3_get_ep_mapping(client);
-	if (ep_idx == IPA_EP_NOT_ALLOCATED)
+	if (unlikely(ep_idx == IPA_EP_NOT_ALLOCATED))
 		return NULL;
 
 	if (!ipa3_ep_mapping[ipa3_get_hw_type_index()][client].valid)
@@ -5986,6 +5986,7 @@ int ipa3_controller_static_bind(struct ipa3_controller *ctrl,
 	ctrl->ipa_init_sram = _ipa_init_sram_v3;
 	ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v3_0;
 	ctrl->ipa_init_hdr = _ipa_init_hdr_v3_0;
+	ctrl->max_holb_tmr_val = IPA_MAX_HOLB_TMR_VAL;
 
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
 		ctrl->ipa3_read_ep_reg = _ipa_read_ep_reg_v4_0;
@@ -7485,12 +7486,10 @@ void ipa3_set_resorce_groups_min_max_limits(void)
 	IPADBG("EXIT\n");
 }
 
-static void ipa3_gsi_poll_after_suspend(struct ipa3_ep_context *ep)
+static bool ipa3_gsi_channel_is_quiet(struct ipa3_ep_context *ep)
 {
 	bool empty;
 
-	IPADBG("switch ch %ld to poll\n", ep->gsi_chan_hdl);
-	gsi_config_channel_mode(ep->gsi_chan_hdl, GSI_CHAN_MODE_POLL);
 	gsi_is_channel_empty(ep->gsi_chan_hdl, &empty);
 	if (!empty) {
 		IPADBG("ch %ld not empty\n", ep->gsi_chan_hdl);
@@ -7499,6 +7498,7 @@ static void ipa3_gsi_poll_after_suspend(struct ipa3_ep_context *ep)
 		if (!atomic_read(&ep->sys->curr_polling_state))
 			__ipa_gsi_irq_rx_scedule_poll(ep->sys);
 	}
+	return empty;
 }
 
 static int __ipa3_stop_gsi_channel(u32 clnt_hdl)
@@ -7624,141 +7624,167 @@ int ipa3_stop_gsi_channel(u32 clnt_hdl)
 	return res;
 }
 
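+/*
+ * _ipa_suspend_resume_pipe() - stop (suspend) or start (resume) the GSI
+ * channel backing one apps pipe and switch it between poll and callback
+ * mode. On suspend, returns -EAGAIN if the channel is not yet empty so
+ * the caller can roll back and retry.
+ */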
-void ipa3_suspend_apps_pipes(bool suspend)
+static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
 {
-	struct ipa_ep_cfg_ctrl cfg;
 	int ipa_ep_idx;
 	struct ipa3_ep_context *ep;
 	int res;
 
-	memset(&cfg, 0, sizeof(cfg));
-	cfg.ipa_ep_suspend = suspend;
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		IPAERR("not supported on IPA HW < v4.0\n");
+		return -EPERM;
+	}
 
-	ipa_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
 	if (ipa_ep_idx < 0) {
-		IPAERR("IPA client mapping failed\n");
+		IPADBG("client %d not configured\n", client);
+		return 0;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (!ep->valid)
+		return 0;
+
+	IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend", ipa_ep_idx);
+	/*
+	 * Move the channel to callback mode before starting it so that
+	 * no interrupt is lost while the channel comes back up.
+	 */
+	if (!suspend && !atomic_read(&ep->sys->curr_polling_state) &&
+		!IPA_CLIENT_IS_APPS_PROD(client))
+		gsi_config_channel_mode(ep->gsi_chan_hdl,
+					GSI_CHAN_MODE_CALLBACK);
+
+	if (suspend) {
+		res = __ipa3_stop_gsi_channel(ipa_ep_idx);
+		if (res) {
+			IPAERR("failed to stop pipe %d\n", ipa_ep_idx);
+			ipa_assert();
+		}
+	} else {
+		res = gsi_start_channel(ep->gsi_chan_hdl);
+		if (res) {
+			IPAERR("failed to start pipe %d\n", ipa_ep_idx);
+			ipa_assert();
+		}
+	}
+
+	/* Apps prod pipes use common event ring so cannot configure mode */
+	if (IPA_CLIENT_IS_APPS_PROD(client))
+		return 0;
+
+	if (suspend) {
+		IPADBG("switch ch %ld to poll\n", ep->gsi_chan_hdl);
+		gsi_config_channel_mode(ep->gsi_chan_hdl, GSI_CHAN_MODE_POLL);
+		if (!ipa3_gsi_channel_is_quiet(ep))
+			return -EAGAIN;
+	} else if (!atomic_read(&ep->sys->curr_polling_state)) {
+		IPADBG("switch ch %ld to callback\n", ep->gsi_chan_hdl);
+		gsi_config_channel_mode(ep->gsi_chan_hdl,
+			GSI_CHAN_MODE_CALLBACK);
+	}
+
+	return 0;
+}
+
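+/*
+ * ipa3_force_close_coal() - issue a REGISTER_WRITE immediate command to
+ * IPA_AGGR_FORCE_CLOSE so any open coalescing frame on the
+ * WAN_COAL_CONS pipe is flushed before the pipe is suspended.
+ */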
+void ipa3_force_close_coal(void)
+{
+	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+	struct ipahal_imm_cmd_register_write reg_write_cmd = { 0 };
+	struct ipahal_reg_valmask valmask;
+	struct ipa3_desc desc;
+	int ep_idx;
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	if (ep_idx == IPA_EP_NOT_ALLOCATED || (!ipa3_ctx->ep[ep_idx].valid))
+		return;
+
+	reg_write_cmd.skip_pipeline_clear = false;
+	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE);
+	ipahal_get_aggr_force_close_valmask(ep_idx, &valmask);
+	reg_write_cmd.value = valmask.val;
+	reg_write_cmd.value_mask = valmask.mask;
+	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&reg_write_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail construct register_write imm cmd\n");
 		ipa_assert();
 		return;
 	}
-	ep = &ipa3_ctx->ep[ipa_ep_idx];
-	if (ep->valid) {
-		IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
-			ipa_ep_idx);
-		/*
-		 * move the channel to callback mode.
-		 * This needs to happen before starting the channel to make
-		 * sure we don't loose any interrupt
-		 */
-		if (!suspend && !atomic_read(&ep->sys->curr_polling_state))
-			gsi_config_channel_mode(ep->gsi_chan_hdl,
-				GSI_CHAN_MODE_CALLBACK);
+	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
 
-		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
-			if (suspend) {
-				res = __ipa3_stop_gsi_channel(ipa_ep_idx);
-				if (res) {
-					IPAERR("failed to stop LAN channel\n");
-					ipa_assert();
-				}
-			} else {
-				res = gsi_start_channel(ep->gsi_chan_hdl);
-				if (res) {
-					IPAERR("failed to start LAN channel\n");
-					ipa_assert();
-				}
-			}
-		} else {
-			ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+	IPADBG("Sending 1 descriptor for coal force close\n");
+	if (ipa3_send_cmd_timeout(1, &desc,
+		IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC)) {
+		IPAERR("ipa3_send_cmd failed\n");
+		ipa_assert();
+	}
+	ipahal_destroy_imm_cmd(cmd_pyld);
+}
+
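+/*
+ * ipa3_suspend_apps_pipes() - suspend or resume all apps pipes:
+ * consumer pipes first, then (on suspend) a check that no coalescing
+ * frame is open and no suspend interrupt is pending, then producer
+ * pipes. Any failure rolls back the pipes already toggled.
+ */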
+int ipa3_suspend_apps_pipes(bool suspend)
+{
+	int res;
+	enum ipa_client_type client;
+
+	if (suspend)
+		ipa3_force_close_coal();
+
+	for (client = 0; client < IPA_CLIENT_MAX; client++) {
+		if (IPA_CLIENT_IS_APPS_CONS(client)) {
+			res = _ipa_suspend_resume_pipe(client, suspend);
+			if (res)
+				goto undo_cons;
 		}
-		if (suspend)
-			ipa3_gsi_poll_after_suspend(ep);
 	}
 
-	ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
-	/* Considering the case for SSR. */
-	if (ipa_ep_idx == -1) {
-		IPADBG("Invalid mapping for IPA_CLIENT_APPS_WAN_CONS\n");
-		return;
-	}
-	ep = &ipa3_ctx->ep[ipa_ep_idx];
-	if (ep->valid) {
-		IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
-			ipa_ep_idx);
-		/*
-		 * move the channel to callback mode.
-		 * This needs to happen before starting the channel to make
-		 * sure we don't loose any interrupt
-		 */
-		if (!suspend && !atomic_read(&ep->sys->curr_polling_state))
-			gsi_config_channel_mode(ep->gsi_chan_hdl,
-				GSI_CHAN_MODE_CALLBACK);
-		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
-			if (suspend) {
-				res = __ipa3_stop_gsi_channel(ipa_ep_idx);
-				if (res) {
-					IPAERR("failed to stop WAN channel\n");
-					ipa_assert();
-				}
-			} else if (!atomic_read(&ipa3_ctx->is_ssr)) {
-				/* If SSR was alreday started not required to
-				 * start WAN channel,Because in SSR will stop
-				 * channel and reset the channel.
-				 */
-				res = gsi_start_channel(ep->gsi_chan_hdl);
-				if (res) {
-					IPAERR("failed to start WAN channel\n");
-					ipa_assert();
-				}
-			}
-		} else {
-			ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+	if (suspend) {
+		struct ipahal_reg_tx_wrapper tx;
+		int ep_idx;
+
+		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		if (ep_idx == IPA_EP_NOT_ALLOCATED ||
+				(!ipa3_ctx->ep[ep_idx].valid))
+			goto do_prod;
+
+		ipahal_read_reg_fields(IPA_STATE_TX_WRAPPER, &tx);
+		if (tx.coal_slave_open_frame != 0) {
+			IPADBG("COAL frame is open 0x%x\n",
+				tx.coal_slave_open_frame);
+			res = -EAGAIN;
+			goto undo_cons;
 		}
-		if (suspend)
-			ipa3_gsi_poll_after_suspend(ep);
+
+		usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);
+
+		res = ipahal_read_reg_n(IPA_SUSPEND_IRQ_INFO_EE_n,
+			ipa3_ctx->ee);
+		if (res) {
+			IPADBG("suspend irq is pending 0x%x\n", res);
+			goto undo_cons;
+		}
+	}
+do_prod:
+	for (client = 0; client < IPA_CLIENT_MAX; client++) {
+		if (IPA_CLIENT_IS_APPS_PROD(client)) {
+			res = _ipa_suspend_resume_pipe(client, suspend);
+			if (res)
+				goto undo_prod;
+		}
 	}
 
-	ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_ODL_DPL_CONS);
-	/* Considering the case for SSR. */
-	if (ipa_ep_idx == -1) {
-		IPADBG("Invalid mapping for IPA_CLIENT_ODL_DPL_CONS\n");
-		return;
-	}
-	ep = &ipa3_ctx->ep[ipa_ep_idx];
-	if (ep->valid) {
-		IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
-			ipa_ep_idx);
-		/*
-		 * move the channel to callback mode.
-		 * This needs to happen before starting the channel to make
-		 * sure we don't loose any interrupt
-		 */
-		if (!suspend && !atomic_read(&ep->sys->curr_polling_state))
-			gsi_config_channel_mode(ep->gsi_chan_hdl,
-			GSI_CHAN_MODE_CALLBACK);
-		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
-			if (suspend) {
-				res = __ipa3_stop_gsi_channel(ipa_ep_idx);
-				if (res) {
-					IPAERR("failed to stop ODL channel\n");
-					ipa_assert();
-				}
-			} else if (!atomic_read(&ipa3_ctx->is_ssr)) {
-				/* If SSR was alreday started not required to
-				 * start WAN channel,Because in SSR will stop
-				 * channel and reset the channel.
-				 */
-				res = gsi_start_channel(ep->gsi_chan_hdl);
-				if (res) {
-					IPAERR("failed to start ODL channel\n");
-					ipa_assert();
-				}
-			}
-		} else {
-			ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
-		}
-		if (suspend)
-			ipa3_gsi_poll_after_suspend(ep);
-	}
+	return 0;
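+/*
+ * Unwind in reverse: re-toggle every pipe switched before the failure.
+ * The client < IPA_CLIENT_MAX test ends the walk even if the enum is
+ * unsigned and client-- wraps around below zero.
+ */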
+undo_prod:
+	for (client--; client < IPA_CLIENT_MAX && client >= 0; client--)
+		if (IPA_CLIENT_IS_APPS_PROD(client))
+			_ipa_suspend_resume_pipe(client, !suspend);
+	client = IPA_CLIENT_MAX;
+undo_cons:
+	for (client--; client < IPA_CLIENT_MAX && client >= 0; client--)
+		if (IPA_CLIENT_IS_APPS_CONS(client))
+			_ipa_suspend_resume_pipe(client, !suspend);
+	return res;
 }
 
 int ipa3_allocate_dma_task_for_gsi(void)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 6bf24c86..440788f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -1180,6 +1180,112 @@ static void ipareg_parse_comp_cfg_v4_5(
 		IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_BMSK_v4_5);
 }
 
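+/*
+ * Decode the IPA_STATE_TX_WRAPPER register (v4.5 layout) into
+ * struct ipahal_reg_tx_wrapper; the apps suspend path consults
+ * coal_slave_open_frame to decide whether suspend can proceed.
+ */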
+static void ipareg_parse_state_tx_wrapper_v4_5(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_tx_wrapper *tx =
+		(struct ipahal_reg_tx_wrapper *)fields;
+
+	tx->tx0_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK);
+
+	tx->tx1_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_TX1_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK);
+
+	tx->ipa_prod_ackmngr_db_empty = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK);
+
+	tx->ipa_prod_ackmngr_state_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK);
+
+	tx->ipa_prod_prod_bresp_empty = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK);
+
+	tx->ipa_prod_prod_bresp_toggle_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_BMSK);
+
+	tx->ipa_mbim_pkt_fms_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_BMSK);
+
+	tx->mbim_direct_dma = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_SHFT,
+		IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_BMSK);
+
+	tx->trnseq_force_valid = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_SHFT,
+		IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_BMSK);
+
+	tx->pkt_drop_cnt_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_BMSK);
+
+	tx->nlo_direct_dma = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_SHFT,
+		IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_BMSK);
+
+	tx->coal_direct_dma = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_SHFT,
+		IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_BMSK);
+
+	tx->coal_slave_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_BMSK);
+
+	tx->coal_slave_ctx_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_BMSK);
+
+	tx->coal_slave_open_frame = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_SHFT,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_BMSK);
+}
+
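+/*
+ * Same register as above, v4.7 bit layout: only the fields that exist
+ * on v4.7 are decoded, using the _v4_7 shift/mask definitions.
+ */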
+static void ipareg_parse_state_tx_wrapper_v4_7(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_tx_wrapper *tx =
+		(struct ipahal_reg_tx_wrapper *)fields;
+
+	tx->tx0_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK_v4_7);
+
+	tx->tx1_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_TX1_IDLE_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK_v4_7);
+
+	tx->ipa_prod_ackmngr_db_empty = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK_v4_7);
+
+	tx->ipa_prod_ackmngr_state_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK_v4_7);
+
+	tx->ipa_prod_prod_bresp_empty = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK_v4_7);
+
+	tx->coal_slave_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_BMSK_v4_7);
+
+	tx->coal_slave_ctx_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_BMSK_v4_7);
+
+	tx->coal_slave_open_frame = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_BMSK_v4_7);
+}
+
 static void ipareg_construct_qcncm(
 	enum ipahal_reg_name reg, const void *fields, u32 *val)
 {
@@ -2968,6 +3074,9 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
 	[IPA_HW_v4_5][IPA_COMP_CFG] = {
 		ipareg_construct_comp_cfg_v4_5, ipareg_parse_comp_cfg_v4_5,
 		0x0000003C, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STATE_TX_WRAPPER] = {
+		ipareg_construct_dummy, ipareg_parse_state_tx_wrapper_v4_5,
+		0x00000090, 0, 0, 0, 1 },
 	[IPA_HW_v4_5][IPA_STATE_FETCHER_MASK] = {
 		ipareg_construct_dummy, ipareg_parse_dummy,
 		-1, 0, 0, 0, 0},
@@ -3167,6 +3276,9 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
 	[IPA_HW_v4_5][IPA_COAL_QMAP_CFG] = {
 		ipareg_construct_coal_qmap_cfg, ipareg_parse_coal_qmap_cfg,
 		0x00001810, 0, 0, 0, 0},
+	[IPA_HW_v4_7][IPA_STATE_TX_WRAPPER] = {
+		ipareg_construct_dummy, ipareg_parse_state_tx_wrapper_v4_7,
+		0x00000090, 0, 0, 0, 1 },
 };
 
 /*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index 9fa862c..5191d3b1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -380,6 +380,27 @@ struct ipahal_reg_comp_cfg {
 };
 
 /*
+ * struct ipahal_reg_tx_wrapper - IPA TX Wrapper state information
+ */
+struct ipahal_reg_tx_wrapper {
+	bool tx0_idle;
+	bool tx1_idle;
+	bool ipa_prod_ackmngr_db_empty;
+	bool ipa_prod_ackmngr_state_idle;
+	bool ipa_prod_prod_bresp_empty;
+	bool ipa_prod_prod_bresp_toggle_idle;
+	bool ipa_mbim_pkt_fms_idle;
+	u8 mbim_direct_dma;
+	bool trnseq_force_valid;
+	bool pkt_drop_cnt_idle;
+	u8 nlo_direct_dma;
+	u8 coal_direct_dma;
+	bool coal_slave_idle;
+	bool coal_slave_ctx_idle;
+	u8 coal_slave_open_frame;
+};
+
+/*
  * struct ipa_hash_tuple - Hash tuple members for flt and rt
  *  the fields tells if to be masked or not
  * @src_id: pipe number for flt, table index for rt
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
index 119ad8c..d934b91 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
@@ -626,5 +626,52 @@ int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
 #define IPA_COAL_QMAP_CFG_BMSK 0x1
 #define IPA_COAL_QMAP_CFG_SHFT 0
 
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_BMSK 0xf0000000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_SHFT 0x1c
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_BMSK 0x100000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_SHFT 0x14
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_BMSK 0x8000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_SHFT 0xf
+#define IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_BMSK 0x6000
+#define IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_SHFT 0xd
+#define IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_BMSK 0x1800
+#define IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_SHFT 0xb
+#define IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_BMSK 0x400
+#define IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_SHFT 0xa
+#define IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_BMSK 0x200
+#define IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_SHFT 0x9
+#define IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_BMSK 0x180
+#define IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_SHFT 0x7
+#define IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_BMSK 0x40
+#define IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_SHFT 0x6
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_BMSK 0x20
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_SHFT 0x5
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK 0x10
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT 0x4
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK 0x8
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT 0x3
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK 0x4
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT 0x2
+#define IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK 0x2
+#define IPA_STATE_TX_WRAPPER_TX1_IDLE_SHFT 0x1
+#define IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK 0x1
+#define IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT 0x0
+
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_BMSK_v4_7 0xf0000000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_SHFT_v4_7 28
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_BMSK_v4_7 0x80000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_SHFT_v4_7 19
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_BMSK_v4_7 0x40000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_SHFT_v4_7 18
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK_v4_7 0x10
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT_v4_7 4
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK_v4_7 0x8
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT_v4_7 3
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK_v4_7 0x4
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT_v4_7 2
+#define IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK_v4_7 0x2
+#define IPA_STATE_TX_WRAPPER_TX1_IDLE_SHFT_v4_7 1
+#define IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK_v4_7 0x1
+#define IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT_v4_7 0
 
 #endif /* _IPAHAL_REG_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 37246e1..8c38ac6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1228,7 +1228,7 @@ static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
 	unsigned long flags;
 
-	if (rmnet_ipa3_ctx->ipa_config_is_apq) {
+	if (unlikely(rmnet_ipa3_ctx->ipa_config_is_apq)) {
 		IPAWANERR_RL("IPA embedded data on APQ platform\n");
 		dev_kfree_skb_any(skb);
 		dev->stats.tx_dropped++;
@@ -1295,7 +1295,8 @@ static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
 		spin_unlock_irqrestore(&wwan_ptr->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
-	if (ret) {
+
+	if (unlikely(ret)) {
 		IPAWANERR("[%s] fatal: ipa pm activate failed %d\n",
 		       dev->name, ret);
 		dev_kfree_skb_any(skb);
@@ -1318,7 +1319,7 @@ static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * IPA_CLIENT_Q6_WAN_CONS based on status configuration
 	 */
 	ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_PROD, skb, NULL);
-	if (ret) {
+	if (unlikely(ret)) {
 		atomic_dec(&wwan_ptr->outstanding_pkts);
 		if (ret == -EPIPE) {
 			IPAWANERR_RL("[%s] fatal: pipe is not valid\n",
@@ -1419,7 +1420,7 @@ static void apps_ipa_packet_receive_notify(void *priv,
 {
 	struct net_device *dev = (struct net_device *)priv;
 
-	if (evt == IPA_RECEIVE) {
+	if (likely(evt == IPA_RECEIVE)) {
 		struct sk_buff *skb = (struct sk_buff *)data;
 		int result;
 		unsigned int packet_len = skb->len;
@@ -1442,7 +1443,7 @@ static void apps_ipa_packet_receive_notify(void *priv,
 			}
 		}
 
-		if (result)	{
+		if (unlikely(result)) {
 			pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
 							   __func__, __LINE__);
 			dev->stats.rx_dropped++;
@@ -3588,36 +3589,42 @@ static int rmnet_ipa3_query_tethering_stats_hw(
 	data->ipv6_tx_bytes +=
 		con_stats->client[index].num_ipv6_bytes;
 
-	/* query WIGIG UL stats */
-	memset(con_stats, 0, sizeof(struct ipa_quota_stats_all));
-	rc = ipa_query_teth_stats(IPA_CLIENT_WIGIG_PROD, con_stats, reset);
-	if (rc) {
-		IPAERR("IPA_CLIENT_WIGIG_PROD query failed %d\n", rc);
-		kfree(con_stats);
-		return rc;
+	if (ipa3_get_ep_mapping(IPA_CLIENT_WIGIG_PROD) !=
+			IPA_EP_NOT_ALLOCATED) {
+		/* query WIGIG UL stats */
+		memset(con_stats, 0, sizeof(struct ipa_quota_stats_all));
+		rc = ipa_query_teth_stats(IPA_CLIENT_WIGIG_PROD, con_stats,
+									reset);
+		if (rc) {
+			IPAERR("IPA_CLIENT_WIGIG_PROD query failed %d\n", rc);
+			kfree(con_stats);
+			return rc;
+		}
+
+		if (rmnet_ipa3_ctx->ipa_config_is_apq)
+			index = IPA_CLIENT_MHI_PRIME_TETH_CONS;
+		else
+			index = IPA_CLIENT_Q6_WAN_CONS;
+
+		IPAWANDBG("wigig: v4_tx_p(%d) b(%lld) v6_tx_p(%d) b(%lld)\n",
+				con_stats->client[index].num_ipv4_pkts,
+				con_stats->client[index].num_ipv4_bytes,
+				con_stats->client[index].num_ipv6_pkts,
+				con_stats->client[index].num_ipv6_bytes);
+
+		/* update the WIGIG UL stats */
+		data->ipv4_tx_packets +=
+			con_stats->client[index].num_ipv4_pkts;
+		data->ipv6_tx_packets +=
+			con_stats->client[index].num_ipv6_pkts;
+		data->ipv4_tx_bytes +=
+			con_stats->client[index].num_ipv4_bytes;
+		data->ipv6_tx_bytes +=
+			con_stats->client[index].num_ipv6_bytes;
+	} else {
+		IPAWANDBG("IPA_CLIENT_WIGIG_PROD client not supported\n");
 	}
 
-	if (rmnet_ipa3_ctx->ipa_config_is_apq)
-		index = IPA_CLIENT_MHI_PRIME_TETH_CONS;
-	else
-		index = IPA_CLIENT_Q6_WAN_CONS;
-
-	IPAWANDBG("wigig: v4_tx_p(%d) b(%lld) v6_tx_p(%d) b(%lld)\n",
-		con_stats->client[index].num_ipv4_pkts,
-		con_stats->client[index].num_ipv4_bytes,
-		con_stats->client[index].num_ipv6_pkts,
-		con_stats->client[index].num_ipv6_bytes);
-
-	/* update the WIGIG UL stats */
-	data->ipv4_tx_packets +=
-		con_stats->client[index].num_ipv4_pkts;
-	data->ipv6_tx_packets +=
-		con_stats->client[index].num_ipv6_pkts;
-	data->ipv4_tx_bytes +=
-		con_stats->client[index].num_ipv4_bytes;
-	data->ipv6_tx_bytes +=
-		con_stats->client[index].num_ipv6_bytes;
-
 	IPAWANDBG("v4_tx_p(%lu) v6_tx_p(%lu) v4_tx_b(%lu) v6_tx_b(%lu)\n",
 		(unsigned long) data->ipv4_tx_packets,
 		(unsigned long) data->ipv6_tx_packets,
diff --git a/drivers/platform/msm/msm_ext_display.c b/drivers/platform/msm/msm_ext_display.c
index 1fbd8fc..33ff3d8 100644
--- a/drivers/platform/msm/msm_ext_display.c
+++ b/drivers/platform/msm/msm_ext_display.c
@@ -108,8 +108,6 @@ static int msm_ext_disp_add_intf_data(struct msm_ext_disp *ext_disp,
 	list_for_each(pos, &ext_disp->display_list)
 		count++;
 
-	data->codec.stream_id = count;
-
 	list_add(&node->list, &ext_disp->display_list);
 
 
diff --git a/drivers/power/supply/qcom/qg-battery-profile.c b/drivers/power/supply/qcom/qg-battery-profile.c
index cc46ea5..928fb6e 100644
--- a/drivers/power/supply/qcom/qg-battery-profile.c
+++ b/drivers/power/supply/qcom/qg-battery-profile.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"QG-K: %s: " fmt, __func__
@@ -100,7 +100,8 @@ static long qg_battery_data_ioctl(struct file *file, unsigned int cmd,
 			rc = -EINVAL;
 		} else {
 			/* OCV is passed as deci-uV  - 10^-4 V */
-			soc = interpolate_soc(&battery->profile[bp.table_index],
+			soc = qg_interpolate_soc(
+					&battery->profile[bp.table_index],
 					bp.batt_temp, UV_TO_DECIUV(bp.ocv_uv));
 			soc = CAP(QG_MIN_SOC, QG_MAX_SOC, soc);
 			rc = put_user(soc, &bp_user->soc);
@@ -120,7 +121,7 @@ static long qg_battery_data_ioctl(struct file *file, unsigned int cmd,
 					bp.table_index);
 			rc = -EINVAL;
 		} else {
-			ocv_uv = interpolate_var(
+			ocv_uv = qg_interpolate_var(
 					&battery->profile[bp.table_index],
 					bp.batt_temp, bp.soc);
 			ocv_uv = DECIUV_TO_UV(ocv_uv);
@@ -142,7 +143,7 @@ static long qg_battery_data_ioctl(struct file *file, unsigned int cmd,
 					bp.table_index);
 			rc = -EINVAL;
 		} else {
-			fcc_mah = interpolate_single_row_lut(
+			fcc_mah = qg_interpolate_single_row_lut(
 					&battery->profile[bp.table_index],
 					bp.batt_temp, DEGC_SCALE);
 			fcc_mah = CAP(QG_MIN_FCC_MAH, QG_MAX_FCC_MAH, fcc_mah);
@@ -162,7 +163,8 @@ static long qg_battery_data_ioctl(struct file *file, unsigned int cmd,
 					bp.table_index);
 			rc = -EINVAL;
 		} else {
-			var = interpolate_var(&battery->profile[bp.table_index],
+			var = qg_interpolate_var(
+					&battery->profile[bp.table_index],
 					bp.batt_temp, bp.soc);
 			var = CAP(QG_MIN_VAR, QG_MAX_VAR, var);
 			rc = put_user(var, &bp_user->var);
@@ -182,7 +184,7 @@ static long qg_battery_data_ioctl(struct file *file, unsigned int cmd,
 					bp.table_index);
 			rc = -EINVAL;
 		} else {
-			slope = interpolate_slope(
+			slope = qg_interpolate_slope(
 					&battery->profile[bp.table_index],
 					bp.batt_temp, bp.soc);
 			slope = CAP(QG_MIN_SLOPE, QG_MAX_SLOPE, slope);
@@ -394,7 +396,7 @@ int lookup_soc_ocv(u32 *soc, u32 ocv_uv, int batt_temp, bool charging)
 	if (!the_battery || !the_battery->profile_node)
 		return -ENODEV;
 
-	*soc = interpolate_soc(&the_battery->profile[table_index],
+	*soc = qg_interpolate_soc(&the_battery->profile[table_index],
 				batt_temp, UV_TO_DECIUV(ocv_uv));
 
 	*soc = CAP(0, 100, DIV_ROUND_CLOSEST(*soc, 100));
@@ -410,7 +412,7 @@ int qg_get_nominal_capacity(u32 *nom_cap_uah, int batt_temp, bool charging)
 	if (!the_battery || !the_battery->profile_node)
 		return -ENODEV;
 
-	fcc_mah = interpolate_single_row_lut(
+	fcc_mah = qg_interpolate_single_row_lut(
 				&the_battery->profile[table_index],
 					batt_temp, DEGC_SCALE);
 	fcc_mah = CAP(QG_MIN_FCC_MAH, QG_MAX_FCC_MAH, fcc_mah);
diff --git a/drivers/power/supply/qcom/qg-core.h b/drivers/power/supply/qcom/qg-core.h
index 7e0b20c..a0c07de 100644
--- a/drivers/power/supply/qcom/qg-core.h
+++ b/drivers/power/supply/qcom/qg-core.h
@@ -57,6 +57,7 @@ struct qg_dt {
 	int			shutdown_soc_threshold;
 	int			min_sleep_time_secs;
 	int			sys_min_volt_mv;
+	int			fvss_vbat_mv;
 	bool			hold_soc_while_full;
 	bool			linearize_soc;
 	bool			cl_disable;
@@ -67,6 +68,7 @@ struct qg_dt {
 	bool			use_s7_ocv;
 	bool			qg_sleep_config;
 	bool			qg_fast_chg_cfg;
+	bool			fvss_enable;
 };
 
 struct qg_esr_data {
@@ -129,6 +131,7 @@ struct qpnp_qg {
 	bool			dc_present;
 	bool			charge_full;
 	bool			force_soc;
+	bool			fvss_active;
 	int			charge_status;
 	int			charge_type;
 	int			chg_iterm_ma;
@@ -137,6 +140,8 @@ struct qpnp_qg {
 	int			esr_nominal;
 	int			soh;
 	int			soc_reporting_ready;
+	int			last_fifo_v_uv;
+	int			last_fifo_i_ua;
 	u32			fifo_done_count;
 	u32			wa_flags;
 	u32			seq_no;
@@ -145,6 +150,8 @@ struct qpnp_qg {
 	u32			esr_last;
 	u32			s2_state;
 	u32			s2_state_mask;
+	u32			soc_fvss_entry;
+	u32			vbat_fvss_entry;
 	ktime_t			last_user_update_time;
 	ktime_t			last_fifo_update_time;
 	unsigned long		last_maint_soc_update_time;
diff --git a/drivers/power/supply/qcom/qg-profile-lib.c b/drivers/power/supply/qcom/qg-profile-lib.c
index 7585dad..bf8efc9 100644
--- a/drivers/power/supply/qcom/qg-profile-lib.c
+++ b/drivers/power/supply/qcom/qg-profile-lib.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -9,7 +9,7 @@
 #include "qg-profile-lib.h"
 #include "qg-defs.h"
 
-static int linear_interpolate(int y0, int x0, int y1, int x1, int x)
+int qg_linear_interpolate(int y0, int x0, int y1, int x1, int x)
 {
 	if (y0 == y1 || x == x0)
 		return y0;
@@ -19,7 +19,7 @@ static int linear_interpolate(int y0, int x0, int y1, int x1, int x)
 	return y0 + ((y1 - y0) * (x - x0) / (x1 - x0));
 }
 
-int interpolate_single_row_lut(struct profile_table_data *lut,
+int qg_interpolate_single_row_lut(struct profile_table_data *lut,
 						int x, int scale)
 {
 	int i, result;
@@ -45,7 +45,7 @@ int interpolate_single_row_lut(struct profile_table_data *lut,
 	if (x == lut->col_entries[i] * scale) {
 		result = lut->data[0][i];
 	} else {
-		result = linear_interpolate(
+		result = qg_linear_interpolate(
 			lut->data[0][i-1],
 			lut->col_entries[i-1] * scale,
 			lut->data[0][i],
@@ -56,7 +56,7 @@ int interpolate_single_row_lut(struct profile_table_data *lut,
 	return result;
 }
 
-int interpolate_soc(struct profile_table_data *lut,
+int qg_interpolate_soc(struct profile_table_data *lut,
 				int batt_temp, int ocv)
 {
 	int i, j, soc_high, soc_low, soc;
@@ -87,7 +87,7 @@ int interpolate_soc(struct profile_table_data *lut,
 			if (ocv >= lut->data[i][j]) {
 				if (ocv == lut->data[i][j])
 					return lut->row_entries[i];
-				soc = linear_interpolate(
+				soc = qg_linear_interpolate(
 					lut->row_entries[i],
 					lut->data[i][j],
 					lut->row_entries[i - 1],
@@ -108,7 +108,7 @@ int interpolate_soc(struct profile_table_data *lut,
 	for (i = 0; i < rows-1; i++) {
 		if (soc_high == 0 && is_between(lut->data[i][j],
 				lut->data[i+1][j], ocv)) {
-			soc_high = linear_interpolate(
+			soc_high = qg_linear_interpolate(
 				lut->row_entries[i],
 				lut->data[i][j],
 				lut->row_entries[i + 1],
@@ -118,7 +118,7 @@ int interpolate_soc(struct profile_table_data *lut,
 
 		if (soc_low == 0 && is_between(lut->data[i][j-1],
 				lut->data[i+1][j-1], ocv)) {
-			soc_low = linear_interpolate(
+			soc_low = qg_linear_interpolate(
 				lut->row_entries[i],
 				lut->data[i][j-1],
 				lut->row_entries[i + 1],
@@ -127,7 +127,7 @@ int interpolate_soc(struct profile_table_data *lut,
 		}
 
 		if (soc_high && soc_low) {
-			soc = linear_interpolate(
+			soc = qg_linear_interpolate(
 				soc_low,
 				lut->col_entries[j-1] * DEGC_SCALE,
 				soc_high,
@@ -148,7 +148,7 @@ int interpolate_soc(struct profile_table_data *lut,
 	return 10000;
 }
 
-int interpolate_var(struct profile_table_data *lut,
+int qg_interpolate_var(struct profile_table_data *lut,
 				int batt_temp, int soc)
 {
 	int i, var1, var2, var, rows, cols;
@@ -192,7 +192,7 @@ int interpolate_var(struct profile_table_data *lut,
 			break;
 
 	if (batt_temp == lut->col_entries[i] * DEGC_SCALE) {
-		var = linear_interpolate(
+		var = qg_linear_interpolate(
 				lut->data[row1][i],
 				lut->row_entries[row1],
 				lut->data[row2][i],
@@ -201,21 +201,21 @@ int interpolate_var(struct profile_table_data *lut,
 		return var;
 	}
 
-	var1 = linear_interpolate(
+	var1 = qg_linear_interpolate(
 				lut->data[row1][i - 1],
 				lut->col_entries[i - 1] * DEGC_SCALE,
 				lut->data[row1][i],
 				lut->col_entries[i] * DEGC_SCALE,
 				batt_temp);
 
-	var2 = linear_interpolate(
+	var2 = qg_linear_interpolate(
 				lut->data[row2][i - 1],
 				lut->col_entries[i - 1] * DEGC_SCALE,
 				lut->data[row2][i],
 				lut->col_entries[i] * DEGC_SCALE,
 				batt_temp);
 
-	var = linear_interpolate(
+	var = qg_linear_interpolate(
 				var1,
 				lut->row_entries[row1],
 				var2,
@@ -225,7 +225,7 @@ int interpolate_var(struct profile_table_data *lut,
 	return var;
 }
 
-int interpolate_slope(struct profile_table_data *lut,
+int qg_interpolate_slope(struct profile_table_data *lut,
 					int batt_temp, int soc)
 {
 	int i, ocvrow1, ocvrow2, rows, cols;
@@ -277,14 +277,14 @@ int interpolate_slope(struct profile_table_data *lut,
 			lut->row_entries[row2]);
 		return slope;
 	}
-	ocvrow1 = linear_interpolate(
+	ocvrow1 = qg_linear_interpolate(
 			lut->data[row1][i - 1],
 			lut->col_entries[i - 1] * DEGC_SCALE,
 			lut->data[row1][i],
 			lut->col_entries[i] * DEGC_SCALE,
 			batt_temp);
 
-	ocvrow2 = linear_interpolate(
+	ocvrow2 = qg_linear_interpolate(
 			lut->data[row2][i - 1],
 			lut->col_entries[i - 1] * DEGC_SCALE,
 			lut->data[row2][i],
diff --git a/drivers/power/supply/qcom/qg-profile-lib.h b/drivers/power/supply/qcom/qg-profile-lib.h
index 5585239..ba7e03e 100644
--- a/drivers/power/supply/qcom/qg-profile-lib.h
+++ b/drivers/power/supply/qcom/qg-profile-lib.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
  */
 
 #ifndef __QG_PROFILE_LIB_H__
@@ -15,13 +15,14 @@ struct profile_table_data {
 	int		**data;
 };
 
-int interpolate_single_row_lut(struct profile_table_data *lut,
+int qg_linear_interpolate(int y0, int x0, int y1, int x1, int x);
+int qg_interpolate_single_row_lut(struct profile_table_data *lut,
 						int x, int scale);
-int interpolate_soc(struct profile_table_data *lut,
+int qg_interpolate_soc(struct profile_table_data *lut,
 				int batt_temp, int ocv);
-int interpolate_var(struct profile_table_data *lut,
+int qg_interpolate_var(struct profile_table_data *lut,
 				int batt_temp, int soc);
-int interpolate_slope(struct profile_table_data *lut,
+int qg_interpolate_slope(struct profile_table_data *lut,
 				int batt_temp, int soc);
 
 #endif /*__QG_PROFILE_LIB_H__ */
diff --git a/drivers/power/supply/qcom/qg-soc.c b/drivers/power/supply/qcom/qg-soc.c
index d8d0c6e..3ecd32b 100644
--- a/drivers/power/supply/qcom/qg-soc.c
+++ b/drivers/power/supply/qcom/qg-soc.c
@@ -17,6 +17,7 @@
 #include "qg-reg.h"
 #include "qg-util.h"
 #include "qg-defs.h"
+#include "qg-profile-lib.h"
 #include "qg-soc.h"
 
 #define DEFAULT_UPDATE_TIME_MS			64000
@@ -45,6 +46,11 @@ static ssize_t soc_interval_ms_store(struct device *dev,
 }
 DEVICE_ATTR_RW(soc_interval_ms);
 
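+/* SOC update interval (ms) used while FVSS scaling is active */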
+static int qg_fvss_delta_soc_interval_ms = 10000;
+module_param_named(
+	fvss_soc_interval_ms, qg_fvss_delta_soc_interval_ms, int, 0600
+);
+
 static int qg_delta_soc_cold_interval_ms = 4000;
 static ssize_t soc_cold_interval_ms_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
@@ -87,6 +93,84 @@ static ssize_t maint_soc_update_ms_store(struct device *dev,
 }
 DEVICE_ATTR_RW(maint_soc_update_ms);
 
+/* FVSS scaling only based on VBAT */
+static int qg_fvss_vbat_scaling = 1;
+module_param_named(
+	fvss_vbat_scaling, qg_fvss_vbat_scaling, int, 0600
+);
+
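+/*
+ * qg_process_fvss_soc() - once the averaged FIFO voltage drops below
+ * the FVSS entry threshold, scale the reported SOC linearly between
+ * the entry SOC and 0% at the cutoff voltage, optionally blending
+ * with the system SOC by weight.
+ */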
+static int qg_process_fvss_soc(struct qpnp_qg *chip, int sys_soc)
+{
+	int rc, vbat_uv = 0, vbat_cutoff_uv = chip->dt.vbatt_cutoff_mv * 1000;
+	int soc_vbat = 0, wt_vbat = 0, wt_sys = 0, soc_fvss = 0;
+
+	if (!chip->dt.fvss_enable)
+		goto exit_soc_scale;
+
+	if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING)
+		goto exit_soc_scale;
+
+	rc = qg_get_battery_voltage(chip, &vbat_uv);
+	if (rc < 0)
+		goto exit_soc_scale;
+
+	if (!chip->last_fifo_v_uv)
+		chip->last_fifo_v_uv = vbat_uv;
+
+	if (chip->last_fifo_v_uv > (chip->dt.fvss_vbat_mv * 1000)) {
+		qg_dbg(chip, QG_DEBUG_SOC, "FVSS: last_fifo_v=%d fvss_entry_uv=%d - exit\n",
+			chip->last_fifo_v_uv, chip->dt.fvss_vbat_mv * 1000);
+		goto exit_soc_scale;
+	}
+
+	/* Enter FVSS */
+	if (!chip->fvss_active) {
+		chip->vbat_fvss_entry = CAP(vbat_cutoff_uv,
+					chip->dt.fvss_vbat_mv * 1000,
+					chip->last_fifo_v_uv);
+		chip->soc_fvss_entry = sys_soc;
+		chip->fvss_active = true;
+	} else if (chip->last_fifo_v_uv > chip->vbat_fvss_entry) {
+		/* VBAT has gone beyond the entry voltage */
+		chip->vbat_fvss_entry = chip->last_fifo_v_uv;
+		chip->soc_fvss_entry = sys_soc;
+	}
+
+	soc_vbat = qg_linear_interpolate(chip->soc_fvss_entry,
+					chip->vbat_fvss_entry,
+					0,
+					vbat_cutoff_uv,
+					chip->last_fifo_v_uv);
+	soc_vbat = CAP(0, 100, soc_vbat);
+
+	if (qg_fvss_vbat_scaling) {
+		wt_vbat = 100;
+		wt_sys = 0;
+	} else {
+		wt_sys = qg_linear_interpolate(100,
+					chip->soc_fvss_entry,
+					0,
+					0,
+					sys_soc);
+		wt_sys = CAP(0, 100, wt_sys);
+		wt_vbat = 100 - wt_sys;
+	}
+
+	soc_fvss = ((soc_vbat * wt_vbat) + (sys_soc * wt_sys)) / 100;
+	soc_fvss = CAP(0, 100, soc_fvss);
+
+	qg_dbg(chip, QG_DEBUG_SOC, "FVSS: vbat_fvss_entry=%d soc_fvss_entry=%d cutoff_uv=%d vbat_uv=%d fifo_avg_v=%d soc_vbat=%d sys_soc=%d wt_vbat=%d wt_sys=%d soc_fvss=%d\n",
+			chip->vbat_fvss_entry, chip->soc_fvss_entry,
+			vbat_cutoff_uv, vbat_uv, chip->last_fifo_v_uv,
+			soc_vbat, sys_soc, wt_vbat, wt_sys, soc_fvss);
+
+	return soc_fvss;
+
+exit_soc_scale:
+	chip->fvss_active = false;
+	return sys_soc;
+}
+
 int qg_adjust_sys_soc(struct qpnp_qg *chip)
 {
 	int soc, vbat_uv, rc;
@@ -94,7 +178,7 @@ int qg_adjust_sys_soc(struct qpnp_qg *chip)
 
 	chip->sys_soc = CAP(QG_MIN_SOC, QG_MAX_SOC, chip->sys_soc);
 
-	if (chip->sys_soc == QG_MIN_SOC) {
+	if (chip->sys_soc <= 50) { /* 0.5% */
 		/* Hold SOC at 1% if VBAT has not dropped below cutoff */
 		rc = qg_get_battery_voltage(chip, &vbat_uv);
 		if (!rc && vbat_uv >= (vcutoff_uv + VBAT_LOW_HYST_UV))
@@ -113,8 +197,11 @@ int qg_adjust_sys_soc(struct qpnp_qg *chip)
 		soc = DIV_ROUND_CLOSEST(chip->sys_soc, 100);
 	}
 
-	qg_dbg(chip, QG_DEBUG_SOC, "last_adj_sys_soc=%d  adj_sys_soc=%d\n",
-					chip->last_adj_ssoc, soc);
+	qg_dbg(chip, QG_DEBUG_SOC, "sys_soc=%d adjusted sys_soc=%d\n",
+					chip->sys_soc, soc);
+
+	soc = qg_process_fvss_soc(chip, soc);
+
 	chip->last_adj_ssoc = soc;
 
 	return soc;
@@ -144,6 +231,8 @@ static void get_next_update_time(struct qpnp_qg *chip)
 	else if (chip->maint_soc > 0 && chip->maint_soc >= chip->recharge_soc)
 		/* if in maintenance mode scale slower */
 		min_delta_soc_interval_ms = qg_maint_soc_update_ms;
+	else if (chip->fvss_active)
+		min_delta_soc_interval_ms = qg_fvss_delta_soc_interval_ms;
 
 	if (!min_delta_soc_interval_ms)
 		min_delta_soc_interval_ms = 1000;	/* 1 second */
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c
index 24e8319..a868f1d 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen4.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c
@@ -2493,8 +2493,8 @@ static void profile_load_work(struct work_struct *work)
 out:
 	if (!chip->esr_fast_calib || is_debug_batt_id(fg)) {
 		/* If it is debug battery, then disable ESR fast calibration */
-		chip->esr_fast_calib = false;
 		fg_gen4_esr_fast_calib_config(chip, false);
+		chip->esr_fast_calib = false;
 	}
 
 	if (chip->dt.multi_profile_load && rc < 0)
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index 6c85dd9..3c8ec13 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -493,6 +493,9 @@ static int qg_process_fifo(struct qpnp_qg *chip, u32 fifo_length)
 		chip->kdata.fifo[j].interval = sample_interval;
 		chip->kdata.fifo[j].count = sample_count;
 
+		chip->last_fifo_v_uv = chip->kdata.fifo[j].v;
+		chip->last_fifo_i_ua = chip->kdata.fifo[j].i;
+
 		qg_dbg(chip, QG_DEBUG_FIFO, "FIFO %d raw_v=%d uV=%d raw_i=%d uA=%d interval=%d count=%d\n",
 					j, fifo_v,
 					chip->kdata.fifo[j].v,
@@ -557,6 +560,9 @@ static int qg_process_accumulator(struct qpnp_qg *chip)
 	if (chip->kdata.fifo_length == MAX_FIFO_LENGTH)
 		chip->kdata.fifo_length = MAX_FIFO_LENGTH - 1;
 
+	chip->last_fifo_v_uv = chip->kdata.fifo[index].v;
+	chip->last_fifo_i_ua = chip->kdata.fifo[index].i;
+
 	if (chip->kdata.fifo_length == 1)	/* Only accumulator data */
 		chip->kdata.seq_no = chip->seq_no++ % U32_MAX;
 
@@ -2101,6 +2107,9 @@ static int qg_psy_get_property(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_POWER_AVG:
 		rc = qg_get_power(chip, &pval->intval, true);
 		break;
+	case POWER_SUPPLY_PROP_SCALE_MODE_EN:
+		pval->intval = chip->fvss_active;
+		break;
 	default:
 		pr_debug("Unsupported property %d\n", psp);
 		break;
@@ -2159,6 +2168,7 @@ static enum power_supply_property qg_psy_props[] = {
 	POWER_SUPPLY_PROP_VOLTAGE_AVG,
 	POWER_SUPPLY_PROP_POWER_AVG,
 	POWER_SUPPLY_PROP_POWER_NOW,
+	POWER_SUPPLY_PROP_SCALE_MODE_EN,
 };
 
 static const struct power_supply_desc qg_psy_desc = {
@@ -2578,6 +2588,12 @@ static ssize_t qg_device_read(struct file *file, char __user *buf, size_t count,
 	struct qpnp_qg *chip = file->private_data;
 	unsigned long data_size = sizeof(chip->kdata);
 
+	if (count < data_size) {
+		pr_err("Invalid read count %zu, expected at least %lu\n",
+							count, data_size);
+		return -EINVAL;
+	}
+
 	/* non-blocking access, return */
 	if (!chip->data_ready && (file->f_flags & O_NONBLOCK))
 		return 0;
@@ -3812,6 +3828,7 @@ static int qg_parse_cl_dt(struct qpnp_qg *chip)
 #define ESR_CHG_MIN_IBAT_UA		(-450000)
 #define DEFAULT_SLEEP_TIME_SECS		1800 /* 30 mins */
 #define DEFAULT_SYS_MIN_VOLT_MV		2800
+#define DEFAULT_FVSS_VBAT_MV		3500
 static int qg_parse_dt(struct qpnp_qg *chip)
 {
 	int rc = 0;
@@ -4043,6 +4060,18 @@ static int qg_parse_dt(struct qpnp_qg *chip)
 	else
 		chip->dt.min_sleep_time_secs = temp;
 
+	if (of_property_read_bool(node, "qcom,fvss-enable")) {
+
+		chip->dt.fvss_enable = true;
+
+		rc = of_property_read_u32(node,
+				"qcom,fvss-vbatt-mv", &temp);
+		if (rc < 0)
+			chip->dt.fvss_vbat_mv = DEFAULT_FVSS_VBAT_MV;
+		else
+			chip->dt.fvss_vbat_mv = temp;
+	}
+
 	qg_dbg(chip, QG_DEBUG_PON, "DT: vbatt_empty_mv=%dmV vbatt_low_mv=%dmV delta_soc=%d ext-sns=%d\n",
 			chip->dt.vbatt_empty_mv, chip->dt.vbatt_low_mv,
 			chip->dt.delta_soc, chip->dt.qg_ext_sense);
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 212d99d..4d851df 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -313,7 +313,6 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
 	export->pwm = pwm;
 	mutex_init(&export->lock);
 
-	export->child.class = parent->class;
 	export->child.release = pwm_export_release;
 	export->child.parent = parent;
 	export->child.devt = MKDEV(0, 0);
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index d859315..c996be2 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -1607,9 +1607,7 @@ static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
 static void qcom_glink_rpdev_release(struct device *dev)
 {
 	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
-	struct glink_channel *channel = to_glink_channel(rpdev->ept);
 
-	channel->rpdev = NULL;
 	kfree(rpdev);
 }
 
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 4ac4a73..4b7cc8d 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1569,13 +1569,13 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 		rc = qdio_kick_outbound_q(q, phys_aob);
 	} else if (need_siga_sync(q)) {
 		rc = qdio_siga_sync_q(q);
+	} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
+		   get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
+		   state == SLSB_CU_OUTPUT_PRIMED) {
+		/* The previous buffer is not processed yet, tack on. */
+		qperf_inc(q, fast_requeue);
 	} else {
-		/* try to fast requeue buffers */
-		get_buf_state(q, prev_buf(bufnr), &state, 0);
-		if (state != SLSB_CU_OUTPUT_PRIMED)
-			rc = qdio_kick_outbound_q(q, 0);
-		else
-			qperf_inc(q, fast_requeue);
+		rc = qdio_kick_outbound_q(q, 0);
 	}
 
 	/* in case of SIGA errors we must process the error immediately */
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 70a006ba..4fe06ff 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -89,8 +89,10 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
 				  sizeof(*pa->pa_iova_pfn) +
 				  sizeof(*pa->pa_pfn),
 				  GFP_KERNEL);
-	if (unlikely(!pa->pa_iova_pfn))
+	if (unlikely(!pa->pa_iova_pfn)) {
+		pa->pa_nr = 0;
 		return -ENOMEM;
+	}
 	pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
 
 	pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index d1154ba..9c21938 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -54,6 +54,7 @@
 #define ALUA_FAILOVER_TIMEOUT		60
 #define ALUA_FAILOVER_RETRIES		5
 #define ALUA_RTPG_DELAY_MSECS		5
+#define ALUA_RTPG_RETRY_DELAY		2
 
 /* device handler flags */
 #define ALUA_OPTIMIZE_STPG		0x01
@@ -696,7 +697,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
 	case SCSI_ACCESS_STATE_TRANSITIONING:
 		if (time_before(jiffies, pg->expiry)) {
 			/* State transition, retry */
-			pg->interval = 2;
+			pg->interval = ALUA_RTPG_RETRY_DELAY;
 			err = SCSI_DH_RETRY;
 		} else {
 			struct alua_dh_data *h;
@@ -821,6 +822,8 @@ static void alua_rtpg_work(struct work_struct *work)
 				spin_lock_irqsave(&pg->lock, flags);
 				pg->flags &= ~ALUA_PG_RUNNING;
 				pg->flags |= ALUA_PG_RUN_RTPG;
+				if (!pg->interval)
+					pg->interval = ALUA_RTPG_RETRY_DELAY;
 				spin_unlock_irqrestore(&pg->lock, flags);
 				queue_delayed_work(kaluad_wq, &pg->rtpg_work,
 						   pg->interval * HZ);
@@ -832,6 +835,8 @@ static void alua_rtpg_work(struct work_struct *work)
 		spin_lock_irqsave(&pg->lock, flags);
 		if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
 			pg->flags &= ~ALUA_PG_RUNNING;
+			if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
+				pg->interval = ALUA_RTPG_RETRY_DELAY;
 			pg->flags |= ALUA_PG_RUN_RTPG;
 			spin_unlock_irqrestore(&pg->lock, flags);
 			queue_delayed_work(kaluad_wq, &pg->rtpg_work,
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c43eccd..f570b8c 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2320,6 +2320,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
 	case IOACCEL2_SERV_RESPONSE_COMPLETE:
 		switch (c2->error_data.status) {
 		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
+			if (cmd)
+				cmd->result = 0;
 			break;
 		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
 			cmd->result |= SAM_STAT_CHECK_CONDITION;
@@ -2479,8 +2481,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
 
 	/* check for good status */
 	if (likely(c2->error_data.serv_response == 0 &&
-			c2->error_data.status == 0))
+			c2->error_data.status == 0)) {
+		cmd->result = 0;
 		return hpsa_cmd_free_and_done(h, c, cmd);
+	}
 
 	/*
 	 * Any RAID offload error results in retry which will use
@@ -5618,6 +5622,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 	c = cmd_tagged_alloc(h, cmd);
 
 	/*
+	 * This is necessary because the SML doesn't zero out this field during
+	 * error recovery.
+	 */
+	cmd->result = 0;
+
+	/*
 	 * Call alternate submit routine for I/O accelerated commands.
 	 * Retries always go down the normal I/O path.
 	 */
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index b64ca97..71d53bb 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4874,8 +4874,8 @@ static int ibmvfc_remove(struct vio_dev *vdev)
 
 	spin_lock_irqsave(vhost->host->host_lock, flags);
 	ibmvfc_purge_requests(vhost, DID_ERROR);
-	ibmvfc_free_event_pool(vhost);
 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+	ibmvfc_free_event_pool(vhost);
 
 	ibmvfc_free_mem(vhost);
 	spin_lock(&ibmvfc_driver_lock);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index e0c8722..806ceab 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3025,6 +3025,7 @@ megasas_fw_crash_buffer_show(struct device *cdev,
 	u32 size;
 	unsigned long buff_addr;
 	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
+	unsigned long chunk_left_bytes;
 	unsigned long src_addr;
 	unsigned long flags;
 	u32 buff_offset;
@@ -3050,6 +3051,8 @@ megasas_fw_crash_buffer_show(struct device *cdev,
 	}
 
 	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
+	chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
+	size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
 	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
 
 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
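
The added clamp keeps a sysfs read from running past the end of the DMA chunk that contains buff_offset. A small self-contained model of the arithmetic, with illustrative chunk and page sizes:

#include <stdio.h>

#define CRASH_DMA_BUF_SIZE	(1024UL * 1024UL)	/* illustrative 1 MB */
#define PAGE_SIZE		4096UL

/* Never let a read that starts at buff_offset cross the end of its DMA
 * chunk; then cap at one page, as the driver already did. */
static unsigned long copy_len(unsigned long total, unsigned long buff_offset)
{
	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
	unsigned long size = total - buff_offset;
	unsigned long chunk_left = dmachunk - (buff_offset % dmachunk);

	size = (size > chunk_left) ? chunk_left : size;
	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
	return size;
}

int main(void)
{
	/* 100 bytes before a chunk boundary: at most 100 bytes may be read */
	printf("%lu\n", copy_len(4 * CRASH_DMA_BUF_SIZE,
				 CRASH_DMA_BUF_SIZE - 100));	/* 100 */
	return 0;
}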
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f84f9bf..ddce32f 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4732,7 +4732,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
 		ql_log(ql_log_warn, vha, 0xd049,
 		    "Failed to allocate ct_sns request.\n");
 		kfree(fcport);
-		fcport = NULL;
+		return NULL;
 	}
 	INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
 	INIT_LIST_HEAD(&fcport->gnl_entry);
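
Before this fix, the error path set fcport to NULL and then fell through to INIT_WORK(&fcport->del_work, ...), a guaranteed NULL dereference. A minimal sketch of the corrected shape, with hypothetical names:

#include <stdlib.h>

struct fcport {
	int dummy;	/* placeholder contents */
};

/* On a partial allocation failure, free what was allocated and return
 * NULL immediately, instead of clearing the pointer and falling through
 * to code that dereferences it (the pre-fix behavior). */
static struct fcport *alloc_fcport(int fail_ct_sns)
{
	struct fcport *f = calloc(1, sizeof(*f));

	if (!f)
		return NULL;
	if (fail_ct_sns) {
		free(f);
		return NULL;	/* was: f = NULL; then fell through */
	}
	return f;
}

int main(void)
{
	return alloc_fcport(1) == NULL ? 0 : 1;
}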
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index f7b612f..495ac44 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1806,7 +1806,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 		ret = ufshcd_uic_hibern8_enter(hba);
 		if (ret)
 			/* link will be bad state so no need to scale_up_gear */
-			return ret;
+			goto clk_scaling_unprepare;
 		ufshcd_custom_cmd_log(hba, "Hibern8-entered");
 	}
 
@@ -1819,7 +1819,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 		ret = ufshcd_uic_hibern8_exit(hba);
 		if (ret)
 			/* link will be bad state so no need to scale_up_gear */
-			return ret;
+			goto clk_scaling_unprepare;
 		ufshcd_custom_cmd_log(hba, "Hibern8-Exited");
 	}
 
@@ -7075,8 +7075,8 @@ static void ufshcd_err_handler(struct work_struct *work)
 
 	/*
 	 * if host reset is required then skip clearing the pending
-	 * transfers forcefully because they will automatically get
-	 * cleared after link startup.
+	 * transfers forcefully because they will get cleared during
+	 * host reset and restore.
 	 */
 	if (needs_reset)
 		goto skip_pending_xfer_clear;
@@ -7891,9 +7891,15 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
 	int err;
 	unsigned long flags;
 
-	/* Reset the host controller */
+	/*
+	 * Stop the host controller and complete the requests
+	 * cleared by h/w
+	 */
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	ufshcd_hba_stop(hba, false);
+	hba->silence_err_logs = true;
+	ufshcd_complete_requests(hba);
+	hba->silence_err_logs = false;
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	/* scale up clocks to max frequency before full reinitialization */
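
Both hunks replace early returns with a jump to the clk_scaling_unprepare label so the earlier prepare step is always unwound. A small standalone illustration of the goto-unwind idiom, with stand-in function names:

#include <stdio.h>

static int prepare(void) { return 0; }
static void unprepare(void) { puts("unprepared"); }
static int hibern8_enter(void) { return -1; }	/* simulate a failure */

/* Every exit after the prepare step funnels through the unwind label,
 * so no error path leaks the prepared state. */
static int devfreq_scale(void)
{
	int ret = prepare();

	if (ret)
		return ret;

	ret = hibern8_enter();
	if (ret)
		goto out_unprepare;	/* was: return ret */

	/* ... change gear, exit hibern8 ... */

out_unprepare:
	unprepare();
	return ret;
}

int main(void)
{
	return devfreq_scale() ? 1 : 0;
}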
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index 93e8457..0e754ff 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -2036,10 +2036,6 @@ static int ngd_slim_remove(struct platform_device *pdev)
 	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
 
 	ngd_slim_enable(dev, false);
-	if (!IS_ERR_OR_NULL(dev->iommu_desc.iommu_map)) {
-		__depr_arm_iommu_detach_device(dev->iommu_desc.cb_dev);
-		__depr_arm_iommu_release_mapping(dev->iommu_desc.iommu_map);
-	}
 	if (dev->sysfs_created)
 		sysfs_remove_file(&dev->dev->kobj,
 				&dev_attr_debug_mask.attr);
diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h
index 5baabc3..cd5b1ca 100644
--- a/drivers/slimbus/slim-msm.h
+++ b/drivers/slimbus/slim-msm.h
@@ -257,7 +257,6 @@ struct msm_slim_bulk_wr {
 
 struct msm_slim_iommu {
 	struct device			*cb_dev;
-	struct dma_iommu_mapping	*iommu_map;
 	bool				s1_bypass;
 };
 
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 1a36bc8..473e29d 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -24,6 +24,16 @@
 	  helps reduce power consumption during idle mode of the system.
 	  If unsure, say N
 
+config BUG_ON_HW_MEM_ONLINE_FAIL
+	bool "Trigger a BUG when HW memory online fails"
+	depends on QCOM_MEM_OFFLINE
+	help
+	  Select this option if the kernel should BUG when the hardware
+	  onlining of memory hotplug blocks fails. This helps to catch
+	  online failures much more quickly and avoids the later side
+	  effects of such failures.
+	  If unsure, say N
+
 config QCOM_GENI_SE
 	tristate "QCOM GENI Serial Engine Driver"
 	depends on ARCH_QCOM || COMPILE_TEST
@@ -197,6 +207,8 @@
 	  status indication and disables flows while grant size is reached.
 	  If unsure, or if not using burst mode flow control, say 'N'.
 
+source "drivers/soc/qcom/rmnet_ctl/Kconfig"
+
 config QCOM_QMI_POWER_COLLAPSE
 	bool "Enable power save features"
 	depends on QCOM_QMI_RMNET
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 64024f4..ef36b9c 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -8,7 +8,7 @@
 obj-$(CONFIG_QCOM_QMI_HELPERS)	+= qmi_helpers.o
 qmi_helpers-y	+= qmi_encdec.o qmi_interface.o
 obj-$(CONFIG_QCOM_QMI_RMNET)	+= qmi_rmnet.o
-obj-$(CONFIG_QCOM_QMI_DFC)	+= dfc_qmi.o
+obj-$(CONFIG_QCOM_QMI_DFC)	+= dfc_qmi.o dfc_qmap.o
 obj-$(CONFIG_QCOM_QMI_POWER_COLLAPSE) += wda_qmi.o
 obj-$(CONFIG_QCOM_RMTFS_MEM)	+= rmtfs_mem.o
 obj-$(CONFIG_QCOM_RPMH)		+= qcom_rpmh.o
@@ -86,3 +86,4 @@
 obj-$(CONFIG_QCOM_CDSP_RM) += cdsprm.o
 obj-$(CONFIG_ICNSS) += icnss.o
 obj-$(CONFIG_ICNSS_QMI) += icnss_qmi.o wlan_firmware_service_v01.o
+obj-$(CONFIG_RMNET_CTL) += rmnet_ctl/
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index d1241fe5..eb0c41c 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -564,13 +564,15 @@ static int dcc_valid_list(struct dcc_drvdata *drvdata, int curr_list)
 		return -EINVAL;
 
 	if (drvdata->enable[curr_list]) {
-		dev_err(drvdata->dev, "DCC is already enabled\n");
+		dev_err(drvdata->dev, "List %d is already enabled\n",
+				curr_list);
 		return -EINVAL;
 	}
 
 	lock_reg = dcc_readl(drvdata, DCC_LL_LOCK(curr_list));
 	if (lock_reg & 0x1) {
-		dev_err(drvdata->dev, "DCC is already enabled\n");
+		dev_err(drvdata->dev, "List %d is already locked\n",
+				curr_list);
 		return -EINVAL;
 	}
 
@@ -578,6 +580,21 @@ static int dcc_valid_list(struct dcc_drvdata *drvdata, int curr_list)
 	return 0;
 }
 
+static bool is_dcc_enabled(struct dcc_drvdata *drvdata)
+{
+	bool dcc_enable = false;
+	int list;
+
+	for (list = 0; list < DCC_MAX_LINK_LIST; list++) {
+		if (drvdata->enable[list]) {
+			dcc_enable = true;
+			break;
+		}
+	}
+
+	return dcc_enable;
+}
+
 static int dcc_enable(struct dcc_drvdata *drvdata)
 {
 	int ret = 0;
@@ -586,7 +603,9 @@ static int dcc_enable(struct dcc_drvdata *drvdata)
 
 	mutex_lock(&drvdata->mutex);
 
-	memset_io(drvdata->ram_base, 0xDE, drvdata->ram_size);
+	if (!is_dcc_enabled(drvdata)) {
+		memset_io(drvdata->ram_base, 0xDE, drvdata->ram_size);
+	}
 
 	for (list = 0; list < DCC_MAX_LINK_LIST; list++) {
 
@@ -667,21 +686,6 @@ static void dcc_disable(struct dcc_drvdata *drvdata)
 	mutex_unlock(&drvdata->mutex);
 }
 
-static bool is_dcc_enabled(struct dcc_drvdata *drvdata)
-{
-	bool dcc_enable = false;
-	int list;
-
-	for (list = 0; list < DCC_MAX_LINK_LIST; list++) {
-		if (drvdata->enable[list]) {
-			dcc_enable = true;
-			break;
-		}
-	}
-
-	return dcc_enable;
-}
-
 static ssize_t curr_list_show(struct device *dev,
 			       struct device_attribute *attr, char *buf)
 {
diff --git a/drivers/soc/qcom/ddr_stats.c b/drivers/soc/qcom/ddr_stats.c
index cb10342..49b0209 100644
--- a/drivers/soc/qcom/ddr_stats.c
+++ b/drivers/soc/qcom/ddr_stats.c
@@ -18,9 +18,10 @@
 #include <linux/uaccess.h>
 #include <asm/arch_timer.h>
 
+#include <clocksource/arm_arch_timer.h>
+
 #define MAGIC_KEY1		0xA1157A75
 #define MAX_NUM_MODES		0x14
-#define MSM_ARCH_TIMER_FREQ	19200000
 
 #define GET_PDATA_OF_ATTR(attr) \
 	(container_of(attr, struct ddr_stats_kobj_attr, ka)->pd)
@@ -48,10 +49,9 @@ struct ddr_stats_kobj_attr {
 	struct ddr_stats_platform_data *pd;
 };
 
-static inline u64 get_time_in_msec(u64 counter)
+static u64 get_time_in_msec(u64 counter)
 {
-	do_div(counter, MSM_ARCH_TIMER_FREQ);
-	counter *= MSEC_PER_SEC;
+	do_div(counter, (arch_timer_get_rate() / MSEC_PER_SEC));
 	return counter;
 }
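
Two things change here: the hardcoded 19.2 MHz constant is replaced by arch_timer_get_rate(), and dividing by ticks-per-millisecond keeps millisecond precision instead of flooring to whole seconds first. A standalone model of the conversion, where 19200000 merely stands in for the reported rate:

#include <stdio.h>
#include <stdint.h>

#define MSEC_PER_SEC 1000ULL

/* ticks / (rate / 1000) yields milliseconds directly, with no stale
 * platform constant baked in. */
static uint64_t ticks_to_msec(uint64_t ticks, uint64_t rate_hz)
{
	return ticks / (rate_hz / MSEC_PER_SEC);
}

int main(void)
{
	/* a 19.2 MHz timer ticks 19200 times per millisecond */
	printf("%llu\n", (unsigned long long)
	       ticks_to_msec(19200000ULL, 19200000ULL));	/* 1000 */
	return 0;
}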
 
diff --git a/drivers/soc/qcom/dfc_defs.h b/drivers/soc/qcom/dfc_defs.h
new file mode 100644
index 0000000..7553707
--- /dev/null
+++ b/drivers/soc/qcom/dfc_defs.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DFC_DEFS_H
+#define _DFC_DEFS_H
+
+#include <linux/soc/qcom/qmi.h>
+#include "qmi_rmnet_i.h"
+
+#define DFC_ACK_TYPE_DISABLE 1
+#define DFC_ACK_TYPE_THRESHOLD 2
+
+#define DFC_MASK_TCP_BIDIR 0x1
+#define DFC_MASK_RAT_SWITCH 0x2
+#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
+#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH)
+
+#define DFC_MAX_QOS_ID_V01 2
+
+struct dfc_qmi_data {
+	void *rmnet_port;
+	struct workqueue_struct *dfc_wq;
+	struct work_struct svc_arrive;
+	struct qmi_handle handle;
+	struct sockaddr_qrtr ssctl;
+	struct svc_info svc;
+	struct work_struct qmi_ind_work;
+	struct list_head qmi_ind_q;
+	spinlock_t qmi_ind_lock;
+	int index;
+	int restart_state;
+};
+
+enum dfc_ip_type_enum_v01 {
+	DFC_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	DFC_IPV4_TYPE_V01 = 0x4,
+	DFC_IPV6_TYPE_V01 = 0x6,
+	DFC_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+};
+
+struct dfc_qos_id_type_v01 {
+	u32 qos_id;
+	enum dfc_ip_type_enum_v01 ip_type;
+};
+
+struct dfc_flow_status_info_type_v01 {
+	u8 subs_id;
+	u8 mux_id;
+	u8 bearer_id;
+	u32 num_bytes;
+	u16 seq_num;
+	u8 qos_ids_len;
+	struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01];
+};
+
+struct dfc_ancillary_info_type_v01 {
+	u8 subs_id;
+	u8 mux_id;
+	u8 bearer_id;
+	u32 reserved;
+};
+
+struct dfc_flow_status_ind_msg_v01 {
+	u8 flow_status_valid;
+	u8 flow_status_len;
+	struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
+	u8 eod_ack_reqd_valid;
+	u8 eod_ack_reqd;
+	u8 ancillary_info_valid;
+	u8 ancillary_info_len;
+	struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
+};
+
+struct dfc_bearer_info_type_v01 {
+	u8 subs_id;
+	u8 mux_id;
+	u8 bearer_id;
+	enum dfc_ip_type_enum_v01 ip_type;
+};
+
+struct dfc_tx_link_status_ind_msg_v01 {
+	u8 tx_status;
+	u8 bearer_info_valid;
+	u8 bearer_info_len;
+	struct dfc_bearer_info_type_v01 bearer_info[DFC_MAX_BEARERS_V01];
+};
+
+void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
+			       struct dfc_flow_status_ind_msg_v01 *ind);
+
+void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
+				   struct dfc_tx_link_status_ind_msg_v01 *ind);
+
+#endif /* _DFC_DEFS_H */
diff --git a/drivers/soc/qcom/dfc_qmap.c b/drivers/soc/qcom/dfc_qmap.c
new file mode 100644
index 0000000..a4b2095
--- /dev/null
+++ b/drivers/soc/qcom/dfc_qmap.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <net/pkt_sched.h>
+#include <soc/qcom/rmnet_qmi.h>
+#include <soc/qcom/qmi_rmnet.h>
+#include <trace/events/dfc.h>
+#include <soc/qcom/rmnet_ctl.h>
+#include "dfc_defs.h"
+
+#define QMAP_DFC_VER		1
+
+#define QMAP_CMD_DONE		-1
+
+#define QMAP_CMD_REQUEST	0
+#define QMAP_CMD_ACK		1
+#define QMAP_CMD_UNSUPPORTED	2
+#define QMAP_CMD_INVALID	3
+
+#define QMAP_DFC_CONFIG		10
+#define QMAP_DFC_IND		11
+#define QMAP_DFC_QUERY		12
+#define QMAP_DFC_END_MARKER	13
+
+struct qmap_hdr {
+	u8	cd_pad;
+	u8	mux_id;
+	__be16	pkt_len;
+} __aligned(1);
+
+#define QMAP_HDR_LEN sizeof(struct qmap_hdr)
+
+struct qmap_cmd_hdr {
+	u8	pad_len:6;
+	u8	reserved_bit:1;
+	u8	cd_bit:1;
+	u8	mux_id;
+	__be16	pkt_len;
+	u8	cmd_name;
+	u8	cmd_type:2;
+	u8	reserved:6;
+	u16	reserved2;
+	__be32	tx_id;
+} __aligned(1);
+
+struct qmap_dfc_config {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			cmd_id;
+	u8			reserved;
+	u8			tx_info:1;
+	u8			reserved2:7;
+	__be32			ep_type;
+	__be32			iface_id;
+	u32			reserved3;
+} __aligned(1);
+
+struct qmap_dfc_ind {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			reserved;
+	__be16			seq_num;
+	u8			reserved2;
+	u8			tx_info_valid:1;
+	u8			tx_info:1;
+	u8			reserved3:6;
+	u8			bearer_id;
+	u8			tcp_bidir:1;
+	u8			bearer_status:3;
+	u8			reserved4:4;
+	__be32			grant;
+	u32			reserved5;
+	u32			reserved6;
+} __aligned(1);
+
+struct qmap_dfc_query {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			reserved;
+	u8			bearer_id;
+	u8			reserved2;
+	u32			reserved3;
+} __aligned(1);
+
+struct qmap_dfc_query_resp {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			bearer_id;
+	u8			tcp_bidir:1;
+	u8			reserved:7;
+	u8			invalid:1;
+	u8			reserved2:7;
+	__be32			grant;
+	u32			reserved3;
+	u32			reserved4;
+} __aligned(1);
+
+struct qmap_dfc_end_marker_req {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			reserved;
+	u8			bearer_id;
+	u8			reserved2;
+	u16			reserved3;
+	__be16			seq_num;
+	u32			reserved4;
+} __aligned(1);
+
+struct qmap_dfc_end_marker_cnf {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			reserved;
+	u8			bearer_id;
+	u8			reserved2;
+	u16			reserved3;
+	__be16			seq_num;
+	u32			reserved4;
+} __aligned(1);
+
+static struct dfc_flow_status_ind_msg_v01 qmap_flow_ind;
+static struct dfc_tx_link_status_ind_msg_v01 qmap_tx_ind;
+static struct dfc_qmi_data __rcu *qmap_dfc_data;
+static atomic_t qmap_txid;
+static void *rmnet_ctl_handle;
+
+static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
+					 u8 bearer_id, u16 seq, u32 tx_id);
+
+static void dfc_qmap_send_cmd(struct sk_buff *skb)
+{
+	trace_dfc_qmap(skb->data, skb->len, false);
+
+	if (rmnet_ctl_send_client(rmnet_ctl_handle, skb)) {
+		pr_err("Failed to send to rmnet ctl\n");
+		kfree_skb(skb);
+	}
+}
+
+static void dfc_qmap_send_inband_ack(struct dfc_qmi_data *dfc,
+				     struct sk_buff *skb)
+{
+	struct qmap_cmd_hdr *cmd;
+
+	cmd = (struct qmap_cmd_hdr *)skb->data;
+
+	skb->protocol = htons(ETH_P_MAP);
+	skb->dev = rmnet_get_real_dev(dfc->rmnet_port);
+
+	trace_dfc_qmap(skb->data, skb->len, false);
+	dev_queue_xmit(skb);
+}
+
+static int dfc_qmap_handle_ind(struct dfc_qmi_data *dfc,
+			       struct sk_buff *skb)
+{
+	struct qmap_dfc_ind *cmd;
+
+	if (skb->len < sizeof(struct qmap_dfc_ind))
+		return QMAP_CMD_INVALID;
+
+	cmd = (struct qmap_dfc_ind *)skb->data;
+
+	if (cmd->tx_info_valid) {
+		memset(&qmap_tx_ind, 0, sizeof(qmap_tx_ind));
+		qmap_tx_ind.tx_status = cmd->tx_info;
+		qmap_tx_ind.bearer_info_valid = 1;
+		qmap_tx_ind.bearer_info_len = 1;
+		qmap_tx_ind.bearer_info[0].mux_id = cmd->hdr.mux_id;
+		qmap_tx_ind.bearer_info[0].bearer_id = cmd->bearer_id;
+
+		dfc_handle_tx_link_status_ind(dfc, &qmap_tx_ind);
+
+		/* Ignore grant since it is always 0 */
+		goto done;
+	}
+
+	memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
+	qmap_flow_ind.flow_status_valid = 1;
+	qmap_flow_ind.flow_status_len = 1;
+	qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
+	qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
+	qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
+	qmap_flow_ind.flow_status[0].seq_num = ntohs(cmd->seq_num);
+
+	if (cmd->tcp_bidir) {
+		qmap_flow_ind.ancillary_info_valid = 1;
+		qmap_flow_ind.ancillary_info_len = 1;
+		qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
+		qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
+		qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
+	}
+
+	dfc_do_burst_flow_control(dfc, &qmap_flow_ind);
+
+done:
+	return QMAP_CMD_ACK;
+}
+
+static int dfc_qmap_handle_query_resp(struct dfc_qmi_data *dfc,
+				      struct sk_buff *skb)
+{
+	struct qmap_dfc_query_resp *cmd;
+
+	if (skb->len < sizeof(struct qmap_dfc_query_resp))
+		return QMAP_CMD_DONE;
+
+	cmd = (struct qmap_dfc_query_resp *)skb->data;
+
+	if (cmd->invalid)
+		return QMAP_CMD_DONE;
+
+	memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
+	qmap_flow_ind.flow_status_valid = 1;
+	qmap_flow_ind.flow_status_len = 1;
+
+	qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
+	qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
+	qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
+	qmap_flow_ind.flow_status[0].seq_num = 0xFFFF;
+
+	if (cmd->tcp_bidir) {
+		qmap_flow_ind.ancillary_info_valid = 1;
+		qmap_flow_ind.ancillary_info_len = 1;
+		qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
+		qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
+		qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
+	}
+
+	dfc_do_burst_flow_control(dfc, &qmap_flow_ind);
+
+	return QMAP_CMD_DONE;
+}
+
+static void dfc_qmap_set_end_marker(struct dfc_qmi_data *dfc, u8 mux_id,
+				    u8 bearer_id, u16 seq_num, u32 tx_id)
+{
+	struct net_device *dev;
+	struct qos_info *qos;
+	struct rmnet_bearer_map *bearer;
+
+	dev = rmnet_get_rmnet_dev(dfc->rmnet_port, mux_id);
+	if (!dev)
+		return;
+
+	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
+	if (!qos)
+		return;
+
+	spin_lock_bh(&qos->qos_lock);
+
+	bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
+
+	if (bearer && bearer->last_seq == seq_num && bearer->grant_size) {
+		bearer->ack_req = 1;
+		bearer->ack_txid = tx_id;
+	} else {
+		dfc_qmap_send_end_marker_cnf(qos, bearer_id, seq_num, tx_id);
+	}
+
+	spin_unlock_bh(&qos->qos_lock);
+}
+
+static int dfc_qmap_handle_end_marker_req(struct dfc_qmi_data *dfc,
+					  struct sk_buff *skb)
+{
+	struct qmap_dfc_end_marker_req *cmd;
+
+	if (skb->len < sizeof(struct qmap_dfc_end_marker_req))
+		return QMAP_CMD_INVALID;
+
+	cmd = (struct qmap_dfc_end_marker_req *)skb->data;
+
+	dfc_qmap_set_end_marker(dfc, cmd->hdr.mux_id, cmd->bearer_id,
+				ntohs(cmd->seq_num), ntohl(cmd->hdr.tx_id));
+
+	return QMAP_CMD_DONE;
+}
+
+static void dfc_qmap_cmd_handler(struct sk_buff *skb)
+{
+	struct qmap_cmd_hdr *cmd;
+	struct dfc_qmi_data *dfc;
+	int rc = QMAP_CMD_DONE;
+
+	if (!skb)
+		return;
+
+	trace_dfc_qmap(skb->data, skb->len, true);
+
+	if (skb->len < sizeof(struct qmap_cmd_hdr))
+		goto free_skb;
+
+	cmd = (struct qmap_cmd_hdr *)skb->data;
+	if (!cmd->cd_bit || skb->len != ntohs(cmd->pkt_len) + QMAP_HDR_LEN)
+		goto free_skb;
+
+	if (cmd->cmd_name == QMAP_DFC_QUERY) {
+		if (cmd->cmd_type != QMAP_CMD_ACK)
+			goto free_skb;
+	} else if (cmd->cmd_type != QMAP_CMD_REQUEST) {
+		goto free_skb;
+	}
+
+	rcu_read_lock();
+
+	dfc = rcu_dereference(qmap_dfc_data);
+	if (!dfc || READ_ONCE(dfc->restart_state)) {
+		rcu_read_unlock();
+		goto free_skb;
+	}
+
+	switch (cmd->cmd_name) {
+	case QMAP_DFC_IND:
+		rc = dfc_qmap_handle_ind(dfc, skb);
+		qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
+		break;
+
+	case QMAP_DFC_QUERY:
+		rc = dfc_qmap_handle_query_resp(dfc, skb);
+		break;
+
+	case QMAP_DFC_END_MARKER:
+		rc = dfc_qmap_handle_end_marker_req(dfc, skb);
+		break;
+
+	default:
+		rc = QMAP_CMD_UNSUPPORTED;
+	}
+
+	/* Send ack */
+	if (rc != QMAP_CMD_DONE) {
+		cmd->cmd_type = rc;
+		if (cmd->cmd_name == QMAP_DFC_IND)
+			dfc_qmap_send_inband_ack(dfc, skb);
+		else
+			dfc_qmap_send_cmd(skb);
+
+		rcu_read_unlock();
+		return;
+	}
+
+	rcu_read_unlock();
+
+free_skb:
+	kfree_skb(skb);
+}
+
+static void dfc_qmap_send_config(struct dfc_qmi_data *data)
+{
+	struct sk_buff *skb;
+	struct qmap_dfc_config *dfc_config;
+	unsigned int len = sizeof(struct qmap_dfc_config);
+
+	skb = alloc_skb(len, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	skb->protocol = htons(ETH_P_MAP);
+	dfc_config = (struct qmap_dfc_config *)skb_put(skb, len);
+	memset(dfc_config, 0, len);
+
+	dfc_config->hdr.cd_bit = 1;
+	dfc_config->hdr.mux_id = 0;
+	dfc_config->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
+	dfc_config->hdr.cmd_name = QMAP_DFC_CONFIG;
+	dfc_config->hdr.cmd_type = QMAP_CMD_REQUEST;
+	dfc_config->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));
+
+	dfc_config->cmd_ver = QMAP_DFC_VER;
+	dfc_config->cmd_id = QMAP_DFC_IND;
+	dfc_config->tx_info = 1;
+	dfc_config->ep_type = htonl(data->svc.ep_type);
+	dfc_config->iface_id = htonl(data->svc.iface_id);
+
+	dfc_qmap_send_cmd(skb);
+}
+
+static void dfc_qmap_send_query(u8 mux_id, u8 bearer_id)
+{
+	struct sk_buff *skb;
+	struct qmap_dfc_query *dfc_query;
+	unsigned int len = sizeof(struct qmap_dfc_query);
+
+	skb = alloc_skb(len, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	skb->protocol = htons(ETH_P_MAP);
+	dfc_query = (struct qmap_dfc_query *)skb_put(skb, len);
+	memset(dfc_query, 0, len);
+
+	dfc_query->hdr.cd_bit = 1;
+	dfc_query->hdr.mux_id = mux_id;
+	dfc_query->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
+	dfc_query->hdr.cmd_name = QMAP_DFC_QUERY;
+	dfc_query->hdr.cmd_type = QMAP_CMD_REQUEST;
+	dfc_query->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));
+
+	dfc_query->cmd_ver = QMAP_DFC_VER;
+	dfc_query->bearer_id = bearer_id;
+
+	dfc_qmap_send_cmd(skb);
+}
+
+static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
+					 u8 bearer_id, u16 seq, u32 tx_id)
+{
+	struct sk_buff *skb;
+	struct qmap_dfc_end_marker_cnf *em_cnf;
+	unsigned int len = sizeof(struct qmap_dfc_end_marker_cnf);
+
+	skb = alloc_skb(len, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	em_cnf = (struct qmap_dfc_end_marker_cnf *)skb_put(skb, len);
+	memset(em_cnf, 0, len);
+
+	em_cnf->hdr.cd_bit = 1;
+	em_cnf->hdr.mux_id = qos->mux_id;
+	em_cnf->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
+	em_cnf->hdr.cmd_name = QMAP_DFC_END_MARKER;
+	em_cnf->hdr.cmd_type = QMAP_CMD_ACK;
+	em_cnf->hdr.tx_id = htonl(tx_id);
+
+	em_cnf->cmd_ver = QMAP_DFC_VER;
+	em_cnf->bearer_id = bearer_id;
+	em_cnf->seq_num = htons(seq);
+
+	skb->protocol = htons(ETH_P_MAP);
+	skb->dev = qos->real_dev;
+
+	/* This cmd needs to be sent in-band */
+	trace_dfc_qmap(skb->data, skb->len, false);
+	rmnet_map_tx_qmap_cmd(skb);
+}
+
+void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type)
+{
+	struct rmnet_bearer_map *bearer;
+
+	if (type == DFC_ACK_TYPE_DISABLE) {
+		bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
+		if (bearer)
+			dfc_qmap_send_end_marker_cnf(qos, bearer_id,
+						     seq, bearer->ack_txid);
+	} else if (type == DFC_ACK_TYPE_THRESHOLD) {
+		dfc_qmap_send_query(qos->mux_id, bearer_id);
+	}
+}
+
+static struct rmnet_ctl_client_hooks cb = {
+	.ctl_dl_client_hook = dfc_qmap_cmd_handler,
+};
+
+int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
+			 struct qmi_info *qmi)
+{
+	struct dfc_qmi_data *data;
+
+	if (!port || !qmi)
+		return -EINVAL;
+
+	data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->rmnet_port = port;
+	data->index = index;
+	memcpy(&data->svc, psvc, sizeof(data->svc));
+
+	qmi->dfc_clients[index] = (void *)data;
+	rcu_assign_pointer(qmap_dfc_data, data);
+
+	atomic_set(&qmap_txid, 0);
+
+	rmnet_ctl_handle = rmnet_ctl_register_client(&cb);
+	if (!rmnet_ctl_handle)
+		pr_err("Failed to register with rmnet ctl\n");
+
+	trace_dfc_client_state_up(data->index, data->svc.instance,
+				  data->svc.ep_type, data->svc.iface_id);
+
+	pr_info("DFC QMAP init\n");
+
+	dfc_qmap_send_config(data);
+
+	return 0;
+}
+
+void dfc_qmap_client_exit(void *dfc_data)
+{
+	struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;
+
+	if (!data) {
+		pr_err("%s() data is null\n", __func__);
+		return;
+	}
+
+	trace_dfc_client_state_down(data->index, 0);
+
+	rmnet_ctl_unregister_client(rmnet_ctl_handle);
+
+	WRITE_ONCE(data->restart_state, 1);
+	RCU_INIT_POINTER(qmap_dfc_data, NULL);
+	synchronize_rcu();
+
+	kfree(data);
+
+	pr_info("DFC QMAP exit\n");
+}
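
dfc_qmap_cmd_handler() accepts a frame only if the MAP header's command bit is set and the buffer length equals pkt_len plus the 4-byte MAP header. A hedged userspace sketch of that validation; the byte-level struct layout mirrors the little-endian bitfield packing the driver assumes:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define QMAP_HDR_LEN 4	/* bytes of plain MAP header, as in the driver */

/* cd_bit lands in the most significant bit of the first byte on a
 * little-endian CPU. */
struct qmap_cmd_hdr {
	uint8_t  pad_cd;	/* pad_len:6, reserved:1, cd_bit:1 */
	uint8_t  mux_id;
	uint16_t pkt_len;	/* big-endian, excludes the MAP header */
	uint8_t  cmd_name;
	uint8_t  cmd_type;
	uint16_t reserved2;
	uint32_t tx_id;		/* big-endian */
};

static int qmap_cmd_valid(const uint8_t *buf, size_t len)
{
	struct qmap_cmd_hdr hdr;

	if (len < sizeof(hdr))
		return 0;
	memcpy(&hdr, buf, sizeof(hdr));
	if (!(hdr.pad_cd & 0x80))	/* cd_bit must be set */
		return 0;
	return len == (size_t)ntohs(hdr.pkt_len) + QMAP_HDR_LEN;
}

int main(void)
{
	uint8_t buf[16] = { 0x80, 0x00, 0x00, 0x0c, 11 };	/* pkt_len = 12 */

	printf("%d\n", qmap_cmd_valid(buf, sizeof(buf)));	/* 1 */
	return 0;
}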
diff --git a/drivers/soc/qcom/dfc_qmi.c b/drivers/soc/qcom/dfc_qmi.c
index 05a491c..f175881 100644
--- a/drivers/soc/qcom/dfc_qmi.c
+++ b/drivers/soc/qcom/dfc_qmi.c
@@ -3,26 +3,14 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/rtnetlink.h>
 #include <net/pkt_sched.h>
-#include <linux/soc/qcom/qmi.h>
 #include <soc/qcom/rmnet_qmi.h>
 #include <soc/qcom/qmi_rmnet.h>
+#include "dfc_defs.h"
 
-#include "qmi_rmnet_i.h"
 #define CREATE_TRACE_POINTS
 #include <trace/events/dfc.h>
 
-#define DFC_MASK_TCP_BIDIR 0x1
-#define DFC_MASK_RAT_SWITCH 0x2
-#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
-#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH)
-
-#define DFC_MAX_QOS_ID_V01 2
-
-#define DFC_ACK_TYPE_DISABLE 1
-#define DFC_ACK_TYPE_THRESHOLD 2
-
 struct dfc_qmap_header {
 	u8  pad_len:6;
 	u8  reserved_bit:1;
@@ -47,20 +35,6 @@ struct dfc_ack_cmd {
 	u8  bearer_id;
 } __aligned(1);
 
-struct dfc_qmi_data {
-	void *rmnet_port;
-	struct workqueue_struct *dfc_wq;
-	struct work_struct svc_arrive;
-	struct qmi_handle handle;
-	struct sockaddr_qrtr ssctl;
-	struct svc_info svc;
-	struct work_struct qmi_ind_work;
-	struct list_head qmi_ind_q;
-	spinlock_t qmi_ind_lock;
-	int index;
-	int restart_state;
-};
-
 static void dfc_svc_init(struct work_struct *work);
 
 /* **************************************************** */
@@ -106,28 +80,6 @@ struct dfc_indication_register_resp_msg_v01 {
 	struct qmi_response_type_v01 resp;
 };
 
-enum dfc_ip_type_enum_v01 {
-	DFC_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
-	DFC_IPV4_TYPE_V01 = 0x4,
-	DFC_IPV6_TYPE_V01 = 0x6,
-	DFC_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
-};
-
-struct dfc_qos_id_type_v01 {
-	u32 qos_id;
-	enum dfc_ip_type_enum_v01 ip_type;
-};
-
-struct dfc_flow_status_info_type_v01 {
-	u8 subs_id;
-	u8 mux_id;
-	u8 bearer_id;
-	u32 num_bytes;
-	u16 seq_num;
-	u8 qos_ids_len;
-	struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01];
-};
-
 static struct qmi_elem_info dfc_qos_id_type_v01_ei[] = {
 	{
 		.data_type	= QMI_UNSIGNED_4_BYTE,
@@ -241,13 +193,6 @@ static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = {
 	},
 };
 
-struct dfc_ancillary_info_type_v01 {
-	u8 subs_id;
-	u8 mux_id;
-	u8 bearer_id;
-	u32 reserved;
-};
-
 static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = {
 	{
 		.data_type	= QMI_UNSIGNED_1_BYTE,
@@ -300,31 +245,6 @@ static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = {
 	},
 };
 
-struct dfc_flow_status_ind_msg_v01 {
-	u8 flow_status_valid;
-	u8 flow_status_len;
-	struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
-	u8 eod_ack_reqd_valid;
-	u8 eod_ack_reqd;
-	u8 ancillary_info_valid;
-	u8 ancillary_info_len;
-	struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
-};
-
-struct dfc_bearer_info_type_v01 {
-	u8 subs_id;
-	u8 mux_id;
-	u8 bearer_id;
-	enum dfc_ip_type_enum_v01 ip_type;
-};
-
-struct dfc_tx_link_status_ind_msg_v01 {
-	u8 tx_status;
-	u8 bearer_info_valid;
-	u8 bearer_info_len;
-	struct dfc_bearer_info_type_v01 bearer_info[DFC_MAX_BEARERS_V01];
-};
-
 struct dfc_get_flow_status_req_msg_v01 {
 	u8 bearer_id_list_valid;
 	u8 bearer_id_list_len;
@@ -954,6 +874,11 @@ dfc_send_ack(struct net_device *dev, u8 bearer_id, u16 seq, u8 mux_id, u8 type)
 	if (!qos)
 		return;
 
+	if (dfc_qmap) {
+		dfc_qmap_send_ack(qos, bearer_id, seq, type);
+		return;
+	}
+
 	skb = alloc_skb(data_size, GFP_ATOMIC);
 	if (!skb)
 		return;
@@ -1083,6 +1008,11 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
 		    (itm->grant_size > 0 && fc_info->num_bytes == 0))
 			action = true;
 
+		/* This is needed by qmap */
+		if (dfc_qmap && itm->ack_req && !ack_req && itm->grant_size)
+			dfc_qmap_send_ack(qos, itm->bearer_id,
+					  itm->seq, DFC_ACK_TYPE_DISABLE);
+
 		itm->grant_size = fc_info->num_bytes;
 		itm->grant_thresh = qmi_rmnet_grant_per(itm->grant_size);
 		itm->seq = fc_info->seq_num;
@@ -1099,10 +1029,9 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
 	return rc;
 }
 
-static void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
-				      struct dfc_svc_ind *svc_ind)
+void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
+			       struct dfc_flow_status_ind_msg_v01 *ind)
 {
-	struct dfc_flow_status_ind_msg_v01 *ind = &svc_ind->d.dfc_info;
 	struct net_device *dev;
 	struct qos_info *qos;
 	struct dfc_flow_status_info_type_v01 *flow_status;
@@ -1176,13 +1105,17 @@ static void dfc_update_tx_link_status(struct net_device *dev,
 	if (!itm)
 		return;
 
+	/* If no change in tx status, ignore */
+	if (itm->tx_off == !tx_status)
+		return;
+
 	if (itm->grant_size && !tx_status) {
 		itm->grant_size = 0;
 		itm->tcp_bidir = false;
 		dfc_bearer_flow_ctl(dev, itm, qos);
 	} else if (itm->grant_size == 0 && tx_status && !itm->rat_switch) {
 		itm->grant_size = DEFAULT_GRANT;
-		itm->grant_thresh = DEFAULT_GRANT;
+		itm->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
 		itm->seq = 0;
 		itm->ack_req = 0;
 		dfc_bearer_flow_ctl(dev, itm, qos);
@@ -1191,10 +1124,9 @@ static void dfc_update_tx_link_status(struct net_device *dev,
 	itm->tx_off = !tx_status;
 }
 
-static void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
-					  struct dfc_svc_ind *svc_ind)
+void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
+				   struct dfc_tx_link_status_ind_msg_v01 *ind)
 {
-	struct dfc_tx_link_status_ind_msg_v01 *ind = &svc_ind->d.tx_status;
 	struct net_device *dev;
 	struct qos_info *qos;
 	struct dfc_bearer_info_type_v01 *bearer_info;
@@ -1256,10 +1188,12 @@ static void dfc_qmi_ind_work(struct work_struct *work)
 
 		if (!dfc->restart_state) {
 			if (svc_ind->msg_id == QMI_DFC_FLOW_STATUS_IND_V01)
-				dfc_do_burst_flow_control(dfc, svc_ind);
+				dfc_do_burst_flow_control(
+						dfc, &svc_ind->d.dfc_info);
 			else if (svc_ind->msg_id ==
 					QMI_DFC_TX_LINK_STATUS_IND_V01)
-				dfc_handle_tx_link_status_ind(dfc, svc_ind);
+				dfc_handle_tx_link_status_ind(
+						dfc, &svc_ind->d.tx_status);
 		}
 		kfree(svc_ind);
 	} while (1);
@@ -1583,7 +1517,7 @@ void dfc_qmi_query_flow(void *dfc_data)
 	svc_ind->d.dfc_info.flow_status_len = resp->flow_status_len;
 	memcpy(&svc_ind->d.dfc_info.flow_status, resp->flow_status,
 		sizeof(resp->flow_status[0]) * resp->flow_status_len);
-	dfc_do_burst_flow_control(data, svc_ind);
+	dfc_do_burst_flow_control(data, &svc_ind->d.dfc_info);
 
 done:
 	kfree(svc_ind);
diff --git a/drivers/soc/qcom/glink_probe.c b/drivers/soc/qcom/glink_probe.c
index 721d80d..678efeb 100644
--- a/drivers/soc/qcom/glink_probe.c
+++ b/drivers/soc/qcom/glink_probe.c
@@ -187,6 +187,7 @@ static void glink_ssr_init_notify(struct glink_ssr *ssr)
 
 		nb->nb.notifier_call = glink_ssr_ssr_cb;
 		nb->nb.priority = GLINK_SSR_PRIORITY;
+		nb->ssr = ssr;
 
 		handle = subsys_notif_register_notifier(nb->ssr_label, &nb->nb);
 		if (IS_ERR_OR_NULL(handle)) {
@@ -195,7 +196,6 @@ static void glink_ssr_init_notify(struct glink_ssr *ssr)
 			continue;
 		}
 
-		nb->ssr = ssr;
 		nb->ssr_register_handle = handle;
 		list_add_tail(&nb->list, &ssr->notify_list);
 	}
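
The one-line move fixes an initialize-before-publish race: subsys_notif_register_notifier() may invoke the callback immediately, at which point nb->ssr must already be valid. A self-contained model where registration deliberately fires the callback at once:

#include <stdio.h>

struct notif_block {
	void (*cb)(struct notif_block *nb);
	void *priv;	/* must be valid before registration */
};

/* A registrar that may call back during registration, which is exactly
 * why priv has to be assigned first. */
static int register_notifier(struct notif_block *nb)
{
	nb->cb(nb);
	return 0;
}

static void my_cb(struct notif_block *nb)
{
	printf("priv is %s\n", nb->priv ? "set" : "NULL");
}

int main(void)
{
	struct notif_block nb = { .cb = my_cb };
	int ssr_ctx;	/* stand-in for the glink_ssr context */

	nb.priv = &ssr_ctx;		/* the fix: assign before registering */
	return register_notifier(&nb);	/* prints "priv is set" */
}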
diff --git a/drivers/soc/qcom/mem-offline.c b/drivers/soc/qcom/mem-offline.c
index 4b0785a..82f0717 100644
--- a/drivers/soc/qcom/mem-offline.c
+++ b/drivers/soc/qcom/mem-offline.c
@@ -45,6 +45,8 @@ enum memory_states {
 	MAX_STATE,
 };
 
+static enum memory_states *mem_sec_state;
+
 static struct mem_offline_mailbox {
 	struct mbox_client cl;
 	struct mbox_chan *mbox;
@@ -134,6 +136,148 @@ static int aop_send_msg(unsigned long addr, bool online)
 	return (mbox_send_message(mailbox.mbox, &pkt) < 0);
 }
 
+/*
+ * When offline_granule >= memory block size, this returns the number of
+ * sections in an offlineable segment.
+ * When offline_granule < memory block size, it returns sections_per_block.
+ */
+static unsigned long get_rounded_sections_per_segment(void)
+{
+	return max(((offline_granule * SZ_1M) / memory_block_size_bytes()) *
+		     sections_per_block,
+		     (unsigned long)sections_per_block);
+}
+
+static int send_msg(struct memory_notify *mn, bool online, int count)
+{
+	unsigned long segment_size = offline_granule * SZ_1M;
+	unsigned long start, base_sec_nr, sec_nr, sections_per_segment;
+	int ret, idx, i;
+
+	sections_per_segment = get_rounded_sections_per_segment();
+	sec_nr = pfn_to_section_nr(SECTION_ALIGN_DOWN(mn->start_pfn));
+	idx = (sec_nr - start_section_nr) / sections_per_segment;
+	base_sec_nr = start_section_nr + (idx * sections_per_segment);
+	start = section_nr_to_pfn(base_sec_nr);
+
+	for (i = 0; i < count; ++i) {
+		ret = aop_send_msg(__pfn_to_phys(start), online);
+		if (ret) {
+			pr_err("PASR: AOP %s request addr:0x%llx failed\n",
+			       online ? "online" : "offline",
+			       __pfn_to_phys(start));
+			goto undo;
+		}
+
+		start = __phys_to_pfn(__pfn_to_phys(start) + segment_size);
+	}
+
+	return 0;
+undo:
+	start = section_nr_to_pfn(base_sec_nr);
+	while (i-- > 0) {
+		int ret;
+
+		ret = aop_send_msg(__pfn_to_phys(start), !online);
+		if (ret)
+			panic("Failed to completely online/offline a hotpluggable segment. A quasi state of a memblock can cause random system failures.");
+		start = __phys_to_pfn(__pfn_to_phys(start) + segment_size);
+	}
+
+	return ret;
+}
+
+static bool need_to_send_remote_request(struct memory_notify *mn,
+				    enum memory_states request)
+{
+	int i, idx, cur_idx;
+	int base_sec_nr, sec_nr;
+	unsigned long sections_per_segment;
+
+	sections_per_segment = get_rounded_sections_per_segment();
+	sec_nr = pfn_to_section_nr(SECTION_ALIGN_DOWN(mn->start_pfn));
+	idx = (sec_nr - start_section_nr) / sections_per_segment;
+	cur_idx = (sec_nr - start_section_nr) / sections_per_block;
+	base_sec_nr = start_section_nr + (idx * sections_per_segment);
+
+	/*
+	 * For MEMORY_OFFLINE, don't send the request if there are other
+	 * online blocks in the segment.
+	 * For MEMORY_ONLINE, don't send the request if there is already an
+	 * online block in the segment.
+	 */
+	if (request == MEMORY_OFFLINE || request == MEMORY_ONLINE) {
+		for (i = base_sec_nr;
+		     i < (base_sec_nr + sections_per_segment);
+		     i += sections_per_block) {
+			idx = (i - start_section_nr) / sections_per_block;
+			/* current operating block */
+			if (idx == cur_idx)
+				continue;
+			if (mem_sec_state[idx] == MEMORY_ONLINE)
+				goto out;
+		}
+		return true;
+	}
+out:
+	return false;
+}
+
+/*
+ * This returns the number of hotpluggable segments in a memory block.
+ */
+static int get_num_memblock_hotplug_segments(void)
+{
+	unsigned long segment_size = offline_granule * SZ_1M;
+	unsigned long block_size = memory_block_size_bytes();
+
+	if (segment_size < block_size) {
+		if (block_size % segment_size) {
+			pr_warn("PASR is unusable. memory_block_size_bytes should be a multiple of the offline granule size.\n");
+			return 0;
+		}
+		return block_size / segment_size;
+	}
+
+	return 1;
+}
+
+static int mem_change_refresh_state(struct memory_notify *mn,
+				    enum memory_states state)
+{
+	unsigned long start = SECTION_ALIGN_DOWN(mn->start_pfn);
+	unsigned long sec_nr = pfn_to_section_nr(start);
+	bool online = (state == MEMORY_ONLINE);
+	unsigned long idx = (sec_nr - start_section_nr) / sections_per_block;
+	int ret, count;
+
+	if (mem_sec_state[idx] == state) {
+		/* we shouldn't be getting this request */
+		pr_warn("mem-offline: mem%lu block is already in the %s state; ignoring refresh state change request\n",
+				sec_nr, online ? "online" : "offline");
+		return 0;
+	}
+
+	count = get_num_memblock_hotplug_segments();
+	if (!count)
+		return -EINVAL;
+
+	if (!need_to_send_remote_request(mn, state))
+		goto out;
+
+	ret = send_msg(mn, online, count);
+	if (ret) {
+		/* online failures are critical failures */
+		if (online)
+			BUG_ON(IS_ENABLED(CONFIG_BUG_ON_HW_MEM_ONLINE_FAIL));
+		return -EINVAL;
+	}
+out:
+	mem_sec_state[idx] = state;
+	return 0;
+}
+
 static int mem_event_callback(struct notifier_block *self,
 				unsigned long action, void *arg)
 {
@@ -173,9 +317,9 @@ static int mem_event_callback(struct notifier_block *self,
 			   idx) / sections_per_block].fail_count;
 		cur = ktime_get();
 
-		if (aop_send_msg(__pfn_to_phys(start), true))
-			pr_err("PASR: AOP online request addr:0x%llx failed\n",
-			       __pfn_to_phys(start));
+		if (mem_change_refresh_state(mn, MEMORY_ONLINE))
+			return NOTIFY_BAD;
+
 		if (!debug_pagealloc_enabled()) {
 			/* Create kernel page-tables */
 			create_pgtable_mapping(start_addr, end_addr);
@@ -201,9 +345,11 @@ static int mem_event_callback(struct notifier_block *self,
 			/* Clear kernel page-tables */
 			clear_pgtable_mapping(start_addr, end_addr);
 		}
-		if (aop_send_msg(__pfn_to_phys(start), false))
-			pr_err("PASR: AOP offline request addr:0x%llx failed\n",
-			       __pfn_to_phys(start));
+		mem_change_refresh_state(mn, MEMORY_OFFLINE);
+		/*
+		 * Notifying that something went bad at this stage won't
+		 * help since this is the last stage of memory hotplug.
+		 */
 
 		delay = ktime_ms_delta(ktime_get(), cur);
 		record_stat(sec_nr, delay, MEMORY_OFFLINE);
@@ -214,9 +360,7 @@ static int mem_event_callback(struct notifier_block *self,
 	case MEM_CANCEL_ONLINE:
 		pr_info("mem-offline: MEM_CANCEL_ONLINE: start = 0x%llx end = 0x%llx\n",
 				start_addr, end_addr);
-		if (aop_send_msg(__pfn_to_phys(start), false))
-			pr_err("PASR: AOP online request addr:0x%llx failed\n",
-			       __pfn_to_phys(start));
+		mem_change_refresh_state(mn, MEMORY_OFFLINE);
 		break;
 	default:
 		break;
@@ -348,9 +492,6 @@ static struct attribute_group mem_attr_group = {
 
 static int mem_sysfs_init(void)
 {
-	unsigned int total_blks = (end_section_nr - start_section_nr + 1) /
-							sections_per_block;
-
 	if (start_section_nr == end_section_nr)
 		return -EINVAL;
 
@@ -361,11 +502,6 @@ static int mem_sysfs_init(void)
 	if (sysfs_create_group(kobj, &mem_attr_group))
 		kobject_put(kobj);
 
-	mem_info = kzalloc(sizeof(*mem_info) * total_blks * MAX_STATE,
-								GFP_KERNEL);
-	if (!mem_info)
-		return -ENOMEM;
-
 	return 0;
 }
 
@@ -384,8 +520,9 @@ static int mem_parse_dt(struct platform_device *pdev)
 		return -EINVAL;
 	}
 	offline_granule = be32_to_cpup(val);
-	if (!offline_granule && !(offline_granule & (offline_granule - 1)) &&
-			offline_granule * SZ_1M < MIN_MEMORY_BLOCK_SIZE) {
+	if (!offline_granule || (offline_granule & (offline_granule - 1)) ||
+	    ((offline_granule * SZ_1M < MIN_MEMORY_BLOCK_SIZE) &&
+	     (MIN_MEMORY_BLOCK_SIZE % (offline_granule * SZ_1M)))) {
 		pr_err("mem-offine: invalid granule property\n");
 		return -EINVAL;
 	}
@@ -413,7 +550,8 @@ static struct notifier_block hotplug_memory_callback_nb = {
 
 static int mem_offline_driver_probe(struct platform_device *pdev)
 {
-	int ret;
+	unsigned int total_blks;
+	int ret, i;
 
 	ret = mem_parse_dt(pdev);
 	if (ret)
@@ -426,16 +564,46 @@ static int mem_offline_driver_probe(struct platform_device *pdev)
 	if (ret > 0)
 		pr_err("mem-offline: !!ERROR!! Auto onlining some memory blocks failed. System could run with less RAM\n");
 
-	if (mem_sysfs_init())
-		return -ENODEV;
+	total_blks = (end_section_nr - start_section_nr + 1) /
+			sections_per_block;
+	mem_info = kcalloc(total_blks * MAX_STATE, sizeof(*mem_info),
+			   GFP_KERNEL);
+	if (!mem_info)
+		return -ENOMEM;
+
+	mem_sec_state = kcalloc(total_blks, sizeof(*mem_sec_state), GFP_KERNEL);
+	if (!mem_sec_state) {
+		ret = -ENOMEM;
+		goto err_free_mem_info;
+	}
+
+	/* we assume the hardware state of memory blocks is online after boot */
+	for (i = 0; i < total_blks; i++)
+		mem_sec_state[i] = MEMORY_ONLINE;
+
+	if (mem_sysfs_init()) {
+		ret = -ENODEV;
+		goto err_free_mem_sec_state;
+	}
 
 	if (register_hotmemory_notifier(&hotplug_memory_callback_nb)) {
 		pr_err("mem-offline: Registering memory hotplug notifier failed\n");
-		return -ENODEV;
+		ret = -ENODEV;
+		goto err_sysfs_remove_group;
 	}
 	pr_info("mem-offline: Added memory blocks ranging from mem%lu - mem%lu\n",
 			start_section_nr, end_section_nr);
+
 	return 0;
+
+err_sysfs_remove_group:
+	sysfs_remove_group(kobj, &mem_attr_group);
+	kobject_put(kobj);
+err_free_mem_sec_state:
+	kfree(mem_sec_state);
+err_free_mem_info:
+	kfree(mem_info);
+	return ret;
 }
 
 static const struct of_device_id mem_offline_match_table[] = {
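
The segment/block arithmetic above is easy to check in isolation. A standalone model of get_num_memblock_hotplug_segments(), with illustrative sizes:

#include <stdio.h>

#define SZ_1M (1024UL * 1024UL)

/* A memory block holds block_size / segment_size hotplug segments when
 * the granule is smaller than the block, and exactly one otherwise; a
 * block size that is not a multiple of the granule is unusable. */
static int segments_per_block(unsigned long granule_mb,
			      unsigned long block_size)
{
	unsigned long segment_size = granule_mb * SZ_1M;

	if (segment_size < block_size) {
		if (block_size % segment_size)
			return 0;
		return block_size / segment_size;
	}
	return 1;
}

int main(void)
{
	/* a 512 MB granule with 1 GB memory blocks: 2 segments per block */
	printf("%d\n", segments_per_block(512, 1024 * SZ_1M));
	return 0;
}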
diff --git a/drivers/soc/qcom/qmi_rmnet.c b/drivers/soc/qcom/qmi_rmnet.c
index 91c99f2..46732bf 100644
--- a/drivers/soc/qcom/qmi_rmnet.c
+++ b/drivers/soc/qcom/qmi_rmnet.c
@@ -23,11 +23,15 @@
 
 #define FLAG_DFC_MASK 0x000F
 #define FLAG_POWERSAVE_MASK 0x0010
+#define FLAG_QMAP_MASK 0x0020
+
 #define FLAG_TO_MODE(f) ((f) & FLAG_DFC_MASK)
 #define DFC_SUPPORTED_MODE(m) \
 	((m) == DFC_MODE_FLOW_ID || (m) == DFC_MODE_MQ_NUM)
+#define FLAG_TO_QMAP(f) ((f) & FLAG_QMAP_MASK)
 
 int dfc_mode;
+int dfc_qmap;
 #define IS_ANCILLARY(type) ((type) != AF_INET && (type) != AF_INET6)
 
 unsigned int rmnet_wq_frequency __read_mostly = 1000;
@@ -82,7 +86,7 @@ void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi)
 {
 	int i;
 
-	if (!qmi || !DFC_SUPPORTED_MODE(FLAG_TO_MODE(qmi->flag)))
+	if (!qmi)
 		return NULL;
 
 	for (i = 0; i < MAX_CLIENT_NUM; i++) {
@@ -379,18 +383,12 @@ static void qmi_rmnet_query_flows(struct qmi_info *qmi)
 	int i;
 
 	for (i = 0; i < MAX_CLIENT_NUM; i++) {
-		if (qmi->dfc_clients[i])
+		if (qmi->dfc_clients[i] && !dfc_qmap)
 			dfc_qmi_query_flow(qmi->dfc_clients[i]);
 	}
 }
 
 #else
-static inline void
-qmi_rmnet_update_flow_link(struct qmi_info *qmi, struct net_device *dev,
-			   struct rmnet_flow_map *itm, int add_flow)
-{
-}
-
 static inline void qmi_rmnet_clean_flow_list(struct qos_info *qos)
 {
 }
@@ -423,7 +421,7 @@ static inline void qmi_rmnet_query_flows(struct qmi_info *qmi)
 static int
 qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
 {
-	int idx, rc, err = 0;
+	int idx, err = 0;
 	struct svc_info svc;
 
 	ASSERT_RTNL();
@@ -447,18 +445,17 @@ qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
 	svc.ep_type = tcm->tcm_info;
 	svc.iface_id = tcm->tcm_parent;
 
-	if (DFC_SUPPORTED_MODE(FLAG_TO_MODE(tcm->tcm_ifindex)) &&
+	if (DFC_SUPPORTED_MODE(dfc_mode) &&
 	    !qmi->dfc_clients[idx] && !qmi->dfc_pending[idx]) {
-		rc = dfc_qmi_client_init(port, idx, &svc, qmi);
-		if (rc < 0)
-			err = rc;
+		if (dfc_qmap)
+			err = dfc_qmap_client_init(port, idx, &svc, qmi);
+		else
+			err = dfc_qmi_client_init(port, idx, &svc, qmi);
 	}
 
 	if ((tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) &&
 	    (idx == 0) && !qmi->wda_client && !qmi->wda_pending) {
-		rc = wda_qmi_client_init(port, &svc, qmi);
-		if (rc < 0)
-			err = rc;
+		err = wda_qmi_client_init(port, &svc, qmi);
 	}
 
 	return err;
@@ -477,7 +474,10 @@ __qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, int idx)
 		data = qmi->dfc_pending[idx];
 
 	if (data) {
-		dfc_qmi_client_exit(data);
+		if (dfc_qmap)
+			dfc_qmap_client_exit(data);
+		else
+			dfc_qmi_client_exit(data);
 		qmi->dfc_clients[idx] = NULL;
 		qmi->dfc_pending[idx] = NULL;
 	}
@@ -524,20 +524,22 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt)
 
 	switch (tcm->tcm_family) {
 	case NLMSG_FLOW_ACTIVATE:
-		if (!qmi || !DFC_SUPPORTED_MODE(FLAG_TO_MODE(qmi->flag)) ||
+		if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode) ||
 		    !qmi_rmnet_has_dfc_client(qmi))
 			return;
 
 		qmi_rmnet_add_flow(dev, tcm, qmi);
 		break;
 	case NLMSG_FLOW_DEACTIVATE:
-		if (!qmi || !DFC_SUPPORTED_MODE(FLAG_TO_MODE(qmi->flag)))
+		if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode))
 			return;
 
 		qmi_rmnet_del_flow(dev, tcm, qmi);
 		break;
 	case NLMSG_CLIENT_SETUP:
 		dfc_mode = FLAG_TO_MODE(tcm->tcm_ifindex);
+		dfc_qmap = FLAG_TO_QMAP(tcm->tcm_ifindex);
+
 		if (!DFC_SUPPORTED_MODE(dfc_mode) &&
 		    !(tcm->tcm_ifindex & FLAG_POWERSAVE_MASK))
 			return;
@@ -628,7 +630,7 @@ void qmi_rmnet_enable_all_flows(struct net_device *dev)
 			continue;
 		do_wake = !bearer->grant_size;
 		bearer->grant_size = DEFAULT_GRANT;
-		bearer->grant_thresh = DEFAULT_GRANT;
+		bearer->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
 		bearer->seq = 0;
 		bearer->ack_req = 0;
 		bearer->tcp_bidir = false;
@@ -795,7 +797,7 @@ void qmi_rmnet_ps_on_notify(void *port)
 {
 	struct qmi_rmnet_ps_ind *tmp;
 
-	list_for_each_entry(tmp, &ps_list, list)
+	list_for_each_entry_rcu(tmp, &ps_list, list)
 		tmp->ps_on_handler(port);
 }
 EXPORT_SYMBOL(qmi_rmnet_ps_on_notify);
@@ -804,8 +806,9 @@ void qmi_rmnet_ps_off_notify(void *port)
 {
 	struct qmi_rmnet_ps_ind *tmp;
 
-	list_for_each_entry(tmp, &ps_list, list)
+	list_for_each_entry_rcu(tmp, &ps_list, list)
 		tmp->ps_off_handler(port);
 }
 EXPORT_SYMBOL(qmi_rmnet_ps_off_notify);
 
@@ -831,13 +834,12 @@ int qmi_rmnet_ps_ind_deregister(void *port,
 	if (!port || !ps_ind)
 		return -EINVAL;
 
-	list_for_each_entry(tmp, &ps_list, list) {
+	list_for_each_entry_rcu(tmp, &ps_list, list) {
 		if (tmp == ps_ind) {
 			list_del_rcu(&ps_ind->list);
 			goto done;
 		}
 	}
-
 done:
 	return 0;
 }
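
The NLMSG_CLIENT_SETUP handler now derives three independent settings from tcm->tcm_ifindex. A worked decoding example with an illustrative flag word:

#include <stdio.h>

#define FLAG_DFC_MASK		0x000F
#define FLAG_POWERSAVE_MASK	0x0010
#define FLAG_QMAP_MASK		0x0020

int main(void)
{
	int tcm_ifindex = 0x0032;	/* illustrative value from userspace */

	int dfc_mode  = tcm_ifindex & FLAG_DFC_MASK;	   /* 2: DFC_MODE_FLOW_ID */
	int powersave = tcm_ifindex & FLAG_POWERSAVE_MASK; /* set */
	int dfc_qmap  = tcm_ifindex & FLAG_QMAP_MASK;	   /* set: QMAP transport */

	printf("mode=%d powersave=%d qmap=%d\n",
	       dfc_mode, !!powersave, !!dfc_qmap);
	return 0;
}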
diff --git a/drivers/soc/qcom/qmi_rmnet_i.h b/drivers/soc/qcom/qmi_rmnet_i.h
index 1466822..15dee7c 100644
--- a/drivers/soc/qcom/qmi_rmnet_i.h
+++ b/drivers/soc/qcom/qmi_rmnet_i.h
@@ -9,9 +9,6 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 
-#define IP_VER_4 4
-#define IP_VER_6 6
-
 #define MAX_MQ_NUM 10
 #define MAX_CLIENT_NUM 2
 #define MAX_FLOW_NUM 32
@@ -21,6 +18,7 @@
 #define DFC_MODE_FLOW_ID 2
 #define DFC_MODE_MQ_NUM 3
 extern int dfc_mode;
+extern int dfc_qmap;
 
 struct rmnet_bearer_map {
 	struct list_head list;
@@ -35,6 +33,7 @@ struct rmnet_bearer_map {
 	bool tcp_bidir;
 	bool rat_switch;
 	bool tx_off;
+	u32 ack_txid;
 };
 
 struct rmnet_flow_map {
@@ -125,6 +124,13 @@ void dfc_qmi_query_flow(void *dfc_data);
 int dfc_bearer_flow_ctl(struct net_device *dev,
 			struct rmnet_bearer_map *bearer,
 			struct qos_info *qos);
+
+int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
+			 struct qmi_info *qmi);
+
+void dfc_qmap_client_exit(void *dfc_data);
+
+void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type);
 #else
 static inline struct rmnet_flow_map *
 qmi_rmnet_get_flow_map(struct qos_info *qos_info,
@@ -150,17 +156,6 @@ static inline void dfc_qmi_client_exit(void *dfc_data)
 {
 }
 
-static inline void
-dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
-		    int ip_type, u32 mark, unsigned int len)
-{
-}
-
-static inline void
-dfc_qmi_query_flow(void *dfc_data)
-{
-}
-
 static inline int
 dfc_bearer_flow_ctl(struct net_device *dev,
 		    struct rmnet_bearer_map *bearer,
@@ -168,6 +163,17 @@ dfc_bearer_flow_ctl(struct net_device *dev,
 {
 	return 0;
 }
+
+static inline int
+dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
+		     struct qmi_info *qmi)
+{
+	return -EINVAL;
+}
+
+static inline void dfc_qmap_client_exit(void *dfc_data)
+{
+}
 #endif
 
 #ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
diff --git a/drivers/soc/qcom/rmnet_ctl/Kconfig b/drivers/soc/qcom/rmnet_ctl/Kconfig
new file mode 100644
index 0000000..bfb91fbd
--- /dev/null
+++ b/drivers/soc/qcom/rmnet_ctl/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# RMNET CTL driver
+#
+
+menuconfig RMNET_CTL
+	tristate "RmNet Control driver"
+	depends on MHI_BUS
+	help
+	  Enable the RMNET CTL module, which is used for communicating with
+	  the device via the MAP command protocol. This module receives QMAP
+	  control commands via MHI.
diff --git a/drivers/soc/qcom/rmnet_ctl/Makefile b/drivers/soc/qcom/rmnet_ctl/Makefile
new file mode 100644
index 0000000..bf798da
--- /dev/null
+++ b/drivers/soc/qcom/rmnet_ctl/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the RMNET CTL module
+#
+
+rmnet_ctl-y		 += rmnet_ctl_client.o
+rmnet_ctl-y		 += rmnet_ctl_mhi.o
+obj-$(CONFIG_RMNET_CTL) += rmnet_ctl.o
diff --git a/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.c b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.c
new file mode 100644
index 0000000..299b301
--- /dev/null
+++ b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * RMNET_CTL client handlers
+ *
+ */
+
+#include <soc/qcom/rmnet_ctl.h>
+#include "rmnet_ctl_client.h"
+
+struct rmnet_ctl_client {
+	struct rmnet_ctl_client_hooks hooks;
+};
+
+struct rmnet_ctl_endpoint {
+	struct rmnet_ctl_dev __rcu *dev;
+	struct rmnet_ctl_client __rcu *client;
+};
+
+static DEFINE_SPINLOCK(client_lock);
+static struct rmnet_ctl_endpoint ctl_ep;
+
+void rmnet_ctl_endpoint_setdev(const struct rmnet_ctl_dev *dev)
+{
+	rcu_assign_pointer(ctl_ep.dev, dev);
+}
+
+void rmnet_ctl_endpoint_post(const void *data, size_t len)
+{
+	struct rmnet_ctl_client *client;
+	struct sk_buff *skb;
+
+	if (unlikely(!data || !len))
+		return;
+
+	rcu_read_lock();
+
+	client = rcu_dereference(ctl_ep.client);
+
+	if (client && client->hooks.ctl_dl_client_hook) {
+		skb = alloc_skb(len, GFP_ATOMIC);
+		if (skb) {
+			skb_put_data(skb, data, len);
+			skb->protocol = htons(ETH_P_MAP);
+			client->hooks.ctl_dl_client_hook(skb);
+		}
+	}
+
+	rcu_read_unlock();
+}
+
+void *rmnet_ctl_register_client(struct rmnet_ctl_client_hooks *hook)
+{
+	struct rmnet_ctl_client *client;
+
+	if (!hook)
+		return NULL;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return NULL;
+	client->hooks = *hook;
+
+	spin_lock(&client_lock);
+
+	/* Only support one client for now */
+	if (rcu_dereference(ctl_ep.client)) {
+		spin_unlock(&client_lock);
+		kfree(client);
+		return NULL;
+	}
+
+	rcu_assign_pointer(ctl_ep.client, client);
+
+	spin_unlock(&client_lock);
+
+	return client;
+}
+EXPORT_SYMBOL(rmnet_ctl_register_client);
+
+int rmnet_ctl_unregister_client(void *handle)
+{
+	struct rmnet_ctl_client *client = (struct rmnet_ctl_client *)handle;
+
+	spin_lock(&client_lock);
+
+	if (rcu_dereference(ctl_ep.client) != client) {
+		spin_unlock(&client_lock);
+		return -EINVAL;
+	}
+
+	RCU_INIT_POINTER(ctl_ep.client, NULL);
+
+	spin_unlock(&client_lock);
+
+	synchronize_rcu();
+	kfree(client);
+
+	return 0;
+}
+EXPORT_SYMBOL(rmnet_ctl_unregister_client);
+
+int rmnet_ctl_send_client(void *handle, struct sk_buff *skb)
+{
+	struct rmnet_ctl_client *client = (struct rmnet_ctl_client *)handle;
+	struct rmnet_ctl_dev *dev;
+	int rc = -EINVAL;
+
+	if (client != rcu_dereference(ctl_ep.client))
+		return rc;
+
+	rcu_read_lock();
+
+	dev = rcu_dereference(ctl_ep.dev);
+	if (dev && dev->xmit)
+		rc = dev->xmit(dev, skb);
+
+	rcu_read_unlock();
+
+	return rc;
+}
+EXPORT_SYMBOL(rmnet_ctl_send_client);
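
A minimal kernel-style usage sketch of this client API, assuming an in-tree caller; the callback and the init/exit wiring are hypothetical. Only a single client is supported, so registration can fail:

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <soc/qcom/rmnet_ctl.h>

/* Hypothetical downlink handler: consume one QMAP control command. */
static void my_dl_cb(struct sk_buff *skb)
{
	kfree_skb(skb);
}

static struct rmnet_ctl_client_hooks my_hooks = {
	.ctl_dl_client_hook = my_dl_cb,
};

static void *my_handle;

static int my_client_start(void)
{
	my_handle = rmnet_ctl_register_client(&my_hooks);
	return my_handle ? 0 : -EBUSY;	/* only one client is supported */
}

static void my_client_stop(void)
{
	rmnet_ctl_unregister_client(my_handle);
}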
diff --git a/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.h b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.h
new file mode 100644
index 0000000..6362581
--- /dev/null
+++ b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * RMNET_CTL client handlers
+ *
+ */
+
+#ifndef _RMNET_CTL_CLIENT_H_
+#define _RMNET_CTL_CLIENT_H_
+
+#include <linux/skbuff.h>
+
+struct rmnet_ctl_stats {
+	u64 rx_pkts;
+	u64 rx_err;
+	u64 tx_pkts;
+	u64 tx_err;
+	u64 tx_complete;
+};
+
+struct rmnet_ctl_dev {
+	int (*xmit)(struct rmnet_ctl_dev *dev, struct sk_buff *skb);
+	struct rmnet_ctl_stats stats;
+};
+
+void rmnet_ctl_endpoint_post(const void *data, size_t len);
+void rmnet_ctl_endpoint_setdev(const struct rmnet_ctl_dev *dev);
+
+#endif /* _RMNET_CTL_CLIENT_H_ */
diff --git a/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_mhi.c b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_mhi.c
new file mode 100644
index 0000000..af84e13
--- /dev/null
+++ b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_mhi.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * RMNET_CTL mhi handler
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/skbuff.h>
+#include <linux/mhi.h>
+#include "rmnet_ctl_client.h"
+
+#define RMNET_CTL_DEFAULT_MRU 1024
+
+struct rmnet_ctl_mhi_dev {
+	struct mhi_device *mhi_dev;
+	struct rmnet_ctl_dev dev;
+	u32 mru;
+	spinlock_t rx_lock; /* rx lock */
+	spinlock_t tx_lock; /* tx lock */
+	atomic_t in_reset;
+};
+
+static int rmnet_ctl_send_mhi(struct rmnet_ctl_dev *dev, struct sk_buff *skb)
+{
+	struct rmnet_ctl_mhi_dev *ctl_dev = container_of(
+				dev, struct rmnet_ctl_mhi_dev, dev);
+	int rc;
+
+	spin_lock_bh(&ctl_dev->tx_lock);
+
+	rc = mhi_queue_transfer(ctl_dev->mhi_dev,
+				DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
+	if (rc)
+		dev->stats.tx_err++;
+	else
+		dev->stats.tx_pkts++;
+
+	spin_unlock_bh(&ctl_dev->tx_lock);
+
+	return rc;
+}
+
+static void rmnet_ctl_alloc_buffers(struct rmnet_ctl_mhi_dev *ctl_dev,
+				    gfp_t gfp, void *free_buf)
+{
+	struct mhi_device *mhi_dev = ctl_dev->mhi_dev;
+	void *buf;
+	int no_tre, i, rc;
+
+	no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
+	for (i = 0; i < no_tre; i++) {
+		if (free_buf) {
+			buf = free_buf;
+			free_buf = NULL;
+		} else {
+			buf = kmalloc(ctl_dev->mru, gfp);
+		}
+
+		if (!buf)
+			return;
+
+		spin_lock_bh(&ctl_dev->rx_lock);
+		rc = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE,
+					buf, ctl_dev->mru, MHI_EOT);
+		spin_unlock_bh(&ctl_dev->rx_lock);
+
+		if (rc) {
+			kfree(buf);
+			return;
+		}
+	}
+}
+
+static void rmnet_ctl_dl_callback(struct mhi_device *mhi_dev,
+				  struct mhi_result *mhi_res)
+{
+	struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
+
+	if (mhi_res->transaction_status || !mhi_res->buf_addr) {
+		ctl_dev->dev.stats.rx_err++;
+	} else {
+		ctl_dev->dev.stats.rx_pkts++;
+		rmnet_ctl_endpoint_post(mhi_res->buf_addr,
+					mhi_res->bytes_xferd);
+	}
+
+	/* Re-supply receive buffers */
+	rmnet_ctl_alloc_buffers(ctl_dev, GFP_ATOMIC, mhi_res->buf_addr);
+}
+
+static void rmnet_ctl_ul_callback(struct mhi_device *mhi_dev,
+				  struct mhi_result *mhi_res)
+{
+	struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
+	struct sk_buff *skb = (struct sk_buff *)mhi_res->buf_addr;
+
+	if (skb) {
+		ctl_dev->dev.stats.tx_complete++;
+		kfree_skb(skb);
+	}
+}
+
+static void rmnet_ctl_status_callback(struct mhi_device *mhi_dev,
+				      enum MHI_CB mhi_cb)
+{
+	struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
+
+	if (mhi_cb != MHI_CB_FATAL_ERROR)
+		return;
+
+	atomic_inc(&ctl_dev->in_reset);
+}
+
+static int rmnet_ctl_probe(struct mhi_device *mhi_dev,
+			   const struct mhi_device_id *id)
+{
+	struct rmnet_ctl_mhi_dev *ctl_dev;
+	struct device_node *of_node = mhi_dev->dev.of_node;
+	int rc;
+
+	ctl_dev = devm_kzalloc(&mhi_dev->dev, sizeof(*ctl_dev), GFP_KERNEL);
+	if (!ctl_dev)
+		return -ENOMEM;
+
+	ctl_dev->mhi_dev = mhi_dev;
+	ctl_dev->dev.xmit = rmnet_ctl_send_mhi;
+
+	spin_lock_init(&ctl_dev->rx_lock);
+	spin_lock_init(&ctl_dev->tx_lock);
+	atomic_set(&ctl_dev->in_reset, 0);
+	dev_set_drvdata(&mhi_dev->dev, ctl_dev);
+
+	rc = of_property_read_u32(of_node, "mhi,mru", &ctl_dev->mru);
+	if (rc || !ctl_dev->mru)
+		ctl_dev->mru = RMNET_CTL_DEFAULT_MRU;
+
+	rc = mhi_prepare_for_transfer(mhi_dev);
+	if (rc) {
+		pr_err("%s(): Failed to prep for transfer %d\n", __func__, rc);
+		return -EINVAL;
+	}
+
+	/* Post receive buffers */
+	rmnet_ctl_alloc_buffers(ctl_dev, GFP_KERNEL, NULL);
+
+	rmnet_ctl_endpoint_setdev(&ctl_dev->dev);
+
+	pr_info("rmnet_ctl driver probed\n");
+
+	return 0;
+}
+
+static void rmnet_ctl_remove(struct mhi_device *mhi_dev)
+{
+	rmnet_ctl_endpoint_setdev(NULL);
+	synchronize_rcu();
+	dev_set_drvdata(&mhi_dev->dev, NULL);
+
+	pr_info("rmnet_ctl driver removed\n");
+}
+
+static const struct mhi_device_id rmnet_ctl_mhi_match[] = {
+	{ .chan = "RMNET_CTL" },
+	{}
+};
+
+static struct mhi_driver rmnet_ctl_driver = {
+	.probe = rmnet_ctl_probe,
+	.remove = rmnet_ctl_remove,
+	.dl_xfer_cb = rmnet_ctl_dl_callback,
+	.ul_xfer_cb = rmnet_ctl_ul_callback,
+	.status_cb = rmnet_ctl_status_callback,
+	.id_table = rmnet_ctl_mhi_match,
+	.driver = {
+		.name = "rmnet_ctl",
+		.owner = THIS_MODULE,
+	},
+};
+
+module_driver(rmnet_ctl_driver,
+	      mhi_driver_register, mhi_driver_unregister);
+
+MODULE_DESCRIPTION("RmNet Control Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c
index d1be4a9..f735395 100644
--- a/drivers/soc/qcom/smcinvoke.c
+++ b/drivers/soc/qcom/smcinvoke.c
@@ -103,6 +103,8 @@
 #define MEM_RGN_SRVR_ID 1
 #define MEM_MAP_SRVR_ID 2
 #define CBOBJ_SERVER_ID_START 0x10
+/* local obj id is represented by 15 bits */
+#define MAX_LOCAL_OBJ_ID ((1<<15) - 1)
 /* CBOBJs will be served by server id 0x10 onwards */
 #define TZHANDLE_GET_SERVER(h) ((uint16_t)((h) & 0xFFFF))
 #define TZHANDLE_GET_OBJID(h) (((h) >> 16) & 0x7FFF)
@@ -294,6 +296,9 @@ static  struct smcinvoke_mem_obj *find_mem_obj_locked(uint16_t mem_obj_id,
 
 static uint32_t next_mem_region_obj_id_locked(void)
 {
+	if (g_last_mem_rgn_id == MAX_LOCAL_OBJ_ID)
+		g_last_mem_rgn_id = 0;
+
 	while (find_mem_obj_locked(++g_last_mem_rgn_id, SMCINVOKE_MEM_RGN_OBJ))
 		;
 
@@ -302,6 +307,9 @@ static uint32_t next_mem_region_obj_id_locked(void)
 
 static uint32_t next_mem_map_obj_id_locked(void)
 {
+	if (g_last_mem_map_obj_id == MAX_LOCAL_OBJ_ID)
+		g_last_mem_map_obj_id = 0;
+
 	while (find_mem_obj_locked(++g_last_mem_map_obj_id,
 					SMCINVOKE_MEM_MAP_OBJ))
 		;
@@ -842,8 +850,10 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
 
 	cb_req = kmemdup(buf, buf_len, GFP_KERNEL);
 	if (!cb_req) {
-		ret =  OBJECT_ERROR_KMEM;
-		goto out;
+		/* we need to return an error to the caller, so fill in the result */
+		cb_req = buf;
+		cb_req->result = OBJECT_ERROR_KMEM;
+		return;
 	}
 
 	/* check whether it is to be served by kernel or userspace */
@@ -901,9 +911,11 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
 			release_tzhandle_locked(cb_req->hdr.tzhandle);
 		}
 	}
-	hash_del(&cb_txn->hash);
-	memcpy(buf, cb_req, buf_len);
-	kref_put(&cb_txn->ref_cnt, delete_cb_txn);
+	if (cb_txn) {
+		hash_del(&cb_txn->hash);
+		memcpy(buf, cb_req, buf_len);
+		kref_put(&cb_txn->ref_cnt, delete_cb_txn);
+	}
 	mutex_unlock(&g_smcinvoke_lock);
 }
 
@@ -1523,34 +1535,26 @@ static long process_invoke_req(struct file *filp, unsigned int cmd,
 	int32_t tzhandles_to_release[OBJECT_COUNTS_MAX_OO] = {0};
 	bool tz_acked = false;
 
-	if (_IOC_SIZE(cmd) != sizeof(req)) {
-		ret =  -EINVAL;
-		goto out;
-	}
-	if (tzobj->context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
-		ret = -EPERM;
-		goto out;
-	}
+	if (_IOC_SIZE(cmd) != sizeof(req))
+		return -EINVAL;
+
+	if (tzobj->context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ)
+		return -EPERM;
+
 	ret = copy_from_user(&req, (void __user *)arg, sizeof(req));
-	if (ret) {
-		ret =  -EFAULT;
-		goto out;
-	}
+	if (ret)
+		return -EFAULT;
+
+	if (req.argsize != sizeof(union smcinvoke_arg))
+		return -EINVAL;
 
 	nr_args = OBJECT_COUNTS_NUM_buffers(req.counts) +
 			OBJECT_COUNTS_NUM_objects(req.counts);
 
-	if (req.argsize != sizeof(union smcinvoke_arg)) {
-		ret = -EINVAL;
-		goto out;
-	}
-
 	if (nr_args) {
 		args_buf = kcalloc(nr_args, req.argsize, GFP_KERNEL);
-		if (!args_buf) {
-			ret = -ENOMEM;
-			goto out;
-		}
+		if (!args_buf)
+			return -ENOMEM;
 
 		ret = copy_from_user(args_buf, u64_to_user_ptr(req.args),
 					nr_args * req.argsize);
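
The two wrap-around guards added above keep the 15-bit local object ID space (see MAX_LOCAL_OBJ_ID and TZHANDLE_GET_OBJID) from overflowing; both allocators follow the same shape, sketched here with a hypothetical id_in_use() helper:

	static uint32_t next_local_id_locked(uint32_t *last_id)
	{
		if (*last_id == MAX_LOCAL_OBJ_ID)	/* 15-bit space exhausted */
			*last_id = 0;			/* wrap and reuse freed IDs */
		while (id_in_use(++*last_id))		/* skip IDs still in use */
			;
		return *last_id;
	}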
diff --git a/drivers/soc/qcom/spss_utils.c b/drivers/soc/qcom/spss_utils.c
index 9472d4f..286feb5 100644
--- a/drivers/soc/qcom/spss_utils.c
+++ b/drivers/soc/qcom/spss_utils.c
@@ -200,7 +200,7 @@ static ssize_t cmac_buf_show(struct device *dev,
 		return -EINVAL;
 	}
 
-	ret = snprintf(buf, PAGE_SIZE, "0x%x,0x%x,0x%x,0x%x\n",
+	ret = snprintf(buf, PAGE_SIZE, "0x%08x,0x%08x,0x%08x,0x%08x\n",
 		cmac_buf[0], cmac_buf[1], cmac_buf[2], cmac_buf[3]);
 
 	return ret;
@@ -274,7 +274,7 @@ static ssize_t pbl_cmac_show(struct device *dev,
 		return -EINVAL;
 	}
 
-	ret = snprintf(buf, PAGE_SIZE, "0x%x,0x%x,0x%x,0x%x\n",
+	ret = snprintf(buf, PAGE_SIZE, "0x%08x,0x%08x,0x%08x,0x%08x\n",
 	    pbl_cmac_buf[0], pbl_cmac_buf[1], pbl_cmac_buf[2], pbl_cmac_buf[3]);
 
 	return ret;
@@ -393,7 +393,7 @@ static long spss_utils_ioctl(struct file *file,
 		}
 
 		memcpy(cmac_buf, data, sizeof(cmac_buf));
-		pr_info("cmac_buf: 0x%x,0x%x,0x%x,0x%x\n",
+		pr_info("saved fw cmac: 0x%08x,0x%08x,0x%08x,0x%08x\n",
 			cmac_buf[0], cmac_buf[1], cmac_buf[2], cmac_buf[3]);
 
 		/*
@@ -402,13 +402,17 @@ static long spss_utils_ioctl(struct file *file,
 		 * therefore read the spu pbl fw cmac from ioctl.
 		 * The callback shall be called on spss SSR.
 		 */
-		pr_info("read pbl cmac from shared memory\n");
+		pr_debug("read pbl cmac from shared memory\n");
 		spss_set_fw_cmac(cmac_buf, sizeof(cmac_buf));
 		spss_get_pbl_calc_cmac(pbl_cmac_buf, sizeof(pbl_cmac_buf));
 		if (memcmp(cmac_buf, pbl_cmac_buf, sizeof(cmac_buf)) != 0)
 			is_pbl_ce = true; /* cmacs not the same */
 		else
 			is_pbl_ce = false;
+
+		pr_info("calc fw cmac: 0x%08x,0x%08x,0x%08x,0x%08x\n",
+			pbl_cmac_buf[0], pbl_cmac_buf[1],
+			pbl_cmac_buf[2], pbl_cmac_buf[3]);
 		break;
 
 	default:
@@ -675,11 +679,12 @@ static int spss_parse_dt(struct device_node *node)
 		return -EFAULT;
 	}
 
-	pr_info("pil_addr [0x%x].\n", pil_addr);
-	pr_info("pil_size [0x%x].\n", pil_size);
+	pr_debug("pil_addr [0x%08x].\n", pil_addr);
+	pr_debug("pil_size [0x%08x].\n", pil_size);
 
 	/* cmac buffer after spss firmware end */
 	cmac_mem_addr = pil_addr + pil_size;
+	pr_info("iar_buf_addr [0x%08x].\n", cmac_mem_addr);
 
 	ret = of_property_read_u32(node, "qcom,spss-fuse3-addr",
 		&spss_fuse3_addr);
@@ -705,7 +710,7 @@ static int spss_parse_dt(struct device_node *node)
 	/* read IAR_FEATURE_ENABLED from soc fuse */
 	val1 = readl_relaxed(spss_fuse3_reg);
 	spss_fuse3_mask = (1<<spss_fuse3_bit);
-	pr_info("iar_enabled fuse, addr [0x%x] val [0x%x] mask [0x%x].\n",
+	pr_debug("iar_enabled fuse, addr [0x%x] val [0x%x] mask [0x%x].\n",
 		spss_fuse3_addr, val1, spss_fuse3_mask);
 	if (val1 & spss_fuse3_mask)
 		is_iar_enabled = true;
@@ -737,7 +742,7 @@ static int spss_parse_dt(struct device_node *node)
 
 	val1 = readl_relaxed(spss_fuse4_reg);
 	spss_fuse4_mask = (0x07 << spss_fuse4_bit); /* 3 bits */
-	pr_info("IAR_STATE fuse, addr [0x%x] val [0x%x] mask [0x%x].\n",
+	pr_debug("IAR_STATE fuse, addr [0x%x] val [0x%x] mask [0x%x].\n",
 	spss_fuse4_addr, val1, spss_fuse4_mask);
 	val1 = ((val1 & spss_fuse4_mask) >> spss_fuse4_bit) & 0x07;
 
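The "0x%x" to "0x%08x" changes above zero-pad each 32-bit CMAC word to eight hex digits, so the sysfs and log output keeps a fixed width; for example:

	u32 w = 0x1a;

	pr_info("0x%x\n", w);	/* prints "0x1a"       - variable width */
	pr_info("0x%08x\n", w);	/* prints "0x0000001a" - fixed width    */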
diff --git a/drivers/soc/qcom/sysmon-qmi.c b/drivers/soc/qcom/sysmon-qmi.c
index b1ab461..5a4c1ab 100644
--- a/drivers/soc/qcom/sysmon-qmi.c
+++ b/drivers/soc/qcom/sysmon-qmi.c
@@ -37,6 +37,7 @@
 #define QMI_SSCTL_SUBSYS_EVENT_REQ_LENGTH	40
 #define QMI_SSCTL_RESP_MSG_LENGTH		7
 #define QMI_SSCTL_EMPTY_MSG_LENGTH		0
+#define QMI_SSCTL_MAX_MSG_LENGTH		90
 
 #define SSCTL_SERVICE_ID			0x2B
 #define SSCTL_VER_2				2
@@ -532,11 +533,10 @@ static struct qmi_elem_info qmi_ssctl_get_failure_reason_resp_msg_ei[] = {
  */
 int sysmon_get_reason(struct subsys_desc *dest_desc, char *buf, size_t len)
 {
-	struct qmi_ssctl_get_failure_reason_resp_msg resp;
+	struct qmi_ssctl_get_failure_reason_resp_msg resp = { { 0 } };
 	struct sysmon_qmi_data *data = NULL, *temp;
 	struct qmi_txn txn;
 	const char *dest_ss = dest_desc->name;
-	const char expect[] = "ssr:return:";
 	char req = 0;
 	int ret;
 
@@ -601,12 +601,8 @@ int sysmon_get_reason(struct subsys_desc *dest_desc, char *buf, size_t len)
 		goto out;
 	}
 
-	if (!strcmp(resp.error_message, expect)) {
-		pr_err("Unexpected response %s\n", resp.error_message);
-		ret = -EPROTO;
-		goto out;
-	}
 	strlcpy(buf, resp.error_message, resp.error_message_len);
+	return 0;
 out:
 	return ret;
 }
@@ -643,7 +639,7 @@ int sysmon_notifier_register(struct subsys_desc *desc)
 	}
 
 	rc = qmi_handle_init(&data->clnt_handle,
-			QMI_SSCTL_RESP_MSG_LENGTH, &ssctl_ops,
+			QMI_SSCTL_MAX_MSG_LENGTH, &ssctl_ops,
 			qmi_indication_handler);
 	if (rc < 0) {
 		pr_err("Sysmon QMI handle init failed rc:%d\n", rc);
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 1c85745..c7d45dc 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1185,6 +1185,28 @@ static const struct file_operations ion_fops = {
 #endif
 };
 
+static int ion_debug_heap_show(struct seq_file *s, void *unused)
+{
+	struct ion_heap *heap = s->private;
+
+	if (heap->debug_show)
+		heap->debug_show(heap, s, unused);
+
+	return 0;
+}
+
+static int ion_debug_heap_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ion_debug_heap_show, inode->i_private);
+}
+
+static const struct file_operations debug_heap_fops = {
+	.open = ion_debug_heap_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
 static int debug_shrink_set(void *data, u64 val)
 {
 	struct ion_heap *heap = data;
@@ -1222,6 +1244,7 @@ DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
 
 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 {
+	char debug_name[64], buf[256];
 	int ret;
 
 	if (!heap->ops->allocate || !heap->ops->free)
@@ -1249,12 +1272,22 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 	plist_node_init(&heap->node, -heap->id);
 	plist_add(&heap->node, &dev->heaps);
 
-	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
-		char debug_name[64];
+	if (heap->debug_show) {
+		snprintf(debug_name, 64, "%s_stats", heap->name);
+		if (!debugfs_create_file(debug_name, 0664, dev->debug_root,
+					 heap, &debug_heap_fops))
+			pr_err("Failed to create heap debugfs at %s/%s\n",
+			       dentry_path(dev->debug_root, buf, 256),
+			       debug_name);
+	}
 
+	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
 		snprintf(debug_name, 64, "%s_shrink", heap->name);
-		debugfs_create_file(debug_name, 0644, dev->debug_root,
-				    heap, &debug_shrink_fops);
+		if (!debugfs_create_file(debug_name, 0644, dev->debug_root,
+					 heap, &debug_shrink_fops))
+			pr_err("Failed to create heap debugfs at %s/%s\n",
+			       dentry_path(dev->debug_root, buf, 256),
+			       debug_name);
 	}
 
 	dev->heap_cnt++;
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index ed0898f..63e9218 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -49,6 +49,8 @@ static bool pool_refill_ok(struct ion_page_pool *pool)
 
 static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 {
+	if (fatal_signal_pending(current))
+		return NULL;
 	return alloc_pages(pool->gfp_mask, pool->order);
 }
 
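The fatal_signal_pending() check lets a task that has already received SIGKILL (for instance from the OOM killer) fail the allocation immediately instead of blocking in reclaim. The same bail-early pattern in a generic refill loop (a sketch with hypothetical helpers, not code from this driver):

	for (i = 0; i < nr_to_refill; i++) {
		if (fatal_signal_pending(current))
			break;		/* task is dying; stop refilling */
		page = alloc_pages(pool->gfp_mask, pool->order);
		if (!page)
			break;
		add_page_to_pool(pool, page);	/* hypothetical helper */
	}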
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 2edf3ee..caf4d4d 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -342,9 +342,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
 static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
 			    unsigned int flags)
 {
-	int divider, base, prescale;
+	unsigned int divider, base, prescale;
 
-	/* This function needs improvment */
+	/* This function needs improvement */
 	/* Don't know if divider==0 works. */
 
 	for (prescale = 0; prescale < 16; prescale++) {
@@ -358,7 +358,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
 			divider = (*nanosec) / base;
 			break;
 		case CMDF_ROUND_UP:
-			divider = (*nanosec) / base;
+			divider = DIV_ROUND_UP(*nanosec, base);
 			break;
 		}
 		if (divider < 65536) {
@@ -368,7 +368,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
 	}
 
 	prescale = 15;
-	base = timer_base * (1 << prescale);
+	base = timer_base * (prescale + 1);
 	divider = 65535;
 	*nanosec = divider * base;
 	return (prescale << 16) | (divider);
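
DIV_ROUND_UP(n, d) is ((n) + (d) - 1) / (d), so CMDF_ROUND_UP now actually rounds the divisor up instead of repeating the round-down case. A worked example with hypothetical values:

	/* *nanosec = 1000, base = 300:
	 *   old: divider = 1000 / 300              = 3 -> 900 ns (too fast)
	 *   new: divider = DIV_ROUND_UP(1000, 300) = 4 -> 1200 ns (>= request)
	 */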
diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c
index c747e9c..0cef1d6 100644
--- a/drivers/staging/gasket/apex_driver.c
+++ b/drivers/staging/gasket/apex_driver.c
@@ -538,7 +538,7 @@ static ssize_t sysfs_show(struct device *device, struct device_attribute *attr,
 		break;
 	case ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE:
 		ret = scnprintf(buf, PAGE_SIZE, "%u\n",
-				gasket_page_table_num_entries(
+				gasket_page_table_num_simple_entries(
 					gasket_dev->page_table[0]));
 		break;
 	case ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES:
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index 5449287..f0415de 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -25,6 +25,13 @@ static int tsens_get_temp(void *data, int *temp)
 	return tmdev->ops->get_temp(s, temp);
 }
 
+static int tsens_get_min_temp(void *data, int *temp)
+{
+	struct tsens_sensor *s = data;
+
+	return tsens_2xxx_get_min_temp(s, temp);
+}
+
 static int tsens_set_trip_temp(void *data, int low_temp, int high_temp)
 {
 	struct tsens_sensor *s = data;
@@ -82,6 +89,9 @@ static const struct of_device_id tsens_table[] = {
 	{	.compatible = "qcom,tsens24xx",
 		.data = &data_tsens24xx,
 	},
+	{	.compatible = "qcom,tsens26xx",
+		.data = &data_tsens26xx,
+	},
 	{	.compatible = "qcom,msm8937-tsens",
 		.data = &data_tsens14xx,
 	},
@@ -97,6 +107,10 @@ static struct thermal_zone_of_device_ops tsens_tm_thermal_zone_ops = {
 	.set_trips = tsens_set_trip_temp,
 };
 
+static struct thermal_zone_of_device_ops tsens_tm_min_thermal_zone_ops = {
+	.get_temp = tsens_get_min_temp,
+};
+
 static int get_device_tree_data(struct platform_device *pdev,
 				struct tsens_device *tmdev)
 {
@@ -105,6 +119,7 @@ static int get_device_tree_data(struct platform_device *pdev,
 	const struct tsens_data *data;
 	int rc = 0;
 	struct resource *res_tsens_mem;
+	u32 min_temp_id;
 
 	if (!of_match_node(tsens_table, of_node)) {
 		pr_err("Need to read SoC specific fuse map\n");
@@ -179,6 +194,11 @@ static int get_device_tree_data(struct platform_device *pdev,
 		}
 	}
 
+	if (!of_property_read_u32(of_node, "0C-sensor-num", &min_temp_id))
+		tmdev->min_temp_sensor_id = (int)min_temp_id;
+	else
+		tmdev->min_temp_sensor_id = MIN_TEMP_DEF_OFFSET;
+
 	return rc;
 }
 
@@ -209,6 +229,17 @@ static int tsens_thermal_zone_register(struct tsens_device *tmdev)
 		return -ENODEV;
 	}
 
+	if (tmdev->min_temp_sensor_id != MIN_TEMP_DEF_OFFSET) {
+		tmdev->min_temp.tmdev = tmdev;
+		tmdev->min_temp.hw_id = tmdev->min_temp_sensor_id;
+		tmdev->min_temp.tzd =
+			devm_thermal_zone_of_sensor_register(
+			&tmdev->pdev->dev, tmdev->min_temp_sensor_id,
+			&tmdev->min_temp, &tsens_tm_min_thermal_zone_ops);
+		if (IS_ERR(tmdev->min_temp.tzd))
+			pr_err("Error registering min temp sensor\n");
+	}
+
 	/* Register virtual thermal sensors. */
 	qti_virtual_sensor_register(&tmdev->pdev->dev);
 
diff --git a/drivers/thermal/tsens.h b/drivers/thermal/tsens.h
index 8ee67c6..bf8768a 100644
--- a/drivers/thermal/tsens.h
+++ b/drivers/thermal/tsens.h
@@ -31,6 +31,7 @@
 #define SLOPE_DEFAULT		3200
 
 #define IPC_LOGPAGES 10
+#define MIN_TEMP_DEF_OFFSET		0xFF
 
 enum tsens_dbg_type {
 	TSENS_DBG_POLL,
@@ -208,14 +209,20 @@ struct tsens_device {
 	const struct tsens_data		*ctrl_data;
 	struct tsens_mtc_sysfs  mtcsys;
 	int				trdy_fail_ctr;
+	struct tsens_sensor		min_temp;
+	u8				min_temp_sensor_id;
 	struct tsens_sensor		sensor[0];
 };
 
-extern const struct tsens_data data_tsens2xxx, data_tsens23xx, data_tsens24xx;
+extern const struct tsens_data data_tsens2xxx, data_tsens23xx, data_tsens24xx,
+		data_tsens26xx;
 extern const struct tsens_data data_tsens14xx, data_tsens14xx_405;
 extern struct list_head tsens_device_list;
 
 extern int calibrate_8937(struct tsens_device *tmdev);
 extern int calibrate_405(struct tsens_device *tmdev);
 
+extern int tsens_2xxx_get_min_temp(
+		struct tsens_sensor *sensor, int *temp);
+
 #endif /* __QCOM_TSENS_H__ */
diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c
index 062e53e..7d040ae 100644
--- a/drivers/thermal/tsens2xxx.c
+++ b/drivers/thermal/tsens2xxx.c
@@ -42,12 +42,15 @@
 #define TSENS_TM_UPPER_LOWER_INT_MASK(n)	((n) + 0x10)
 #define TSENS_TM_UPPER_INT_SET(n)		(1 << (n + 16))
 #define TSENS_TM_SN_CRITICAL_THRESHOLD_MASK	0xfff
+#define TSENS_TM_MIN_TEMP_VALID_BIT		BIT(16)
 #define TSENS_TM_SN_STATUS_VALID_BIT		BIT(21)
 #define TSENS_TM_SN_STATUS_CRITICAL_STATUS	BIT(19)
 #define TSENS_TM_SN_STATUS_UPPER_STATUS		BIT(18)
 #define TSENS_TM_SN_STATUS_LOWER_STATUS		BIT(17)
 #define TSENS_TM_SN_LAST_TEMP_MASK		0xfff
 #define TSENS_TM_CODE_BIT_MASK			0xfff
+#define TSENS_TM_0C_THR_MASK			0xfff
+#define TSENS_TM_0C_THR_OFFSET			12
 #define TSENS_TM_CODE_SIGN_BIT			0x800
 #define TSENS_TM_SCALE_DECI_MILLIDEG		100
 #define TSENS_DEBUG_WDOG_TRIGGER_COUNT		5
@@ -58,6 +61,10 @@
 #define TSENS_TM_TRDY(n)			((n) + 0xe4)
 #define TSENS_TM_TRDY_FIRST_ROUND_COMPLETE	BIT(3)
 #define TSENS_TM_TRDY_FIRST_ROUND_COMPLETE_SHIFT	3
+#define TSENS_TM_0C_INT_STATUS(n)	((n) + 0xe0)
+#define TSENS_TM_MIN_TEMP(n)	((n) + 0xec)
+#define TSENS_TM_0C_THRESHOLDS(n)		((n) + 0x1c)
+#define TSENS_MAX_READ_FAIL			50
 
 static void msm_tsens_convert_temp(int last_temp, int *temp)
 {
@@ -92,7 +99,7 @@ static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
 			code, tmdev->trdy_fail_ctr);
 		tmdev->trdy_fail_ctr++;
 
-		if (tmdev->trdy_fail_ctr >= 50) {
+		if (tmdev->trdy_fail_ctr >= TSENS_MAX_READ_FAIL) {
 			if (tmdev->ops->dbg)
 				tmdev->ops->dbg(tmdev, 0,
 					TSENS_DBG_LOG_BUS_ID_DATA, NULL);
@@ -147,6 +154,75 @@ static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
 	return 0;
 }
 
+int tsens_2xxx_get_min_temp(struct tsens_sensor *sensor, int *temp)
+{
+	struct tsens_device *tmdev = NULL;
+	unsigned int code;
+	void __iomem *sensor_addr, *trdy;
+	int last_temp = 0, last_temp2 = 0, last_temp3 = 0, valid_bit;
+
+	if (!sensor)
+		return -EINVAL;
+
+	tmdev = sensor->tmdev;
+	trdy = TSENS_TM_TRDY(tmdev->tsens_tm_addr);
+
+	valid_bit = TSENS_TM_MIN_TEMP_VALID_BIT;
+	sensor_addr = TSENS_TM_MIN_TEMP(tmdev->tsens_tm_addr);
+
+	code = readl_relaxed_no_log(trdy);
+	if (!((code & TSENS_TM_TRDY_FIRST_ROUND_COMPLETE) >>
+			TSENS_TM_TRDY_FIRST_ROUND_COMPLETE_SHIFT)) {
+		pr_err("tsens device first round not complete0x%x, ctr is %d\n",
+			code, tmdev->trdy_fail_ctr);
+		tmdev->trdy_fail_ctr++;
+		if (tmdev->trdy_fail_ctr >= TSENS_MAX_READ_FAIL) {
+			if (tmdev->ops->dbg)
+				tmdev->ops->dbg(tmdev, 0,
+					TSENS_DBG_LOG_BUS_ID_DATA, NULL);
+			BUG();
+		}
+		return -ENODATA;
+	}
+
+	tmdev->trdy_fail_ctr = 0;
+
+	code = readl_relaxed_no_log(sensor_addr);
+	last_temp = code & TSENS_TM_SN_LAST_TEMP_MASK;
+	if (code & valid_bit) {
+		msm_tsens_convert_temp(last_temp, temp);
+		goto dbg;
+	}
+
+	code = readl_relaxed_no_log(sensor_addr);
+	last_temp2 = code & TSENS_TM_SN_LAST_TEMP_MASK;
+	if (code & valid_bit) {
+		last_temp = last_temp2;
+		msm_tsens_convert_temp(last_temp, temp);
+		goto dbg;
+	}
+
+	code = readl_relaxed_no_log(sensor_addr);
+	last_temp3 = code & TSENS_TM_SN_LAST_TEMP_MASK;
+	if (code & valid_bit) {
+		last_temp = last_temp3;
+		msm_tsens_convert_temp(last_temp, temp);
+		goto dbg;
+	}
+
+	/* no read had the valid bit set: fall back to two-out-of-three
+	 * agreement among the unstable reads
+	 */
+	if (last_temp == last_temp2)
+		last_temp = last_temp2;
+	else if (last_temp2 == last_temp3)
+		last_temp = last_temp3;
+
+	msm_tsens_convert_temp(last_temp, temp);
+
+dbg:
+	TSENS_DBG(tmdev, "Min temp: %d\n", *temp);
+
+	return 0;
+}
+
 static int tsens_tm_activate_trip_type(struct tsens_sensor *tm_sensor,
 			int trip, enum thermal_device_mode mode)
 {
@@ -518,6 +594,31 @@ static irqreturn_t tsens_tm_irq_thread(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t tsens_tm_0C_irq_thread(int irq, void *data)
+{
+	struct tsens_device *tm = data;
+	int status, thrs, set_thr, reset_thr;
+	void __iomem *srot_addr, *addr;
+
+	addr = TSENS_TM_0C_INT_STATUS(tm->tsens_tm_addr);
+	status = readl_relaxed(addr);
+
+	srot_addr = TSENS_CTRL_ADDR(tm->tsens_srot_addr);
+	thrs = readl_relaxed(TSENS_TM_0C_THRESHOLDS(srot_addr));
+
+	msm_tsens_convert_temp(thrs & TSENS_TM_0C_THR_MASK, &reset_thr);
+	msm_tsens_convert_temp(
+		((thrs >> TSENS_TM_0C_THR_OFFSET) &
+				TSENS_TM_0C_THR_MASK), &set_thr);
+
+	if (status)
+		of_thermal_handle_trip_temp(tm->min_temp.tzd, set_thr);
+	else
+		of_thermal_handle_trip_temp(tm->min_temp.tzd, reset_thr);
+
+	return IRQ_HANDLED;
+}
+
 static int tsens2xxx_hw_sensor_en(struct tsens_device *tmdev,
 					u32 sensor_id)
 {
@@ -602,19 +703,26 @@ static int tsens2xxx_hw_init(struct tsens_device *tmdev)
 static const struct tsens_irqs tsens2xxx_irqs[] = {
 	{ "tsens-upper-lower", tsens_tm_irq_thread},
 	{ "tsens-critical", tsens_tm_critical_irq_thread},
+	{ "tsens-0C", tsens_tm_0C_irq_thread},
 };
 
 static int tsens2xxx_register_interrupts(struct tsens_device *tmdev)
 {
 	struct platform_device *pdev;
-	int i, rc;
+	int i, rc, irq_no;
+	unsigned long irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
 
 	if (!tmdev)
 		return -EINVAL;
 
+	if (tmdev->min_temp_sensor_id != MIN_TEMP_DEF_OFFSET)
+		irq_no = ARRAY_SIZE(tsens2xxx_irqs);
+	else
+		irq_no = ARRAY_SIZE(tsens2xxx_irqs) - 1;
+
 	pdev = tmdev->pdev;
 
-	for (i = 0; i < ARRAY_SIZE(tsens2xxx_irqs); i++) {
+	for (i = 0; i < irq_no; i++) {
 		int irq;
 
 		irq = platform_get_irq_byname(pdev, tsens2xxx_irqs[i].name);
@@ -624,10 +732,12 @@ static int tsens2xxx_register_interrupts(struct tsens_device *tmdev)
 			return irq;
 		}
 
+		/* index 2 is the "tsens-0C" entry, which is edge triggered */
+		if (i == 2)
+			irqflags = IRQF_TRIGGER_RISING | IRQF_ONESHOT;
+
 		rc = devm_request_threaded_irq(&pdev->dev, irq, NULL,
 				tsens2xxx_irqs[i].handler,
-				IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
-				tsens2xxx_irqs[i].name, tmdev);
+				irqflags, tsens2xxx_irqs[i].name, tmdev);
 		if (rc) {
 			dev_err(&pdev->dev, "failed to get irq %s\n",
 					tsens2xxx_irqs[i].name);
@@ -677,3 +787,14 @@ const struct tsens_data data_tsens24xx = {
 	.ops				= &ops_tsens2xxx,
 	.mtc				= false,
 };
+
+const struct tsens_data data_tsens26xx = {
+	.cycle_monitor			= true,
+	.cycle_compltn_monitor_mask	= 1,
+	.wd_bark			= true,
+	.wd_bark_mask			= 0,
+	.ops				= &ops_tsens2xxx,
+	.mtc				= false,
+	.ver_major			= 2,
+	.ver_minor			= 6,
+};
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index b989ca2..2f03729 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -116,8 +116,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
 
 	list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
 		tsk = waiter->task;
-		smp_mb();
-		waiter->task = NULL;
+		smp_store_release(&waiter->task, NULL);
 		wake_up_process(tsk);
 		put_task_struct(tsk);
 	}
@@ -217,7 +216,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 
-		if (!waiter.task)
+		if (!smp_load_acquire(&waiter.task))
 			break;
 		if (!timeout)
 			break;
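
The release/acquire pair above replaces the standalone smp_mb(): every store the waker makes before publishing waiter->task = NULL is guaranteed visible to the sleeper by the time its acquire load observes the NULL. Sketch of the pairing:

	/* waker (__ldsem_wake_readers)      sleeper (down_read_failed)
	 *   grant the semaphore              set_current_state(...);
	 *   smp_store_release(&w->task,      if (!smp_load_acquire(&w.task))
	 *                     NULL);                 break;  // grant visible
	 *   wake_up_process(tsk);            schedule_timeout(...);
	 */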
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 5b442bc..59675cc 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1333,10 +1333,6 @@ static int acm_probe(struct usb_interface *intf,
 	tty_port_init(&acm->port);
 	acm->port.ops = &acm_port_ops;
 
-	minor = acm_alloc_minor(acm);
-	if (minor < 0)
-		goto alloc_fail1;
-
 	ctrlsize = usb_endpoint_maxp(epctrl);
 	readsize = usb_endpoint_maxp(epread) *
 				(quirks == SINGLE_RX_URB ? 1 : 2);
@@ -1344,6 +1340,13 @@ static int acm_probe(struct usb_interface *intf,
 	acm->writesize = usb_endpoint_maxp(epwrite) * 20;
 	acm->control = control_interface;
 	acm->data = data_interface;
+
+	usb_get_intf(acm->control); /* undone in destruct() */
+
+	minor = acm_alloc_minor(acm);
+	if (minor < 0)
+		goto alloc_fail1;
+
 	acm->minor = minor;
 	acm->dev = usb_dev;
 	if (h.usb_cdc_acm_descriptor)
@@ -1490,7 +1493,6 @@ static int acm_probe(struct usb_interface *intf,
 	usb_driver_claim_interface(&acm_driver, data_interface, acm);
 	usb_set_intfdata(data_interface, acm);
 
-	usb_get_intf(control_interface);
 	tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
 			&control_interface->dev);
 	if (IS_ERR(tty_dev)) {
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index ffccd40..29c6414 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1792,8 +1792,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 	return 0;
 
  error:
-	if (as && as->usbm)
-		dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count);
 	kfree(isopkt);
 	kfree(dr);
 	if (as)
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 65de6f7..558890a 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -193,9 +193,10 @@ int usb_register_dev(struct usb_interface *intf,
 		intf->minor = minor;
 		break;
 	}
-	up_write(&minor_rwsem);
-	if (intf->minor < 0)
+	if (intf->minor < 0) {
+		up_write(&minor_rwsem);
 		return -EXFULL;
+	}
 
 	/* create a usb class device for this usb interface */
 	snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -203,12 +204,11 @@ int usb_register_dev(struct usb_interface *intf,
 				      MKDEV(USB_MAJOR, minor), class_driver,
 				      "%s", kbasename(name));
 	if (IS_ERR(intf->usb_dev)) {
-		down_write(&minor_rwsem);
 		usb_minors[minor] = NULL;
 		intf->minor = -1;
-		up_write(&minor_rwsem);
 		retval = PTR_ERR(intf->usb_dev);
 	}
+	up_write(&minor_rwsem);
 	return retval;
 }
 EXPORT_SYMBOL_GPL(usb_register_dev);
@@ -234,12 +234,12 @@ void usb_deregister_dev(struct usb_interface *intf,
 		return;
 
 	dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
+	device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
 
 	down_write(&minor_rwsem);
 	usb_minors[intf->minor] = NULL;
 	up_write(&minor_rwsem);
 
-	device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
 	intf->usb_dev = NULL;
 	intf->minor = -1;
 	destroy_usb_class();
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 4020ce8d..0d3fd20 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -2211,14 +2211,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
 				(struct usb_cdc_dmm_desc *)buffer;
 			break;
 		case USB_CDC_MDLM_TYPE:
-			if (elength < sizeof(struct usb_cdc_mdlm_desc *))
+			if (elength < sizeof(struct usb_cdc_mdlm_desc))
 				goto next_desc;
 			if (desc)
 				return -EINVAL;
 			desc = (struct usb_cdc_mdlm_desc *)buffer;
 			break;
 		case USB_CDC_MDLM_DETAIL_TYPE:
-			if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
+			if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
 				goto next_desc;
 			if (detail)
 				return -EINVAL;
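
Both hunks fix the same sizeof slip: sizeof(struct foo *) is the size of a pointer (4 or 8 bytes), not of the structure, so undersized MDLM descriptors slipped past the length check. Illustration:

	struct usb_cdc_mdlm_desc d;

	sizeof(struct usb_cdc_mdlm_desc *);	/* 4 or 8: size of a pointer */
	sizeof(d);				/* the real descriptor length */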
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 82c761d..1b0f981 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -145,6 +145,10 @@ void dwc3_en_sleep_mode(struct dwc3 *dwc)
 	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
 	reg |= DWC3_GUSB2PHYCFG_ENBLSLPM;
 	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+
+	reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+	reg |= DWC3_GUCTL1_L1_SUSP_THRLD_EN_FOR_HOST;
+	dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
 }
 
 void dwc3_dis_sleep_mode(struct dwc3 *dwc)
@@ -154,6 +158,10 @@ void dwc3_dis_sleep_mode(struct dwc3 *dwc)
 	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
 	reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
 	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+
+	reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+	reg &= ~DWC3_GUCTL1_L1_SUSP_THRLD_EN_FOR_HOST;
+	dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
 }
 
 void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index c80f5d2..d2d3e16 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -258,6 +258,7 @@
 #define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS	BIT(28)
 #define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW	BIT(24)
 #define DWC3_GUCTL1_IP_GAP_ADD_ON(n)	(n << 21)
+#define DWC3_GUCTL1_L1_SUSP_THRLD_EN_FOR_HOST	BIT(8)
 
 /* Global Status Register */
 #define DWC3_GSTS_OTG_IP	BIT(10)
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index dbd869d..b678848 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -282,6 +282,32 @@ static const struct debugfs_reg32 dwc3_regs[] = {
 	dump_register(OSTS),
 };
 
+static int dwc3_regdump_show(struct seq_file *s, void *unused)
+{
+	struct dwc3		*dwc = s->private;
+
+	if (atomic_read(&dwc->in_lpm)) {
+		seq_puts(s, "USB device is powered off\n");
+		return 0;
+	}
+
+	debugfs_print_regs32(s, dwc->regset->regs, dwc->regset->nregs,
+				dwc->regset->base, "");
+	return 0;
+}
+
+static int dwc3_regdump_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dwc3_regdump_show, inode->i_private);
+}
+
+static const struct file_operations dwc3_regdump_fops = {
+	.open			= dwc3_regdump_open,
+	.read			= seq_read,
+	.llseek			= seq_lseek,
+	.release		= single_release,
+};
+
 static int dwc3_mode_show(struct seq_file *s, void *unused)
 {
 	struct dwc3		*dwc = s->private;
@@ -997,7 +1023,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
 
 	dwc->root = root;
 
-	debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
+	debugfs_create_file("regdump", 0444, root, dwc, &dwc3_regdump_fops);
 
 	if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) {
 		debugfs_create_file("mode", S_IRUGO | S_IWUSR, root, dwc,
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 9ee7c95..f916f87 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1365,6 +1365,7 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
 		struct usb_endpoint_descriptor desc1, *desc;
 
 		switch (epfile->ffs->gadget->speed) {
+		case USB_SPEED_SUPER_PLUS:
 		case USB_SPEED_SUPER:
 			desc_idx = 2;
 			break;
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index cf3dc1d..c0d0449 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -1339,7 +1339,6 @@ static ssize_t gsi_ctrl_dev_write(struct file *fp, const char __user *buf,
 	unsigned long flags;
 	struct gsi_ctrl_pkt *cpkt;
 	struct gsi_ctrl_port *c_port;
-	struct usb_request *req;
 	enum ipa_usb_teth_prot prot_id =
 		*(enum ipa_usb_teth_prot *)(fp->private_data);
 	struct gsi_inst_status *inst_cur = &inst_status[prot_id];
@@ -1358,13 +1357,6 @@ static ssize_t gsi_ctrl_dev_write(struct file *fp, const char __user *buf,
 
 	gsi = inst_cur->opts->gsi;
 	c_port = &gsi->c_port;
-	req = c_port->notify_req;
-
-	if (!c_port || !req || !req->buf) {
-		log_event_err("%s: c_port %pK req %p req->buf %p",
-			__func__, c_port, req, req ? req->buf : req);
-		return -ENODEV;
-	}
 
 	if (!count || count > GSI_MAX_CTRL_PKT_SIZE) {
 		log_event_err("error: ctrl pkt length %zu", count);
@@ -1439,9 +1431,9 @@ static long gsi_ctrl_dev_ioctl(struct file *fp, unsigned int cmd,
 	gsi = inst_cur->opts->gsi;
 	c_port = &gsi->c_port;
 
-	if (!c_port) {
-		log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
-		return -ENODEV;
+	if (!atomic_read(&gsi->connected)) {
+		log_event_err("USB cable not connected\n");
+		return -ECONNRESET;
 	}
 
 	switch (cmd) {
@@ -1807,7 +1799,7 @@ static int gsi_ctrl_send_notification(struct f_gsi *gsi)
 	__le32 *data;
 	struct usb_cdc_notification *event;
 	struct usb_request *req = gsi->c_port.notify_req;
-	struct usb_composite_dev *cdev = gsi->function.config->cdev;
+	struct usb_composite_dev *cdev;
 	struct gsi_ctrl_pkt *cpkt;
 	unsigned long flags;
 	bool del_free_cpkt = false;
@@ -1838,6 +1830,7 @@ static int gsi_ctrl_send_notification(struct f_gsi *gsi)
 	log_event_dbg("%s: cpkt->type:%d\n", __func__, cpkt->type);
 
 	event = req->buf;
+	cdev = gsi->function.config->cdev;
 
 	switch (cpkt->type) {
 	case GSI_CTRL_NOTIFY_CONNECT:
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index 0b64fbe..1212e7e 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -915,6 +915,9 @@ int usb_qdss_write(struct usb_qdss_ch *ch, struct qdss_request *d_req)
 	req->buf = d_req->buf;
 	req->length = d_req->length;
 	req->context = d_req;
+	req->sg = d_req->sg;
+	req->num_sgs = d_req->num_sgs;
+	req->num_mapped_sgs = d_req->num_mapped_sgs;
 	if (usb_ep_queue(qdss->port.data, req, GFP_ATOMIC)) {
 		spin_lock_irqsave(&qdss->lock, flags);
 		list_add_tail(&req->list, &qdss->data_write_pool);
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index fea02c7..a5254e8 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -19,6 +19,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <linux/sys_soc.h>
 #include <linux/uaccess.h>
 #include <linux/usb/ch9.h>
@@ -2378,9 +2379,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
 	if (usb3->forced_b_device)
 		return -EBUSY;
 
-	if (!strncmp(buf, "host", strlen("host")))
+	if (sysfs_streq(buf, "host"))
 		new_mode_is_host = true;
-	else if (!strncmp(buf, "peripheral", strlen("peripheral")))
+	else if (sysfs_streq(buf, "peripheral"))
 		new_mode_is_host = false;
 	else
 		return -EINVAL;
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index 671bce1..8616c52 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -238,10 +238,15 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
 	 * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
 	 * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
 	 * xhci_gen_setup().
+	 *
+	 * Also, since the firmware/internal CPU controls USBSTS.STS_HALT
+	 * and processing slows down when the roothub port enters U3, a
+	 * long delay for the STS_HALT handshake is needed in xhci_suspend().
 	 */
 	if (xhci_rcar_is_gen2(hcd->self.controller) ||
-			xhci_rcar_is_gen3(hcd->self.controller))
-		xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
+			xhci_rcar_is_gen3(hcd->self.controller)) {
+		xhci->quirks |= XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND;
+	}
 
 	if (!xhci_rcar_wait_for_pll_active(hcd))
 		return -ETIMEDOUT;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 438d8ce..1d8c40f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -450,7 +450,7 @@ struct xhci_op_regs {
  * e.g. not program values > '4' if BLC = '1' and a HIRD device is attached,
  * or not program values < '4' if BLC = '0' and a BESL device is attached.
  */
-#define XHCI_DEFAULT_BESL	4
+#define XHCI_DEFAULT_BESL	0
 
 /*
  * USB3 specification define a 360ms tPollingLFPSTiemout for USB3 ports
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index c2991b8..55db0fc 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -866,19 +866,20 @@ static void iowarrior_disconnect(struct usb_interface *interface)
 	dev = usb_get_intfdata(interface);
 	mutex_lock(&iowarrior_open_disc_lock);
 	usb_set_intfdata(interface, NULL);
+	/* prevent device read, write and ioctl */
+	dev->present = 0;
 
 	minor = dev->minor;
+	mutex_unlock(&iowarrior_open_disc_lock);
+	/* give back our minor - this will call close(); locks need to be dropped at this point */
 
-	/* give back our minor */
 	usb_deregister_dev(interface, &iowarrior_class);
 
 	mutex_lock(&dev->mutex);
 
 	/* prevent device read, write and ioctl */
-	dev->present = 0;
 
 	mutex_unlock(&dev->mutex);
-	mutex_unlock(&iowarrior_open_disc_lock);
 
 	if (dev->opened) {
 		/* There is a process that holds a file descriptor to the device,
diff --git a/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c b/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c
index 3c97e40..39745c1 100644
--- a/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c
+++ b/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c
@@ -445,8 +445,7 @@ static int ssusb_redriver_vbus_notifier(struct notifier_block *nb,
 
 	redriver->vbus_active = event;
 
-	if (redriver->vbus_active)
-		queue_work(redriver->redriver_wq, &redriver->config_work);
+	queue_work(redriver->redriver_wq, &redriver->config_work);
 
 	return NOTIFY_DONE;
 }
@@ -466,8 +465,7 @@ static int ssusb_redriver_id_notifier(struct notifier_block *nb,
 
 	redriver->host_active = host_active;
 
-	if (redriver->host_active)
-		queue_work(redriver->redriver_wq, &redriver->config_work);
+	queue_work(redriver->redriver_wq, &redriver->config_work);
 
 	return NOTIFY_DONE;
 }
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 7b306aa..6715a12 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -92,7 +92,6 @@ static void yurex_delete(struct kref *kref)
 
 	dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
-	usb_put_dev(dev->udev);
 	if (dev->cntl_urb) {
 		usb_kill_urb(dev->cntl_urb);
 		kfree(dev->cntl_req);
@@ -108,6 +107,7 @@ static void yurex_delete(struct kref *kref)
 				dev->int_buffer, dev->urb->transfer_dma);
 		usb_free_urb(dev->urb);
 	}
+	usb_put_dev(dev->udev);
 	kfree(dev);
 }
 
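Reordering usb_put_dev() matters because the usb_free_coherent() call above takes dev->udev: dropping the last device reference first could free the usb_device while its DMA buffers are still being returned. Teardown order after the fix:

	/*   usb_free_coherent(dev->udev, ...);   // still needs a live udev
	 *   usb_free_urb(dev->urb);
	 *   usb_put_dev(dev->udev);              // drop the reference last
	 *   kfree(dev);
	 */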
diff --git a/drivers/usb/phy/phy-msm-snps-hs.c b/drivers/usb/phy/phy-msm-snps-hs.c
index 9107cc4..594c8ef 100644
--- a/drivers/usb/phy/phy-msm-snps-hs.c
+++ b/drivers/usb/phy/phy-msm-snps-hs.c
@@ -19,6 +19,7 @@
 #include <linux/regulator/machine.h>
 #include <linux/usb/phy.h>
 #include <linux/reset.h>
+#include <linux/debugfs.h>
 
 #define USB2_PHY_USB_PHY_UTMI_CTRL0		(0x3c)
 #define OPMODE_MASK				(0x3 << 3)
@@ -59,6 +60,15 @@
 #define USB2PHY_USB_PHY_RTUNE_SEL		(0xb4)
 #define RTUNE_SEL				BIT(0)
 
+#define TXPREEMPAMPTUNE0(x)			(x << 6)
+#define TXPREEMPAMPTUNE0_MASK			(BIT(7) | BIT(6))
+#define USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X0	0x6c
+#define USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X1	0x70
+#define USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X2	0x74
+#define USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X3	0x78
+#define TXVREFTUNE0_MASK			0xF
+#define PARAM_OVRD_MASK			0xFF
+
 #define USB_HSPHY_3P3_VOL_MIN			3050000 /* uV */
 #define USB_HSPHY_3P3_VOL_MAX			3300000 /* uV */
 #define USB_HSPHY_3P3_HPM_LOAD			16000	/* uA */
@@ -87,7 +97,6 @@ struct msm_hsphy {
 	bool			suspended;
 	bool			cable_connected;
 	bool			dpdm_enable;
-	bool			no_rext_present;
 
 	int			*param_override_seq;
 	int			param_override_seq_cnt;
@@ -98,6 +107,15 @@ struct msm_hsphy {
 	struct mutex		phy_lock;
 	struct regulator_desc	dpdm_rdesc;
 	struct regulator_dev	*dpdm_rdev;
+
+	/* debugfs entries */
+	struct dentry		*root;
+	u8			txvref_tune0;
+	u8			pre_emphasis;
+	u8			param_ovrd0;
+	u8			param_ovrd1;
+	u8			param_ovrd2;
+	u8			param_ovrd3;
 };
 
 static void msm_hsphy_enable_clocks(struct msm_hsphy *phy, bool on)
@@ -360,6 +378,53 @@ static int msm_hsphy_init(struct usb_phy *uphy)
 		hsusb_phy_write_seq(phy->base, phy->param_override_seq,
 				phy->param_override_seq_cnt, 0);
 
+	if (phy->pre_emphasis) {
+		u8 val = TXPREEMPAMPTUNE0(phy->pre_emphasis) &
+				TXPREEMPAMPTUNE0_MASK;
+		if (val)
+			msm_usb_write_readback(phy->base,
+				USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X1,
+				TXPREEMPAMPTUNE0_MASK, val);
+	}
+
+	if (phy->txvref_tune0) {
+		u8 val = phy->txvref_tune0 & TXVREFTUNE0_MASK;
+
+		msm_usb_write_readback(phy->base,
+			USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X1,
+			TXVREFTUNE0_MASK, val);
+	}
+
+	if (phy->param_ovrd0) {
+		msm_usb_write_readback(phy->base,
+			USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X0,
+			PARAM_OVRD_MASK, phy->param_ovrd0);
+	}
+
+	if (phy->param_ovrd1) {
+		msm_usb_write_readback(phy->base,
+			USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X1,
+			PARAM_OVRD_MASK, phy->param_ovrd1);
+	}
+
+	if (phy->param_ovrd2) {
+		msm_usb_write_readback(phy->base,
+			USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X2,
+			PARAM_OVRD_MASK, phy->param_ovrd2);
+	}
+
+	if (phy->param_ovrd3) {
+		msm_usb_write_readback(phy->base,
+			USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X3,
+			PARAM_OVRD_MASK, phy->param_ovrd3);
+	}
+
+	dev_dbg(uphy->dev, "x0:%08x x1:%08x x2:%08x x3:%08x\n",
+	readl_relaxed(phy->base + USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X0),
+	readl_relaxed(phy->base + USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X1),
+	readl_relaxed(phy->base + USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X2),
+	readl_relaxed(phy->base + USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X3));
+
 	if (phy->phy_rcal_reg) {
 		rcal_code = readl_relaxed(phy->phy_rcal_reg) & phy->rcal_mask;
 
@@ -367,15 +432,6 @@ static int msm_hsphy_init(struct usb_phy *uphy)
 				phy->rcal_mask, phy->phy_rcal_reg, rcal_code);
 	}
 
-	/*
-	 * Use external resistor value only if:
-	 * a. It is present and
-	 * b. efuse is not programmed.
-	 */
-	if (!phy->no_rext_present && !rcal_code)
-		msm_usb_write_readback(phy->base, USB2PHY_USB_PHY_RTUNE_SEL,
-			RTUNE_SEL, RTUNE_SEL);
-
 	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON2,
 				VREGBYPASS, VREGBYPASS);
 
@@ -574,6 +630,17 @@ static int msm_hsphy_regulator_init(struct msm_hsphy *phy)
 	return 0;
 }
 
+static void msm_hsphy_create_debugfs(struct msm_hsphy *phy)
+{
+	phy->root = debugfs_create_dir(dev_name(phy->phy.dev), NULL);
+	debugfs_create_x8("pre_emphasis", 0644, phy->root, &phy->pre_emphasis);
+	debugfs_create_x8("txvref_tune0", 0644, phy->root, &phy->txvref_tune0);
+	debugfs_create_x8("param_ovrd0", 0644, phy->root, &phy->param_ovrd0);
+	debugfs_create_x8("param_ovrd1", 0644, phy->root, &phy->param_ovrd1);
+	debugfs_create_x8("param_ovrd2", 0644, phy->root, &phy->param_ovrd2);
+	debugfs_create_x8("param_ovrd3", 0644, phy->root, &phy->param_ovrd3);
+}
+
 static int msm_hsphy_probe(struct platform_device *pdev)
 {
 	struct msm_hsphy *phy;
@@ -655,9 +722,6 @@ static int msm_hsphy_probe(struct platform_device *pdev)
 	if (IS_ERR(phy->phy_reset))
 		return PTR_ERR(phy->phy_reset);
 
-	phy->no_rext_present = of_property_read_bool(dev->of_node,
-					"qcom,no-rext-present");
-
 	phy->param_override_seq_cnt = of_property_count_elems_of_size(
 					dev->of_node,
 					"qcom,param-override-seq",
@@ -735,6 +799,8 @@ static int msm_hsphy_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	msm_hsphy_create_debugfs(phy);
+
 	return 0;
 
 err_ret:
@@ -748,6 +814,8 @@ static int msm_hsphy_remove(struct platform_device *pdev)
 	if (!phy)
 		return 0;
 
+	debugfs_remove_recursive(phy->root);
+
 	usb_remove_phy(&phy->phy);
 	clk_disable_unprepare(phy->ref_clk_src);
 
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e0a4749..56f572c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -968,6 +968,11 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
 
+	/* Motorola devices */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) },	/* mdm6600 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) },	/* mdm9600 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) },	/* mdm ram dl */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) },	/* mdm qc dl */
 
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
@@ -1549,6 +1554,7 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
 	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) },	/* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1952,11 +1958,15 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),			/* D-Link DWM-222 */
 	  .driver_info = RSVD(4) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff),			/* D-Link DWM-222 A2 */
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) },	/* D-Link DWM-152/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) },	/* D-Link DWM-156/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) },	/* D-Link DWM-156/A3 */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff),			/* Olicard 600 */
 	  .driver_info = RSVD(4) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff),			/* BroadMobi BM818 */
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },			/* OLICARD300 - MT6225 */
 	{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
 	{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index 3457c1f..5f29ce8 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -378,7 +378,8 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
 			return SNK_UNATTACHED;
 		else if (port->try_role == TYPEC_SOURCE)
 			return SRC_UNATTACHED;
-		else if (port->tcpc->config->default_role == TYPEC_SINK)
+		else if (port->tcpc->config &&
+			 port->tcpc->config->default_role == TYPEC_SINK)
 			return SNK_UNATTACHED;
 		/* Fall through to return SRC_UNATTACHED */
 	} else if (port->port_type == TYPEC_PORT_SNK) {
@@ -585,7 +586,20 @@ static void tcpm_debugfs_init(struct tcpm_port *port)
 
 static void tcpm_debugfs_exit(struct tcpm_port *port)
 {
+	int i;
+
+	mutex_lock(&port->logbuffer_lock);
+	for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
+		kfree(port->logbuffer[i]);
+		port->logbuffer[i] = NULL;
+	}
+	mutex_unlock(&port->logbuffer_lock);
+
 	debugfs_remove(port->dentry);
+	if (list_empty(&rootdir->d_subdirs)) {
+		debugfs_remove(rootdir);
+		rootdir = NULL;
+	}
 }
 
 #else
@@ -1094,7 +1108,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
 			break;
 		case CMD_ATTENTION:
 			/* Attention command does not have response */
-			typec_altmode_attention(adev, p[1]);
+			if (adev)
+				typec_altmode_attention(adev, p[1]);
 			return 0;
 		default:
 			break;
@@ -1146,20 +1161,26 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
 			}
 			break;
 		case CMD_ENTER_MODE:
-			typec_altmode_update_active(pdev, true);
+			if (adev && pdev) {
+				typec_altmode_update_active(pdev, true);
 
-			if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
-				response[0] = VDO(adev->svid, 1, CMD_EXIT_MODE);
-				response[0] |= VDO_OPOS(adev->mode);
-				return 1;
+				if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
+					response[0] = VDO(adev->svid, 1,
+							  CMD_EXIT_MODE);
+					response[0] |= VDO_OPOS(adev->mode);
+					return 1;
+				}
 			}
 			return 0;
 		case CMD_EXIT_MODE:
-			typec_altmode_update_active(pdev, false);
+			if (adev && pdev) {
+				typec_altmode_update_active(pdev, false);
 
-			/* Back to USB Operation */
-			WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
-						     NULL));
+				/* Back to USB Operation */
+				WARN_ON(typec_altmode_notify(adev,
+							     TYPEC_STATE_USB,
+							     NULL));
+			}
 			break;
 		default:
 			break;
@@ -1169,8 +1190,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
 		switch (cmd) {
 		case CMD_ENTER_MODE:
 			/* Back to USB Operation */
-			WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
-						     NULL));
+			if (adev)
+				WARN_ON(typec_altmode_notify(adev,
+							     TYPEC_STATE_USB,
+							     NULL));
 			break;
 		default:
 			break;
@@ -1181,7 +1204,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
 	}
 
 	/* Informing the alternate mode drivers about everything */
-	typec_altmode_vdm(adev, p[0], &p[1], cnt);
+	if (adev)
+		typec_altmode_vdm(adev, p[0], &p[1], cnt);
 
 	return rlen;
 }
@@ -4083,7 +4107,7 @@ static int tcpm_try_role(const struct typec_capability *cap, int role)
 	mutex_lock(&port->lock);
 	if (tcpc->try_role)
 		ret = tcpc->try_role(tcpc, role);
-	if (!ret && !tcpc->config->try_role_hw)
+	if (!ret && (!tcpc->config || !tcpc->config->try_role_hw))
 		port->try_role = role;
 	port->try_src_count = 0;
 	port->try_snk_count = 0;
@@ -4730,7 +4754,7 @@ static int tcpm_copy_caps(struct tcpm_port *port,
 	port->typec_caps.prefer_role = tcfg->default_role;
 	port->typec_caps.type = tcfg->type;
 	port->typec_caps.data = tcfg->data;
-	port->self_powered = port->tcpc->config->self_powered;
+	port->self_powered = tcfg->self_powered;
 
 	return 0;
 }
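
The added guards treat tcpc->config as optional; ports whose capabilities arrive another way (for example via a firmware node - an assumption, this patch does not say) can register with config == NULL, so every dereference needs the same check:

	/* guard pattern used throughout the hunks above */
	if (tcpc->config && tcpc->config->default_role == TYPEC_SINK)
		return SNK_UNATTACHED;	/* only trust config when present */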
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
index 73427d8e..e569413 100644
--- a/drivers/xen/xen-pciback/conf_space_capability.c
+++ b/drivers/xen/xen-pciback/conf_space_capability.c
@@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
 {
 	int err;
 	u16 old_value;
-	pci_power_t new_state, old_state;
+	pci_power_t new_state;
 
 	err = pci_read_config_word(dev, offset, &old_value);
 	if (err)
 		goto out;
 
-	old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
 	new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
 
 	new_value &= PM_OK_BITS;
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index ac6c383..1985565 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1485,7 +1485,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
 		goto out;
 	}
 
-	trans = btrfs_attach_transaction(root);
+	trans = btrfs_join_transaction_nostart(root);
 	if (IS_ERR(trans)) {
 		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
 			ret = PTR_ERR(trans);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index f1ca53a..26317bc 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -28,15 +28,18 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
 	[TRANS_STATE_COMMIT_START]	= (__TRANS_START | __TRANS_ATTACH),
 	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_START |
 					   __TRANS_ATTACH |
-					   __TRANS_JOIN),
+					   __TRANS_JOIN |
+					   __TRANS_JOIN_NOSTART),
 	[TRANS_STATE_UNBLOCKED]		= (__TRANS_START |
 					   __TRANS_ATTACH |
 					   __TRANS_JOIN |
-					   __TRANS_JOIN_NOLOCK),
+					   __TRANS_JOIN_NOLOCK |
+					   __TRANS_JOIN_NOSTART),
 	[TRANS_STATE_COMPLETED]		= (__TRANS_START |
 					   __TRANS_ATTACH |
 					   __TRANS_JOIN |
-					   __TRANS_JOIN_NOLOCK),
+					   __TRANS_JOIN_NOLOCK |
+					   __TRANS_JOIN_NOSTART),
 };
 
 void btrfs_put_transaction(struct btrfs_transaction *transaction)
@@ -531,7 +534,8 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
 		ret = join_transaction(fs_info, type);
 		if (ret == -EBUSY) {
 			wait_current_trans(fs_info);
-			if (unlikely(type == TRANS_ATTACH))
+			if (unlikely(type == TRANS_ATTACH ||
+				     type == TRANS_JOIN_NOSTART))
 				ret = -ENOENT;
 		}
 	} while (ret == -EBUSY);
@@ -648,6 +652,16 @@ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root
 }
 
 /*
+ * Similar to regular join but it never starts a transaction when none is
+ * running or after waiting for the current one to finish.
+ */
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
+{
+	return start_transaction(root, 0, TRANS_JOIN_NOSTART,
+				 BTRFS_RESERVE_NO_FLUSH, true);
+}
+
+/*
  * btrfs_attach_transaction() - catch the running transaction
  *
  * It is used when we want to commit the current the transaction, but
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 4cbb1b5..c1d34cc 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -97,11 +97,13 @@ struct btrfs_transaction {
 #define __TRANS_JOIN		(1U << 11)
 #define __TRANS_JOIN_NOLOCK	(1U << 12)
 #define __TRANS_DUMMY		(1U << 13)
+#define __TRANS_JOIN_NOSTART	(1U << 14)
 
 #define TRANS_START		(__TRANS_START | __TRANS_FREEZABLE)
 #define TRANS_ATTACH		(__TRANS_ATTACH)
 #define TRANS_JOIN		(__TRANS_JOIN | __TRANS_FREEZABLE)
 #define TRANS_JOIN_NOLOCK	(__TRANS_JOIN_NOLOCK)
+#define TRANS_JOIN_NOSTART	(__TRANS_JOIN_NOSTART)
 
 #define TRANS_EXTWRITERS	(__TRANS_START | __TRANS_ATTACH)
 
@@ -187,6 +189,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
 					int min_factor);
 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
 					struct btrfs_root *root);
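
Caller-side shape of the new helper, matching the backref.c hunk earlier in this patch: -ENOENT now means "no transaction is currently running" rather than a hard failure (a sketch):

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) == -ENOENT)
			trans = NULL;	/* nothing running; proceed without */
		else
			return PTR_ERR(trans);
	}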
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index c181f16..2bc47eb 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -168,7 +168,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
 	if (tcon == NULL)
 		return 0;
 
-	if (smb2_command == SMB2_TREE_CONNECT)
+	if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
 		return 0;
 
 	if (tcon->tidStatus == CifsExiting) {
@@ -1006,7 +1006,12 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
 	else
 		req->SecurityMode = 0;
 
+#ifdef CONFIG_CIFS_DFS_UPCALL
+	req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
+#else
 	req->Capabilities = 0;
+#endif /* DFS_UPCALL */
+
 	req->Channel = 0; /* MBZ */
 
 	sess_data->iov[0].iov_base = (char *)req;
diff --git a/fs/dax.c b/fs/dax.c
index 75a289c..f0d932f 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -659,7 +659,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 	 * guaranteed to either see new references or prevent new
 	 * references from being established.
 	 */
-	unmap_mapping_range(mapping, 0, 0, 1);
+	unmap_mapping_range(mapping, 0, 0, 0);
 
 	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
 				min(end - index, (pgoff_t)PAGEVEC_SIZE),
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 7f8bb08..d14d71d 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -392,6 +392,19 @@ static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
 	return mp->mp_aheight - x - 1;
 }
 
+static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
+{
+	sector_t factor = 1, block = 0;
+	int hgt;
+
+	for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
+		if (hgt < mp->mp_aheight)
+			block += mp->mp_list[hgt] * factor;
+		factor *= sdp->sd_inptrs;
+	}
+	return block;
+}
+
 static void release_metapath(struct metapath *mp)
 {
 	int i;
@@ -432,60 +445,84 @@ static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *pt
 	return ptr - first;
 }
 
-typedef const __be64 *(*gfs2_metadata_walker)(
-		struct metapath *mp,
-		const __be64 *start, const __be64 *end,
-		u64 factor, void *data);
+enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
 
-#define WALK_STOP ((__be64 *)0)
-#define WALK_NEXT ((__be64 *)1)
+/*
+ * gfs2_metadata_walker - walk an indirect block
+ * @mp: Metapath to indirect block
+ * @ptrs: Number of pointers to look at
+ *
+ * When returning WALK_FOLLOW, the walker must update @mp to point at the right
+ * indirect block to follow.
+ */
+typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
+						   unsigned int ptrs);
 
-static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
-		u64 len, struct metapath *mp, gfs2_metadata_walker walker,
-		void *data)
+/*
+ * gfs2_walk_metadata - walk a tree of indirect blocks
+ * @inode: The inode
+ * @mp: Starting point of walk
+ * @max_len: Maximum number of blocks to walk
+ * @walker: Called during the walk
+ *
+ * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
+ * past the end of metadata, and a negative error code otherwise.
+ */
+
+static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
+		u64 max_len, gfs2_metadata_walker walker)
 {
-	struct metapath clone;
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
-	const __be64 *start, *end, *ptr;
 	u64 factor = 1;
 	unsigned int hgt;
-	int ret = 0;
+	int ret;
 
-	for (hgt = ip->i_height - 1; hgt >= mp->mp_aheight; hgt--)
+	/*
+	 * The walk starts in the lowest allocated indirect block, which may be
+	 * before the position indicated by @mp.  Adjust @max_len accordingly
+	 * to avoid a short walk.
+	 */
+	for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
+		max_len += mp->mp_list[hgt] * factor;
+		mp->mp_list[hgt] = 0;
 		factor *= sdp->sd_inptrs;
+	}
 
 	for (;;) {
-		u64 step;
+		u16 start = mp->mp_list[hgt];
+		enum walker_status status;
+		unsigned int ptrs;
+		u64 len;
 
 		/* Walk indirect block. */
-		start = metapointer(hgt, mp);
-		end = metaend(hgt, mp);
-
-		step = (end - start) * factor;
-		if (step > len)
-			end = start + DIV_ROUND_UP_ULL(len, factor);
-
-		ptr = walker(mp, start, end, factor, data);
-		if (ptr == WALK_STOP)
+		ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
+		len = ptrs * factor;
+		if (len > max_len)
+			ptrs = DIV_ROUND_UP_ULL(max_len, factor);
+		status = walker(mp, ptrs);
+		switch (status) {
+		case WALK_STOP:
+			return 1;
+		case WALK_FOLLOW:
+			BUG_ON(mp->mp_aheight == mp->mp_fheight);
+			ptrs = mp->mp_list[hgt] - start;
+			len = ptrs * factor;
 			break;
-		if (step >= len)
+		case WALK_CONTINUE:
 			break;
-		len -= step;
-		if (ptr != WALK_NEXT) {
-			BUG_ON(!*ptr);
-			mp->mp_list[hgt] += ptr - start;
-			goto fill_up_metapath;
 		}
+		if (len >= max_len)
+			break;
+		max_len -= len;
+		if (status == WALK_FOLLOW)
+			goto fill_up_metapath;
 
 lower_metapath:
 		/* Decrease height of metapath. */
-		if (mp != &clone) {
-			clone_metapath(&clone, mp);
-			mp = &clone;
-		}
 		brelse(mp->mp_bh[hgt]);
 		mp->mp_bh[hgt] = NULL;
+		mp->mp_list[hgt] = 0;
 		if (!hgt)
 			break;
 		hgt--;
@@ -493,10 +530,7 @@ static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
 
 		/* Advance in metadata tree. */
 		(mp->mp_list[hgt])++;
-		start = metapointer(hgt, mp);
-		end = metaend(hgt, mp);
-		if (start >= end) {
-			mp->mp_list[hgt] = 0;
+		if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
 			if (!hgt)
 				break;
 			goto lower_metapath;
@@ -504,44 +538,36 @@ static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
 
 fill_up_metapath:
 		/* Increase height of metapath. */
-		if (mp != &clone) {
-			clone_metapath(&clone, mp);
-			mp = &clone;
-		}
 		ret = fillup_metapath(ip, mp, ip->i_height - 1);
 		if (ret < 0)
-			break;
+			return ret;
 		hgt += ret;
 		for (; ret; ret--)
 			do_div(factor, sdp->sd_inptrs);
 		mp->mp_aheight = hgt + 1;
 	}
-	if (mp == &clone)
-		release_metapath(mp);
-	return ret;
+	return 0;
 }
 
-struct gfs2_hole_walker_args {
-	u64 blocks;
-};
-
-static const __be64 *gfs2_hole_walker(struct metapath *mp,
-		const __be64 *start, const __be64 *end,
-		u64 factor, void *data)
+static enum walker_status gfs2_hole_walker(struct metapath *mp,
+					   unsigned int ptrs)
 {
-	struct gfs2_hole_walker_args *args = data;
-	const __be64 *ptr;
+	const __be64 *start, *ptr, *end;
+	unsigned int hgt;
+
+	hgt = mp->mp_aheight - 1;
+	start = metapointer(hgt, mp);
+	end = start + ptrs;
 
 	for (ptr = start; ptr < end; ptr++) {
 		if (*ptr) {
-			args->blocks += (ptr - start) * factor;
+			mp->mp_list[hgt] += ptr - start;
 			if (mp->mp_aheight == mp->mp_fheight)
 				return WALK_STOP;
-			return ptr;  /* increase height */
+			return WALK_FOLLOW;
 		}
 	}
-	args->blocks += (end - start) * factor;
-	return WALK_NEXT;
+	return WALK_CONTINUE;
 }
 
 /**
@@ -559,12 +585,24 @@ static const __be64 *gfs2_hole_walker(struct metapath *mp,
 static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
 			  struct metapath *mp, struct iomap *iomap)
 {
-	struct gfs2_hole_walker_args args = { };
-	int ret = 0;
+	struct metapath clone;
+	u64 hole_size;
+	int ret;
 
-	ret = gfs2_walk_metadata(inode, lblock, len, mp, gfs2_hole_walker, &args);
-	if (!ret)
-		iomap->length = args.blocks << inode->i_blkbits;
+	clone_metapath(&clone, mp);
+	ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
+	if (ret < 0)
+		goto out;
+
+	if (ret == 1)
+		hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
+	else
+		hole_size = len;
+	iomap->length = hole_size << inode->i_blkbits;
+	ret = 0;
+
+out:
+	release_metapath(&clone);
 	return ret;
 }
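For illustration, a hedged sketch of another walker under the new contract (demo_count_walker and demo_allocated are hypothetical names, not part of this change): it tallies allocated pointers at the current height and never descends, so it only ever returns WALK_CONTINUE.

/* Hypothetical walker sketch: count allocated pointers, never descend. */
static u64 demo_allocated;

static enum walker_status demo_count_walker(struct metapath *mp,
					    unsigned int ptrs)
{
	unsigned int hgt = mp->mp_aheight - 1;
	const __be64 *start = metapointer(hgt, mp);
	const __be64 *ptr;

	for (ptr = start; ptr < start + ptrs; ptr++)
		if (*ptr)
			demo_allocated++;
	return WALK_CONTINUE;
}

A walker that needs to descend would instead advance mp->mp_list[hgt] to the pointer of interest and return WALK_FOLLOW, exactly as gfs2_hole_walker does above.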
 
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 904e08b..31ae3bd 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3133,7 +3133,7 @@ static int _nfs4_do_setattr(struct inode *inode,
 
 	if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
 		/* Use that stateid */
-	} else if (ctx != NULL) {
+	} else if (ctx != NULL && ctx->state) {
 		struct nfs_lock_context *l_ctx;
 		if (!nfs4_valid_open_stateid(ctx->state))
 			return -EBADF;
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 3a24ce3..c146e12 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -3833,7 +3833,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
 	u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
 	int low_bucket = 0, bucket, high_bucket;
 	struct ocfs2_xattr_bucket *search;
-	u32 last_hash;
 	u64 blkno, lower_blkno = 0;
 
 	search = ocfs2_xattr_bucket_new(inode);
@@ -3877,8 +3876,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
 		if (xh->xh_count)
 			xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
 
-		last_hash = le32_to_cpu(xe->xe_name_hash);
-
 		/* record lower_blkno which may be the insert place. */
 		lower_blkno = blkno;
 
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 1dea7a8..05e58b5 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset)
 		}
 		if (seq_has_overflowed(m))
 			goto Eoverflow;
+		p = m->op->next(m, p, &m->index);
 		if (pos + m->count > offset) {
 			m->from = offset - pos;
 			m->count -= m->from;
@@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset)
 		}
 		pos += m->count;
 		m->count = 0;
-		p = m->op->next(m, p, &m->index);
 		if (pos == offset)
 			break;
 	}
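The reordering relies on the seq_file iterator contract: ->next() must be called for every record emitted by ->show(), including the record that satisfies the requested offset, so that m->index always names the next record to read. A minimal, hypothetical seq_operations sketch (demo_* names are illustrative) of the trio that traverse() drives:

#include <linux/seq_file.h>

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	return (*pos < 3) ? pos : NULL;	/* three records: 0, 1, 2 */
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;			/* must advance for every record shown */
	return (*pos < 3) ? pos : NULL;
}

static void demo_stop(struct seq_file *m, void *v)
{
}

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "record %lld\n", *(loff_t *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start	= demo_start,
	.next	= demo_next,
	.stop	= demo_stop,
	.show	= demo_show,
};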
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
index c64bea7..e9f20b8 100644
--- a/include/asm-generic/getorder.h
+++ b/include/asm-generic/getorder.h
@@ -7,24 +7,6 @@
 #include <linux/compiler.h>
 #include <linux/log2.h>
 
-/*
- * Runtime evaluation of get_order()
- */
-static inline __attribute_const__
-int __get_order(unsigned long size)
-{
-	int order;
-
-	size--;
-	size >>= PAGE_SHIFT;
-#if BITS_PER_LONG == 32
-	order = fls(size);
-#else
-	order = fls64(size);
-#endif
-	return order;
-}
-
 /**
  * get_order - Determine the allocation order of a memory size
  * @size: The size for which to get the order
@@ -43,19 +25,27 @@ int __get_order(unsigned long size)
  * to hold an object of the specified size.
  *
  * The result is undefined if the size is 0.
- *
- * This function may be used to initialise variables with compile time
- * evaluations of constants.
  */
-#define get_order(n)						\
-(								\
-	__builtin_constant_p(n) ? (				\
-		((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT :	\
-		(((n) < (1UL << PAGE_SHIFT)) ? 0 :		\
-		 ilog2((n) - 1) - PAGE_SHIFT + 1)		\
-	) :							\
-	__get_order(n)						\
-)
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+	if (__builtin_constant_p(size)) {
+		if (!size)
+			return BITS_PER_LONG - PAGE_SHIFT;
+
+		if (size < (1UL << PAGE_SHIFT))
+			return 0;
+
+		return ilog2((size) - 1) - PAGE_SHIFT + 1;
+	}
+
+	size--;
+	size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+	return fls(size);
+#else
+	return fls64(size);
+#endif
+}
 
 #endif	/* __ASSEMBLY__ */
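The runtime branch of the unified get_order() can be exercised in isolation; a small userspace sketch, assuming a PAGE_SHIFT of 12 and using a compiler builtin in place of the kernel's fls()/fls64():

#include <assert.h>

#define DEMO_PAGE_SHIFT 12	/* assumption: 4 KiB pages */

/* Mirror of the runtime path of get_order() above. */
static int demo_get_order(unsigned long size)
{
	size--;
	size >>= DEMO_PAGE_SHIFT;
	/* fls(): position of the highest set bit, 0 when size == 0 */
	return size ? 8 * (int)sizeof(size) - __builtin_clzl(size) : 0;
}

int main(void)
{
	assert(demo_get_order(1) == 0);		/* sub-page -> one page */
	assert(demo_get_order(4096) == 0);	/* exactly one page */
	assert(demo_get_order(4097) == 1);	/* needs two pages */
	assert(demo_get_order(8192) == 1);
	assert(demo_get_order(8193) == 2);	/* needs four pages */
	return 0;
}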
 
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index fbf5cfc9b3..fd965ff 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -386,6 +386,7 @@
 	INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
 	INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
 	INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
+	INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \
 	INTEL_VGA_DEVICE(0x3E9A, info)  /* SRV GT2 */
 
 /* CFL H */
diff --git a/include/dt-bindings/clock/qcom,dispcc-bengal.h b/include/dt-bindings/clock/qcom,dispcc-bengal.h
index 581f827..223ab5a 100644
--- a/include/dt-bindings/clock/qcom,dispcc-bengal.h
+++ b/include/dt-bindings/clock/qcom,dispcc-bengal.h
@@ -32,8 +32,4 @@
 #define DISP_CC_XO_CLK						22
 #define DISP_CC_XO_CLK_SRC					23
 
-/* DISP_CC resets */
-#define DISP_CC_MDSS_CORE_BCR					0
-#define DISP_CC_MDSS_RSCC_BCR					1
-
 #endif
diff --git a/include/dt-bindings/clock/qcom,gcc-bengal.h b/include/dt-bindings/clock/qcom,gcc-bengal.h
index 0d403fc..6e07413 100644
--- a/include/dt-bindings/clock/qcom,gcc-bengal.h
+++ b/include/dt-bindings/clock/qcom,gcc-bengal.h
@@ -65,8 +65,6 @@
 #define GCC_CAMSS_TOP_AHB_CLK_SRC				55
 #define GCC_CFG_NOC_USB3_PRIM_AXI_CLK				56
 #define GCC_CPUSS_AHB_CLK					57
-#define GCC_CPUSS_AHB_CLK_SRC					58
-#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC				59
 #define GCC_CPUSS_GNOC_CLK					60
 #define GCC_CPUSS_THROTTLE_CORE_CLK				61
 #define GCC_CPUSS_THROTTLE_XO_CLK				62
@@ -89,123 +87,101 @@
 #define GCC_GPU_SNOC_DVM_GFX_CLK				79
 #define GCC_GPU_THROTTLE_CORE_CLK				80
 #define GCC_GPU_THROTTLE_XO_CLK					81
-#define GCC_MSS_VS_CLK						82
-#define GCC_PDM2_CLK						83
-#define GCC_PDM2_CLK_SRC					84
-#define GCC_PDM_AHB_CLK						85
-#define GCC_PDM_XO4_CLK						86
-#define GCC_PRNG_AHB_CLK					87
-#define GCC_QMIP_CAMERA_NRT_AHB_CLK				88
-#define GCC_QMIP_CAMERA_RT_AHB_CLK				89
-#define GCC_QMIP_CPUSS_CFG_AHB_CLK				90
-#define GCC_QMIP_DISP_AHB_CLK					91
-#define GCC_QMIP_GPU_CFG_AHB_CLK				92
-#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK				93
-#define GCC_QUPV3_WRAP0_CORE_2X_CLK				94
-#define GCC_QUPV3_WRAP0_CORE_CLK				95
-#define GCC_QUPV3_WRAP0_S0_CLK					96
-#define GCC_QUPV3_WRAP0_S0_CLK_SRC				97
-#define GCC_QUPV3_WRAP0_S1_CLK					98
-#define GCC_QUPV3_WRAP0_S1_CLK_SRC				99
-#define GCC_QUPV3_WRAP0_S2_CLK					100
-#define GCC_QUPV3_WRAP0_S2_CLK_SRC				101
-#define GCC_QUPV3_WRAP0_S3_CLK					102
-#define GCC_QUPV3_WRAP0_S3_CLK_SRC				103
-#define GCC_QUPV3_WRAP0_S4_CLK					104
-#define GCC_QUPV3_WRAP0_S4_CLK_SRC				105
-#define GCC_QUPV3_WRAP0_S5_CLK					106
-#define GCC_QUPV3_WRAP0_S5_CLK_SRC				107
-#define GCC_QUPV3_WRAP_0_M_AHB_CLK				108
-#define GCC_QUPV3_WRAP_0_S_AHB_CLK				109
-#define GCC_SDCC1_AHB_CLK					110
-#define GCC_SDCC1_APPS_CLK					111
-#define GCC_SDCC1_APPS_CLK_SRC					112
-#define GCC_SDCC1_ICE_CORE_CLK					113
-#define GCC_SDCC1_ICE_CORE_CLK_SRC				114
-#define GCC_SDCC2_AHB_CLK					115
-#define GCC_SDCC2_APPS_CLK					116
-#define GCC_SDCC2_APPS_CLK_SRC					117
-#define GCC_SYS_NOC_CPUSS_AHB_CLK				118
-#define GCC_SYS_NOC_UFS_PHY_AXI_CLK				119
-#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK				120
-#define GCC_UFS_PHY_AHB_CLK					121
-#define GCC_UFS_PHY_AXI_CLK					122
-#define GCC_UFS_PHY_AXI_CLK_SRC					123
-#define GCC_UFS_PHY_ICE_CORE_CLK				124
-#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				125
-#define GCC_UFS_PHY_PHY_AUX_CLK					126
-#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				127
-#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				128
-#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				129
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK				130
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				131
-#define GCC_USB30_PRIM_MASTER_CLK				132
-#define GCC_USB30_PRIM_MASTER_CLK_SRC				133
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK				134
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			135
-#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC		136
-#define GCC_USB30_PRIM_SLEEP_CLK				137
-#define GCC_USB3_PRIM_CLKREF_CLK				138
-#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				139
-#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				140
-#define GCC_USB3_PRIM_PHY_PIPE_CLK				141
-#define GCC_VCODEC0_AXI_CLK					142
-#define GCC_VDDA_VS_CLK						143
-#define GCC_VDDCX_VS_CLK					144
-#define GCC_VDDMX_VS_CLK					145
-#define GCC_VENUS_AHB_CLK					146
-#define GCC_VENUS_CTL_AXI_CLK					147
-#define GCC_VIDEO_AHB_CLK					148
-#define GCC_VIDEO_AXI0_CLK					149
-#define GCC_VIDEO_THROTTLE_CORE_CLK				150
-#define GCC_VIDEO_VCODEC0_SYS_CLK				151
-#define GCC_VIDEO_VENUS_CLK_SRC					152
-#define GCC_VIDEO_VENUS_CTL_CLK					153
-#define GCC_VIDEO_XO_CLK					154
-#define GCC_VS_CTRL_AHB_CLK					155
-#define GCC_VS_CTRL_CLK						156
-#define GCC_VS_CTRL_CLK_SRC					157
-#define GCC_VSENSOR_CLK_SRC					158
-#define GCC_WCSS_VS_CLK						159
-#define GCC_AHB2PHY_CSI_CLK					160
-#define GCC_AHB2PHY_USB_CLK					161
-#define GCC_APC_VS_CLK						162
-#define GCC_BIMC_GPU_AXI_CLK					163
-#define GCC_BOOT_ROM_AHB_CLK					164
-#define GCC_CAM_THROTTLE_NRT_CLK				165
-#define GCC_CAM_THROTTLE_RT_CLK					166
-#define GCC_CAMERA_AHB_CLK					167
-#define GCC_CAMERA_XO_CLK					168
-#define GCC_CAMSS_AXI_CLK					169
-#define GCC_CAMSS_AXI_CLK_SRC					170
-#define GCC_CAMSS_CAMNOC_ATB_CLK				171
-#define GCC_CAMSS_CAMNOC_NTS_XO_CLK				172
-#define GCC_CAMSS_CCI_0_CLK					173
-#define GCC_CAMSS_CCI_CLK_SRC					174
-#define GCC_CAMSS_CPHY_0_CLK					175
-#define GCC_CAMSS_CPHY_1_CLK					176
-#define GCC_CAMSS_CPHY_2_CLK					177
+#define GCC_PDM2_CLK						82
+#define GCC_PDM2_CLK_SRC					83
+#define GCC_PDM_AHB_CLK						84
+#define GCC_PDM_XO4_CLK						85
+#define GCC_PRNG_AHB_CLK					86
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK				87
+#define GCC_QMIP_CAMERA_RT_AHB_CLK				88
+#define GCC_QMIP_CPUSS_CFG_AHB_CLK				89
+#define GCC_QMIP_DISP_AHB_CLK					90
+#define GCC_QMIP_GPU_CFG_AHB_CLK				91
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK				92
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK				93
+#define GCC_QUPV3_WRAP0_CORE_CLK				94
+#define GCC_QUPV3_WRAP0_S0_CLK					95
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC				96
+#define GCC_QUPV3_WRAP0_S1_CLK					97
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC				98
+#define GCC_QUPV3_WRAP0_S2_CLK					99
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC				100
+#define GCC_QUPV3_WRAP0_S3_CLK					101
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC				102
+#define GCC_QUPV3_WRAP0_S4_CLK					103
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC				104
+#define GCC_QUPV3_WRAP0_S5_CLK					105
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC				106
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK				107
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK				108
+#define GCC_SDCC1_AHB_CLK					109
+#define GCC_SDCC1_APPS_CLK					110
+#define GCC_SDCC1_APPS_CLK_SRC					111
+#define GCC_SDCC1_ICE_CORE_CLK					112
+#define GCC_SDCC1_ICE_CORE_CLK_SRC				113
+#define GCC_SDCC2_AHB_CLK					114
+#define GCC_SDCC2_APPS_CLK					115
+#define GCC_SDCC2_APPS_CLK_SRC					116
+#define GCC_SYS_NOC_CPUSS_AHB_CLK				117
+#define GCC_SYS_NOC_UFS_PHY_AXI_CLK				118
+#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK				119
+#define GCC_UFS_PHY_AHB_CLK					120
+#define GCC_UFS_PHY_AXI_CLK					121
+#define GCC_UFS_PHY_AXI_CLK_SRC					122
+#define GCC_UFS_PHY_ICE_CORE_CLK				123
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				124
+#define GCC_UFS_PHY_PHY_AUX_CLK					125
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				126
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				127
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				128
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK				129
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				130
+#define GCC_USB30_PRIM_MASTER_CLK				131
+#define GCC_USB30_PRIM_MASTER_CLK_SRC				132
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK				133
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			134
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC		135
+#define GCC_USB30_PRIM_SLEEP_CLK				136
+#define GCC_USB3_PRIM_CLKREF_CLK				137
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				138
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				139
+#define GCC_USB3_PRIM_PHY_PIPE_CLK				140
+#define GCC_VCODEC0_AXI_CLK					141
+#define GCC_VENUS_AHB_CLK					142
+#define GCC_VENUS_CTL_AXI_CLK					143
+#define GCC_VIDEO_AHB_CLK					144
+#define GCC_VIDEO_AXI0_CLK					145
+#define GCC_VIDEO_THROTTLE_CORE_CLK				146
+#define GCC_VIDEO_VCODEC0_SYS_CLK				147
+#define GCC_VIDEO_VENUS_CLK_SRC					148
+#define GCC_VIDEO_VENUS_CTL_CLK					149
+#define GCC_VIDEO_XO_CLK					150
+#define GCC_AHB2PHY_CSI_CLK					151
+#define GCC_AHB2PHY_USB_CLK					152
+#define GCC_BIMC_GPU_AXI_CLK					153
+#define GCC_BOOT_ROM_AHB_CLK					154
+#define GCC_CAM_THROTTLE_NRT_CLK				155
+#define GCC_CAM_THROTTLE_RT_CLK					156
+#define GCC_CAMERA_AHB_CLK					157
+#define GCC_CAMERA_XO_CLK					158
+#define GCC_CAMSS_AXI_CLK					159
+#define GCC_CAMSS_AXI_CLK_SRC					160
+#define GCC_CAMSS_CAMNOC_ATB_CLK				161
+#define GCC_CAMSS_CAMNOC_NTS_XO_CLK				162
+#define GCC_CAMSS_CCI_0_CLK					163
+#define GCC_CAMSS_CCI_CLK_SRC					164
+#define GCC_CAMSS_CPHY_0_CLK					165
+#define GCC_CAMSS_CPHY_1_CLK					166
+#define GCC_CAMSS_CPHY_2_CLK					167
 
 /* GCC resets */
-#define GCC_CAMSS_OPE_BCR					0
-#define GCC_CAMSS_TFE_BCR					1
-#define GCC_CAMSS_TOP_BCR					2
-#define GCC_GPU_BCR						3
-#define GCC_MMSS_BCR						4
-#define GCC_PDM_BCR						5
-#define GCC_PRNG_BCR						6
-#define GCC_QUPV3_WRAPPER_0_BCR					7
-#define GCC_QUPV3_WRAPPER_1_BCR					8
-#define GCC_QUSB2PHY_PRIM_BCR					9
-#define GCC_QUSB2PHY_SEC_BCR					10
-#define GCC_SDCC1_BCR						11
-#define GCC_SDCC2_BCR						12
-#define GCC_UFS_PHY_BCR						13
-#define GCC_USB30_PRIM_BCR					14
-#define GCC_USB_PHY_CFG_AHB2PHY_BCR				15
-#define GCC_VCODEC0_BCR						16
-#define GCC_VENUS_BCR						17
-#define GCC_VIDEO_INTERFACE_BCR					18
-#define GCC_VS_BCR						19
+#define GCC_QUSB2PHY_PRIM_BCR					0
+#define GCC_QUSB2PHY_SEC_BCR					2
+#define GCC_UFS_PHY_BCR						3
+#define GCC_USB30_PRIM_BCR					4
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR				5
+#define GCC_VCODEC0_BCR						6
+#define GCC_VENUS_BCR						7
+#define GCC_VIDEO_INTERFACE_BCR					8
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-bengal.h b/include/dt-bindings/clock/qcom,gpucc-bengal.h
index 19a39e2..181005b 100644
--- a/include/dt-bindings/clock/qcom,gpucc-bengal.h
+++ b/include/dt-bindings/clock/qcom,gpucc-bengal.h
@@ -7,25 +7,22 @@
 #define _DT_BINDINGS_CLK_QCOM_GPU_CC_BENGAL_H
 
 /* GPU_CC clocks */
-#define GPU_CC_AHB_CLK						0
-#define GPU_CC_CRC_AHB_CLK					1
-#define GPU_CC_CX_APB_CLK					2
-#define GPU_CC_CX_GFX3D_CLK					3
-#define GPU_CC_CX_GFX3D_SLV_CLK					4
-#define GPU_CC_CX_GMU_CLK					5
-#define GPU_CC_CX_SNOC_DVM_CLK					9
-#define GPU_CC_CXO_AON_CLK					10
-#define GPU_CC_CXO_CLK						11
-#define GPU_CC_GMU_CLK_SRC					12
-#define GPU_CC_GX_CXO_CLK					13
-#define GPU_CC_GX_GFX3D_CLK					14
-#define GPU_CC_SLEEP_CLK					16
-
-/* GPU_CC resets */
-#define GPUCC_GPU_CC_CX_BCR					0
-#define GPUCC_GPU_CC_GFX3D_AON_BCR				1
-#define GPUCC_GPU_CC_GMU_BCR					2
-#define GPUCC_GPU_CC_GX_BCR					3
-#define GPUCC_GPU_CC_XO_BCR					4
+#define GPU_CC_PLL0						0
+#define GPU_CC_PLL0_OUT_AUX2					1
+#define GPU_CC_PLL1						2
+#define GPU_CC_PLL1_OUT_AUX					3
+#define GPU_CC_AHB_CLK						4
+#define GPU_CC_CRC_AHB_CLK					5
+#define GPU_CC_CX_GFX3D_CLK					6
+#define GPU_CC_CX_GMU_CLK					7
+#define GPU_CC_CX_SNOC_DVM_CLK					8
+#define GPU_CC_CXO_AON_CLK					9
+#define GPU_CC_CXO_CLK						10
+#define GPU_CC_GMU_CLK_SRC					11
+#define GPU_CC_GX_CXO_CLK					12
+#define GPU_CC_GX_GFX3D_CLK					13
+#define GPU_CC_GX_GFX3D_CLK_SRC					14
+#define GPU_CC_SLEEP_CLK					15
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK				16
 
 #endif
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 90ac450..561fefc 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -361,6 +361,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
 
 void kvm_vgic_load(struct kvm_vcpu *vcpu);
 void kvm_vgic_put(struct kvm_vcpu *vcpu);
+void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
 
 #define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k)	((k)->arch.vgic.initialized)
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 7e9c991..43ed9e7 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -173,6 +173,8 @@ struct ccp_aes_engine {
 	enum ccp_aes_mode mode;
 	enum ccp_aes_action action;
 
+	u32 authsize;
+
 	struct scatterlist *key;
 	u32 key_len;		/* In bytes */
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 30efb36..d42a36e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -818,6 +818,7 @@ void kvm_arch_check_processor_compat(void *rtn);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
 
 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
 /*
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 93a4778..9c1c709 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -17,28 +17,28 @@ struct mhi_buf_info;
  * enum MHI_CB - MHI callback
  * @MHI_CB_IDLE: MHI entered idle state
  * @MHI_CB_PENDING_DATA: New data available for client to process
+ * @MHI_CB_DTR_SIGNAL: DTR signaling update
  * @MHI_CB_LPM_ENTER: MHI host entered low power mode
  * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
 * @MHI_CB_EE_RDDM: MHI device entered RDDM execution environment
  * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode ee
  * @MHI_CB_SYS_ERROR: MHI device enter error state (may recover)
  * @MHI_CB_FATAL_ERROR: MHI device entered fatal error
- * @MHI_CB_BW_REQ: Received a bandwidth switch request from device
  */
 enum MHI_CB {
 	MHI_CB_IDLE,
 	MHI_CB_PENDING_DATA,
+	MHI_CB_DTR_SIGNAL,
 	MHI_CB_LPM_ENTER,
 	MHI_CB_LPM_EXIT,
 	MHI_CB_EE_RDDM,
 	MHI_CB_EE_MISSION_MODE,
 	MHI_CB_SYS_ERROR,
 	MHI_CB_FATAL_ERROR,
-	MHI_CB_BW_REQ,
 };
 
 /**
- * enum MHI_DEBUG_LEVL - various debugging level
+ * enum MHI_DEBUG_LEVEL - various debugging level
  */
 enum MHI_DEBUG_LEVEL {
 	MHI_MSG_LVL_VERBOSE,
@@ -46,6 +46,7 @@ enum MHI_DEBUG_LEVEL {
 	MHI_MSG_LVL_ERROR,
 	MHI_MSG_LVL_CRITICAL,
 	MHI_MSG_LVL_MASK_ALL,
+	MHI_MSG_LVL_MAX,
 };
 
 /**
@@ -119,10 +120,12 @@ enum mhi_dev_state {
  * struct mhi_link_info - bw requirement
  * target_link_speed - as defined by TLS bits in LinkControl reg
  * target_link_width - as defined by NLW bits in LinkStatus reg
+ * sequence_num - used by device to track bw requests sent to host
  */
 struct mhi_link_info {
 	unsigned int target_link_speed;
 	unsigned int target_link_width;
+	int sequence_num;
 };
 
 /**
@@ -198,6 +201,7 @@ struct mhi_controller {
 	void __iomem *bhi;
 	void __iomem *bhie;
 	void __iomem *wake_db;
+	void __iomem *bw_scale_db;
 
 	/* device topology */
 	u32 dev_id;
@@ -240,6 +244,7 @@ struct mhi_controller {
 	u32 msi_allocated;
 	int *irq; /* interrupt table */
 	struct mhi_event *mhi_event;
+	struct list_head lp_ev_rings; /* low priority event rings */
 
 	/* cmd rings */
 	struct mhi_cmd *mhi_cmd;
@@ -278,6 +283,7 @@ struct mhi_controller {
 	struct work_struct st_worker;
 	struct work_struct fw_worker;
 	struct work_struct syserr_worker;
+	struct work_struct low_priority_worker;
 	wait_queue_head_t state_event;
 
 	/* shadow functions */
@@ -297,6 +303,8 @@ struct mhi_controller {
 	void (*unmap_single)(struct mhi_controller *mhi_cntrl,
 			     struct mhi_buf_info *buf);
 	void (*tsync_log)(struct mhi_controller *mhi_cntrl, u64 remote_time);
+	int (*bw_scale)(struct mhi_controller *mhi_cntrl,
+			struct mhi_link_info *link_info);
 
 	/* channel to control DTR messaging */
 	struct mhi_device *dtr_dev;
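A hedged sketch of a controller-side bw_scale hook (demo_bw_scale is a hypothetical name; the actual link reconfiguration is hardware specific). The core passes the device-requested speed, width, and sequence number; returning 0 is assumed to acknowledge the request:

static int demo_bw_scale(struct mhi_controller *mhi_cntrl,
			 struct mhi_link_info *link_info)
{
	/* Hardware-specific link reconfiguration would go here. */
	pr_info("mhi bw request #%d: speed %u, width %u\n",
		link_info->sequence_num,
		link_info->target_link_speed,
		link_info->target_link_width);
	return 0;
}

The hook would be assigned to mhi_cntrl->bw_scale during controller setup, before the controller is registered with the MHI core.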
diff --git a/include/linux/usb/usb_qdss.h b/include/linux/usb/usb_qdss.h
index 9bc215d..d42bd54 100644
--- a/include/linux/usb/usb_qdss.h
+++ b/include/linux/usb/usb_qdss.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2012-2013, 2017-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, 2017-2019 The Linux Foundation. All rights reserved.
  */
 
 #ifndef __LINUX_USB_QDSS_H
@@ -17,6 +17,9 @@ struct qdss_request {
 	int actual;
 	int status;
 	void *context;
+	struct scatterlist *sg;
+	unsigned int num_sgs;
+	unsigned int num_mapped_sgs;
 };
 
 struct usb_qdss_ch {
diff --git a/include/soc/qcom/rmnet_ctl.h b/include/soc/qcom/rmnet_ctl.h
new file mode 100644
index 0000000..0080560
--- /dev/null
+++ b/include/soc/qcom/rmnet_ctl.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * RMNET_CTL header
+ *
+ */
+
+#ifndef _RMNET_CTL_H_
+#define _RMNET_CTL_H_
+
+#include <linux/skbuff.h>
+
+struct rmnet_ctl_client_hooks {
+	void (*ctl_dl_client_hook)(struct sk_buff *skb);
+};
+
+#ifdef CONFIG_RMNET_CTL
+
+void *rmnet_ctl_register_client(struct rmnet_ctl_client_hooks *hook);
+int rmnet_ctl_unregister_client(void *handle);
+int rmnet_ctl_send_client(void *handle, struct sk_buff *skb);
+
+#else
+
+static inline void *rmnet_ctl_register_client(
+			struct rmnet_ctl_client_hooks *hook)
+{
+	return NULL;
+}
+
+static inline int rmnet_ctl_unregister_client(void *handle)
+{
+	return -EINVAL;
+}
+
+static inline int rmnet_ctl_send_client(void *handle, struct sk_buff *skb)
+{
+	return -EINVAL;
+}
+
+#endif /* CONFIG_RMNET_CTL */
+
+#endif /* _RMNET_CTL_H_ */
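A hypothetical client sketch for the API above (demo_* names are illustrative): register hooks to receive downlink control packets, and keep the opaque handle for unregistration.

#include <linux/errno.h>
#include <soc/qcom/rmnet_ctl.h>

static void *demo_hdl;

static void demo_dl_hook(struct sk_buff *skb)
{
	/* Consume the downlink control packet. */
	kfree_skb(skb);
}

static struct rmnet_ctl_client_hooks demo_hooks = {
	.ctl_dl_client_hook = demo_dl_hook,
};

static int demo_register(void)
{
	demo_hdl = rmnet_ctl_register_client(&demo_hooks);
	return demo_hdl ? 0 : -ENODEV;
}

static void demo_unregister(void)
{
	rmnet_ctl_unregister_client(demo_hdl);
	demo_hdl = NULL;
}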
diff --git a/include/soc/qcom/rmnet_qmi.h b/include/soc/qcom/rmnet_qmi.h
index 9096b10..ffcef3f 100644
--- a/include/soc/qcom/rmnet_qmi.h
+++ b/include/soc/qcom/rmnet_qmi.h
@@ -24,6 +24,7 @@ void rmnet_set_powersave_format(void *port);
 void rmnet_clear_powersave_format(void *port);
 void rmnet_get_packets(void *port, u64 *rx, u64 *tx);
 int rmnet_get_powersave_notif(void *port);
+struct net_device *rmnet_get_real_dev(void *port);
 #else
 static inline void *rmnet_get_qmi_pt(void *port)
 {
@@ -76,5 +77,9 @@ static inline int rmnet_get_powersave_notif(void *port)
 	return 0;
 }
 
+static inline struct net_device *rmnet_get_real_dev(void *port)
+{
+	return NULL;
+}
 #endif /* CONFIG_QCOM_QMI_RMNET */
 #endif /*_RMNET_QMI_H*/
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index b52d4a0..78a2291 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -177,10 +177,7 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
 	if (snd_BUG_ON(!stream))
 		return;
 
-	if (stream->direction == SND_COMPRESS_PLAYBACK)
-		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
-	else
-		stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+	stream->runtime->state = SNDRV_PCM_STATE_SETUP;
 
 	wake_up(&stream->runtime->sleep);
 }
diff --git a/include/trace/events/dfc.h b/include/trace/events/dfc.h
index 375a156..fb092bb 100644
--- a/include/trace/events/dfc.h
+++ b/include/trace/events/dfc.h
@@ -236,6 +236,29 @@ TRACE_EVENT(dfc_tx_link_status_ind,
 		__entry->mid, __entry->bid)
 );
 
+TRACE_EVENT(dfc_qmap,
+
+	TP_PROTO(const void *data, size_t len, bool in),
+
+	TP_ARGS(data, len, in),
+
+	TP_STRUCT__entry(
+		__field(bool, in)
+		__field(size_t, len)
+		__dynamic_array(u8, data, len)
+	),
+
+	TP_fast_assign(
+		__entry->in = in;
+		__entry->len = len;
+		memcpy(__get_dynamic_array(data), data, len);
+	),
+
+	TP_printk("%s [%s]",
+		__entry->in ? "<--" : "-->",
+		__print_hex(__get_dynamic_array(data), __entry->len))
+);
+
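A hypothetical call site for the new event (the skb variable is illustrative); TRACE_EVENT(dfc_qmap) generates the trace_dfc_qmap() helper, here logging a received (in = true) QMAP message:

trace_dfc_qmap(skb->data, skb->len, true);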
 #endif /* _TRACE_DFC_H */
 
 /* This part must be outside protection */
diff --git a/include/uapi/linux/esoc_ctrl.h b/include/uapi/linux/esoc_ctrl.h
index 9a8a3e2..7924470 100644
--- a/include/uapi/linux/esoc_ctrl.h
+++ b/include/uapi/linux/esoc_ctrl.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 #ifndef _UAPI_ESOC_CTRL_H_
 #define _UAPI_ESOC_CTRL_H_
@@ -57,6 +57,7 @@ enum esoc_evt {
 	ESOC_CMD_ENG_OFF,
 	ESOC_INVALID_STATE,
 	ESOC_RETRY_PON_EVT,
+	ESOC_BOOT_STATE,
 };
 
 enum esoc_cmd {
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 16bf3a5..fc9dfce 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2779,7 +2779,7 @@ enum nl80211_attrs {
 #define NL80211_HT_CAPABILITY_LEN		26
 #define NL80211_VHT_CAPABILITY_LEN		12
 #define NL80211_HE_MIN_CAPABILITY_LEN           16
-#define NL80211_HE_MAX_CAPABILITY_LEN           51
+#define NL80211_HE_MAX_CAPABILITY_LEN           54
 #define NL80211_MAX_NR_CIPHER_SUITES		5
 #define NL80211_MAX_NR_AKM_SUITES		2
 
diff --git a/include/uapi/media/msm_vidc_utils.h b/include/uapi/media/msm_vidc_utils.h
index 14ee584..18fdddb 100644
--- a/include/uapi/media/msm_vidc_utils.h
+++ b/include/uapi/media/msm_vidc_utils.h
@@ -348,4 +348,25 @@ enum msm_vidc_hdr_info_types {
 	MSM_VIDC_RGB_MAX_FLL,
 };
 
+enum msm_vidc_plane_reserved_field_types {
+	MSM_VIDC_BUFFER_FD,
+	MSM_VIDC_DATA_OFFSET,
+	MSM_VIDC_COMP_RATIO,
+	MSM_VIDC_INPUT_TAG_1,
+	MSM_VIDC_INPUT_TAG_2,
+};
+
+enum msm_vidc_cb_event_types {
+	MSM_VIDC_HEIGHT,
+	MSM_VIDC_WIDTH,
+	MSM_VIDC_BIT_DEPTH,
+	MSM_VIDC_PIC_STRUCT,
+	MSM_VIDC_COLOR_SPACE,
+	MSM_VIDC_CROP_TOP,
+	MSM_VIDC_CROP_LEFT,
+	MSM_VIDC_CROP_HEIGHT,
+	MSM_VIDC_CROP_WIDTH,
+	MSM_VIDC_PROFILE,
+	MSM_VIDC_LEVEL,
+};
 #endif
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 888d93c..6d7d708 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11292,7 +11292,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 		goto err_unlock;
 	}
 
-	perf_install_in_context(ctx, event, cpu);
+	perf_install_in_context(ctx, event, event->cpu);
 	perf_unpin_context(ctx);
 	mutex_unlock(&ctx->mutex);
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6c775be..5a29adf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6381,6 +6381,7 @@ int sched_cpu_starting(unsigned int cpu)
 {
 	sched_rq_cpu_starting(cpu);
 	sched_tick_start(cpu);
+	clear_walt_request(cpu);
 	return 0;
 }
 
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 31decf0..35e8185 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -56,6 +56,7 @@ struct sugov_policy {
 	struct task_struct	*thread;
 	bool			work_in_progress;
 
+	bool			limits_changed;
 	bool			need_freq_update;
 };
 
@@ -113,8 +114,11 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 	    !cpufreq_this_cpu_can_update(sg_policy->policy))
 		return false;
 
-	if (unlikely(sg_policy->need_freq_update))
+	if (unlikely(sg_policy->limits_changed)) {
+		sg_policy->limits_changed = false;
+		sg_policy->need_freq_update = true;
 		return true;
+	}
 
 	/* No need to recalculate next freq for min_rate_limit_us
 	 * at least. However we might still decide to further rate
@@ -595,7 +599,7 @@ static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
 static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
 {
 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
-		sg_policy->need_freq_update = true;
+		sg_policy->limits_changed = true;
 }
 
 static inline unsigned long target_util(struct sugov_policy *sg_policy,
@@ -628,7 +632,9 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	if (!sugov_should_update_freq(sg_policy, time))
 		return;
 
-	busy = use_pelt() && sugov_cpu_is_busy(sg_cpu);
+	/* Limits may have changed, don't skip frequency update */
+	busy = use_pelt() && !sg_policy->need_freq_update &&
+		sugov_cpu_is_busy(sg_cpu);
 
 	sg_cpu->util = util = sugov_get_util(sg_cpu);
 	max = sg_cpu->max;
@@ -1286,6 +1292,7 @@ static int sugov_start(struct cpufreq_policy *policy)
 	sg_policy->last_freq_update_time	= 0;
 	sg_policy->next_freq			= 0;
 	sg_policy->work_in_progress		= false;
+	sg_policy->limits_changed		= false;
 	sg_policy->need_freq_update		= false;
 	sg_policy->cached_raw_freq		= 0;
 
@@ -1356,7 +1363,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
 		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
 	}
 
-	sg_policy->need_freq_update = true;
+	sg_policy->limits_changed = true;
 }
 
 static struct cpufreq_governor schedutil_gov = {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6e0d79f..75f36e5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3937,6 +3937,7 @@ struct find_best_target_env {
 	bool is_rtg;
 	int placement_boost;
 	bool need_idle;
+	bool boosted;
 	int fastpath;
 	int start_cpu;
 };
@@ -6872,7 +6873,7 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
 	unsigned long best_active_cuml_util = ULONG_MAX;
 	unsigned long best_idle_cuml_util = ULONG_MAX;
 	bool prefer_idle = schedtune_prefer_idle(p);
-	bool boosted = schedtune_task_boost(p) > 0 || per_task_boost(p) > 0;
+	bool boosted = fbt_env->boosted;
 	/* Initialise with deepest possible cstate (INT_MAX) */
 	int shallowest_idle_cstate = INT_MAX;
 	struct sched_domain *start_sd;
@@ -6912,9 +6913,7 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
 		!cpu_isolated(prev_cpu) && cpu_online(prev_cpu) &&
 		idle_cpu(prev_cpu)) {
 
-		if (idle_get_state_idx(cpu_rq(prev_cpu)) <=
-			(is_min_capacity_cpu(prev_cpu) ? 1 : 0)) {
-
+		if (idle_get_state_idx(cpu_rq(prev_cpu)) <= 1) {
 			target_cpu = prev_cpu;
 
 			fbt_env->fastpath = PREV_CPU_FASTPATH;
@@ -7652,6 +7651,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
 		fbt_env.placement_boost = placement_boost;
 		fbt_env.need_idle = need_idle;
 		fbt_env.start_cpu = start_cpu;
+		fbt_env.boosted = boosted;
 
 		find_best_target(NULL, candidates, p, &fbt_env);
 	} else {
@@ -8745,7 +8745,17 @@ static int detach_tasks(struct lb_env *env)
 		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
 			goto next;
 
-		if ((load / 2) > env->imbalance)
+		/*
+		 * p is not the running task when we get here, so if p is
+		 * one of only two tasks on the src cpu rq and not the
+		 * running one, it is the only task that can be balanced.
+		 * Only skip tasks whose load exceeds 2*imbalance when
+		 * other tasks can be balanced instead, or when big tasks
+		 * are being ignored.
+		 */
+		if (((cpu_rq(env->src_cpu)->nr_running > 2) ||
+			(env->flags & LBF_IGNORE_BIG_TASKS)) &&
+			((load / 2) > env->imbalance))
 			goto next;
 
 		detach_task(p, env);
@@ -12589,6 +12599,7 @@ void check_for_migration(struct rq *rq, struct task_struct *p)
 	int active_balance;
 	int new_cpu = -1;
 	int prev_cpu = task_cpu(p);
+	int ret;
 
 	if (rq->misfit_task_load) {
 		if (rq->curr->state != TASK_RUNNING ||
@@ -12608,9 +12619,13 @@ void check_for_migration(struct rq *rq, struct task_struct *p)
 			if (active_balance) {
 				mark_reserved(new_cpu);
 				raw_spin_unlock(&migration_lock);
-				stop_one_cpu_nowait(prev_cpu,
+				ret = stop_one_cpu_nowait(prev_cpu,
 					active_load_balance_cpu_stop, rq,
 					&rq->active_balance_work);
+				if (!ret)
+					clear_reserved(new_cpu);
+				else
+					wake_up_if_idle(new_cpu);
 				return;
 			}
 		} else {
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 2c8719f..c349976 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -60,7 +60,8 @@ static noinline int __cpuidle cpu_idle_poll(void)
 	stop_critical_timings();
 
 	while (!tif_need_resched() &&
-		(cpu_idle_force_poll || tick_check_broadcast_expired()))
+		(cpu_idle_force_poll || tick_check_broadcast_expired() ||
+		is_reserved(smp_processor_id())))
 		cpu_relax();
 	start_critical_timings();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
@@ -256,7 +257,8 @@ static void do_idle(void)
 		 * broadcast device expired for us, we don't want to go deep
 		 * idle as we know that the IPI is going to arrive right away.
 		 */
-		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
+		if (cpu_idle_force_poll || tick_check_broadcast_expired() ||
+				is_reserved(smp_processor_id())) {
 			tick_nohz_idle_restart_tick();
 			cpu_idle_poll();
 		} else {
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8497914..85847b6 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -426,7 +426,7 @@ static struct ctl_table kern_table[] = {
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
 		.proc_handler   = proc_dointvec_minmax,
-		.extra1         = &zero,
+		.extra1         = &one,
 		.extra2		= &two_hundred_fifty_five,
 	},
 #endif
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index a578d8f51..2dac07a 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -798,6 +798,10 @@ static int taskstats2_foreach(struct sk_buff *skb, struct netlink_callback *cb)
 	nla = nla_find(nlmsg_attrdata(cb->nlh, GENL_HDRLEN),
 			nlmsg_attrlen(cb->nlh, GENL_HDRLEN),
 			TASKSTATS_TYPE_FOREACH);
+
+	if (!nla)
+		goto out;
+
 	buf  = nla_get_u32(nla);
 	oom_score_min = (short) (buf & 0xFFFF);
 	oom_score_max = (short) ((buf >> 16) & 0xFFFF);
@@ -854,6 +858,7 @@ static int taskstats2_foreach(struct sk_buff *skb, struct netlink_callback *cb)
 	}
 
 	cb->args[0] = iter.tgid;
+out:
 	return skb->len;
 }
 
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index fd48a15..a74b1aa 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -894,8 +894,11 @@ static int __init test_firmware_init(void)
 		return -ENOMEM;
 
 	rc = __test_firmware_config_init();
-	if (rc)
+	if (rc) {
+		kfree(test_fw_config);
+		pr_err("could not init firmware test config: %d\n", rc);
 		return rc;
+	}
 
 	rc = misc_register(&test_fw_misc_device);
 	if (rc) {
diff --git a/mm/cma.c b/mm/cma.c
index 81d7567..d3973af 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -18,11 +18,6 @@
 
 #define pr_fmt(fmt) "cma: " fmt
 
-#ifdef CONFIG_CMA_DEBUG
-#ifndef DEBUG
-#  define DEBUG
-#endif
-#endif
 #define CREATE_TRACE_POINTS
 
 #include <linux/memblock.h>
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index aa0338c..1589165 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -126,7 +126,7 @@
 /* GFP bitmask for kmemleak internal allocations */
 #define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
 				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
-				 __GFP_NOWARN | __GFP_NOFAIL)
+				 __GFP_NOWARN)
 
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7e7cc0c..ecde75f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1037,26 +1037,45 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
 		css_put(&prev->css);
 }
 
-static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
+					struct mem_cgroup *dead_memcg)
 {
-	struct mem_cgroup *memcg = dead_memcg;
 	struct mem_cgroup_reclaim_iter *iter;
 	struct mem_cgroup_per_node *mz;
 	int nid;
 	int i;
 
-	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
-		for_each_node(nid) {
-			mz = mem_cgroup_nodeinfo(memcg, nid);
-			for (i = 0; i <= DEF_PRIORITY; i++) {
-				iter = &mz->iter[i];
-				cmpxchg(&iter->position,
-					dead_memcg, NULL);
-			}
+	for_each_node(nid) {
+		mz = mem_cgroup_nodeinfo(from, nid);
+		for (i = 0; i <= DEF_PRIORITY; i++) {
+			iter = &mz->iter[i];
+			cmpxchg(&iter->position,
+				dead_memcg, NULL);
 		}
 	}
 }
 
+static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+{
+	struct mem_cgroup *memcg = dead_memcg;
+	struct mem_cgroup *last;
+
+	do {
+		__invalidate_reclaim_iterators(memcg, dead_memcg);
+		last = memcg;
+	} while ((memcg = parent_mem_cgroup(memcg)));
+
+	/*
+	 * When cgroup1 non-hierarchy mode is used,
+	 * parent_mem_cgroup() does not walk all the way up to the
+	 * cgroup root (root_mem_cgroup), so we have to handle
+	 * dead_memcg from the cgroup root separately.
+	 */
+	if (last != root_mem_cgroup)
+		__invalidate_reclaim_iterators(root_mem_cgroup,
+						dead_memcg);
+}
+
 /**
  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
  * @memcg: hierarchy root
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d78b843..4b81d09 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -406,7 +406,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
 	},
 };
 
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags);
 
 struct queue_pages {
@@ -432,11 +432,14 @@ static inline bool queue_pages_required(struct page *page,
 }
 
 /*
- * queue_pages_pmd() has three possible return values:
- * 1 - pages are placed on the right node or queued successfully.
- * 0 - THP was split.
- * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing
- *        page was already on a node that does not follow the policy.
+ * queue_pages_pmd() has four possible return values:
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ *     specified.
+ * 2 - THP was split.
+ * -EIO - the page is a migration entry, or only MPOL_MF_STRICT was
+ *        specified and an existing page was already on a node that
+ *        does not follow the policy.
  */
 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 				unsigned long end, struct mm_walk *walk)
@@ -454,23 +457,20 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 	if (is_huge_zero_page(page)) {
 		spin_unlock(ptl);
 		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
+		ret = 2;
 		goto out;
 	}
-	if (!queue_pages_required(page, qp)) {
-		ret = 1;
+	if (!queue_pages_required(page, qp))
 		goto unlock;
-	}
 
-	ret = 1;
 	flags = qp->flags;
 	/* go to thp migration */
 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-		if (!vma_migratable(walk->vma)) {
-			ret = -EIO;
+		if (!vma_migratable(walk->vma) ||
+		    migrate_page_add(page, qp->pagelist, flags)) {
+			ret = 1;
 			goto unlock;
 		}
-
-		migrate_page_add(page, qp->pagelist, flags);
 	} else
 		ret = -EIO;
 unlock:
@@ -482,6 +482,13 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 /*
  * Scan through pages checking if pages follow certain conditions,
  * and move them to the pagelist if they do.
+ *
+ * queue_pages_pte_range() has three possible return values:
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ *     specified.
+ * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
+ *        on a node that does not follow the policy.
  */
 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 			unsigned long end, struct mm_walk *walk)
@@ -491,17 +498,17 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	struct queue_pages *qp = walk->private;
 	unsigned long flags = qp->flags;
 	int ret;
+	bool has_unmovable = false;
 	pte_t *pte;
 	spinlock_t *ptl;
 
 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (ptl) {
 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
-		if (ret > 0)
-			return 0;
-		else if (ret < 0)
+		if (ret != 2)
 			return ret;
 	}
+	/* THP was split, fall through to pte walk */
 
 	if (pmd_trans_unstable(pmd))
 		return 0;
@@ -522,14 +529,28 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 		if (!queue_pages_required(page, qp))
 			continue;
 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-			if (!vma_migratable(vma))
+			/* MPOL_MF_STRICT must be specified if we get here */
+			if (!vma_migratable(vma)) {
+				has_unmovable = true;
 				break;
-			migrate_page_add(page, qp->pagelist, flags);
+			}
+
+			/*
+			 * Do not abort immediately since there may be
+			 * pages that are temporarily off the LRU in the
+			 * range.  We still need to migrate the other LRU
+			 * pages.
+			 */
+			if (migrate_page_add(page, qp->pagelist, flags))
+				has_unmovable = true;
 		} else
 			break;
 	}
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
+
+	if (has_unmovable)
+		return 1;
+
 	return addr != end ? -EIO : 0;
 }
 
@@ -644,7 +665,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
  *
  * If pages found in a given range are on a set of nodes (determined by
  * @nodes and @flags,) it's isolated and queued to the pagelist which is
- * passed via @private.)
+ * passed via @private.
+ *
+ * queue_pages_range() has three possible return values:
+ * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ *     specified.
+ * 0 - queue pages successfully or no misplaced page.
+ * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified.
  */
 static int
 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
@@ -939,7 +966,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 /*
  * page migration, thp tail pages can be passed.
  */
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags)
 {
 	struct page *head = compound_head(page);
@@ -952,8 +979,19 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 			mod_node_page_state(page_pgdat(head),
 				NR_ISOLATED_ANON + page_is_file_cache(head),
 				hpage_nr_pages(head));
+		} else if (flags & MPOL_MF_STRICT) {
+			/*
+			 * Non-movable page may reach here.  And, there may be
+			 * temporary off LRU pages or non-LRU movable pages.
+			 * Treat them as unmovable pages since they can't be
+			 * isolated, so they can't be moved at the moment.  It
+			 * should return -EIO for this case too.
+			 */
+			return -EIO;
 		}
 	}
+
+	return 0;
 }
 
 /* page allocation callback for NUMA node migration */
@@ -1156,9 +1194,10 @@ static struct page *new_page(struct page *page, unsigned long start)
 }
 #else
 
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags)
 {
+	return -EIO;
 }
 
 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
@@ -1181,6 +1220,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 	struct mempolicy *new;
 	unsigned long end;
 	int err;
+	int ret;
 	LIST_HEAD(pagelist);
 
 	if (flags & ~(unsigned long)MPOL_MF_VALID)
@@ -1242,10 +1282,15 @@ static long do_mbind(unsigned long start, unsigned long len,
 	if (err)
 		goto mpol_out;
 
-	err = queue_pages_range(mm, start, end, nmask,
+	ret = queue_pages_range(mm, start, end, nmask,
 			  flags | MPOL_MF_INVERT, &pagelist);
-	if (!err)
-		err = mbind_range(mm, start, end, new);
+
+	if (ret < 0) {
+		err = -EIO;
+		goto up_out;
+	}
+
+	err = mbind_range(mm, start, end, new);
 
 	if (!err) {
 		int nr_failed = 0;
@@ -1258,13 +1303,14 @@ static long do_mbind(unsigned long start, unsigned long len,
 				putback_movable_pages(&pagelist);
 		}
 
-		if (nr_failed && (flags & MPOL_MF_STRICT))
+		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
 			err = -EIO;
 	} else
 		putback_movable_pages(&pagelist);
 
+up_out:
 	up_write(&mm->mmap_sem);
- mpol_out:
+mpol_out:
 	mpol_put(new);
 	return err;
 }
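From userspace the stricter semantics are visible through mbind(2); a hedged sketch (addr and len are placeholders; <numaif.h> comes from libnuma): with MPOL_MF_MOVE | MPOL_MF_STRICT, the call now fails with EIO when existing pages in the range cannot be migrated, instead of succeeding silently.

#include <errno.h>
#include <numaif.h>
#include <stdio.h>

static void demo_bind_strict(void *addr, unsigned long len)
{
	unsigned long nodemask = 1UL << 0;	/* allow node 0 only */

	if (mbind(addr, len, MPOL_BIND, &nodemask, 2,
		  MPOL_MF_MOVE | MPOL_MF_STRICT) == -1 && errno == EIO)
		perror("mbind");	/* some pages were unmovable */
}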
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 0af8992..4272af2 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -52,11 +52,13 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/oom.h>
 
+#define ULMK_MAGIC "lmkd"
+
 int sysctl_panic_on_oom =
 IS_ENABLED(CONFIG_DEBUG_PANIC_ON_OOM) ? 2 : 0;
 int sysctl_oom_kill_allocating_task;
 int sysctl_oom_dump_tasks = 1;
-int sysctl_reap_mem_on_sigkill;
+int sysctl_reap_mem_on_sigkill = 1;
 
 /*
  * Serializes oom killer invocations (out_of_memory()) from all contexts to
@@ -1221,6 +1223,10 @@ void pagefault_out_of_memory(void)
 		.order = 0,
 	};
 
+	if (IS_ENABLED(CONFIG_HAVE_LOW_MEMORY_KILLER) ||
+	    IS_ENABLED(CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER))
+		return;
+
 	if (mem_cgroup_oom_synchronize(true))
 		return;
 
@@ -1230,30 +1236,6 @@ void pagefault_out_of_memory(void)
 	mutex_unlock(&oom_lock);
 }
 
-/* Call this function with task_lock being held as we're accessing ->mm */
-void dump_killed_info(struct task_struct *selected)
-{
-	int selected_tasksize = get_mm_rss(selected->mm);
-
-	pr_info_ratelimited("Killing '%s' (%d), adj %hd,\n"
-			"   to free %ldkB on behalf of '%s' (%d)\n"
-			"   Free CMA is %ldkB\n"
-			"   Total reserve is %ldkB\n"
-			"   Total free pages is %ldkB\n"
-			"   Total file cache is %ldkB\n",
-			selected->comm, selected->pid,
-			selected->signal->oom_score_adj,
-			selected_tasksize * (long)(PAGE_SIZE / 1024),
-			current->comm, current->pid,
-			global_zone_page_state(NR_FREE_CMA_PAGES) *
-				(long)(PAGE_SIZE / 1024),
-			totalreserve_pages * (long)(PAGE_SIZE / 1024),
-			global_zone_page_state(NR_FREE_PAGES) *
-				(long)(PAGE_SIZE / 1024),
-			global_node_page_state(NR_FILE_PAGES) *
-				(long)(PAGE_SIZE / 1024));
-}
-
 void add_to_oom_reaper(struct task_struct *p)
 {
 	static DEFINE_RATELIMIT_STATE(reaper_rs, DEFAULT_RATELIMIT_INTERVAL,
@@ -1272,10 +1254,10 @@ void add_to_oom_reaper(struct task_struct *p)
 		wake_oom_reaper(p);
 	}
 
-	dump_killed_info(p);
 	task_unlock(p);
 
-	if (__ratelimit(&reaper_rs) && p->signal->oom_score_adj == 0) {
+	if (strcmp(current->comm, ULMK_MAGIC) && __ratelimit(&reaper_rs)
+			&& p->signal->oom_score_adj == 0) {
 		show_mem(SHOW_MEM_FILTER_NODES, NULL);
 		show_mem_call_notifiers();
 		if (sysctl_oom_dump_tasks)
diff --git a/mm/rmap.c b/mm/rmap.c
index a77f9b2..94e2488 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1466,7 +1466,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			/*
 			 * No need to invalidate here it will synchronize on
 			 * against the special swap migration pte.
+			 *
+			 * The assignment to subpage above was computed from a
+			 * swap PTE which results in an invalid pointer.
+			 * Since only PAGE_SIZE pages can currently be
+			 * migrated, just set it to page. This will need to be
+			 * changed when hugepage migrations to device private
+			 * memory are supported.
 			 */
+			subpage = page;
 			goto discard;
 		}
 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8721360..d515d13 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2364,6 +2364,12 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 		return NULL;
 
 	/*
+	 * First make sure the mappings are removed from all page-tables
+	 * before they are freed.
+	 */
+	vmalloc_sync_all();
+
+	/*
 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
 	 * flag. It means that vm_struct is not fully initialized.
 	 * Now, it is fully initialized, so remove this flag here.
@@ -2908,6 +2914,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
 /*
  * Implement a stub for vmalloc_sync_all() if the architecture chose not to
  * have one.
+ *
+ * The purpose of this function is to make sure the vmalloc area
+ * mappings are identical in all page-tables in the system.
  */
 void __weak vmalloc_sync_all(void)
 {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 0bb4d71..995b384 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1779,20 +1779,28 @@ static int compat_calc_entry(const struct ebt_entry *e,
 	return 0;
 }
 
+static int ebt_compat_init_offsets(unsigned int number)
+{
+	if (number > INT_MAX)
+		return -EINVAL;
+
+	/* also count the base chain policies */
+	number += NF_BR_NUMHOOKS;
+
+	return xt_compat_init_offsets(NFPROTO_BRIDGE, number);
+}
 
 static int compat_table_info(const struct ebt_table_info *info,
 			     struct compat_ebt_replace *newinfo)
 {
 	unsigned int size = info->entries_size;
 	const void *entries = info->entries;
+	int ret;
 
 	newinfo->entries_size = size;
-	if (info->nentries) {
-		int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
-						 info->nentries);
-		if (ret)
-			return ret;
-	}
+	ret = ebt_compat_init_offsets(info->nentries);
+	if (ret)
+		return ret;
 
 	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
 							entries, newinfo);
@@ -2241,11 +2249,9 @@ static int compat_do_replace(struct net *net, void __user *user,
 
 	xt_compat_lock(NFPROTO_BRIDGE);
 
-	if (tmp.nentries) {
-		ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
-		if (ret < 0)
-			goto out_unlock;
-	}
+	ret = ebt_compat_init_offsets(tmp.nentries);
+	if (ret < 0)
+		goto out_unlock;
 
 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
 	if (ret < 0)
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 142b294..b0b9413 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -127,6 +127,9 @@ static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
 {
 	int port;
 
+	if (!ds->ops->port_mdb_add)
+		return;
+
 	for_each_set_bit(port, bitmap, ds->num_ports)
 		ds->ops->port_mdb_add(ds, port, mdb);
 }
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index 12843c9..74b19a5 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -96,6 +96,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
 	flow.flowi4_tos = RT_TOS(iph->tos);
 	flow.flowi4_scope = RT_SCOPE_UNIVERSE;
+	flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
 
 	return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert;
 }
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index c3c6b09..0f3407f 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -58,7 +58,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
 	if (rpfilter_addr_linklocal(&iph->saddr)) {
 		lookup_flags |= RT6_LOOKUP_F_IFACE;
 		fl6.flowi6_oif = dev->ifindex;
-	} else if ((flags & XT_RPFILTER_LOOSE) == 0)
+	/* Set flowi6_oif for vrf devices to lookup route in l3mdev domain. */
+	} else if (netif_is_l3_master(dev) || netif_is_l3_slave(dev) ||
+		  (flags & XT_RPFILTER_LOOSE) == 0)
 		fl6.flowi6_oif = dev->ifindex;
 
 	rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
@@ -73,7 +75,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
 		goto out;
 	}
 
-	if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
+	if (rt->rt6i_idev->dev == dev ||
+	    l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex ||
+	    (flags & XT_RPFILTER_LOOSE))
 		ret = true;
  out:
 	ip6_rt_put(rt);
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index bb886e7..f783d13 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -169,11 +169,16 @@ int drv_conf_tx(struct ieee80211_local *local,
 	if (!check_sdata_in_driver(sdata))
 		return -EIO;
 
-	if (WARN_ONCE(params->cw_min == 0 ||
-		      params->cw_min > params->cw_max,
-		      "%s: invalid CW_min/CW_max: %d/%d\n",
-		      sdata->name, params->cw_min, params->cw_max))
+	if (params->cw_min == 0 || params->cw_min > params->cw_max) {
+		/*
+		 * If we can't configure hardware anyway, don't warn. We may
+		 * never have initialized the CW parameters.
+		 */
+		WARN_ONCE(local->ops->conf_tx,
+			  "%s: invalid CW_min/CW_max: %d/%d\n",
+			  sdata->name, params->cw_min, params->cw_max);
 		return -EINVAL;
+	}
 
 	trace_drv_conf_tx(local, sdata, ac, params);
 	if (local->ops->conf_tx)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 1aaa73fa..b5c0624 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1967,6 +1967,16 @@ ieee80211_sta_wmm_params(struct ieee80211_local *local,
 		ieee80211_regulatory_limit_wmm_params(sdata, &params[ac], ac);
 	}
 
+	/* WMM specification requires all 4 ACIs. */
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+		if (params[ac].cw_min == 0) {
+			sdata_info(sdata,
+				   "AP has invalid WMM params (missing AC %d), using defaults\n",
+				   ac);
+			return false;
+		}
+	}
+
 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
 		mlme_dbg(sdata,
 			 "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n",
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 27eff89..c6073d1 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -431,13 +431,12 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
  * table location, we assume id gets exposed to userspace.
  *
  * Following nf_conn items do not change throughout lifetime
- * of the nf_conn after it has been committed to main hash table:
+ * of the nf_conn:
  *
  * 1. nf_conn address
- * 2. nf_conn->ext address
- * 3. nf_conn->master address (normally NULL)
- * 4. tuple
- * 5. the associated net namespace
+ * 2. nf_conn->master address (normally NULL)
+ * 3. the associated net namespace
+ * 4. the original direction tuple
  */
 u32 nf_ct_get_id(const struct nf_conn *ct)
 {
@@ -447,9 +446,10 @@ u32 nf_ct_get_id(const struct nf_conn *ct)
 	net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
 
 	a = (unsigned long)ct;
-	b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
-	c = (unsigned long)ct->ext;
-	d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
+	b = (unsigned long)ct->master;
+	c = (unsigned long)nf_ct_net(ct);
+	d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				   sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
 				   &ct_id_seed);
 #ifdef CONFIG_64BIT
 	return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 842f3f8..7011ab2 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -480,6 +480,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
 	struct ip_ct_tcp_state *receiver = &state->seen[!dir];
 	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
 	__u32 seq, ack, sack, end, win, swin;
+	u16 win_raw;
 	s32 receiver_offset;
 	bool res, in_recv_win;
 
@@ -488,7 +489,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
 	 */
 	seq = ntohl(tcph->seq);
 	ack = sack = ntohl(tcph->ack_seq);
-	win = ntohs(tcph->window);
+	win_raw = ntohs(tcph->window);
+	win = win_raw;
 	end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
 
 	if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
@@ -663,14 +665,14 @@ static bool tcp_in_window(const struct nf_conn *ct,
 			    && state->last_seq == seq
 			    && state->last_ack == ack
 			    && state->last_end == end
-			    && state->last_win == win)
+			    && state->last_win == win_raw)
 				state->retrans++;
 			else {
 				state->last_dir = dir;
 				state->last_seq = seq;
 				state->last_ack = ack;
 				state->last_end = end;
-				state->last_win = win;
+				state->last_win = win_raw;
 				state->retrans = 0;
 			}
 		}
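A worked example of the truncation this avoids (numbers illustrative): with a raw window of 512 and a window scale of 7, win = 512 << 7 = 65536, which wraps to 0 when stored in the u16 last_win. Later retransmissions of the same segment then never match last_win, so the retrans counter keeps resetting. Recording the unscaled win_raw keeps the comparison exact.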
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 9169134..7f2c191 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -575,7 +575,7 @@ static int nfnetlink_bind(struct net *net, int group)
 	ss = nfnetlink_get_subsys(type << 8);
 	rcu_read_unlock();
 	if (!ss)
-		request_module("nfnetlink-subsys-%d", type);
+		request_module_nowait("nfnetlink-subsys-%d", type);
 	return 0;
 }
 #endif
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index c2d2371..b8f23f7 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -196,7 +196,7 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
 	priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
 
 	priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
-	if (priv->modulus <= 1)
+	if (priv->modulus < 1)
 		return -ERANGE;
 
 	if (priv->offset + priv->modulus - 1 < priv->offset)
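Relaxing the check from "<= 1" to "< 1" (which, since modulus is unsigned, rejects only 0) legalizes a modulus of one for symhash: hash % 1 is always 0, a degenerate but valid single-bucket mapping that the old check refused. A small sketch:

#include <stdint.h>
#include <stdio.h>

/* Degenerate but valid: modulus 1 maps every packet to bucket 0. */
static uint32_t symhash_bucket(uint32_t hash, uint32_t modulus, uint32_t offset)
{
	return offset + hash % modulus;  /* modulus == 0 must be rejected earlier */
}

int main(void)
{
	printf("%u %u\n", symhash_bucket(0xdeadbeef, 1, 0),
			  symhash_bucket(0xcafe, 1, 0)); /* both 0 */
	return 0;
}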
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 33e982b..7e25a6a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2616,6 +2616,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 
 	mutex_lock(&po->pg_vec_lock);
 
+	/* packet_sendmsg()'s check on tx_ring.pg_vec was lockless,
+	 * so we must re-check it under the protection of pg_vec_lock.
+	 */
+	if (unlikely(!po->tx_ring.pg_vec)) {
+		err = -EBUSY;
+		goto out;
+	}
 	if (likely(saddr == NULL)) {
 		dev	= packet_cached_dev_get(po);
 		proto	= po->num;
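The added block is the classic lockless-check-then-recheck pattern: packet_sendmsg() peeked at tx_ring.pg_vec without the lock, so by the time pg_vec_lock is taken the ring may already be gone, and the check has to be repeated under the lock before the pointer can be trusted. A generic pthread sketch of the same shape (all names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *pg_vec;    /* may be torn down concurrently */

static int send_on_ring(void)
{
	if (!pg_vec)                    /* lockless fast-path check */
		return -1;

	pthread_mutex_lock(&lock);
	if (!pg_vec) {                  /* re-check under the lock before trusting it */
		pthread_mutex_unlock(&lock);
		return -1;              /* ring vanished between the two checks */
	}
	/* ... transmit; pg_vec cannot be freed while we hold the lock ... */
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	printf("%d\n", send_on_ring()); /* -1: no ring configured */
	return 0;
}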
diff --git a/net/qrtr/fifo.c b/net/qrtr/fifo.c
index 0a494a6..4a1dd07 100644
--- a/net/qrtr/fifo.c
+++ b/net/qrtr/fifo.c
@@ -341,7 +341,7 @@ static int qrtr_fifo_xprt_probe(struct platform_device *pdev)
 	qrtr_fifo_config_init(xprtp);
 
 	xprtp->ep.xmit = xprt_write;
-	ret = qrtr_endpoint_register(&xprtp->ep, QRTR_EP_NID_AUTO);
+	ret = qrtr_endpoint_register(&xprtp->ep, QRTR_EP_NID_AUTO, false);
 	if (ret)
 		return ret;
 
diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c
index b864b6b..788177e 100644
--- a/net/qrtr/mhi.c
+++ b/net/qrtr/mhi.c
@@ -145,6 +145,7 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
 {
 	struct qrtr_mhi_dev *qdev;
 	u32 net_id;
+	bool rt;
 	int rc;
 
 	qdev = devm_kzalloc(&mhi_dev->dev, sizeof(*qdev), GFP_KERNEL);
@@ -160,10 +161,12 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
 	if (rc < 0)
 		net_id = QRTR_EP_NET_ID_AUTO;
 
+	rt = of_property_read_bool(mhi_dev->dev.of_node, "qcom,low-latency");
+
 	INIT_LIST_HEAD(&qdev->ul_pkts);
 	spin_lock_init(&qdev->ul_lock);
 
-	rc = qrtr_endpoint_register(&qdev->ep, net_id);
+	rc = qrtr_endpoint_register(&qdev->ep, net_id, rt);
 	if (rc)
 		return rc;
 
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index d994a90..2c9d7de 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -20,8 +20,10 @@
 #include <linux/rwsem.h>
 #include <linux/ipc_logging.h>
 #include <linux/uidgid.h>
+#include <linux/pm_wakeup.h>
 
 #include <net/sock.h>
+#include <uapi/linux/sched/types.h>
 
 #include "qrtr.h"
 
@@ -149,6 +151,7 @@ static DEFINE_MUTEX(qrtr_port_lock);
  * @kworker: worker thread for recv work
  * @task: task to run the worker thread
  * @read_data: scheduled work for recv work
+ * @ws: wakeup source to avoid system suspend
  * @ilc: ipc logging context reference
  */
 struct qrtr_node {
@@ -170,6 +173,8 @@ struct qrtr_node {
 	struct task_struct *task;
 	struct kthread_work read_data;
 
+	struct wakeup_source *ws;
+
 	void *ilc;
 };
 
@@ -346,6 +351,7 @@ static void __qrtr_node_release(struct kref *kref)
 	}
 	mutex_unlock(&node->qrtr_tx_lock);
 
+	wakeup_source_unregister(node->ws);
 	kthread_flush_worker(&node->kworker);
 	kthread_stop(node->task);
 
@@ -610,10 +616,16 @@ static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
 		node->nid = nid;
 	up_write(&qrtr_node_lock);
 
+	snprintf(name, sizeof(name), "qrtr_%d", nid);
 	if (!node->ilc) {
-		snprintf(name, sizeof(name), "qrtr_%d", nid);
 		node->ilc = ipc_log_context_create(QRTR_LOG_PAGE_CNT, name, 0);
 	}
+	/* Create a wakeup source only for NID 0, 3 or 7.
+	 * Sensor service sample streams from other nodes
+	 * cause APPS suspend problems and power drain issues.
+	 */
+	if (!node->ws && (nid == 0 || nid == 3 || nid == 7))
+		node->ws = wakeup_source_register(name);
 }
 
 /**
@@ -744,6 +756,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 	    cb->type != QRTR_TYPE_RESUME_TX)
 		goto err;
 
+	pm_wakeup_ws_event(node->ws, 0, true);
+
 	if (frag) {
 		skb->data_len = size;
 		skb->len = size;
@@ -921,13 +935,16 @@ static void qrtr_node_rx_work(struct kthread_work *work)
  * qrtr_endpoint_register() - register a new endpoint
  * @ep: endpoint to register
  * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
+ * @rt: flag marking a real-time, low-latency endpoint
  * Return: 0 on success; negative error code on failure
  *
  * The specified endpoint must have the xmit function pointer set on call.
  */
-int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int net_id)
+int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int net_id,
+			   bool rt)
 {
 	struct qrtr_node *node;
+	struct sched_param param = {.sched_priority = 1};
 
 	if (!ep || !ep->xmit)
 		return -EINVAL;
@@ -950,6 +967,8 @@ int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int net_id)
 		kfree(node);
 		return -ENOMEM;
 	}
+	if (rt)
+		sched_setscheduler(node->task, SCHED_FIFO, &param);
 
 	mutex_init(&node->qrtr_tx_lock);
 	INIT_RADIX_TREE(&node->qrtr_tx_flow, GFP_KERNEL);
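Endpoints probed with the qcom,low-latency DT property get their receive worker promoted to SCHED_FIFO priority 1 so inbound IPC preempts ordinary CFS tasks. The same syscall works from userspace on the calling thread, given CAP_SYS_NICE or suitable rlimits; a minimal sketch:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param param = { .sched_priority = 1 };

	/* Promote the calling thread to real-time FIFO scheduling;
	 * fails with EPERM without CAP_SYS_NICE. */
	if (sched_setscheduler(0, SCHED_FIFO, &param) < 0) {
		perror("sched_setscheduler");
		return 1;
	}
	printf("now running SCHED_FIFO prio 1\n");
	return 0;
}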
diff --git a/net/qrtr/qrtr.h b/net/qrtr/qrtr.h
index f9aede4..6a2cccb 100644
--- a/net/qrtr/qrtr.h
+++ b/net/qrtr/qrtr.h
@@ -26,7 +26,8 @@ struct qrtr_endpoint {
 	struct qrtr_node *node;
 };
 
-int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int net_id);
+int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int net_id,
+			   bool rt);
 
 void qrtr_endpoint_unregister(struct qrtr_endpoint *ep);
 
diff --git a/net/qrtr/smd.c b/net/qrtr/smd.c
index 0c78f15..fff9720 100644
--- a/net/qrtr/smd.c
+++ b/net/qrtr/smd.c
@@ -60,6 +60,7 @@ static int qcom_smd_qrtr_probe(struct rpmsg_device *rpdev)
 {
 	struct qrtr_smd_dev *qdev;
 	u32 net_id;
+	bool rt;
 	int rc;
 
 	qdev = devm_kzalloc(&rpdev->dev, sizeof(*qdev), GFP_KERNEL);
@@ -74,7 +75,9 @@ static int qcom_smd_qrtr_probe(struct rpmsg_device *rpdev)
 	if (rc < 0)
 		net_id = QRTR_EP_NET_ID_AUTO;
 
-	rc = qrtr_endpoint_register(&qdev->ep, net_id);
+	rt = of_property_read_bool(rpdev->dev.of_node, "qcom,low-latency");
+
+	rc = qrtr_endpoint_register(&qdev->ep, net_id, rt);
 	if (rc)
 		return rc;
 
diff --git a/net/qrtr/usb.c b/net/qrtr/usb.c
index fd71df9..3daecca 100644
--- a/net/qrtr/usb.c
+++ b/net/qrtr/usb.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
 
 #include <linux/kthread.h>
 #include <linux/module.h>
@@ -213,7 +213,7 @@ static int qcom_usb_qrtr_probe(struct usb_interface *interface,
 
 	init_usb_anchor(&qdev->submitted);
 
-	rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO);
+	rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO, false);
 	if (rc)
 		return rc;
 
@@ -263,7 +263,7 @@ static int qcom_usb_qrtr_reset_resume(struct usb_interface *intf)
 	int rc = 0;
 
 	qrtr_endpoint_unregister(&qdev->ep);
-	rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO);
+	rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO, false);
 	if (rc)
 		return rc;
 
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 3131b41..28adac3 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -561,7 +561,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
 	 */
 	if (net->sctp.pf_enable &&
 	   (transport->state == SCTP_ACTIVE) &&
-	   (asoc->pf_retrans < transport->pathmaxrxt) &&
+	   (transport->error_count < transport->pathmaxrxt) &&
 	   (transport->error_count > asoc->pf_retrans)) {
 
 		sctp_assoc_control_transport(asoc, transport,
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 0da5793..87061a4b 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -416,6 +416,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
 		nstr_list[i] = htons(str_list[i]);
 
 	if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
+		kfree(nstr_list);
 		retval = -EAGAIN;
 		goto out;
 	}
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index b88d48d..0f1eaed 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -75,6 +75,7 @@ void tipc_set_node_addr(struct net *net, u32 addr)
 		tipc_set_node_id(net, node_id);
 	}
 	tn->trial_addr = addr;
+	tn->addr_trial_end = jiffies;
 	pr_info("32-bit node address hash set to %x\n", addr);
 }
 
diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
index dad5583..3b2861f 100644
--- a/scripts/Kconfig.include
+++ b/scripts/Kconfig.include
@@ -20,7 +20,7 @@
 
 # $(cc-option,<flag>)
 # Return y if the compiler supports <flag>, n otherwise
-cc-option = $(success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null)
+cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /dev/null)
 
 # $(ld-option,<flag>)
 # Return y if the linker supports <flag>, n otherwise
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 1771a31..15dd58a 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -75,7 +75,7 @@
  $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,)       \
  $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile)   \
  $(if $(KBUILD_EXTMOD),-I $(modulesymfile))      \
- $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \
+ $(if $(KBUILD_EXTMOD),$(addprefix -e ,$(KBUILD_EXTRA_SYMBOLS))) \
  $(if $(KBUILD_EXTMOD),-o $(modulesymfile))      \
  $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S)      \
  $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E)  \
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 6ac3685..a5fe929 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3118,7 +3118,7 @@
 				$compat2 =~ s/\,[a-zA-Z0-9]*\-/\,<\.\*>\-/;
 				my $compat3 = $compat;
 				$compat3 =~ s/\,([a-z]*)[0-9]*\-/\,$1<\.\*>\-/;
-				`grep -Erq "$compat|$compat2|$compat3" $dt_path`;
+				`grep -ERq "$compat|$compat2|$compat3" $dt_path`;
 				if ( $? >> 8 ) {
 					WARN("UNDOCUMENTED_DT_STRING",
 					     "DT compatible string \"$compat\" appears un-documented -- check $dt_path\n" . $herecurr);
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 09b9fa7..63fa0cb 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -413,3 +413,14 @@
 		exit 1
 	fi
 fi
+
+# Starting with Android Q, the DTBs are part of dtb.img and not part
+# of the kernel image. The RTIC DTS relies on the kernel environment
+# and cannot be built outside of the kernel. Generate the RTIC DTS after
+# a successful kernel build if MPGen is enabled. The DTB will be
+# generated with dtb.img in kernel_definitions.mk.
+if [ ! -z ${RTIC_MPGEN+x} ]; then
+	${RTIC_MPGEN} --objcopy="${OBJCOPY}" --objdump="${OBJDUMP}" \
+		--binpath="" --vmlinux="vmlinux" --config=${KCONFIG_CONFIG} \
+		--cc="${CC} ${KBUILD_AFLAGS}" --dts=rtic_mp.dts
+fi
diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install
index 0674597..3524dbc 100755
--- a/scripts/sphinx-pre-install
+++ b/scripts/sphinx-pre-install
@@ -301,7 +301,7 @@
 	#
 	# Checks valid for RHEL/CentOS version 7.x.
 	#
-	if (! $system_release =~ /Fedora/) {
+	if (!($system_release =~ /Fedora/)) {
 		$map{"virtualenv"} = "python-virtualenv";
 	}
 
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 009e469..8e547e2 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -578,10 +578,7 @@ snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
 		stream->metadata_set = false;
 		stream->next_track = false;
 
-		if (stream->direction == SND_COMPRESS_PLAYBACK)
-			stream->runtime->state = SNDRV_PCM_STATE_SETUP;
-		else
-			stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
 	} else {
 		return -EPERM;
 	}
@@ -698,8 +695,17 @@ static int snd_compr_start(struct snd_compr_stream *stream)
 {
 	int retval;
 
-	if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
+	switch (stream->runtime->state) {
+	case SNDRV_PCM_STATE_SETUP:
+		if (stream->direction != SND_COMPRESS_CAPTURE)
+			return -EPERM;
+		break;
+	case SNDRV_PCM_STATE_PREPARED:
+		break;
+	default:
 		return -EPERM;
+	}
+
 	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
 	if (!retval)
 		stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
@@ -710,9 +716,15 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
 {
 	int retval;
 
-	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-			stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+	switch (stream->runtime->state) {
+	case SNDRV_PCM_STATE_OPEN:
+	case SNDRV_PCM_STATE_SETUP:
+	case SNDRV_PCM_STATE_PREPARED:
 		return -EPERM;
+	default:
+		break;
+	}
+
 	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
 	if (!retval) {
 		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
@@ -771,10 +783,18 @@ static int snd_compr_drain(struct snd_compr_stream *stream)
 	int retval;
 
 	mutex_lock(&stream->device->lock);
-	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-			stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
-		retval = -EPERM;
-		goto ret;
+	switch (stream->runtime->state) {
+	case SNDRV_PCM_STATE_OPEN:
+	case SNDRV_PCM_STATE_SETUP:
+	case SNDRV_PCM_STATE_PREPARED:
+	case SNDRV_PCM_STATE_PAUSED:
+		mutex_unlock(&stream->device->lock);
+		return -EPERM;
+	case SNDRV_PCM_STATE_XRUN:
+		mutex_unlock(&stream->device->lock);
+		return -EPIPE;
+	default:
+		break;
 	}
 	mutex_unlock(&stream->device->lock);
 	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
@@ -798,6 +818,10 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
 	if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
 		return -EPERM;
 
+	/* next track doesn't have any meaning for capture streams */
+	if (stream->direction == SND_COMPRESS_CAPTURE)
+		return -EPERM;
+
 	/* you can signal next track if this is intended to be a gapless stream
 	 * and current track metadata is set
 	 */
@@ -817,12 +841,25 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream)
 	int retval;
 
 	mutex_lock(&stream->device->lock);
-	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-			stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
+	switch (stream->runtime->state) {
+	case SNDRV_PCM_STATE_OPEN:
+	case SNDRV_PCM_STATE_SETUP:
+	case SNDRV_PCM_STATE_PREPARED:
+	case SNDRV_PCM_STATE_PAUSED:
 		mutex_unlock(&stream->device->lock);
 		return -EPERM;
+	case SNDRV_PCM_STATE_XRUN:
+		mutex_unlock(&stream->device->lock);
+		return -EPIPE;
+	default:
+		break;
 	}
 	mutex_unlock(&stream->device->lock);
+
+	/* partial drain doesn't have any meaning for capture streams */
+	if (stream->direction == SND_COMPRESS_CAPTURE)
+		return -EPERM;
+
 	/* stream can be drained only when next track has been signalled */
 	if (stream->next_track == false)
 		return -EPERM;
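The rewrites above replace pairwise state comparisons with explicit switch statements, so every trigger now spells out exactly which runtime states it accepts and which errno each rejected state produces (-EPERM for not-yet-running states, -EPIPE for XRUN). A condensed sketch of that dispatch style with hypothetical state names:

#include <errno.h>
#include <stdio.h>

enum state { ST_OPEN, ST_SETUP, ST_PREPARED, ST_RUNNING, ST_PAUSED, ST_XRUN };

/* Drain is only meaningful once data is flowing; mirror the switch style. */
static int check_drain_allowed(enum state s)
{
	switch (s) {
	case ST_OPEN:
	case ST_SETUP:
	case ST_PREPARED:
	case ST_PAUSED:
		return -EPERM;  /* nothing to drain yet, or stream paused */
	case ST_XRUN:
		return -EPIPE;  /* stream already broke; report it as such */
	default:
		return 0;       /* RUNNING may drain */
	}
}

int main(void)
{
	printf("%d %d %d\n", check_drain_allowed(ST_SETUP),
	       check_drain_allowed(ST_XRUN), check_drain_allowed(ST_RUNNING));
	return 0;
}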
diff --git a/sound/firewire/packets-buffer.c b/sound/firewire/packets-buffer.c
index 1ebf00c..715cd99 100644
--- a/sound/firewire/packets-buffer.c
+++ b/sound/firewire/packets-buffer.c
@@ -37,7 +37,7 @@ int iso_packets_buffer_init(struct iso_packets_buffer *b, struct fw_unit *unit,
 	packets_per_page = PAGE_SIZE / packet_size;
 	if (WARN_ON(!packets_per_page)) {
 		err = -EINVAL;
-		goto error;
+		goto err_packets;
 	}
 	pages = DIV_ROUND_UP(count, packets_per_page);
 
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index a12e594..a41c1be 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -609,11 +609,9 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
 	}
 	runtime->private_data = azx_dev;
 
-	if (chip->gts_present)
-		azx_pcm_hw.info = azx_pcm_hw.info |
-			SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
-
 	runtime->hw = azx_pcm_hw;
+	if (chip->gts_present)
+		runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
 	runtime->hw.channels_min = hinfo->channels_min;
 	runtime->hw.channels_max = hinfo->channels_max;
 	runtime->hw.formats = hinfo->formats;
@@ -626,6 +624,13 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
 				     20,
 				     178000000);
 
+	/* For some reason, the playback stream stalls on PulseAudio with
+	 * tsched=1 when a capture stream triggers.  Until we figure out the
+	 * real cause, disable tsched mode by setting the SNDRV_PCM_INFO_BATCH flag.
+	 */
+	if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
+		runtime->hw.info |= SNDRV_PCM_INFO_BATCH;
+
 	if (chip->align_buffer_size)
 		/* constrain buffer sizes to be multiple of 128
 		   bytes. This is more efficient in terms of memory
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index 53c3cd2..8a9dd47 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -40,7 +40,7 @@
 /* 14 unused */
 #define AZX_DCAPS_CTX_WORKAROUND (1 << 15)	/* X-Fi workaround */
 #define AZX_DCAPS_POSFIX_LPIB	(1 << 16)	/* Use LPIB as default */
-/* 17 unused */
+#define AZX_DCAPS_AMD_WORKAROUND (1 << 17)	/* AMD-specific workaround */
 #define AZX_DCAPS_NO_64BIT	(1 << 18)	/* No 64bit address */
 #define AZX_DCAPS_SYNC_WRITE	(1 << 19)	/* sync each cmd write */
 #define AZX_DCAPS_OLD_SSYNC	(1 << 20)	/* Old SSYNC reg for ICH */
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 579984e..bb2bd33 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -6033,6 +6033,24 @@ void snd_hda_gen_free(struct hda_codec *codec)
 }
 EXPORT_SYMBOL_GPL(snd_hda_gen_free);
 
+/**
+ * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting
+ * @codec: the HDA codec
+ *
+ * This can be put as patch_ops reboot_notify function.
+ */
+void snd_hda_gen_reboot_notify(struct hda_codec *codec)
+{
+	/* Make the codec enter D3 to avoid spurious noises from the internal
+	 * speaker during (and after) reboot
+	 */
+	snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
+	snd_hda_codec_write(codec, codec->core.afg, 0,
+			    AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+	msleep(10);
+}
+EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify);
+
 #ifdef CONFIG_PM
 /**
  * snd_hda_gen_check_power_status - check the loopback power save state
@@ -6060,6 +6078,7 @@ static const struct hda_codec_ops generic_patch_ops = {
 	.init = snd_hda_gen_init,
 	.free = snd_hda_gen_free,
 	.unsol_event = snd_hda_jack_unsol_event,
+	.reboot_notify = snd_hda_gen_reboot_notify,
 #ifdef CONFIG_PM
 	.check_power_status = snd_hda_gen_check_power_status,
 #endif
@@ -6082,7 +6101,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec)
 
 	err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);
 	if (err < 0)
-		return err;
+		goto error;
 
 	err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
 	if (err < 0)
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 1012366..ce9c293 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -336,6 +336,7 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
 				  struct auto_pin_cfg *cfg);
 int snd_hda_gen_build_controls(struct hda_codec *codec);
 int snd_hda_gen_build_pcms(struct hda_codec *codec);
+void snd_hda_gen_reboot_notify(struct hda_codec *codec);
 
 /* standard jack event callbacks */
 void snd_hda_gen_hp_automute(struct hda_codec *codec,
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 308ce76..7a3e34b 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -78,6 +78,7 @@ enum {
 	POS_FIX_VIACOMBO,
 	POS_FIX_COMBO,
 	POS_FIX_SKL,
+	POS_FIX_FIFO,
 };
 
 /* Defines for ATI HD Audio support in SB450 south bridge */
@@ -149,7 +150,7 @@ module_param_array(model, charp, NULL, 0444);
 MODULE_PARM_DESC(model, "Use the given board model.");
 module_param_array(position_fix, int, NULL, 0444);
 MODULE_PARM_DESC(position_fix, "DMA pointer read method."
-		 "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+).");
+		 "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+, 6 = FIFO).");
 module_param_array(bdl_pos_adj, int, NULL, 0644);
 MODULE_PARM_DESC(bdl_pos_adj, "BDL position adjustment offset.");
 module_param_array(probe_mask, int, NULL, 0444);
@@ -350,6 +351,11 @@ enum {
 #define AZX_DCAPS_PRESET_ATI_HDMI_NS \
 	(AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF)
 
+/* quirks for AMD SB */
+#define AZX_DCAPS_PRESET_AMD_SB \
+	(AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_AMD_WORKAROUND |\
+	 AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME)
+
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
 	(AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
@@ -920,6 +926,49 @@ static unsigned int azx_via_get_position(struct azx *chip,
 	return bound_pos + mod_dma_pos;
 }
 
+#define AMD_FIFO_SIZE	32
+
+/* get the current DMA position with FIFO size correction */
+static unsigned int azx_get_pos_fifo(struct azx *chip, struct azx_dev *azx_dev)
+{
+	struct snd_pcm_substream *substream = azx_dev->core.substream;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	unsigned int pos, delay;
+
+	pos = snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
+	if (!runtime)
+		return pos;
+
+	runtime->delay = AMD_FIFO_SIZE;
+	delay = frames_to_bytes(runtime, AMD_FIFO_SIZE);
+	if (azx_dev->insufficient) {
+		if (pos < delay) {
+			delay = pos;
+			runtime->delay = bytes_to_frames(runtime, pos);
+		} else {
+			azx_dev->insufficient = 0;
+		}
+	}
+
+	/* correct the DMA position for capture stream */
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		if (pos < delay)
+			pos += azx_dev->core.bufsize;
+		pos -= delay;
+	}
+
+	return pos;
+}
+
+static int azx_get_delay_from_fifo(struct azx *chip, struct azx_dev *azx_dev,
+				   unsigned int pos)
+{
+	struct snd_pcm_substream *substream = azx_dev->core.substream;
+
+	/* just read back the value calculated above */
+	return substream->runtime->delay;
+}
+
 static unsigned int azx_skl_get_dpib_pos(struct azx *chip,
 					 struct azx_dev *azx_dev)
 {
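On the affected AMD controllers the LPIB counter is offset from the true DMA position by the 32-byte FIFO, so a capture position has to be pulled back by the FIFO depth, wrapping around the ring buffer instead of going negative. A sketch of the wrap-safe correction (the buffer size is made up for the example):

#include <stdio.h>

#define AMD_FIFO_SIZE_BYTES 32

/* Pull a capture DMA position back by the FIFO depth, wrapping at bufsize. */
static unsigned int correct_capture_pos(unsigned int pos, unsigned int bufsize)
{
	if (pos < AMD_FIFO_SIZE_BYTES)
		pos += bufsize;         /* wrap instead of going negative */
	return pos - AMD_FIFO_SIZE_BYTES;
}

int main(void)
{
	printf("%u\n", correct_capture_pos(16, 4096));  /* 4080, wrapped */
	printf("%u\n", correct_capture_pos(100, 4096)); /* 68 */
	return 0;
}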
@@ -1528,6 +1577,7 @@ static int check_position_fix(struct azx *chip, int fix)
 	case POS_FIX_VIACOMBO:
 	case POS_FIX_COMBO:
 	case POS_FIX_SKL:
+	case POS_FIX_FIFO:
 		return fix;
 	}
 
@@ -1544,6 +1594,10 @@ static int check_position_fix(struct azx *chip, int fix)
 		dev_dbg(chip->card->dev, "Using VIACOMBO position fix\n");
 		return POS_FIX_VIACOMBO;
 	}
+	if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND) {
+		dev_dbg(chip->card->dev, "Using FIFO position fix\n");
+		return POS_FIX_FIFO;
+	}
 	if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) {
 		dev_dbg(chip->card->dev, "Using LPIB position fix\n");
 		return POS_FIX_LPIB;
@@ -1564,6 +1618,7 @@ static void assign_position_fix(struct azx *chip, int fix)
 		[POS_FIX_VIACOMBO] = azx_via_get_position,
 		[POS_FIX_COMBO] = azx_get_pos_lpib,
 		[POS_FIX_SKL] = azx_get_pos_skl,
+		[POS_FIX_FIFO] = azx_get_pos_fifo,
 	};
 
 	chip->get_position[0] = chip->get_position[1] = callbacks[fix];
@@ -1578,6 +1633,9 @@ static void assign_position_fix(struct azx *chip, int fix)
 			azx_get_delay_from_lpib;
 	}
 
+	if (fix == POS_FIX_FIFO)
+		chip->get_delay[0] = chip->get_delay[1] =
+			azx_get_delay_from_fifo;
 }
 
 /*
@@ -2594,6 +2652,12 @@ static const struct pci_device_id azx_ids[] = {
 	/* AMD Hudson */
 	{ PCI_DEVICE(0x1022, 0x780d),
 	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+	/* AMD, X370 & co */
+	{ PCI_DEVICE(0x1022, 0x1457),
+	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
+	/* AMD, X570 & co */
+	{ PCI_DEVICE(0x1022, 0x1487),
+	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
 	/* AMD Stoney */
 	{ PCI_DEVICE(0x1022, 0x157a),
 	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index b70fbfa..6f17b25 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -176,23 +176,10 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
 {
 	struct conexant_spec *spec = codec->spec;
 
-	switch (codec->core.vendor_id) {
-	case 0x14f12008: /* CX8200 */
-	case 0x14f150f2: /* CX20722 */
-	case 0x14f150f4: /* CX20724 */
-		break;
-	default:
-		return;
-	}
-
 	/* Turn the problematic codec into D3 to avoid spurious noises
 	   from the internal speaker during (and after) reboot */
 	cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
-
-	snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
-	snd_hda_codec_write(codec, codec->core.afg, 0,
-			    AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-	msleep(10);
+	snd_hda_gen_reboot_notify(codec);
 }
 
 static void cx_auto_free(struct hda_codec *codec)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index dc19896..9b5caf0 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -868,15 +868,6 @@ static void alc_reboot_notify(struct hda_codec *codec)
 		alc_shutup(codec);
 }
 
-/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
-static void alc_d3_at_reboot(struct hda_codec *codec)
-{
-	snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
-	snd_hda_codec_write(codec, codec->core.afg, 0,
-			    AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-	msleep(10);
-}
-
 #define alc_free	snd_hda_gen_free
 
 #ifdef CONFIG_PM
@@ -5111,7 +5102,7 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
 	struct alc_spec *spec = codec->spec;
 
 	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
-		spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
+		spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */
 		spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
 		codec->power_save_node = 0; /* avoid click noises */
 		snd_hda_apply_pincfgs(codec, pincfgs);
@@ -6851,6 +6842,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+	SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
diff --git a/sound/sound_core.c b/sound/sound_core.c
index 40ad000..dd64c4b 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -280,7 +280,8 @@ static int sound_insert_unit(struct sound_unit **list, const struct file_operati
 				goto retry;
 			}
 			spin_unlock(&sound_loader_lock);
-			return -EBUSY;
+			r = -EBUSY;
+			goto fail;
 		}
 	}
 
diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
index e1fbb9c..a197fc3 100644
--- a/sound/usb/hiface/pcm.c
+++ b/sound/usb/hiface/pcm.c
@@ -604,14 +604,13 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
 		ret = hiface_pcm_init_urb(&rt->out_urbs[i], chip, OUT_EP,
 				    hiface_pcm_out_urb_handler);
 		if (ret < 0)
-			return ret;
+			goto error;
 	}
 
 	ret = snd_pcm_new(chip->card, "USB-SPDIF Audio", 0, 1, 0, &pcm);
 	if (ret < 0) {
-		kfree(rt);
 		dev_err(&chip->dev->dev, "Cannot create pcm instance\n");
-		return ret;
+		goto error;
 	}
 
 	pcm->private_data = rt;
@@ -624,4 +623,10 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
 
 	chip->pcm = rt;
 	return 0;
+
+error:
+	for (i = 0; i < PCM_N_URBS; i++)
+		kfree(rt->out_urbs[i].buffer);
+	kfree(rt);
+	return ret;
 }
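The fix replaces early returns that leaked the URB buffers with a single error: label that unwinds everything allocated so far, so each failure path frees exactly what was set up. The canonical goto-unwind shape, reduced to a standalone sketch with made-up sizes:

#include <stdlib.h>

#define N_URBS 8

struct rt { void *buf[N_URBS]; };

static struct rt *init_all(void)
{
	struct rt *rt = calloc(1, sizeof(*rt));

	if (!rt)
		return NULL;

	for (int i = 0; i < N_URBS; i++) {
		rt->buf[i] = malloc(4096);
		if (!rt->buf[i])
			goto error;     /* one exit path for every failure */
	}
	return rt;

error:
	for (int i = 0; i < N_URBS; i++)
		free(rt->buf[i]);       /* free(NULL) is a no-op */
	free(rt);
	return NULL;
}

int main(void)
{
	struct rt *rt = init_all();

	if (rt) {
		for (int i = 0; i < N_URBS; i++)
			free(rt->buf[i]);
		free(rt);
	}
	return 0;
}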
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index e63a7d3..799d153 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -83,6 +83,7 @@ struct mixer_build {
 	unsigned char *buffer;
 	unsigned int buflen;
 	DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
+	DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
 	struct usb_audio_term oterm;
 	const struct usbmix_name_map *map;
 	const struct usbmix_selector_map *selector_map;
@@ -759,6 +760,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
 		return -EINVAL;
 	if (!desc->bNrInPins)
 		return -EINVAL;
+	if (desc->bLength < sizeof(*desc) + desc->bNrInPins)
+		return -EINVAL;
 
 	switch (state->mixer->protocol) {
 	case UAC_VERSION_1:
@@ -788,16 +791,25 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
  * parse the source unit recursively until it reaches to a terminal
  * or a branched unit.
  */
-static int check_input_term(struct mixer_build *state, int id,
+static int __check_input_term(struct mixer_build *state, int id,
 			    struct usb_audio_term *term)
 {
 	int protocol = state->mixer->protocol;
 	int err;
 	void *p1;
+	unsigned char *hdr;
 
 	memset(term, 0, sizeof(*term));
-	while ((p1 = find_audio_control_unit(state, id)) != NULL) {
-		unsigned char *hdr = p1;
+	for (;;) {
+		/* a loop in the terminal chain? */
+		if (test_and_set_bit(id, state->termbitmap))
+			return -EINVAL;
+
+		p1 = find_audio_control_unit(state, id);
+		if (!p1)
+			break;
+
+		hdr = p1;
 		term->id = id;
 
 		if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) {
@@ -815,7 +827,7 @@ static int check_input_term(struct mixer_build *state, int id,
 
 					/* call recursively to verify that the
 					 * referenced clock entity is valid */
-					err = check_input_term(state, d->bCSourceID, term);
+					err = __check_input_term(state, d->bCSourceID, term);
 					if (err < 0)
 						return err;
 
@@ -849,7 +861,7 @@ static int check_input_term(struct mixer_build *state, int id,
 			case UAC2_CLOCK_SELECTOR: {
 				struct uac_selector_unit_descriptor *d = p1;
 				/* call recursively to retrieve the channel info */
-				err = check_input_term(state, d->baSourceID[0], term);
+				err = __check_input_term(state, d->baSourceID[0], term);
 				if (err < 0)
 					return err;
 				term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -912,7 +924,7 @@ static int check_input_term(struct mixer_build *state, int id,
 
 				/* call recursively to verify that the
 				 * referenced clock entity is valid */
-				err = check_input_term(state, d->bCSourceID, term);
+				err = __check_input_term(state, d->bCSourceID, term);
 				if (err < 0)
 					return err;
 
@@ -963,7 +975,7 @@ static int check_input_term(struct mixer_build *state, int id,
 			case UAC3_CLOCK_SELECTOR: {
 				struct uac_selector_unit_descriptor *d = p1;
 				/* call recursively to retrieve the channel info */
-				err = check_input_term(state, d->baSourceID[0], term);
+				err = __check_input_term(state, d->baSourceID[0], term);
 				if (err < 0)
 					return err;
 				term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -979,7 +991,7 @@ static int check_input_term(struct mixer_build *state, int id,
 					return -EINVAL;
 
 				/* call recursively to retrieve the channel info */
-				err = check_input_term(state, d->baSourceID[0], term);
+				err = __check_input_term(state, d->baSourceID[0], term);
 				if (err < 0)
 					return err;
 
@@ -997,6 +1009,15 @@ static int check_input_term(struct mixer_build *state, int id,
 	return -ENODEV;
 }
 
+
+static int check_input_term(struct mixer_build *state, int id,
+			    struct usb_audio_term *term)
+{
+	memset(term, 0, sizeof(*term));
+	memset(state->termbitmap, 0, sizeof(state->termbitmap));
+	return __check_input_term(state, id, term);
+}
+
 /*
  * Feature Unit
  */
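A crafted descriptor set can make unit A name B as its source and B name A, which sent the old check_input_term() into unbounded recursion. The fix records every visited unit id in termbitmap and aborts as soon as an id repeats. The same cycle guard in a self-contained walk; the next_of[] encoding is a hypothetical stand-in for the descriptor chain:

#include <stdbool.h>
#include <stdio.h>

#define MAX_IDS 256

static int next_of[MAX_IDS];    /* next_of[id] = referenced unit, 0 = terminal */

static bool walk_chain(int id, unsigned char *seen)
{
	while (id) {
		if (seen[id])           /* a loop in the terminal chain? */
			return false;
		seen[id] = 1;           /* mirrors test_and_set_bit() */
		id = next_of[id];
	}
	return true;
}

int main(void)
{
	unsigned char seen[MAX_IDS] = {0};

	next_of[1] = 2;
	next_of[2] = 1;                 /* 1 -> 2 -> 1: a cycle */
	printf("%s\n", walk_chain(1, seen) ? "ok" : "loop detected");
	return 0;
}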
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index f6ce6d5..fa2cc4a 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -1058,6 +1058,7 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
 
 		pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 		if (!pd) {
+			kfree(fp->chmap);
 			kfree(fp->rate_table);
 			kfree(fp);
 			return NULL;
diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
index a19690a..c8c86a0 100644
--- a/tools/perf/arch/s390/util/machine.c
+++ b/tools/perf/arch/s390/util/machine.c
@@ -6,8 +6,9 @@
 #include "machine.h"
 #include "api/fs/fs.h"
 #include "debug.h"
+#include "symbol.h"
 
-int arch__fix_module_text_start(u64 *start, const char *name)
+int arch__fix_module_text_start(u64 *start, u64 *size, const char *name)
 {
 	u64 m_start = *start;
 	char path[PATH_MAX];
@@ -17,7 +18,35 @@ int arch__fix_module_text_start(u64 *start, const char *name)
 	if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
 		pr_debug2("Using module %s start:%#lx\n", path, m_start);
 		*start = m_start;
+	} else {
+		/* Successfully read the module's text segment start address.
+		 * Calculate the difference between the module start address
+		 * in memory and the module text segment start address.
+		 * For example, the module load address is 0x3ff8011b000
+		 * (from /proc/modules) and the module text segment start
+		 * address is 0x3ff8011b870 (from the file above).
+		 *
+		 * Adjust the module size and subtract the GOT table
+		 * size located at the beginning of the module.
+		 */
+		*size -= (*start - m_start);
 	}
 
 	return 0;
 }
+
+/* On s390 the kernel text segment starts at very low memory addresses,
+ * for example 0x10000. Modules are located at very high memory addresses,
+ * for example 0x3ff xxxx xxxx. The gap between the end of the kernel text
+ * segment and the beginning of the first module's text segment is very large.
+ * Therefore do not fill this gap and do not assign it to the kernel dso map.
+ */
+void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
+{
+	if (strchr(p->name, '[') == NULL && strchr(c->name, '['))
+		/* Last kernel symbol mapped to end of page */
+		p->end = roundup(p->end, page_size);
+	else
+		p->end = c->start;
+	pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end);
+}
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 99de916..0bdb34f 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -711,6 +711,16 @@ __cmd_probe(int argc, const char **argv)
 
 		ret = perf_add_probe_events(params.events, params.nevents);
 		if (ret < 0) {
+
+			/*
+			 * When perf_add_probe_events() fails it calls
+			 * cleanup_perf_probe_events(pevs, npevs), i.e.
+			 * cleanup_perf_probe_events(params.events, params.nevents), which
+			 * will call clear_perf_probe_event(), so set nevents to zero
+			 * to keep cleanup_params() from calling clear_perf_probe_event() again
+			 * on the same pevs.
+			 */
+			params.nevents = 0;
 			pr_err_with_code("  Error: Failed to add events.", ret);
 			return ret;
 		}
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 7f2e3b1..54c34c1 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -3285,6 +3285,13 @@ int perf_session__read_header(struct perf_session *session)
 			   data->file.path);
 	}
 
+	if (f_header.attr_size == 0) {
+		pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
+		       "Was the 'perf record' command properly terminated?\n",
+		       data->file.path);
+		return -EINVAL;
+	}
+
 	nr_attrs = f_header.attrs.size / f_header.attr_size;
 	lseek(fd, f_header.attrs.offset, SEEK_SET);
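The attr_size guard matters because the value feeds straight into the division just above: a zeroed header left behind by an interrupted perf record would otherwise cause a divide-by-zero trap before a single attribute is read. A minimal reproduction of why the guard must precede the division:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int count_attrs(uint64_t section_size, uint64_t attr_size,
		       uint64_t *nr_attrs)
{
	if (attr_size == 0)     /* guard before the division: 0 would trap (SIGFPE) */
		return -EINVAL;
	*nr_attrs = section_size / attr_size;
	return 0;
}

int main(void)
{
	uint64_t n;

	printf("%d\n", count_attrs(1024, 0, &n));    /* -22, no crash */
	printf("%d n=%llu\n", count_attrs(1024, 128, &n),
	       (unsigned long long)n);               /* 0 n=8 */
	return 0;
}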
 
@@ -3365,7 +3372,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
 	size += sizeof(struct perf_event_header);
 	size += ids * sizeof(u64);
 
-	ev = malloc(size);
+	ev = zalloc(size);
 
 	if (ev == NULL)
 		return -ENOMEM;
@@ -3472,7 +3479,7 @@ int perf_event__process_feature(struct perf_tool *tool,
 		return 0;
 
 	ff.buf  = (void *)fe->data;
-	ff.size = event->header.size - sizeof(event->header);
+	ff.size = event->header.size - sizeof(*fe);
 	ff.ph = &session->header;
 
 	if (feat_ops[feat].process(&ff, NULL))
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 076718a..003b70d 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1295,6 +1295,7 @@ static int machine__set_modules_path(struct machine *machine)
 	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
 }
 int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
+				u64 *size __maybe_unused,
 				const char *name __maybe_unused)
 {
 	return 0;
@@ -1306,7 +1307,7 @@ static int machine__create_module(void *arg, const char *name, u64 start,
 	struct machine *machine = arg;
 	struct map *map;
 
-	if (arch__fix_module_text_start(&start, name) < 0)
+	if (arch__fix_module_text_start(&start, &size, name) < 0)
 		return -1;
 
 	map = machine__findnew_module_map(machine, start, name);
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index ebde3ea..6f37678 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -219,7 +219,7 @@ struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine,
 
 struct map *machine__findnew_module_map(struct machine *machine, u64 start,
 					const char *filename);
-int arch__fix_module_text_start(u64 *start, const char *name);
+int arch__fix_module_text_start(u64 *start, u64 *size, const char *name);
 
 int machine__load_kallsyms(struct machine *machine, const char *filename);
 
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 0715f97..91404ba 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -86,6 +86,11 @@ static int prefix_underscores_count(const char *str)
 	return tail - str;
 }
 
+void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
+{
+	p->end = c->start;
+}
+
 const char * __weak arch__normalize_symbol_name(const char *name)
 {
 	return name;
@@ -212,7 +217,7 @@ void symbols__fixup_end(struct rb_root *symbols)
 		curr = rb_entry(nd, struct symbol, rb_node);
 
 		if (prev->end == prev->start && prev->end != curr->start)
-			prev->end = curr->start;
+			arch__symbols__fixup_end(prev, curr);
 	}
 
 	/* Last entry */
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index f25fae4..76ef2fa 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -349,6 +349,7 @@ const char *arch__normalize_symbol_name(const char *name);
 #define SYMBOL_A 0
 #define SYMBOL_B 1
 
+void arch__symbols__fixup_end(struct symbol *p, struct symbol *c);
 int arch__compare_symbol_names(const char *namea, const char *nameb);
 int arch__compare_symbol_names_n(const char *namea, const char *nameb,
 				 unsigned int n);
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 56007a7..2c146d0 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -192,14 +192,24 @@ struct comm *thread__comm(const struct thread *thread)
 
 struct comm *thread__exec_comm(const struct thread *thread)
 {
-	struct comm *comm, *last = NULL;
+	struct comm *comm, *last = NULL, *second_last = NULL;
 
 	list_for_each_entry(comm, &thread->comm_list, list) {
 		if (comm->exec)
 			return comm;
+		second_last = last;
 		last = comm;
 	}
 
+	/*
+	 * 'last' with no start time might be the parent's comm of a synthesized
+	 * thread (created by processing a synthesized fork event). For a main
+	 * thread, that is very probably wrong. Prefer a later comm to avoid
+	 * that case.
+	 */
+	if (second_last && !last->start && thread->pid_ == thread->tid)
+		return second_last;
+
 	return last;
 }
 
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 02bac8a..d982650 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -338,6 +338,17 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
 	kvm_timer_schedule(vcpu);
+	/*
+	 * If we're about to block (most likely because we've just hit a
+	 * WFI), we need to sync back the state of the GIC CPU interface
+	 * so that we have the latest PMR and group enables. This ensures
+	 * that kvm_arch_vcpu_runnable has up-to-date data to decide
+	 * whether we have pending interrupts.
+	 */
+	preempt_disable();
+	kvm_vgic_vmcr_sync(vcpu);
+	preempt_enable();
+
 	kvm_vgic_v4_enable_doorbell(vcpu);
 }
 
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 69b892a..57281c1 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -495,10 +495,17 @@ void vgic_v2_load(struct kvm_vcpu *vcpu)
 		       kvm_vgic_global_state.vctrl_base + GICH_APR);
 }
 
-void vgic_v2_put(struct kvm_vcpu *vcpu)
+void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
 
 	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
+}
+
+void vgic_v2_put(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+
+	vgic_v2_vmcr_sync(vcpu);
 	cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
 }
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 3f2350a..5c55995 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -674,12 +674,17 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
 		__vgic_v3_activate_traps(vcpu);
 }
 
-void vgic_v3_put(struct kvm_vcpu *vcpu)
+void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 
 	if (likely(cpu_if->vgic_sre))
 		cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
+}
+
+void vgic_v3_put(struct kvm_vcpu *vcpu)
+{
+	vgic_v3_vmcr_sync(vcpu);
 
 	kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
 
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index c5165e3..250cd72 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -902,6 +902,17 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
 		vgic_v3_put(vcpu);
 }
 
+void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
+{
+	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+		return;
+
+	if (kvm_vgic_global_state.type == VGIC_V2)
+		vgic_v2_vmcr_sync(vcpu);
+	else
+		vgic_v3_vmcr_sync(vcpu);
+}
+
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index a9002471..d5e4542 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -204,6 +204,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
 void vgic_v2_init_lrs(void);
 void vgic_v2_load(struct kvm_vcpu *vcpu);
 void vgic_v2_put(struct kvm_vcpu *vcpu);
+void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
 
 void vgic_v2_save_state(struct kvm_vcpu *vcpu);
 void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
@@ -234,6 +235,7 @@ bool vgic_v3_check_base(struct kvm *kvm);
 
 void vgic_v3_load(struct kvm_vcpu *vcpu);
 void vgic_v3_put(struct kvm_vcpu *vcpu);
+void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
 
 bool vgic_has_its(struct kvm *kvm);
 int kvm_vgic_register_its_device(void);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2b36a51..4a584a5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2317,6 +2317,29 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 #endif
 }
 
+/*
+ * Unlike kvm_arch_vcpu_runnable, this function is called outside
+ * a vcpu_load/vcpu_put pair.  However, for most architectures
+ * kvm_arch_vcpu_runnable does not require vcpu_load.
+ */
+bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+	return kvm_arch_vcpu_runnable(vcpu);
+}
+
+static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
+{
+	if (kvm_arch_dy_runnable(vcpu))
+		return true;
+
+#ifdef CONFIG_KVM_ASYNC_PF
+	if (!list_empty_careful(&vcpu->async_pf.done))
+		return true;
+#endif
+
+	return false;
+}
+
 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 {
 	struct kvm *kvm = me->kvm;
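kvm_arch_dy_runnable() follows the kernel's weak-symbol convention: the generic file provides a safe default, and any architecture overrides it simply by defining a strong function of the same name, with no registration table or #ifdef. A short GCC/Clang sketch of the mechanism:

#include <stdio.h>

/* Generic default; the weak attribute lets another translation unit
 * override it just by defining a strong symbol of the same name. */
__attribute__((weak)) int arch_dy_runnable(void)
{
	return 0;       /* conservative fallback, like the generic check */
}

/* Defining this in another file would silently replace the weak default:
 * int arch_dy_runnable(void) { return 1; }
 */

int main(void)
{
	printf("runnable=%d\n", arch_dy_runnable());
	return 0;
}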
@@ -2346,7 +2369,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 				continue;
 			if (vcpu == me)
 				continue;
-			if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
+			if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
 				continue;
 			if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
 				continue;