Merge "mailbox: apcs-ipc: Add compatible string for scuba"
diff --git a/Android.bp b/Android.bp
index 4341e3a..7567737 100644
--- a/Android.bp
+++ b/Android.bp
@@ -3,20 +3,43 @@
     srcs: ["scripts/unifdef.c"],
     sanitize: {
         never: true,
-    }
+    },
+}
+
+gensrcs {
+    name: "gen-headers_install.sh",
+    srcs: ["scripts/headers_install.sh"],
+    tools: ["unifdef"],
+    cmd: "sed 's+scripts/unifdef+$(location unifdef)+g' $(in) > $(out)",
+    output_extension: "sh",
+}
+
+cc_prebuilt_binary {
+    name: "headers_install.sh",
+    device_supported: false,
+    host_supported: true,
+    srcs: [":gen-headers_install.sh"],
+}
+
+gensrcs {
+    name: "qcom-kernel-includes",
+    cmd: "$(location headers_install.sh) `dirname $(out)` `dirname $(in)` `basename $(in)`",
+    tools: ["headers_install.sh"],
+    export_include_dirs: ["include/uapi"],
+    srcs: [
+        "include/uapi/**/*.h",
+    ],
+    output_extension: "h",
 }
 
 gensrcs {
     name: "qseecom-kernel-includes",
-
-    // move to out/ as root for header generation because of scripts/unifdef
-    // storage - at the expense of extra ../ references
-    cmd: "pushd out && mkdir -p scripts && rm -f scripts/unifdef && ln -s ../../$(location unifdef) scripts/unifdef && ../$(location scripts/headers_install.sh) `dirname ../$(out)` ../ $(in) && popd",
-
-    tools: ["unifdef"],
-    tool_files: ["scripts/headers_install.sh"],
+    cmd: "$(location headers_install.sh) `dirname $(out)` `dirname $(in)` `basename $(in)`",
+    tools: ["headers_install.sh"],
     export_include_dirs: ["include/uapi"],
-    srcs: ["include/uapi/linux/qseecom.h"],
+    srcs: [
+        "include/uapi/linux/qseecom.h",
+    ],
     output_extension: "h",
 }
 
@@ -25,3 +48,11 @@
     generated_headers: ["qseecom-kernel-includes"],
     export_generated_headers: ["qseecom-kernel-includes"],
 }
+
+cc_library_headers {
+    name: "qcom_kernel_headers",
+    generated_headers: ["qcom-kernel-includes"],
+    export_generated_headers: ["qcom-kernel-includes"],
+    vendor: true,
+    recovery_available: true,
+}
diff --git a/Documentation/perf/qcom_l2_counters.txt b/Documentation/perf/qcom_l2_counters.txt
new file mode 100644
index 0000000..5b45aaf
--- /dev/null
+++ b/Documentation/perf/qcom_l2_counters.txt
@@ -0,0 +1,63 @@
+Qualcomm Technologies, Inc. L2 cache counters
+=============================================
+
+This driver supports the L2 cache cluster counters found on
+Qualcomm Technologies, Inc. SoCs.
+
+There are multiple physical L2 cache clusters, each with its
+own counters. Each cluster has one or more CPUs associated with it.
+
+There is one logical L2 PMU exposed, which aggregates the results from
+the physical PMUs (counters).
+
+The driver provides a description of its available events and configuration
+options in sysfs; see /sys/devices/l2cache_counters.
+
+The "format" directory describes the format of the events.
+
+The format is of the form 0xXXX,
+where:
+
+  1 bit (lsb) selects the group (group is either txn/tenure counter).
+  4 bits hold the counter serial number, from 0 to 8.
+  5 bits hold the bit position of the counter enable bit in a register.
+
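+As an illustration, assume a hypothetical event in the txn group
+(bit 0 = 0), with counter serial number 3 and its enable bit at
+register bit position 17. With the lsb-first layout above, the raw
+config value and a matching perf invocation would be:
+
+  (17 << 5) | (3 << 1) | 0 = 0x226
+
+  perf stat -e l2cache_counters/config=0x226/ -a sleep 1
+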
+The driver provides a "cpumask" sysfs attribute which contains a mask
+consisting of one CPU per cluster which will be used to handle all the PMU
+events on that cluster.
+
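+For example, reading the cpumask (hypothetical output on a system with
+two clusters):
+
+  $ cat /sys/devices/l2cache_counters/cpumask
+  0,4
+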
+Examples for use with perf:
+
+  perf stat -e l2cache_counters/ddr_read/,l2cache_counters/ddr_write/ -a sleep 1
+
+  perf stat -e l2cache_counters/cycles/ -C 2 sleep 1
+
+Limitation: The driver does not support sampling; therefore, "perf record"
+will not work. Per-task perf sessions are not supported.
+
+For transaction counters, no configuration needs to be set
+before monitoring.
+
+For the tenure counter use case, we need to set the low and mid range
+occurrence counter thresholds for the cluster in sysfs (these
+occurrence counters exist per cluster).
+
+echo 1 > /sys/bus/event_source/devices/l2cache_counters/which_cluster_tenure
+echo X > /sys/bus/event_source/devices/l2cache_counters/low_tenure_threshold
+echo Y > /sys/bus/event_source/devices/l2cache_counters/mid_tenure_threshold
+Here, X < Y
+
+e.g.:
+
+  perf stat -e l2cache_counters/low_range_occur/ \
+            -e l2cache_counters/mid_range_occur/ \
+            -e l2cache_counters/high_range_occur/ -C 4 sleep 10
+
+ Performance counter stats for 'CPU(s) 4':
+
+                 7      l2cache_counters/low_range_occur/
+                 5      l2cache_counters/mid_range_occur/
+                 7      l2cache_counters/high_range_occur/
+
+      10.204140400 seconds time elapsed
+
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9ceb0b2..8c47ddb 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -97,6 +97,7 @@
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UID16
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN
+	select IOMMU_DMA if IOMMU_SUPPORT
 	select IRQ_FORCED_THREADING
 	select MODULES_USE_ELF_REL
 	select NEED_DMA_MAP_STATE
@@ -133,7 +134,7 @@
 config ARM_DMA_IOMMU_ALIGNMENT
 	int "Maximum PAGE_SIZE order of alignment for DMA IOMMU buffers"
 	range 4 9
-	default 8
+	default 9
 	help
 	  DMA mapping framework by default aligns all buffers to the smallest
 	  PAGE_SIZE order which is greater than or equal to the requested buffer
@@ -1717,6 +1718,35 @@
 	  Disabling this is usually safe for small single-platform
 	  configurations. If unsure, say y.
 
+choice
+	prompt "Virtual Memory Reclaim"
+	default ENABLE_VMALLOC_SAVING
+	help
+	  Select the method of reclaiming virtual memory: either
+	  NO_VM_RECLAIM or ENABLE_VMALLOC_SAVING, as described
+	  below.
+
+	  If you are not absolutely sure what you are doing, leave this
+	  option alone.
+
+config ENABLE_VMALLOC_SAVING
+	bool "Reclaim memory for each subsystem"
+	help
+	  Enable this config to reclaim the virtual space belonging
+	  to any subsystem which is expected to have a lifetime of
+	  the entire system. This feature allows lowmem to be
+	  non-contiguous.
+
+config NO_VM_RECLAIM
+	bool "Do not reclaim memory"
+	help
+	  Do not reclaim any memory. This might result in less lowmem
+	  and waste some virtual memory space which could otherwise
+	  be reclaimed by choosing ENABLE_VMALLOC_SAVING, the other
+	  option above.
+
+endchoice
+
 config FORCE_MAX_ZONEORDER
 	int "Maximum zone order"
 	default "12" if SOC_AM33XX
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index f6fcb8a..908022d 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -92,6 +92,21 @@
 	      8 - SIGSEGV faults
 	     16 - SIGBUS faults
 
+config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+	def_bool y
+	depends on FORCE_PAGES
+
+config FORCE_PAGES
+	bool "Force lowmem to be mapped with 4K pages"
+	help
+	  There are some advanced debug features that can only be used when
+	  memory is mapped with pages instead of sections. Enable this option
+	  to always map lowmem with pages. This may have a performance
+	  cost due to increased TLB pressure.
+
+	  If unsure, say N.
+
 # These options are only for real kernel hackers who want to get their hands dirty.
 config DEBUG_LL
 	bool "Kernel low-level debugging functions (read help!)"
diff --git a/arch/arm/configs/vendor/bengal-perf_defconfig b/arch/arm/configs/vendor/bengal-perf_defconfig
index 44f2dd8..2879d58 100644
--- a/arch/arm/configs/vendor/bengal-perf_defconfig
+++ b/arch/arm/configs/vendor/bengal-perf_defconfig
@@ -253,6 +253,7 @@
 CONFIG_SOCKEV_NLMCAST=y
 CONFIG_BT=y
 CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM_WCN3990=y
 CONFIG_CFG80211=y
 CONFIG_CFG80211_INTERNAL_REGDB=y
 CONFIG_RFKILL=y
@@ -285,9 +286,11 @@
 CONFIG_SCSI_UFSHCD=y
 CONFIG_SCSI_UFSHCD_PLATFORM=y
 CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
@@ -340,9 +343,11 @@
 CONFIG_DIAG_CHAR=y
 CONFIG_MSM_ADSPRPC=y
 CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QCOM_GENI=y
 CONFIG_SPI=y
 CONFIG_SPI_DEBUG=y
 CONFIG_SPI_QUP=y
+CONFIG_SPI_QCOM_GENI=y
 CONFIG_SPI_SPIDEV=y
 CONFIG_SPMI=y
 CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
@@ -469,20 +474,24 @@
 CONFIG_MMC_IPC_LOGGING=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
 CONFIG_MMC_SDHCI_MSM=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_QTI_TRI_LED=y
 CONFIG_LEDS_QPNP_FLASH_V2=y
 CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
+CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_PM8XXX=y
 CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
 CONFIG_UIO=y
 CONFIG_UIO_MSM_SHAREDMEM=y
 CONFIG_STAGING=y
 CONFIG_ASHMEM=y
 CONFIG_ION=y
+CONFIG_ION_POOL_AUTO_REFILL=y
 CONFIG_QPNP_REVID=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
@@ -491,6 +500,7 @@
 CONFIG_RMNET_IPA3=y
 CONFIG_RNDIS_IPA=y
 CONFIG_IPA_UT=y
+CONFIG_USB_BAM=y
 CONFIG_QCOM_GENI_SE=y
 CONFIG_QCOM_CLK_SMD_RPM=y
 CONFIG_SPMI_PMIC_CLKDIV=y
@@ -502,6 +512,7 @@
 CONFIG_MAILBOX=y
 CONFIG_QCOM_APCS_IPC=y
 CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
 CONFIG_ARM_SMMU=y
 CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_IOMMU_DEBUG=y
@@ -520,6 +531,7 @@
 CONFIG_QCOM_SMEM=y
 CONFIG_QCOM_SMD_RPM=y
 CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
 CONFIG_QCOM_SMP2P=y
 CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
 CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
@@ -535,6 +547,8 @@
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_QCOM_DCC_V2=y
 CONFIG_QCOM_EUD=y
+CONFIG_QCOM_MINIDUMP=y
+CONFIG_QCOM_FSA4480_I2C=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QCOM_BUS_SCALING=y
@@ -542,17 +556,24 @@
 CONFIG_QCOM_GLINK_PKT=y
 CONFIG_QCOM_SMP2P_SLEEPSTATE=y
 CONFIG_MSM_CDSP_LOADER=y
+CONFIG_QCOM_SMCINVOKE=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_QTEE_SHM_BRIDGE=y
 CONFIG_MEM_SHARE_QMI_SERVICE=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_QCOM_CDSP_RM=y
 CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
+CONFIG_QCOM_CX_IPEAK=y
 CONFIG_ICNSS=y
 CONFIG_ICNSS_QMI=y
 CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_GOV_MEMLAT=y
 CONFIG_ARM_QCOM_DEVFREQ_FW=y
+CONFIG_DEVFREQ_SIMPLE_DEV=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_QCOM_SPMI_ADC5=y
@@ -565,7 +586,6 @@
 CONFIG_ANDROID_BINDER_IPC=y
 CONFIG_QCOM_QFPROM=y
 CONFIG_NVMEM_SPMI_SDAM=y
-CONFIG_SLIMBUS=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_QCOM_KGSL=y
 CONFIG_EXT2_FS=y
@@ -594,6 +614,7 @@
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_PFK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_LSM_MMAP_MIN_ADDR=4096
@@ -608,6 +629,10 @@
 CONFIG_CRYPTO_TWOFISH=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
 CONFIG_XZ_DEC=y
 CONFIG_STACK_HASH_ORDER_SHIFT=12
 CONFIG_PRINTK_TIME=y
@@ -625,6 +650,7 @@
 CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
 CONFIG_CORESIGHT_STM=y
 CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_CTI_SAVE_DISABLE=y
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_HWEVENT=y
diff --git a/arch/arm/configs/vendor/bengal_defconfig b/arch/arm/configs/vendor/bengal_defconfig
index 71b910b..25c7945 100644
--- a/arch/arm/configs/vendor/bengal_defconfig
+++ b/arch/arm/configs/vendor/bengal_defconfig
@@ -96,6 +96,7 @@
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_CLEANCACHE=y
 CONFIG_CMA=y
+CONFIG_CMA_DEBUG=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_CMA_ALLOW_WRITE_DEBUGFS=y
 CONFIG_ZSMALLOC=y
@@ -254,6 +255,7 @@
 CONFIG_SOCKEV_NLMCAST=y
 CONFIG_BT=y
 CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM_WCN3990=y
 CONFIG_CFG80211=y
 CONFIG_CFG80211_INTERNAL_REGDB=y
 CONFIG_RFKILL=y
@@ -287,9 +289,11 @@
 CONFIG_SCSI_UFSHCD=y
 CONFIG_SCSI_UFSHCD_PLATFORM=y
 CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
@@ -335,16 +339,17 @@
 CONFIG_SERIAL_MSM_GENI=y
 CONFIG_SERIAL_MSM_GENI_CONSOLE=y
 CONFIG_SERIAL_MSM_GENI_HALF_SAMPLING=y
-CONFIG_SERIAL_DEV_BUS=y
 CONFIG_TTY_PRINTK=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
 CONFIG_DIAG_CHAR=y
 CONFIG_MSM_ADSPRPC=y
 CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QCOM_GENI=y
 CONFIG_SPI=y
 CONFIG_SPI_DEBUG=y
 CONFIG_SPI_QUP=y
+CONFIG_SPI_QCOM_GENI=y
 CONFIG_SPI_SPIDEV=y
 CONFIG_SPMI=y
 CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
@@ -471,21 +476,26 @@
 CONFIG_MMC_IPC_LOGGING=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
 CONFIG_MMC_SDHCI_MSM=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_QTI_TRI_LED=y
 CONFIG_LEDS_QPNP_FLASH_V2=y
 CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
+CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_EDAC=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_PM8XXX=y
 CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_QCOM_GPI_DMA_DEBUG=y
 CONFIG_UIO=y
 CONFIG_UIO_MSM_SHAREDMEM=y
 CONFIG_STAGING=y
 CONFIG_ASHMEM=y
 CONFIG_ION=y
+CONFIG_ION_POOL_AUTO_REFILL=y
 CONFIG_QPNP_REVID=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
@@ -494,6 +504,7 @@
 CONFIG_RMNET_IPA3=y
 CONFIG_RNDIS_IPA=y
 CONFIG_IPA_UT=y
+CONFIG_USB_BAM=y
 CONFIG_QCOM_GENI_SE=y
 CONFIG_QCOM_CLK_SMD_RPM=y
 CONFIG_SPMI_PMIC_CLKDIV=y
@@ -505,7 +516,9 @@
 CONFIG_MAILBOX=y
 CONFIG_QCOM_APCS_IPC=y
 CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
 CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_TLBSYNC_DEBUG=y
 CONFIG_ARM_SMMU_TESTBUS_DUMP=y
 CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_IOMMU_DEBUG=y
@@ -524,6 +537,7 @@
 CONFIG_QCOM_SMEM=y
 CONFIG_QCOM_SMD_RPM=y
 CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
 CONFIG_QCOM_SMP2P=y
 CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
 CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
@@ -539,10 +553,12 @@
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_QCOM_DCC_V2=y
 CONFIG_QCOM_EUD=y
+CONFIG_QCOM_MINIDUMP=y
 CONFIG_MSM_CORE_HANG_DETECT=y
 CONFIG_MSM_GLADIATOR_HANG_DETECT=y
 CONFIG_MSM_GLADIATOR_ERP=y
 CONFIG_PANIC_ON_GLADIATOR_ERROR=y
+CONFIG_QCOM_FSA4480_I2C=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QCOM_BUS_SCALING=y
@@ -550,19 +566,26 @@
 CONFIG_QCOM_GLINK_PKT=y
 CONFIG_QCOM_SMP2P_SLEEPSTATE=y
 CONFIG_MSM_CDSP_LOADER=y
+CONFIG_QCOM_SMCINVOKE=y
 CONFIG_MSM_EVENT_TIMER=y
 # CONFIG_MSM_JTAGV8 is not set
 CONFIG_QTEE_SHM_BRIDGE=y
 CONFIG_MEM_SHARE_QMI_SERVICE=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_QCOM_CDSP_RM=y
 CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
+CONFIG_QCOM_CX_IPEAK=y
 CONFIG_ICNSS=y
 CONFIG_ICNSS_DEBUG=y
 CONFIG_ICNSS_QMI=y
 CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_GOV_MEMLAT=y
 CONFIG_ARM_QCOM_DEVFREQ_FW=y
+CONFIG_DEVFREQ_SIMPLE_DEV=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_QCOM_SPMI_ADC5=y
@@ -575,7 +598,6 @@
 CONFIG_ANDROID_BINDER_IPC=y
 CONFIG_QCOM_QFPROM=y
 CONFIG_NVMEM_SPMI_SDAM=y
-CONFIG_SLIMBUS=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_QCOM_KGSL=y
 CONFIG_EXT2_FS=y
@@ -604,6 +626,7 @@
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_PFK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_LSM_MMAP_MIN_ADDR=4096
@@ -618,6 +641,10 @@
 CONFIG_CRYPTO_TWOFISH=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
 CONFIG_XZ_DEC=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
@@ -631,6 +658,7 @@
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_SLUB_DEBUG_PANIC_ON=y
+CONFIG_DEBUG_PANIC_ON_OOM=y
 CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
 CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
 CONFIG_DEBUG_OBJECTS=y
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index c19d2f2..6913622 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -178,10 +178,26 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
+extern void __dma_map_area(const void *addr, size_t size, int dir);
+extern void __dma_unmap_area(const void *addr, size_t size, int dir);
 extern void dmac_inv_range(const void *start, const void *end);
 extern void dmac_clean_range(const void *start, const void *end);
 extern void dmac_flush_range(const void *, const void *);
 
+static inline void __dma_inv_area(const void *start, size_t len)
+{
+	dmac_inv_range(start, start + len);
+}
+
+static inline void __dma_clean_area(const void *start, size_t len)
+{
+	dmac_clean_range(start, start + len);
+}
+
+static inline void __dma_flush_area(const void *start, size_t len)
+{
+	dmac_flush_range(start, start + len);
+}
 #endif
 
 /*
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 8f36119..4313886 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -15,6 +15,7 @@
 struct dma_iommu_mapping {
 	/* iommu specific data */
 	struct iommu_domain	*domain;
+	const struct dma_map_ops *ops;
 
 	unsigned long		**bitmaps;	/* array of bitmaps */
 	unsigned int		nr_bitmaps;	/* nr of elements in array */
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index 073ce84..57444f1a 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -161,6 +161,11 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
 #define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
 #define dmac_inv_range			__glue(_CACHE, _dma_inv_range)
 #define dmac_clean_range		__glue(_CACHE, _dma_clean_range)
+#define dmac_map_area			__glue(_CACHE, _dma_map_area)
+#define dmac_unmap_area			__glue(_CACHE, _dma_unmap_area)
+
+#define __dma_map_area         dmac_map_area
+#define __dma_unmap_area       dmac_unmap_area
 #endif
 
 #endif
diff --git a/arch/arm/include/uapi/asm/setup.h b/arch/arm/include/uapi/asm/setup.h
index 6b335a9..08d1709 100644
--- a/arch/arm/include/uapi/asm/setup.h
+++ b/arch/arm/include/uapi/asm/setup.h
@@ -17,7 +17,7 @@
 
 #include <linux/types.h>
 
-#define COMMAND_LINE_SIZE 1024
+#define COMMAND_LINE_SIZE 2048
 
 /* The list ends with an ATAG_NONE node. */
 #define ATAG_NONE	0x00000000
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index a906cc54..a172ff5 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -51,6 +51,9 @@
 #include <asm/virt.h>
 #include <asm/mach/arch.h>
 #include <asm/mpu.h>
+#include <soc/qcom/minidump.h>
+
+#include <soc/qcom/lpm_levels.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
@@ -607,12 +610,13 @@ static DEFINE_RAW_SPINLOCK(stop_lock);
 /*
  * ipi_cpu_stop - handle IPI from smp_send_stop()
  */
-static void ipi_cpu_stop(unsigned int cpu)
+static void ipi_cpu_stop(unsigned int cpu, struct pt_regs *regs)
 {
 	if (system_state <= SYSTEM_RUNNING) {
 		raw_spin_lock(&stop_lock);
 		pr_crit("CPU%u: stopping\n", cpu);
 		dump_stack();
+		dump_stack_minidump(regs->uregs[13]);
 		raw_spin_unlock(&stop_lock);
 	}
 
@@ -682,7 +686,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
 	case IPI_CPU_STOP:
 		irq_enter();
-		ipi_cpu_stop(cpu);
+		ipi_cpu_stop(cpu, regs);
 		irq_exit();
 		break;
 
@@ -723,6 +727,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
 void smp_send_reschedule(int cpu)
 {
+	update_ipi_history(cpu);
 	smp_cross_call_common(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index a66a9ad1..7bb6192 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -5,6 +5,9 @@
 	select ARM_GIC
 	select ARM_AMBA
 	select PINCTRL
+	select MFD_CORE
+	select SND_SOC_COMPRESS
+	select SND_HWDEP
 	select QCOM_SCM if SMP
 	help
 	  Support for Qualcomm's devicetree based systems.
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 75bc0a8..420b0fd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -28,6 +28,8 @@
 #include <linux/vmalloc.h>
 #include <linux/sizes.h>
 #include <linux/cma.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -112,6 +114,46 @@ static void __dma_page_cpu_to_dev(struct page *, unsigned long,
 static void __dma_page_dev_to_cpu(struct page *, unsigned long,
 		size_t, enum dma_data_direction);
 
+static void *
+__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
+		const void *caller);
+
+static void __dma_free_remap(void *cpu_addr, size_t size, bool no_warn);
+
+static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
+					bool coherent);
+
+static void *arm_dma_remap(struct device *dev, void *cpu_addr,
+			dma_addr_t handle, size_t size,
+			unsigned long attrs);
+
+static void arm_dma_unremap(struct device *dev, void *remapped_addr,
+				size_t size);
+
+
+static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
+				 bool coherent)
+{
+	if (attrs & DMA_ATTR_STRONGLY_ORDERED)
+		return pgprot_stronglyordered(prot);
+	else if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
+		return pgprot_writecombine(prot);
+	return prot;
+}
+
+static bool is_dma_coherent(struct device *dev, unsigned long attrs,
+			    bool is_coherent)
+{
+	if (attrs & DMA_ATTR_FORCE_COHERENT)
+		is_coherent = true;
+	else if (attrs & DMA_ATTR_FORCE_NON_COHERENT)
+		is_coherent = false;
+	else if (is_device_dma_coherent(dev))
+		is_coherent = true;
+
+	return is_coherent;
+}
+
 /**
  * arm_dma_map_page - map a portion of a page for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -200,6 +242,8 @@ const struct dma_map_ops arm_dma_ops = {
 	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
 	.mapping_error		= arm_dma_mapping_error,
 	.dma_supported		= arm_dma_supported,
+	.remap			= arm_dma_remap,
+	.unremap		= arm_dma_unremap,
 };
 EXPORT_SYMBOL(arm_dma_ops);
 
@@ -472,6 +516,15 @@ void __init dma_contiguous_remap(void)
 		struct map_desc map;
 		unsigned long addr;
 
+		/*
+		 * Make start and end PMD_SIZE aligned, observing memory
+		 * boundaries
+		 */
+		if (memblock_is_memory(start & PMD_MASK))
+			start = start & PMD_MASK;
+		if (memblock_is_memory(ALIGN(end, PMD_SIZE)))
+			end = ALIGN(end, PMD_SIZE);
+
 		if (end > arm_lowmem_limit)
 			end = arm_lowmem_limit;
 		if (start >= end)
@@ -492,8 +545,13 @@ void __init dma_contiguous_remap(void)
 		 * and ensures that this code is architecturally compliant.
 		 */
 		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
-		     addr += PMD_SIZE)
-			pmd_clear(pmd_off_k(addr));
+		     addr += PMD_SIZE) {
+			pmd_t *pmd;
+
+			pmd = pmd_off_k(addr);
+			if (pmd_bad(*pmd))
+				pmd_clear(pmd);
+		}
 
 		flush_tlb_kernel_range(__phys_to_virt(start),
 				       __phys_to_virt(end));
@@ -512,21 +570,38 @@ static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
 	return 0;
 }
 
-static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
+static int __dma_clear_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+			    void *data)
+{
+	pte_clear(&init_mm, addr, pte);
+	return 0;
+}
+
+static void __dma_remap(struct page *page, size_t size, pgprot_t prot,
+			bool want_vaddr)
 {
 	unsigned long start = (unsigned long) page_address(page);
 	unsigned end = start + size;
+	int (*func)(pte_t *pte, pgtable_t token, unsigned long addr,
+			    void *data);
 
-	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
+	if (!want_vaddr)
+		func = __dma_clear_pte;
+	else
+		func = __dma_update_pte;
+
+	apply_to_page_range(&init_mm, start, size, func, &prot);
+	mb(); /* Ensure PTEs are updated */
 	flush_tlb_kernel_range(start, end);
 }
 
+#define NO_KERNEL_MAPPING_DUMMY		0x2222
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 				 pgprot_t prot, struct page **ret_page,
 				 const void *caller, bool want_vaddr)
 {
 	struct page *page;
-	void *ptr = NULL;
+	void *ptr = (void *)NO_KERNEL_MAPPING_DUMMY;
 	/*
 	 * __alloc_remap_buffer is only called when the device is
 	 * non-coherent
@@ -600,21 +675,31 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
 
 	__dma_clear_buffer(page, size, coherent_flag);
 
-	if (!want_vaddr)
-		goto out;
-
 	if (PageHighMem(page)) {
-		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
-		if (!ptr) {
-			dma_release_from_contiguous(dev, page, count);
-			return NULL;
+		if (!want_vaddr) {
+			/*
+			 * Something non-NULL needs to be returned here. Give
+			 * back a dummy address that is unmapped to catch
+			 * clients trying to use the address incorrectly
+			 */
+			ptr = (void *)NO_KERNEL_MAPPING_DUMMY;
+
+			/* also flush out the stale highmem mappings */
+			kmap_flush_unused();
+			kmap_atomic_flush_unused();
+		} else {
+			ptr = __dma_alloc_remap(page, size, GFP_KERNEL,
+					prot, caller);
+			if (!ptr) {
+				dma_release_from_contiguous(dev, page, count);
+				return NULL;
+			}
 		}
 	} else {
-		__dma_remap(page, size, prot);
+		__dma_remap(page, size, prot, want_vaddr);
 		ptr = page_address(page);
 	}
 
- out:
 	*ret_page = page;
 	return ptr;
 }
@@ -622,23 +707,13 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
 static void __free_from_contiguous(struct device *dev, struct page *page,
 				   void *cpu_addr, size_t size, bool want_vaddr)
 {
-	if (want_vaddr) {
-		if (PageHighMem(page))
-			__dma_free_remap(cpu_addr, size, true);
-		else
-			__dma_remap(page, size, PAGE_KERNEL);
-	}
+	if (PageHighMem(page))
+		__dma_free_remap(cpu_addr, size, true);
+	else
+		__dma_remap(page, size, PAGE_KERNEL, true);
 	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
 
-static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
-{
-	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
-			pgprot_writecombine(prot) :
-			pgprot_dmacoherent(prot);
-	return prot;
-}
-
 static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 				   struct page **ret_page)
 {
@@ -803,7 +878,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		kfree(buf);
 	}
 
-	return args.want_vaddr ? addr : page;
+	return addr;
 }
 
 /*
@@ -813,7 +888,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		    gfp_t gfp, unsigned long attrs)
 {
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
 
 	return __dma_alloc(dev, size, handle, gfp, prot, false,
 			   attrs, __builtin_return_address(0));
@@ -849,6 +924,41 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	return ret;
 }
 
+static void *arm_dma_remap(struct device *dev, void *cpu_addr,
+			dma_addr_t handle, size_t size,
+			unsigned long attrs)
+{
+	void *ptr;
+	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
+	unsigned long offset = handle & ~PAGE_MASK;
+
+	size = PAGE_ALIGN(size + offset);
+	ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot,
+			__builtin_return_address(0));
+	return ptr ? ptr + offset : ptr;
+}
+
+static void arm_dma_unremap(struct device *dev, void *remapped_addr,
+				size_t size)
+{
+	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
+	struct vm_struct *area;
+
+	size = PAGE_ALIGN(size);
+	remapped_addr = (void *)((unsigned long)remapped_addr & PAGE_MASK);
+
+	area = find_vm_area(remapped_addr);
+	if (!area || (area->flags & flags) != flags) {
+		WARN(1, "trying to free invalid coherent area: %pK\n",
+			remapped_addr);
+		return;
+	}
+
+	vunmap(remapped_addr);
+	flush_tlb_kernel_range((unsigned long)remapped_addr,
+			(unsigned long)(remapped_addr + size));
+}
 /*
  * Create userspace mapping for the DMA-coherent memory.
  */
@@ -863,7 +973,8 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		 unsigned long attrs)
 {
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+						false);
 	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
 
@@ -883,9 +994,10 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		.page = page,
 		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
 	};
+	void *addr = (args.want_vaddr) ? cpu_addr : page;
 
-	buf = arm_dma_buffer_find(cpu_addr);
-	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
+	buf = arm_dma_buffer_find(addr);
+	if (WARN(!buf, "Freeing invalid buffer %pK\n", addr))
 		return;
 
 	buf->allocator->free(&args);
@@ -1282,8 +1394,8 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 					  int coherent_flag)
 {
 	struct page **pages;
-	int count = size >> PAGE_SHIFT;
-	int array_size = count * sizeof(struct page *);
+	size_t count = size >> PAGE_SHIFT;
+	size_t array_size = count * sizeof(struct page *);
 	int i = 0;
 	int order_idx = 0;
 
@@ -1402,16 +1514,18 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
  */
 static dma_addr_t
 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
-		       unsigned long attrs)
+			int coherent_flag)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	dma_addr_t dma_addr, iova;
 	int i;
+	int prot = IOMMU_READ | IOMMU_WRITE;
 
 	dma_addr = __alloc_iova(mapping, size);
 	if (dma_addr == ARM_MAPPING_ERROR)
 		return dma_addr;
+	prot |= coherent_flag ? IOMMU_CACHE : 0;
 
 	iova = dma_addr;
 	for (i = 0; i < count; ) {
@@ -1426,8 +1540,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
 				break;
 
 		len = (j - i) << PAGE_SHIFT;
-		ret = iommu_map(mapping->domain, iova, phys, len,
-				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
+		ret = iommu_map(mapping->domain, iova, phys, len, prot);
 		if (ret < 0)
 			goto fail;
 		iova += len;
@@ -1497,7 +1610,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
 	if (!addr)
 		return NULL;
 
-	*handle = __iommu_create_mapping(dev, &page, size, attrs);
+	*handle = __iommu_create_mapping(dev, &page, size, coherent_flag);
 	if (*handle == ARM_MAPPING_ERROR)
 		goto err_mapping;
 
@@ -1522,17 +1635,19 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
 	    int coherent_flag)
 {
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 	struct page **pages;
 	void *addr = NULL;
+	pgprot_t prot;
 
 	*handle = ARM_MAPPING_ERROR;
 	size = PAGE_ALIGN(size);
 
-	if (coherent_flag  == COHERENT || !gfpflags_allow_blocking(gfp))
+	if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
 		return __iommu_alloc_simple(dev, size, gfp, handle,
 					    coherent_flag, attrs);
 
+	coherent_flag = is_dma_coherent(dev, attrs, coherent_flag);
+	prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent_flag);
 	/*
 	 * Following is a work-around (a.k.a. hack) to prevent pages
 	 * with __GFP_COMP being passed to split_page() which cannot
@@ -1546,7 +1661,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	if (!pages)
 		return NULL;
 
-	*handle = __iommu_create_mapping(dev, pages, size, attrs);
+	*handle = __iommu_create_mapping(dev, pages, size, coherent_flag);
 	if (*handle == ARM_MAPPING_ERROR)
 		goto err_buffer;
 
@@ -1613,7 +1728,8 @@ static int arm_iommu_mmap_attrs(struct device *dev,
 		struct vm_area_struct *vma, void *cpu_addr,
 		dma_addr_t dma_addr, size_t size, unsigned long attrs)
 {
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+					is_dma_coherent(dev, attrs, NORMAL));
 
 	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
@@ -1658,7 +1774,8 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 void arm_iommu_free_attrs(struct device *dev, size_t size,
 		    void *cpu_addr, dma_addr_t handle, unsigned long attrs)
 {
-	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
+	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs,
+				is_dma_coherent(dev, attrs, NORMAL));
 }
 
 void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
@@ -1806,7 +1923,38 @@ int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
 int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
 		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
-	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
+	struct scatterlist *s;
+	int i;
+	size_t ret;
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	unsigned int total_length = 0, current_offset = 0;
+	dma_addr_t iova;
+	int prot = __dma_info_to_prot(dir, attrs);
+	bool coherent;
+
+	for_each_sg(sg, s, nents, i)
+		total_length += s->length;
+
+	iova = __alloc_iova(mapping, total_length);
+	if (iova == ARM_MAPPING_ERROR)
+		return 0;
+
+	coherent = of_dma_is_coherent(dev->of_node);
+	prot |= is_dma_coherent(dev, attrs, coherent) ? IOMMU_CACHE : 0;
+
+	ret = iommu_map_sg(mapping->domain, iova, sg, nents, prot);
+	if (ret != total_length) {
+		__free_iova(mapping, iova, total_length);
+		return 0;
+	}
+
+	for_each_sg(sg, s, nents, i) {
+		s->dma_address = iova + current_offset;
+		s->dma_length = total_length - current_offset;
+		current_offset += s->length;
+	}
+
+	return nents;
 }
 
 static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
@@ -1857,7 +2005,15 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 			enum dma_data_direction dir,
 			unsigned long attrs)
 {
-	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	unsigned int total_length = sg_dma_len(sg);
+	dma_addr_t iova = sg_dma_address(sg);
+
+	total_length = PAGE_ALIGN((iova & ~PAGE_MASK) + total_length);
+	iova &= PAGE_MASK;
+
+	iommu_unmap(mapping->domain, iova, total_length);
+	__free_iova(mapping, iova, total_length);
 }
 
 /**
@@ -1872,6 +2028,12 @@ void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 {
 	struct scatterlist *s;
 	int i;
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = sg_dma_address(sg);
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, iova);
+
+	if (iova_coherent)
+		return;
 
 	for_each_sg(sg, s, nents, i)
 		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
@@ -1891,6 +2053,13 @@ void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	struct scatterlist *s;
 	int i;
 
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = sg_dma_address(sg);
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, iova);
+
+	if (iova_coherent)
+		return;
+
 	for_each_sg(sg, s, nents, i)
 		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 }
@@ -1912,7 +2081,11 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t dma_addr;
-	int ret, prot, len = PAGE_ALIGN(size + offset);
+	int ret, prot, len, start_offset, map_offset;
+
+	map_offset = offset & ~PAGE_MASK;
+	start_offset = offset & PAGE_MASK;
+	len = PAGE_ALIGN(map_offset + size);
 
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == ARM_MAPPING_ERROR)
@@ -1920,11 +2093,12 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 
 	prot = __dma_info_to_prot(dir, attrs);
 
-	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
+	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page) +
+			start_offset, len, prot);
 	if (ret < 0)
 		goto fail;
 
-	return dma_addr + offset;
+	return dma_addr + map_offset;
 fail:
 	__free_iova(mapping, dma_addr, len);
 	return ARM_MAPPING_ERROR;
@@ -1944,7 +2118,8 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     unsigned long attrs)
 {
-	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+	if (!is_dma_coherent(dev, attrs, false) &&
+	      !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 
 	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
@@ -1967,9 +2142,6 @@ static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	int offset = handle & ~PAGE_MASK;
 	int len = PAGE_ALIGN(size + offset);
 
-	if (!iova)
-		return;
-
 	iommu_unmap(mapping->domain, iova, len);
 	__free_iova(mapping, iova, len);
 }
@@ -1995,7 +2167,8 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	if (!iova)
 		return;
 
-	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+	if (!(is_dma_coherent(dev, attrs, false) ||
+	      (attrs & DMA_ATTR_SKIP_CPU_SYNC)))
 		__dma_page_dev_to_cpu(page, offset, size, dir);
 
 	iommu_unmap(mapping->domain, iova, len);
@@ -2066,11 +2239,10 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 	dma_addr_t iova = handle & PAGE_MASK;
 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, handle);
 
-	if (!iova)
-		return;
-
-	__dma_page_dev_to_cpu(page, offset, size, dir);
+	if (!iova_coherent)
+		__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
 static void arm_iommu_sync_single_for_device(struct device *dev,
@@ -2080,11 +2252,10 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
 	dma_addr_t iova = handle & PAGE_MASK;
 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, handle);
 
-	if (!iova)
-		return;
-
-	__dma_page_cpu_to_dev(page, offset, size, dir);
+	if (!iova_coherent)
+		__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
 const struct dma_map_ops iommu_ops = {
@@ -2314,73 +2485,119 @@ void arm_iommu_detach_device(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
+/*
 static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
 {
 	return coherent ? &iommu_coherent_ops : &iommu_ops;
 }
+*/
 
 static void arm_iommu_dma_release_mapping(struct kref *kref)
 {
 	int i;
+	int is_fast = 0;
+	int s1_bypass = 0;
 	struct dma_iommu_mapping *mapping =
 			container_of(kref, struct dma_iommu_mapping, kref);
 
-	for (i = 0; i < mapping->nr_bitmaps; i++)
-		kfree(mapping->bitmaps[i]);
-	kfree(mapping->bitmaps);
+	if (!mapping)
+		return;
+
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
+			&s1_bypass);
+
+	if (is_fast) {
+		fast_smmu_put_dma_cookie(mapping->domain);
+	} else if (!s1_bypass) {
+		for (i = 0; i < mapping->nr_bitmaps; i++)
+			kfree(mapping->bitmaps[i]);
+		kfree(mapping->bitmaps);
+	}
+
 	kfree(mapping);
 }
 
-struct dma_iommu_mapping *
-arm_iommu_dma_init_mapping(dma_addr_t base, u64 size)
+static int
+iommu_init_mapping(struct device *dev, struct dma_iommu_mapping *mapping)
 {
-	unsigned int bits = size >> PAGE_SHIFT;
-	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
-	struct dma_iommu_mapping *mapping;
+	unsigned int bitmap_size = BITS_TO_LONGS(mapping->bits) * sizeof(long);
 	int extensions = 1;
 	int err = -ENOMEM;
 
-	/* currently only 32-bit DMA address space is supported */
-	if (size > DMA_BIT_MASK(32) + 1)
-		return ERR_PTR(-ERANGE);
-
 	if (!bitmap_size)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (bitmap_size > PAGE_SIZE) {
 		extensions = bitmap_size / PAGE_SIZE;
 		bitmap_size = PAGE_SIZE;
 	}
 
-	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
-	if (!mapping)
-		goto err;
-
 	mapping->bitmap_size = bitmap_size;
 	mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
 				   GFP_KERNEL);
 	if (!mapping->bitmaps)
-		goto err2;
+		goto err;
 
 	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
 	if (!mapping->bitmaps[0])
-		goto err3;
+		goto err2;
 
 	mapping->nr_bitmaps = 1;
 	mapping->extensions = extensions;
-	mapping->base = base;
 	mapping->bits = BITS_PER_BYTE * bitmap_size;
+	mapping->ops = &iommu_ops;
 
 	spin_lock_init(&mapping->lock);
+	return 0;
+err2:
+	kfree(mapping->bitmaps);
+err:
+	return err;
+}
+
+struct dma_iommu_mapping *
+arm_iommu_dma_init_mapping(struct device *dev, dma_addr_t base, u64 size,
+		struct iommu_domain *domain)
+{
+	unsigned int bits = size >> PAGE_SHIFT;
+	struct dma_iommu_mapping *mapping;
+	int err = 0;
+	int is_fast = 0;
+	int s1_bypass = 0;
+
+	if (!bits)
+		return ERR_PTR(-EINVAL);
+
+	/* currently only 32-bit DMA address space is supported */
+	if (size > DMA_BIT_MASK(32) + 1)
+		return ERR_PTR(-ERANGE);
+
+	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
+	if (!mapping)
+		return ERR_PTR(-ENOMEM);
+
+	mapping->base = base;
+	mapping->bits = bits;
+	mapping->domain = domain;
+
+	iommu_domain_get_attr(domain, DOMAIN_ATTR_FAST, &is_fast);
+	iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
+
+	if (is_fast)
+		err = fast_smmu_init_mapping(dev, mapping);
+	else if (s1_bypass)
+		mapping->ops = arm_get_dma_map_ops(dev->archdata.dma_coherent);
+	else
+		err = iommu_init_mapping(dev, mapping);
+
+	if (err) {
+		kfree(mapping);
+		return ERR_PTR(err);
+	}
 
 	kref_init(&mapping->kref);
 	return mapping;
-err3:
-	kfree(mapping->bitmaps);
-err2:
-	kfree(mapping);
-err:
-	return ERR_PTR(err);
 }
 
 /*
@@ -2450,14 +2667,13 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		return false;
 	}
 
-	mapping = arm_iommu_dma_init_mapping(dma_base, size);
+	mapping = arm_iommu_dma_init_mapping(dev, dma_base, size, domain);
 	if (IS_ERR(mapping)) {
 		pr_warn("Failed to initialize %llu-byte IOMMU mapping for device %s\n",
 				size, dev_name(dev));
 		return false;
 	}
 
-	mapping->domain = domain;
 	kref_get(&mapping->kref);
 	to_dma_iommu_mapping(dev) = mapping;
 
@@ -2502,7 +2718,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 {
 	const struct dma_map_ops *dma_ops;
 	struct dma_iommu_mapping *mapping;
-	int s1_bypass = 0;
 
 	dev->archdata.dma_coherent = coherent;
 
@@ -2516,15 +2731,10 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 
 	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu)) {
 		mapping = to_dma_iommu_mapping(dev);
-		if (mapping)
-			iommu_domain_get_attr(mapping->domain,
-				DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
-		if (s1_bypass)
-			dma_ops = arm_get_dma_map_ops(coherent);
-		else
-			dma_ops = arm_get_iommu_dma_map_ops(coherent);
-	} else
+		dma_ops = mapping->ops;
+	} else {
 		dma_ops = arm_get_dma_map_ops(coherent);
+	}
 
 	set_dma_ops(dev, dma_ops);
 
@@ -2536,6 +2746,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 #endif
 	dev->archdata.dma_ops_setup = true;
 }
+EXPORT_SYMBOL(arch_setup_dma_ops);
 
 void arch_teardown_dma_ops(struct device *dev)
 {
diff --git a/arch/arm/mm/dma.h b/arch/arm/mm/dma.h
index aaef64b..bdeb5c1 100644
--- a/arch/arm/mm/dma.h
+++ b/arch/arm/mm/dma.h
@@ -5,8 +5,6 @@
 #include <asm/glue-cache.h>
 
 #ifndef MULTI_CACHE
-#define dmac_map_area			__glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area 		__glue(_CACHE,_dma_unmap_area)
 
 /*
  * These are private to the dma-mapping API.  Do not use directly.
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index e1d330a2..5d85f59 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -496,6 +496,56 @@ static void __init free_highpages(void)
 #endif
 }
 
+#define MLK(b, t) (b), (t), (((t) - (b)) >> 10)
+#define MLM(b, t) (b), (t), (((t) - (b)) >> 20)
+#define MLK_ROUNDUP(b, t) (b), (t), (DIV_ROUND_UP(((t) - (b)), SZ_1K))
+
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+static void print_vmalloc_lowmem_info(void)
+{
+	struct memblock_region *reg, *prev_reg = NULL;
+
+	pr_notice(
+		"	   vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n",
+		MLM((unsigned long)high_memory, VMALLOC_END));
+
+	for_each_memblock_rev(memory, reg) {
+		phys_addr_t start_phys = reg->base;
+		phys_addr_t end_phys = reg->base + reg->size;
+
+		if (start_phys > arm_lowmem_limit)
+			continue;
+
+		if (end_phys > arm_lowmem_limit)
+			end_phys = arm_lowmem_limit;
+
+		if (prev_reg == NULL) {
+			prev_reg = reg;
+
+			pr_notice(
+			"	   lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n",
+			MLM((unsigned long)__va(start_phys),
+			(unsigned long)__va(end_phys)));
+
+			continue;
+		}
+
+		pr_notice(
+		"	   vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n",
+		MLM((unsigned long)__va(end_phys),
+		(unsigned long)__va(prev_reg->base)));
+
+
+		pr_notice(
+		"	   lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n",
+		MLM((unsigned long)__va(start_phys),
+		(unsigned long)__va(end_phys)));
+
+		prev_reg = reg;
+	}
+}
+#endif
+
 /*
  * mem_init() marks the free areas in the mem_map and tells us how much
  * memory is free.  This is done after various parts of the system have
@@ -524,9 +574,6 @@ void __init mem_init(void)
 
 	mem_init_print_info(NULL);
 
-#define MLK(b, t) b, t, ((t) - (b)) >> 10
-#define MLM(b, t) b, t, ((t) - (b)) >> 20
-#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
 
 	pr_notice("Virtual kernel memory layout:\n"
 			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
@@ -534,28 +581,33 @@ void __init mem_init(void)
 			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #endif
-			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-#ifdef CONFIG_HIGHMEM
-			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-#endif
-#ifdef CONFIG_MODULES
-			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-#endif
-			"      .text : 0x%p" " - 0x%p" "   (%4td kB)\n"
-			"      .init : 0x%p" " - 0x%p" "   (%4td kB)\n"
-			"      .data : 0x%p" " - 0x%p" "   (%4td kB)\n"
-			"       .bss : 0x%p" " - 0x%p" "   (%4td kB)\n",
-
+			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n",
 			MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
 #ifdef CONFIG_HAVE_TCM
 			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
 			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
 #endif
-			MLK(FIXADDR_START, FIXADDR_END),
+			MLK(FIXADDR_START, FIXADDR_END));
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+	print_vmalloc_lowmem_info();
+#else
+	pr_notice(
+		   "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+		   "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n",
 			MLM(VMALLOC_START, VMALLOC_END),
-			MLM(PAGE_OFFSET, (unsigned long)high_memory),
+			MLM(PAGE_OFFSET, (unsigned long)high_memory));
+#endif
+	pr_notice(
+#ifdef CONFIG_HIGHMEM
+		   "    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+#endif
+#ifdef CONFIG_MODULES
+		   "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+#endif
+		   "      .text : 0x%pK - 0x%pK   (%4d kB)\n"
+		   "      .init : 0x%pK - 0x%pK   (%4d kB)\n"
+		   "      .data : 0x%pK - 0x%pK   (%4d kB)\n"
+		   "       .bss : 0x%pK - 0x%pK   (%4d kB)\n",
 #ifdef CONFIG_HIGHMEM
 			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
 				(PAGE_SIZE)),
@@ -569,10 +621,6 @@ void __init mem_init(void)
 			MLK_ROUNDUP(_sdata, _edata),
 			MLK_ROUNDUP(__bss_start, __bss_stop));
 
-#undef MLK
-#undef MLM
-#undef MLK_ROUNDUP
-
 	/*
 	 * Check boundaries twice: Some fundamental inconsistencies can
 	 * be detected at build time already.
@@ -588,6 +636,10 @@ void __init mem_init(void)
 #endif
 }
 
+#undef MLK
+#undef MLM
+#undef MLK_ROUNDUP
+
 #ifdef CONFIG_STRICT_KERNEL_RWX
 struct section_perm {
 	const char *name;
@@ -596,6 +648,9 @@ struct section_perm {
 	pmdval_t mask;
 	pmdval_t prot;
 	pmdval_t clear;
+	pteval_t ptemask;
+	pteval_t pteprot;
+	pteval_t pteclear;
 };
 
 /* First section-aligned location at or after __start_rodata. */
@@ -609,6 +664,8 @@ static struct section_perm nx_perms[] = {
 		.end	= (unsigned long)_stext,
 		.mask	= ~PMD_SECT_XN,
 		.prot	= PMD_SECT_XN,
+		.ptemask = ~L_PTE_XN,
+		.pteprot = L_PTE_XN,
 	},
 	/* Make init RW (set NX). */
 	{
@@ -617,6 +674,8 @@ static struct section_perm nx_perms[] = {
 		.end	= (unsigned long)_sdata,
 		.mask	= ~PMD_SECT_XN,
 		.prot	= PMD_SECT_XN,
+		.ptemask = ~L_PTE_XN,
+		.pteprot = L_PTE_XN,
 	},
 	/* Make rodata NX (set RO in ro_perms below). */
 	{
@@ -625,6 +684,8 @@ static struct section_perm nx_perms[] = {
 		.end    = (unsigned long)__init_begin,
 		.mask   = ~PMD_SECT_XN,
 		.prot   = PMD_SECT_XN,
+		.ptemask = ~L_PTE_XN,
+		.pteprot = L_PTE_XN,
 	},
 };
 
@@ -642,6 +703,8 @@ static struct section_perm ro_perms[] = {
 		.prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
 		.clear  = PMD_SECT_AP_WRITE,
 #endif
+		.ptemask = ~L_PTE_RDONLY,
+		.pteprot = L_PTE_RDONLY,
 	},
 };
 
@@ -650,6 +713,35 @@ static struct section_perm ro_perms[] = {
  * copied into each mm). During startup, this is the init_mm. Is only
  * safe to be called with preemption disabled, as under stop_machine().
  */
+struct pte_data {
+	pteval_t mask;
+	pteval_t val;
+};
+
+static int __pte_update(pte_t *ptep, pgtable_t token, unsigned long addr,
+			void *d)
+{
+	struct pte_data *data = d;
+	pte_t pte = *ptep;
+
+	pte = __pte((pte_val(*ptep) & data->mask) | data->val);
+	set_pte_ext(ptep, pte, 0);
+
+	return 0;
+}
+
+static inline void pte_update(unsigned long addr, pteval_t mask,
+				  pteval_t prot, struct mm_struct *mm)
+{
+	struct pte_data data;
+
+	data.mask = mask;
+	data.val = prot;
+
+	apply_to_page_range(mm, addr, SECTION_SIZE, __pte_update, &data);
+	flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
+}
+
 static inline void section_update(unsigned long addr, pmdval_t mask,
 				  pmdval_t prot, struct mm_struct *mm)
 {
@@ -698,11 +790,21 @@ void set_section_perms(struct section_perm *perms, int n, bool set,
 
 		for (addr = perms[i].start;
 		     addr < perms[i].end;
-		     addr += SECTION_SIZE)
-			section_update(addr, perms[i].mask,
-				set ? perms[i].prot : perms[i].clear, mm);
-	}
+		     addr += SECTION_SIZE) {
+			pmd_t *pmd;
 
+			pmd = pmd_offset(pud_offset(pgd_offset(mm, addr),
+						addr), addr);
+			if (pmd_bad(*pmd))
+				section_update(addr, perms[i].mask,
+					set ? perms[i].prot : perms[i].clear,
+					mm);
+			else
+				pte_update(addr, perms[i].ptemask,
+				     set ? perms[i].pteprot : perms[i].pteclear,
+				     mm);
+		}
+	}
 }
 
 /**
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 5bf9443..4aa04a7 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -92,7 +92,8 @@ void __init add_static_vm_early(struct static_vm *svm)
 	void *vaddr;
 
 	vm = &svm->vm;
-	vm_area_add_early(vm);
+	if (!vm_area_check_early(vm))
+		vm_area_add_early(vm);
 	vaddr = vm->addr;
 
 	list_for_each_entry(curr_svm, &static_vmlist, list) {
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 70e560c..047ee14 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1447,12 +1447,21 @@ static void __init map_lowmem(void)
 	struct memblock_region *reg;
 	phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
 	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+	struct static_vm *svm;
+	phys_addr_t start;
+	phys_addr_t end;
+	unsigned long vaddr;
+	unsigned long pfn;
+	unsigned long length;
+	unsigned int type;
+	int nr = 0;
 
 	/* Map all the lowmem memory banks. */
 	for_each_memblock(memory, reg) {
-		phys_addr_t start = reg->base;
-		phys_addr_t end = start + reg->size;
 		struct map_desc map;
+		start = reg->base;
+		end = start + reg->size;
+		nr++;
 
 		if (memblock_is_nomap(reg))
 			continue;
@@ -1504,6 +1513,34 @@ static void __init map_lowmem(void)
 			}
 		}
 	}
+	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
+
+	for_each_memblock(memory, reg) {
+		struct vm_struct *vm;
+
+		start = reg->base;
+		end = start + reg->size;
+
+		if (end > arm_lowmem_limit)
+			end = arm_lowmem_limit;
+		if (start >= end)
+			break;
+
+		vm = &svm->vm;
+		pfn = __phys_to_pfn(start);
+		vaddr = __phys_to_virt(start);
+		length = end - start;
+		type = MT_MEMORY_RW;
+
+		vm->addr = (void *)(vaddr & PAGE_MASK);
+		vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
+		vm->phys_addr = __pfn_to_phys(pfn);
+		vm->flags = VM_LOWMEM;
+		vm->flags |= VM_ARM_MTYPE(type);
+		vm->caller = map_lowmem;
+		add_static_vm_early(svm++);
+		mark_vmalloc_reserved_area(vm->addr, vm->size);
+	}
 }
 
 #ifdef CONFIG_ARM_PV_FIXUP
@@ -1602,6 +1639,119 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
 
 #endif
 
+#ifdef CONFIG_FORCE_PAGES
+/*
+ * remap a PMD into pages
+ * We split a single pmd here none of this two pmd nonsense
+ */
+static noinline void __init split_pmd(pmd_t *pmd, unsigned long addr,
+				unsigned long end, unsigned long pfn,
+				const struct mem_type *type)
+{
+	pte_t *pte, *start_pte;
+	pmd_t *base_pmd;
+
+	base_pmd = pmd_offset(
+			pud_offset(pgd_offset(&init_mm, addr), addr), addr);
+
+	if (pmd_none(*base_pmd) || pmd_bad(*base_pmd)) {
+		start_pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+#ifndef CONFIG_ARM_LPAE
+		/*
+		 * Following is needed when new pte is allocated for pmd[1]
+		 * cases, which may happen when base (start) address falls
+		 * under pmd[1].
+		 */
+		if (addr & SECTION_SIZE)
+			start_pte += pte_index(addr);
+#endif
+	} else {
+		start_pte = pte_offset_kernel(base_pmd, addr);
+	}
+
+	pte = start_pte;
+
+	do {
+		set_pte_ext(pte, pfn_pte(pfn, type->prot_pte), 0);
+		pfn++;
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+
+	*pmd = __pmd((__pa(start_pte) + PTE_HWTABLE_OFF) | type->prot_l1);
+	mb(); /* let pmd be programmed */
+	flush_pmd_entry(pmd);
+	flush_tlb_all();
+}
+
+/*
+ * It's significantly easier to remap as pages later, after all memory is
+ * mapped. Everything is sections, so all we have to do is split them.
+ */
+static void __init remap_pages(void)
+{
+	struct memblock_region *reg;
+
+	for_each_memblock(memory, reg) {
+		phys_addr_t phys_start = reg->base;
+		phys_addr_t phys_end = reg->base + reg->size;
+		unsigned long addr = (unsigned long)__va(phys_start);
+		unsigned long end = (unsigned long)__va(phys_end);
+		pmd_t *pmd = NULL;
+		unsigned long next;
+		unsigned long pfn = __phys_to_pfn(phys_start);
+		bool fixup = false;
+		unsigned long saved_start = addr;
+
+		if (phys_start > arm_lowmem_limit)
+			break;
+		if (phys_end > arm_lowmem_limit)
+			end = (unsigned long)__va(arm_lowmem_limit);
+		if (phys_start >= phys_end)
+			break;
+
+		pmd = pmd_offset(
+			pud_offset(pgd_offset(&init_mm, addr), addr), addr);
+
+#ifndef	CONFIG_ARM_LPAE
+		if (addr & SECTION_SIZE) {
+			fixup = true;
+			pmd_empty_section_gap((addr - SECTION_SIZE) & PMD_MASK);
+			pmd++;
+		}
+
+		if (end & SECTION_SIZE)
+			pmd_empty_section_gap(end);
+#endif
+
+		do {
+			next = addr + SECTION_SIZE;
+
+			if (pmd_none(*pmd) || pmd_bad(*pmd))
+				split_pmd(pmd, addr, next, pfn,
+						&mem_types[MT_MEMORY_RWX]);
+			pmd++;
+			pfn += SECTION_SIZE >> PAGE_SHIFT;
+
+		} while (addr = next, addr < end);
+
+		if (fixup) {
+			/*
+			 * Put a faulting page table here to avoid detecting no
+			 * pmd when accessing an odd section boundary. This
+			 * needs to be faulting to help catch errors and avoid
+			 * speculation
+			 */
+			pmd = pmd_off_k(saved_start);
+			pmd[0] = pmd[1] & ~1;
+		}
+	}
+}
+#else
+static void __init remap_pages(void)
+{
+
+}
+#endif
+
 static void __init early_fixmap_shutdown(void)
 {
 	int i;
@@ -1644,6 +1794,7 @@ void __init paging_init(const struct machine_desc *mdesc)
 	memblock_set_current_limit(arm_lowmem_limit);
 	dma_contiguous_remap();
 	early_fixmap_shutdown();
+	remap_pages();
 	devicemaps_init(mdesc);
 	kmap_init();
 	tcm_init();
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index 1403cb4..365dc9d 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2017-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -56,7 +56,8 @@ static int change_memory_common(unsigned long addr, int numpages,
 	if (!size)
 		return 0;
 
-	if (!in_range(start, size, MODULES_VADDR, MODULES_END) &&
+	if (!IS_ENABLED(CONFIG_FORCE_PAGES) &&
+	    !in_range(start, size, MODULES_VADDR, MODULES_END) &&
 	    !in_range(start, size, VMALLOC_START, VMALLOC_END))
 		return -EINVAL;
 
@@ -97,3 +98,19 @@ int set_memory_x(unsigned long addr, int numpages)
 					__pgprot(0),
 					__pgprot(L_PTE_XN));
 }
+
+#ifdef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
+void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	unsigned long addr;
+
+	if (PageHighMem(page))
+		return;
+
+	addr = (unsigned long) page_address(page);
+	if (enable)
+		set_memory_rw(addr, numpages);
+	else
+		set_memory_ro(addr, numpages);
+}
+#endif
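
Note: the CONFIG_FORCE_PAGES relaxation in change_memory_common() is what makes
the DEBUG_PAGEALLOC hook above workable: once the linear map is split into
pages, set_memory_ro()/set_memory_rw() may target lowmem addresses instead of
failing with -EINVAL. A hedged usage sketch, assuming CONFIG_FORCE_PAGES=y; the
function name is illustrative only:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/set_memory.h>

static int demo_protect_lowmem_page(void)
{
	unsigned long addr = __get_free_page(GFP_KERNEL);

	if (!addr)
		return -ENOMEM;

	/* allowed on the linear map once CONFIG_FORCE_PAGES has split it */
	set_memory_ro(addr, 1);
	set_memory_rw(addr, 1);	/* restore before freeing */
	free_page(addr);
	return 0;
}
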
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index f91b7d1..7ae180d 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -182,6 +182,14 @@
 	  This enables support for the BENGAL chipset. If you do not
 	  wish to build a kernel that runs on this chipset, say 'N' here.
 
+config ARCH_SCUBA
+	bool "Enable Support for Qualcomm Technologies, Inc. SCUBA"
+	depends on ARCH_QCOM
+	select COMMON_CLK_QCOM
+	help
+	  This enables support for the SCUBA chipset. If you do not
+	  wish to build a kernel that runs on this chipset, say 'N' here.
+
 config ARCH_REALTEK
 	bool "Realtek Platforms"
 	help
diff --git a/arch/arm64/configs/vendor/bengal-perf_defconfig b/arch/arm64/configs/vendor/bengal-perf_defconfig
index 463d4e7..1ab315e 100644
--- a/arch/arm64/configs/vendor/bengal-perf_defconfig
+++ b/arch/arm64/configs/vendor/bengal-perf_defconfig
@@ -1,3 +1,4 @@
+CONFIG_LOCALVERSION="-perf"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_AUDIT=y
 # CONFIG_AUDITSYSCALL is not set
@@ -248,8 +249,11 @@
 CONFIG_QRTR_SMD=y
 CONFIG_SOCKEV_NLMCAST=y
 CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM_WCN3990=y
 CONFIG_CFG80211=y
 CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
 CONFIG_FW_LOADER_USER_HELPER=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
 CONFIG_REGMAP_WCD_IRQ=y
@@ -276,6 +280,7 @@
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
@@ -318,6 +323,7 @@
 CONFIG_HW_RANDOM_MSM_LEGACY=y
 CONFIG_DIAG_CHAR=y
 CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_QCOM_GENI=y
 CONFIG_SPI=y
@@ -356,6 +362,7 @@
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
 CONFIG_REGULATOR_QCOM_SMD_RPM=y
 CONFIG_REGULATOR_QPNP_LCDB=y
 CONFIG_REGULATOR_RPM_SMD=y
@@ -363,11 +370,15 @@
 CONFIG_REGULATOR_PM8008=y
 CONFIG_MEDIA_SUPPORT=y
 CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_VIDEO_ADV_DEBUG=y
 CONFIG_VIDEO_FIXED_MINOR_RANGES=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_SW=y
 CONFIG_V4L_TEST_DRIVERS=y
 CONFIG_VIDEO_VIM2M=y
 CONFIG_VIDEO_VICODEC=y
@@ -443,6 +454,7 @@
 CONFIG_LEDS_QTI_TRI_LED=y
 CONFIG_LEDS_QPNP_FLASH_V2=y
 CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
+CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_PM8XXX=y
 CONFIG_DMADEVICES=y
@@ -452,6 +464,7 @@
 CONFIG_STAGING=y
 CONFIG_ASHMEM=y
 CONFIG_ION=y
+CONFIG_ION_POOL_AUTO_REFILL=y
 CONFIG_QPNP_REVID=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
@@ -460,6 +473,7 @@
 CONFIG_RMNET_IPA3=y
 CONFIG_RNDIS_IPA=y
 CONFIG_IPA_UT=y
+CONFIG_USB_BAM=y
 CONFIG_QCOM_GENI_SE=y
 CONFIG_QCOM_CLK_SMD_RPM=y
 CONFIG_SPMI_PMIC_CLKDIV=y
@@ -506,6 +520,7 @@
 CONFIG_QCOM_DCC_V2=y
 CONFIG_QCOM_EUD=y
 CONFIG_QCOM_MINIDUMP=y
+CONFIG_QCOM_FSA4480_I2C=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QCOM_BUS_SCALING=y
@@ -516,14 +531,23 @@
 CONFIG_QCOM_SMCINVOKE=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
+CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QTEE_SHM_BRIDGE=y
 CONFIG_MEM_SHARE_QMI_SERVICE=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_QCOM_CDSP_RM=y
 CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
+CONFIG_QCOM_CX_IPEAK=y
 CONFIG_ICNSS=y
 CONFIG_ICNSS_QMI=y
 CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
 CONFIG_ARM_QCOM_DEVFREQ_FW=y
+CONFIG_DEVFREQ_SIMPLE_DEV=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_IIO=y
 CONFIG_QCOM_SPMI_ADC5=y
 CONFIG_PWM=y
@@ -531,12 +555,14 @@
 CONFIG_ARM_GIC_V3_ACL=y
 CONFIG_QCOM_MPM=y
 CONFIG_PHY_XGENE=y
+CONFIG_QCOM_L2_COUNTERS=y
 CONFIG_RAS=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
+# CONFIG_NVMEM_SYSFS is not set
 CONFIG_QCOM_QFPROM=y
 CONFIG_NVMEM_SPMI_SDAM=y
-CONFIG_SLIMBUS=y
+CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_QCOM_KGSL=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
@@ -594,6 +620,7 @@
 CONFIG_CORESIGHT_CTI=y
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_TPDM_DEFAULT_ENABLE=y
 CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
diff --git a/arch/arm64/configs/vendor/bengal_defconfig b/arch/arm64/configs/vendor/bengal_defconfig
index e60f8fd..9edbc5f8 100644
--- a/arch/arm64/configs/vendor/bengal_defconfig
+++ b/arch/arm64/configs/vendor/bengal_defconfig
@@ -46,6 +46,7 @@
 CONFIG_PROFILING=y
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_BENGAL=y
+CONFIG_ARCH_SCUBA=y
 CONFIG_PCI=y
 CONFIG_SCHED_MC=y
 CONFIG_NR_CPUS=8
@@ -103,6 +104,7 @@
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_CLEANCACHE=y
 CONFIG_CMA=y
+CONFIG_CMA_DEBUG=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_CMA_ALLOW_WRITE_DEBUGFS=y
 CONFIG_ZSMALLOC=y
@@ -255,8 +257,11 @@
 CONFIG_QRTR_SMD=y
 CONFIG_SOCKEV_NLMCAST=y
 CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM_WCN3990=y
 CONFIG_CFG80211=y
 CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
 CONFIG_FW_LOADER_USER_HELPER=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
 CONFIG_REGMAP_WCD_IRQ=y
@@ -285,6 +290,7 @@
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
@@ -329,6 +335,7 @@
 CONFIG_HW_RANDOM_MSM_LEGACY=y
 CONFIG_DIAG_CHAR=y
 CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_QCOM_GENI=y
 CONFIG_SPI=y
@@ -337,6 +344,7 @@
 CONFIG_SPMI=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_PINCTRL_BENGAL=y
+CONFIG_PINCTRL_SCUBA=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_POWER_RESET_XGENE=y
@@ -367,6 +375,7 @@
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
 CONFIG_REGULATOR_QCOM_SMD_RPM=y
 CONFIG_REGULATOR_QPNP_LCDB=y
 CONFIG_REGULATOR_RPM_SMD=y
@@ -374,11 +383,15 @@
 CONFIG_REGULATOR_PM8008=y
 CONFIG_MEDIA_SUPPORT=y
 CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_VIDEO_ADV_DEBUG=y
 CONFIG_VIDEO_FIXED_MINOR_RANGES=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_SW=y
 CONFIG_V4L_TEST_DRIVERS=y
 CONFIG_VIDEO_VIM2M=y
 CONFIG_VIDEO_VICODEC=y
@@ -454,6 +467,7 @@
 CONFIG_LEDS_QTI_TRI_LED=y
 CONFIG_LEDS_QPNP_FLASH_V2=y
 CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
+CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_EDAC=y
 CONFIG_EDAC_CORTEX_ARM64=y
 CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY=y
@@ -468,6 +482,7 @@
 CONFIG_STAGING=y
 CONFIG_ASHMEM=y
 CONFIG_ION=y
+CONFIG_ION_POOL_AUTO_REFILL=y
 CONFIG_QPNP_REVID=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
@@ -476,6 +491,7 @@
 CONFIG_RMNET_IPA3=y
 CONFIG_RNDIS_IPA=y
 CONFIG_IPA_UT=y
+CONFIG_USB_BAM=y
 CONFIG_QCOM_GENI_SE=y
 CONFIG_QCOM_CLK_SMD_RPM=y
 CONFIG_SPMI_PMIC_CLKDIV=y
@@ -489,6 +505,7 @@
 CONFIG_MSM_QMP=y
 CONFIG_IOMMU_IO_PGTABLE_FAST=y
 CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_TLBSYNC_DEBUG=y
 CONFIG_ARM_SMMU_TESTBUS_DUMP=y
 CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_IOMMU_DEBUG=y
@@ -527,6 +544,7 @@
 CONFIG_MSM_GLADIATOR_HANG_DETECT=y
 CONFIG_MSM_GLADIATOR_ERP=y
 CONFIG_PANIC_ON_GLADIATOR_ERROR=y
+CONFIG_QCOM_FSA4480_I2C=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QCOM_BUS_SCALING=y
@@ -537,15 +555,24 @@
 CONFIG_QCOM_SMCINVOKE=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
+CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QTEE_SHM_BRIDGE=y
 CONFIG_MEM_SHARE_QMI_SERVICE=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_QCOM_CDSP_RM=y
 CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
+CONFIG_QCOM_CX_IPEAK=y
 CONFIG_ICNSS=y
 CONFIG_ICNSS_DEBUG=y
 CONFIG_ICNSS_QMI=y
 CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
 CONFIG_ARM_QCOM_DEVFREQ_FW=y
+CONFIG_DEVFREQ_SIMPLE_DEV=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_IIO=y
 CONFIG_QCOM_SPMI_ADC5=y
 CONFIG_PWM=y
@@ -553,12 +580,14 @@
 CONFIG_ARM_GIC_V3_ACL=y
 CONFIG_QCOM_MPM=y
 CONFIG_PHY_XGENE=y
+CONFIG_QCOM_L2_COUNTERS=y
 CONFIG_RAS=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
+# CONFIG_NVMEM_SYSFS is not set
 CONFIG_QCOM_QFPROM=y
 CONFIG_NVMEM_SPMI_SDAM=y
-CONFIG_SLIMBUS=y
+CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_QCOM_KGSL=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
@@ -600,7 +629,6 @@
 CONFIG_CRYPTO_DEV_QCEDEV=y
 CONFIG_CRYPTO_DEV_QCOM_ICE=y
 CONFIG_XZ_DEC=y
-CONFIG_STACK_HASH_ORDER_SHIFT=12
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_MODULE_LOAD_INFO=y
@@ -611,6 +639,8 @@
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SLUB_DEBUG_PANIC_ON=y
+CONFIG_DEBUG_PANIC_ON_OOM=y
 CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
 CONFIG_PAGE_POISONING=y
 CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index 00f57a7..504b11e 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -740,6 +740,7 @@
 CONFIG_SCHEDSTATS=y
 CONFIG_SCHED_STACK_END_CHECK=y
 CONFIG_PROVE_LOCKING=y
+CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_LOCK_TORTURE_TEST=m
 CONFIG_DEBUG_SG=y
diff --git a/arch/arm64/configs/vendor/lito-perf_defconfig b/arch/arm64/configs/vendor/lito-perf_defconfig
index 91147d7..12744b0e 100644
--- a/arch/arm64/configs/vendor/lito-perf_defconfig
+++ b/arch/arm64/configs/vendor/lito-perf_defconfig
@@ -255,11 +255,13 @@
 CONFIG_DNS_RESOLVER=y
 CONFIG_QRTR=y
 CONFIG_QRTR_SMD=y
+CONFIG_QRTR_MHI=y
 CONFIG_SOCKEV_NLMCAST=y
 CONFIG_BT=y
 CONFIG_MSM_BT_POWER=y
 CONFIG_BTFM_SLIM_WCN3990=y
 CONFIG_CFG80211=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
 CONFIG_RFKILL=y
 CONFIG_NFC_NQ=y
 CONFIG_FW_LOADER_USER_HELPER=y
@@ -267,6 +269,7 @@
 CONFIG_REGMAP_WCD_IRQ=y
 CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
 CONFIG_DMA_CMA=y
+CONFIG_MHI_BUS=y
 CONFIG_ZRAM=y
 CONFIG_ZRAM_DEDUP=y
 CONFIG_BLK_DEV_LOOP=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index 2d828d0..e373833 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -261,11 +261,13 @@
 CONFIG_DNS_RESOLVER=y
 CONFIG_QRTR=y
 CONFIG_QRTR_SMD=y
+CONFIG_QRTR_MHI=y
 CONFIG_SOCKEV_NLMCAST=y
 CONFIG_BT=y
 CONFIG_MSM_BT_POWER=y
 CONFIG_BTFM_SLIM_WCN3990=y
 CONFIG_CFG80211=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
 CONFIG_RFKILL=y
 CONFIG_NFC_NQ=y
 CONFIG_FW_LOADER_USER_HELPER=y
@@ -273,6 +275,7 @@
 CONFIG_REGMAP_WCD_IRQ=y
 CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
 CONFIG_DMA_CMA=y
+CONFIG_MHI_BUS=y
 CONFIG_ZRAM=y
 CONFIG_ZRAM_DEDUP=y
 CONFIG_BLK_DEV_LOOP=y
@@ -706,6 +709,7 @@
 CONFIG_SCHEDSTATS=y
 CONFIG_SCHED_STACK_END_CHECK=y
 CONFIG_PROVE_LOCKING=y
+CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_LOCK_TORTURE_TEST=m
 CONFIG_DEBUG_SG=y
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index a031f04..95f139a 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -262,19 +262,12 @@ static void show_data(unsigned long addr, int nbytes, const char *name)
 static void show_extra_register_data(struct pt_regs *regs, int nbytes)
 {
 	mm_segment_t fs;
-	unsigned int i;
 
 	fs = get_fs();
 	set_fs(KERNEL_DS);
 	show_data(regs->pc - nbytes, nbytes * 2, "PC");
 	show_data(regs->regs[30] - nbytes, nbytes * 2, "LR");
 	show_data(regs->sp - nbytes, nbytes * 2, "SP");
-	for (i = 0; i < 30; i++) {
-		char name[4];
-
-		snprintf(name, sizeof(name), "X%u", i);
-		show_data(regs->regs[i] - nbytes, nbytes * 2, name);
-	}
 	set_fs(fs);
 }
 
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index cd14196..583c4fa 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -63,6 +63,7 @@
 #include <soc/qcom/minidump.h>
 
 #include <soc/qcom/scm.h>
+#include <soc/qcom/lpm_levels.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
@@ -955,6 +956,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 void smp_send_reschedule(int cpu)
 {
 	BUG_ON(cpu_is_offline(cpu));
+	update_ipi_history(cpu);
 	smp_cross_call_common(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 89c87a6..e99c8b6 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -333,7 +333,16 @@ static void __init update_memory_limit(void)
 	phys_addr_t min_ddr_sz = 0, offline_sz = 0;
 	int t_len = (2 * dt_root_size_cells) * sizeof(__be32);
 
-	ram_sz = memblock_phys_mem_size();
+	if (memory_limit == PHYS_ADDR_MAX) {
+		ram_sz = memblock_phys_mem_size();
+	} else if (IS_ALIGNED(memory_limit, MIN_MEMORY_BLOCK_SIZE)) {
+		ram_sz = memory_limit;
+	} else {
+		WARN(1, "mem-offline is not supported for DDR size %lld\n",
+				memory_limit);
+		return;
+	}
+
 	node = of_get_flat_dt_subnode_by_name(dt_root, "mem-offline");
 	if (node == -FDT_ERR_NOTFOUND) {
 		pr_err("mem-offine node not found in FDT\n");
@@ -486,12 +495,16 @@ void __init arm64_memblock_init(void)
 		memblock_remove(0, memstart_addr);
 	}
 
-	update_memory_limit();
 	/*
 	 * Save bootloader imposed memory limit before we overwrite
 	 * memblock.
 	 */
-	bootloader_memory_limit = memblock_end_of_DRAM();
+	if (memory_limit == PHYS_ADDR_MAX)
+		bootloader_memory_limit = memblock_end_of_DRAM();
+	else
+		bootloader_memory_limit = memblock_max_addr(memory_limit);
+
+	update_memory_limit();
 
 	/*
 	 * Apply the memory limit if it was set. Since the kernel may be loaded
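
Note: with this reordering, bootloader_memory_limit reflects any mem= style cap
before update_memory_limit() runs, and unaligned limits are rejected because
memory offlining operates on MIN_MEMORY_BLOCK_SIZE blocks. A small standalone
check of the alignment rule; the 1 GiB block size is an assumption matching
arm64's default section size of that era:

#include <stdio.h>

#define MIN_MEMORY_BLOCK_SIZE	(1ULL << 30)	/* assumed: 1 GiB blocks */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

int main(void)
{
	unsigned long long ok = 4ULL << 30;			 /* 4 GiB   */
	unsigned long long bad = (3ULL << 30) + (512ULL << 20); /* 3.5 GiB */

	printf("%llu -> %s\n", ok, IS_ALIGNED(ok, MIN_MEMORY_BLOCK_SIZE) ?
	       "mem-offline supported" : "rejected");
	printf("%llu -> %s\n", bad, IS_ALIGNED(bad, MIN_MEMORY_BLOCK_SIZE) ?
	       "mem-offline supported" : "rejected");
	return 0;
}
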
diff --git a/drivers/block/vs_block_client.c b/drivers/block/vs_block_client.c
index db1effad..4e3ba35 100644
--- a/drivers/block/vs_block_client.c
+++ b/drivers/block/vs_block_client.c
@@ -423,7 +423,7 @@ vs_block_client_make_request(struct request_queue *q, struct bio *bio)
 
 static int vs_block_client_get_blkdev_id(struct block_client *client)
 {
-	int id;
+	int id = 0;
 	int ret;
 
 retry:
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
index 692f8ec..8a0cdca 100644
--- a/drivers/bluetooth/bluetooth-power.c
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -35,7 +35,6 @@
 #define BT_PWR_INFO(fmt, arg...) pr_info("%s: " fmt "\n", __func__, ## arg)
 #define BT_PWR_ERR(fmt, arg...)  pr_err("%s: " fmt "\n", __func__, ## arg)
 
-
 static const struct of_device_id bt_power_match_table[] = {
 	{	.compatible = "qca,ar3002" },
 	{	.compatible = "qca,qca6174" },
@@ -63,6 +62,7 @@ static int bt_vreg_init(struct bt_power_vreg_data *vreg)
 	vreg->reg = regulator_get(dev, vreg->name);
 	if (IS_ERR(vreg->reg)) {
 		rc = PTR_ERR(vreg->reg);
+		vreg->reg = NULL;
 		pr_err("%s: regulator_get(%s) failed. rc=%d\n",
 			__func__, vreg->name, rc);
 		goto out;
@@ -599,7 +599,7 @@ static int bt_dt_parse_vreg_info(struct device *dev,
 static int bt_dt_parse_clk_info(struct device *dev,
 		struct bt_power_clk_data **clk_data)
 {
-	int ret = -EINVAL;
+	int ret = 0;
 	struct bt_power_clk_data *clk = NULL;
 	struct device_node *np = dev->of_node;
 
@@ -636,7 +636,7 @@ static int bt_dt_parse_clk_info(struct device *dev,
 
 		*clk_data = clk;
 	} else {
-		BT_PWR_ERR("clocks is not provided in device tree");
+		BT_PWR_INFO("clocks are not provided in device tree");
 	}
 
 err:
@@ -657,74 +657,54 @@ static int bt_power_populate_dt_pinfo(struct platform_device *pdev)
 			of_get_named_gpio(pdev->dev.of_node,
 						"qca,bt-reset-gpio", 0);
 		if (bt_power_pdata->bt_gpio_sys_rst < 0)
-			BT_PWR_ERR("bt-reset-gpio not provided in device tree");
+			BT_PWR_INFO("bt-reset-gpio not provided in device tree");
 
 		rc = bt_dt_parse_vreg_info(&pdev->dev,
 					&bt_power_pdata->bt_vdd_core,
 					"qca,bt-vdd-core");
-		if (rc < 0)
-			BT_PWR_ERR("bt-vdd-core not provided in device tree");
 
 		rc = bt_dt_parse_vreg_info(&pdev->dev,
 					&bt_power_pdata->bt_vdd_io,
 					"qca,bt-vdd-io");
-		if (rc < 0)
-			BT_PWR_ERR("bt-vdd-io not provided in device tree");
 
 		rc = bt_dt_parse_vreg_info(&pdev->dev,
 					&bt_power_pdata->bt_vdd_xtal,
 					"qca,bt-vdd-xtal");
-		if (rc < 0)
-			BT_PWR_ERR("bt-vdd-xtal not provided in device tree");
 
 		rc = bt_dt_parse_vreg_info(&pdev->dev,
 					&bt_power_pdata->bt_vdd_pa,
 					"qca,bt-vdd-pa");
-		if (rc < 0)
-			BT_PWR_ERR("bt-vdd-pa not provided in device tree");
 
 		rc = bt_dt_parse_vreg_info(&pdev->dev,
 					&bt_power_pdata->bt_vdd_ldo,
 					"qca,bt-vdd-ldo");
-		if (rc < 0)
-			BT_PWR_ERR("bt-vdd-ldo not provided in device tree");
 
 		rc = bt_dt_parse_vreg_info(&pdev->dev,
 					&bt_power_pdata->bt_chip_pwd,
 					"qca,bt-chip-pwd");
-		if (rc < 0)
-			BT_PWR_ERR("bt-chip-pwd not provided in device tree");
 
 		rc = bt_dt_parse_vreg_info(&pdev->dev,
 					&bt_power_pdata->bt_vdd_aon,
 					"qca,bt-vdd-aon");
-		if (rc < 0)
-			BT_PWR_ERR("bt-vdd-aon not provided in device tree");
 
 		rc = bt_dt_parse_vreg_info(&pdev->dev,
 					&bt_power_pdata->bt_vdd_dig,
 					"qca,bt-vdd-dig");
-		if (rc < 0)
-			BT_PWR_ERR("bt-vdd-dig not provided in device tree");
+
 		rc = bt_dt_parse_vreg_info(&pdev->dev,
 					&bt_power_pdata->bt_vdd_rfa1,
 					"qca,bt-vdd-rfa1");
-		if (rc < 0)
-			BT_PWR_ERR("bt-vdd-rfa1 not provided in device tree");
+
 		rc = bt_dt_parse_vreg_info(&pdev->dev,
 					&bt_power_pdata->bt_vdd_rfa2,
 					"qca,bt-vdd-rfa2");
-		if (rc < 0)
-			BT_PWR_ERR("bt-vdd-rfa2 not provided in device tree");
+
 		rc = bt_dt_parse_vreg_info(&pdev->dev,
 					&bt_power_pdata->bt_vdd_asd,
 					"qca,bt-vdd-asd");
-		if (rc < 0)
-			BT_PWR_ERR("bt-vdd-asd not provided in device tree");
+
 		rc = bt_dt_parse_clk_info(&pdev->dev,
 					&bt_power_pdata->bt_chip_clk);
-		if (rc < 0)
-			BT_PWR_ERR("clock not provided in device tree");
 	}
 
 	bt_power_pdata->bt_power_setup = bluetooth_power;
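
Note: clearing vreg->reg after a failed regulator_get() matters because an
IS_ERR() cookie left in the field would later be handed to regulator APIs as if
it were a valid handle. A minimal sketch of the release pattern this enables,
assuming the driver's struct bt_power_vreg_data; bt_vreg_deinit is a
hypothetical helper, not part of the patch:

static void bt_vreg_deinit(struct bt_power_vreg_data *vreg)
{
	/* safe: the probe path now stores either a valid handle or NULL */
	if (!vreg || !vreg->reg)
		return;

	regulator_put(vreg->reg);
	vreg->reg = NULL;
}
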
diff --git a/drivers/bus/mhi/controllers/mhi_arch_qcom.c b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
index a41573c..4bf961d 100644
--- a/drivers/bus/mhi/controllers/mhi_arch_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
@@ -54,6 +54,37 @@ enum MHI_DEBUG_LEVEL  mhi_ipc_log_lvl = MHI_MSG_LVL_ERROR;
 
 #endif
 
+void mhi_reg_write_work(struct work_struct *w)
+{
+	struct mhi_controller *mhi_cntrl = container_of(w,
+						struct mhi_controller,
+						reg_write_work);
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct pci_dev *pci_dev = mhi_dev->pci_dev;
+	struct reg_write_info *info =
+				&mhi_cntrl->reg_write_q[mhi_cntrl->read_idx];
+
+	if (!info->valid)
+		return;
+
+	if (mhi_is_active(mhi_cntrl->mhi_dev) && msm_pcie_prevent_l1(pci_dev))
+		return;
+
+	while (info->valid) {
+		if (!mhi_is_active(mhi_cntrl->mhi_dev))
+			return;
+
+		writel_relaxed(info->val, info->reg_addr);
+		info->valid = false;
+		mhi_cntrl->read_idx =
+				(mhi_cntrl->read_idx + 1) &
+						(REG_WRITE_QUEUE_LEN - 1);
+		info = &mhi_cntrl->reg_write_q[mhi_cntrl->read_idx];
+	}
+
+	msm_pcie_allow_l1(pci_dev);
+}
+
 static int mhi_arch_pm_notifier(struct notifier_block *nb,
 				unsigned long event, void *unused)
 {
@@ -722,6 +753,7 @@ int mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
 	}
 
 	msm_pcie_l1ss_timeout_enable(pci_dev);
+	mhi_cntrl->force_m3_done = true;
 
 	MHI_LOG("Exited\n");
 
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.c b/drivers/bus/mhi/controllers/mhi_qcom.c
index 9bb7717..1801155 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_qcom.c
@@ -770,6 +770,12 @@ static struct mhi_controller *mhi_register_controller(struct pci_dev *pci_dev)
 	mhi_cntrl->iova_start = memblock_start_of_DRAM();
 	mhi_cntrl->iova_stop = memblock_end_of_DRAM();
 
+	mhi_cntrl->need_force_m3 = true;
+
+	/* setup host support for SFR retrieval */
+	if (of_property_read_bool(of_node, "mhi,sfr-support"))
+		mhi_cntrl->sfr_len = MHI_MAX_SFR_LEN;
+
 	of_node = of_parse_phandle(mhi_cntrl->of_node, "qcom,iommu-group", 0);
 	if (of_node) {
 		use_s1 = true;
@@ -834,11 +840,28 @@ static struct mhi_controller *mhi_register_controller(struct pci_dev *pci_dev)
 	mhi_cntrl->fw_image = firmware_info->fw_image;
 	mhi_cntrl->edl_image = firmware_info->edl_image;
 
+	mhi_cntrl->offload_wq = alloc_ordered_workqueue("offload_wq",
+						WQ_MEM_RECLAIM | WQ_HIGHPRI);
+	if (!mhi_cntrl->offload_wq)
+		goto error_register;
+
+	INIT_WORK(&mhi_cntrl->reg_write_work, mhi_reg_write_work);
+
+	mhi_cntrl->reg_write_q = kcalloc(REG_WRITE_QUEUE_LEN,
+					sizeof(*mhi_cntrl->reg_write_q),
+					GFP_KERNEL);
+	if (!mhi_cntrl->reg_write_q)
+		goto error_free_wq;
+
+	atomic_set(&mhi_cntrl->write_idx, -1);
+
 	if (sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj, &mhi_qcom_group))
 		MHI_ERR("Error while creating the sysfs group\n");
 
 	return mhi_cntrl;
 
+error_free_wq:
+	destroy_workqueue(mhi_cntrl->offload_wq);
 error_register:
 	mhi_free_controller(mhi_cntrl);
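
Note: the registration path seeds write_idx at -1 on purpose: atomic_inc_return()
increments before returning, so the first enqueued register write claims slot 0.
A standalone illustration of that behavior with a stand-in for the atomic:

#include <stdio.h>

static int write_idx = -1;		/* as set by atomic_set() above */

static int inc_return(int *v)		/* stand-in for atomic_inc_return() */
{
	return ++*v;
}

int main(void)
{
	printf("first slot: %d\n", inc_return(&write_idx) & (1024 - 1));  /* 0 */
	printf("second slot: %d\n", inc_return(&write_idx) & (1024 - 1)); /* 1 */
	return 0;
}
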
 
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.h b/drivers/bus/mhi/controllers/mhi_qcom.h
index 3604863..f9c1d8a 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.h
+++ b/drivers/bus/mhi/controllers/mhi_qcom.h
@@ -30,6 +30,8 @@
 #define REMOTE_TIME_REMAINDER_US(x) (REMOTE_TICKS_TO_US((x)) % \
 					(REMOTE_TICKS_TO_SEC((x)) * 1000000ULL))
 
+#define MHI_MAX_SFR_LEN (256)
+
 extern const char * const mhi_ee_str[MHI_EE_MAX];
 #define TO_MHI_EXEC_STR(ee) (ee >= MHI_EE_MAX ? "INVALID_EE" : mhi_ee_str[ee])
 
@@ -65,6 +67,7 @@ struct mhi_dev {
 void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl);
 int mhi_pci_probe(struct pci_dev *pci_dev,
 		  const struct pci_device_id *device_id);
+void mhi_reg_write_work(struct work_struct *w);
 
 #ifdef CONFIG_ARCH_QCOM
 
diff --git a/drivers/bus/mhi/core/mhi_boot.c b/drivers/bus/mhi/core/mhi_boot.c
index 0840148..896d517 100644
--- a/drivers/bus/mhi/core/mhi_boot.c
+++ b/drivers/bus/mhi/core/mhi_boot.c
@@ -156,13 +156,14 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
 
 	MHI_LOG("BHIe programming for RDDM\n");
 
-	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
+	mhi_cntrl->write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
 		      upper_32_bits(mhi_buf->dma_addr));
 
-	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
+	mhi_cntrl->write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
 		      lower_32_bits(mhi_buf->dma_addr));
 
-	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
+	mhi_cntrl->write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS,
+			mhi_buf->len);
 	sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
 
 	if (unlikely(!sequence_id))
@@ -232,7 +233,7 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
 			/* Hardware reset; force device to enter rddm */
 			MHI_LOG(
 				"Did not enter RDDM, do a host req. reset\n");
-			mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
+			mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->regs,
 				      MHI_SOC_RESET_REQ_OFFSET,
 				      MHI_SOC_RESET_REQ);
 			udelay(delayus);
@@ -308,13 +309,14 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
 
 	MHI_LOG("Starting BHIe Programming\n");
 
-	mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
+	mhi_cntrl->write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
 		      upper_32_bits(mhi_buf->dma_addr));
 
-	mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
+	mhi_cntrl->write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
 		      lower_32_bits(mhi_buf->dma_addr));
 
-	mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
+	mhi_cntrl->write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS,
+			mhi_buf->len);
 
 	mhi_cntrl->sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
 	mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
@@ -372,14 +374,15 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
 		goto invalid_pm_state;
 	}
 
-	mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
-	mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
+	mhi_cntrl->write_reg(mhi_cntrl, base, BHI_STATUS, 0);
+	mhi_cntrl->write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
 		      upper_32_bits(dma_addr));
-	mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
+	mhi_cntrl->write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
 		      lower_32_bits(dma_addr));
-	mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
+	mhi_cntrl->write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
 	mhi_cntrl->session_id = prandom_u32() & BHI_TXDB_SEQNUM_BMSK;
-	mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, mhi_cntrl->session_id);
+	mhi_cntrl->write_reg(mhi_cntrl, base, BHI_IMGTXDB,
+			mhi_cntrl->session_id);
 	read_unlock_bh(pm_lock);
 
 	MHI_LOG("Waiting for image transfer completion\n");
@@ -523,10 +526,9 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
 	}
 }
 
-void mhi_fw_load_worker(struct work_struct *work)
+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
 {
 	int ret;
-	struct mhi_controller *mhi_cntrl;
 	const char *fw_name;
 	const struct firmware *firmware;
 	struct image_info *image_info;
@@ -534,17 +536,7 @@ void mhi_fw_load_worker(struct work_struct *work)
 	dma_addr_t dma_addr;
 	size_t size;
 
-	mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);
-
-	MHI_LOG("Waiting for device to enter PBL from EE:%s\n",
-		TO_MHI_EXEC_STR(mhi_cntrl->ee));
-
-	ret = wait_event_timeout(mhi_cntrl->state_event,
-				 MHI_IN_PBL(mhi_cntrl->ee) ||
-				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
-				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
-
-	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
 		MHI_ERR("MHI is not in valid state\n");
 		return;
 	}
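
Note: every BHI/BHIe register write in this file now goes through the
mhi_cntrl->write_reg hook instead of calling mhi_write_reg() directly. At
firmware-load time the hook still points at the synchronous writer (assigned in
of_register_mhi_controller()), so download behavior is unchanged; only mission
mode swaps in the offloaded variant. A simplified sketch of the indirection,
with types reduced (void * stands in for void __iomem *):

typedef unsigned int u32;

struct mhi_ctrl_sketch {
	void (*write_reg)(struct mhi_ctrl_sketch *c, void *base,
			  u32 offset, u32 val);
};

static void write_sync(struct mhi_ctrl_sketch *c, void *base,
		       u32 offset, u32 val)
{
	/* writel_relaxed(val, base + offset) in the real driver */
}

static void write_offload(struct mhi_ctrl_sketch *c, void *base,
			  u32 offset, u32 val)
{
	/* enqueue and kick the offload workqueue in the real driver */
}

static void select_writer(struct mhi_ctrl_sketch *c, int mission_mode)
{
	c->write_reg = mission_mode ? write_offload : write_sync;
}
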
diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c
index 7f68495..b4e568c 100644
--- a/drivers/bus/mhi/core/mhi_init.c
+++ b/drivers/bus/mhi/core/mhi_init.c
@@ -73,6 +73,19 @@ static const char * const mhi_pm_state_str[] = {
 
 struct mhi_bus mhi_bus;
 
+struct mhi_controller *find_mhi_controller_by_name(const char *name)
+{
+	struct mhi_controller *mhi_cntrl, *tmp_cntrl;
+
+	list_for_each_entry_safe(mhi_cntrl, tmp_cntrl, &mhi_bus.controller_list,
+				 node) {
+		if (mhi_cntrl->name && (!strcmp(name, mhi_cntrl->name)))
+			return mhi_cntrl;
+	}
+
+	return NULL;
+}
+
 const char *to_mhi_pm_state_str(enum MHI_PM_STATE state)
 {
 	int index = find_last_bit((unsigned long *)&state, 32);
@@ -590,7 +603,6 @@ int mhi_init_timesync(struct mhi_controller *mhi_cntrl)
 		return -ENOMEM;
 
 	spin_lock_init(&mhi_tsync->lock);
-	mutex_init(&mhi_tsync->lpm_mutex);
 	INIT_LIST_HEAD(&mhi_tsync->head);
 	init_completion(&mhi_tsync->completion);
 
@@ -658,6 +670,42 @@ int mhi_init_timesync(struct mhi_controller *mhi_cntrl)
 	return ret;
 }
 
+int mhi_init_sfr(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_sfr_info *sfr_info = mhi_cntrl->mhi_sfr;
+	int ret = -EIO;
+
+	if (!sfr_info)
+		return ret;
+
+	/* do a clean-up if we reach here post SSR */
+	memset(sfr_info->str, 0, sfr_info->len);
+
+	sfr_info->buf_addr = mhi_alloc_coherent(mhi_cntrl, sfr_info->len,
+					&sfr_info->dma_addr, GFP_KERNEL);
+	if (!sfr_info->buf_addr) {
+		MHI_ERR("Failed to allocate memory for sfr\n");
+		return -ENOMEM;
+	}
+
+	init_completion(&sfr_info->completion);
+
+	ret = mhi_send_cmd(mhi_cntrl, NULL, MHI_CMD_SFR_CFG);
+	if (ret) {
+		MHI_ERR("Failed to send sfr cfg cmd\n");
+		return ret;
+	}
+
+	ret = wait_for_completion_timeout(&sfr_info->completion,
+			msecs_to_jiffies(mhi_cntrl->timeout_ms));
+	if (!ret || sfr_info->ccs != MHI_EV_CC_SUCCESS) {
+		MHI_ERR("Failed to get sfr cfg cmd completion\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
 static int mhi_init_bw_scale(struct mhi_controller *mhi_cntrl)
 {
 	int ret, er_index;
@@ -682,7 +730,7 @@ static int mhi_init_bw_scale(struct mhi_controller *mhi_cntrl)
 	MHI_LOG("BW_CFG OFFSET:0x%x\n", bw_cfg_offset);
 
 	/* advertise host support */
-	mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, bw_cfg_offset,
+	mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->regs, bw_cfg_offset,
 		      MHI_BW_SCALE_SETUP(er_index));
 
 	return 0;
@@ -780,8 +828,8 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
 
 	/* setup wake db */
 	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
-	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
-	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
+	mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
+	mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
 	mhi_cntrl->wake_set = false;
 
 	/* setup bw scale db */
@@ -1297,6 +1345,8 @@ static int of_parse_dt(struct mhi_controller *mhi_cntrl,
 	if (!ret)
 		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_offset;
 
+	of_property_read_string(of_node, "mhi,name", &mhi_cntrl->name);
+
 	return 0;
 
 error_ev_cfg:
@@ -1313,6 +1363,7 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
 	struct mhi_chan *mhi_chan;
 	struct mhi_cmd *mhi_cmd;
 	struct mhi_device *mhi_dev;
+	struct mhi_sfr_info *sfr_info;
 	u32 soc_info;
 
 	if (!mhi_cntrl->of_node)
@@ -1337,11 +1388,11 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
 
 	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
 	mutex_init(&mhi_cntrl->pm_mutex);
+	mutex_init(&mhi_cntrl->tsync_mutex);
 	rwlock_init(&mhi_cntrl->pm_lock);
 	spin_lock_init(&mhi_cntrl->transition_lock);
 	spin_lock_init(&mhi_cntrl->wlock);
 	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
-	INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
 	INIT_WORK(&mhi_cntrl->low_priority_worker, mhi_low_priority_worker);
 	init_waitqueue_head(&mhi_cntrl->state_event);
 
@@ -1386,6 +1437,8 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
 		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
 	}
 
+	mhi_cntrl->write_reg = mhi_write_reg;
+
 	/* read the device info if possible */
 	if (mhi_cntrl->regs) {
 		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
@@ -1428,6 +1481,23 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
 
 	mhi_cntrl->mhi_dev = mhi_dev;
 
+	if (mhi_cntrl->sfr_len) {
+		sfr_info = kzalloc(sizeof(*sfr_info), GFP_KERNEL);
+		if (!sfr_info) {
+			ret = -ENOMEM;
+			goto error_add_dev;
+		}
+
+		sfr_info->str = kzalloc(mhi_cntrl->sfr_len, GFP_KERNEL);
+		if (!sfr_info->str) {
+			ret = -ENOMEM;
+			goto error_alloc_sfr;
+		}
+
+		sfr_info->len = mhi_cntrl->sfr_len;
+		mhi_cntrl->mhi_sfr = sfr_info;
+	}
+
 	mhi_cntrl->parent = debugfs_lookup(mhi_bus_type.name, NULL);
 	mhi_cntrl->klog_lvl = MHI_MSG_LVL_ERROR;
 
@@ -1438,6 +1508,9 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
 
 	return 0;
 
+error_alloc_sfr:
+	kfree(sfr_info);
+
 error_add_dev:
 	mhi_dealloc_device(mhi_cntrl, mhi_dev);
 
@@ -1455,12 +1528,18 @@ EXPORT_SYMBOL(of_register_mhi_controller);
 void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl)
 {
 	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+	struct mhi_sfr_info *sfr_info = mhi_cntrl->mhi_sfr;
 
 	kfree(mhi_cntrl->mhi_cmd);
 	kfree(mhi_cntrl->mhi_event);
 	vfree(mhi_cntrl->mhi_chan);
 	kfree(mhi_cntrl->mhi_tsync);
 
+	if (sfr_info) {
+		kfree(sfr_info->str);
+		kfree(sfr_info);
+	}
+
 	device_del(&mhi_dev->dev);
 	put_device(&mhi_dev->dev);
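
Note: SFR support is gated by the "mhi,sfr-support" DT property: registration
allocates a host-side shadow string of sfr_len bytes, and mhi_init_sfr() later
allocates the DMA buffer and sends MHI_CMD_SFR_CFG so the device knows where to
write its failure reason. A hedged consumer sketch of the accessor added later
in this series; the controller name is illustrative and must match the DT
"mhi,name" string:

#include <linux/err.h>
#include <linux/printk.h>

static void demo_log_restart_reason(void)
{
	char *reason = mhi_get_restart_reason("esoc0");	/* name is an example */

	if (IS_ERR(reason))
		pr_err("no SFR available: %ld\n", PTR_ERR(reason));
	else
		pr_info("subsystem restart reason: %s\n", reason);
}
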
 
diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h
index e4e80f9..e4c27fd2 100644
--- a/drivers/bus/mhi/core/mhi_internal.h
+++ b/drivers/bus/mhi/core/mhi_internal.h
@@ -243,6 +243,9 @@ extern struct bus_type mhi_bus_type;
 #define REMOTE_TICKS_TO_US(x) (div_u64((x) * 100ULL, \
 			       div_u64(mhi_cntrl->remote_timer_freq, 10000ULL)))
 
+/* Wait time to allow runtime framework to resume MHI in milliseconds */
+#define MHI_RESUME_TIME	(30000)
+
 struct mhi_event_ctxt {
 	u32 reserved : 8;
 	u32 intmodc : 8;
@@ -297,6 +300,7 @@ enum mhi_cmd_type {
 	MHI_CMD_TYPE_STOP = 17,
 	MHI_CMD_TYPE_START = 18,
 	MHI_CMD_TYPE_TSYNC = 24,
+	MHI_CMD_TYPE_SFR_CFG = 73,
 };
 
 /* no operation command */
@@ -327,6 +331,11 @@ enum mhi_cmd_type {
 #define MHI_TRE_CMD_TSYNC_CFG_DWORD1(er) ((MHI_CMD_TYPE_TSYNC << 16) | \
 					  (er << 24))
 
+/* subsystem failure reason cfg command */
+#define MHI_TRE_CMD_SFR_CFG_PTR(ptr) (ptr)
+#define MHI_TRE_CMD_SFR_CFG_DWORD0(len) (len)
+#define MHI_TRE_CMD_SFR_CFG_DWORD1 (MHI_CMD_TYPE_SFR_CFG << 16)
+
 #define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF)
 #define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
 
@@ -360,11 +369,14 @@ enum mhi_cmd_type {
 #define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie)
 #define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16)
 
+#define MHI_RSC_MIN_CREDITS (8)
+
 enum MHI_CMD {
 	MHI_CMD_RESET_CHAN,
 	MHI_CMD_START_CHAN,
 	MHI_CMD_STOP_CHAN,
 	MHI_CMD_TIMSYNC_CFG,
+	MHI_CMD_SFR_CFG,
 };
 
 enum MHI_PKT_TYPE {
@@ -381,6 +393,7 @@ enum MHI_PKT_TYPE {
 	MHI_PKT_TYPE_RSC_TX_EVENT = 0x28,
 	MHI_PKT_TYPE_EE_EVENT = 0x40,
 	MHI_PKT_TYPE_TSYNC_EVENT = 0x48,
+	MHI_PKT_TYPE_SFR_CFG_CMD = 0x49,
 	MHI_PKT_TYPE_BW_REQ_EVENT = 0x50,
 	MHI_PKT_TYPE_STALE_EVENT, /* internal event */
 };
@@ -712,10 +725,18 @@ struct mhi_timesync {
 	enum MHI_EV_CCS ccs;
 	struct completion completion;
 	spinlock_t lock; /* list protection */
-	struct mutex lpm_mutex; /* lpm protection */
 	struct list_head head;
 };
 
+struct mhi_sfr_info {
+	void *buf_addr;
+	dma_addr_t dma_addr;
+	size_t len;
+	char *str;
+	enum MHI_EV_CCS ccs;
+	struct completion completion;
+};
+
 struct mhi_bus {
 	struct list_head controller_list;
 	struct mutex lock;
@@ -725,6 +746,8 @@ struct mhi_bus {
 #define MHI_TIMEOUT_MS (1000)
 extern struct mhi_bus mhi_bus;
 
+struct mhi_controller *find_mhi_controller_by_name(const char *name);
+
 /* debug fs related functions */
 int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d);
 int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d);
@@ -812,11 +835,14 @@ int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, u32 capability,
 			      u32 *offset);
 void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr);
 int mhi_init_timesync(struct mhi_controller *mhi_cntrl);
+int mhi_init_sfr(struct mhi_controller *mhi_cntrl);
 int mhi_create_timesync_sysfs(struct mhi_controller *mhi_cntrl);
 void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl);
 int mhi_create_sysfs(struct mhi_controller *mhi_cntrl);
 void mhi_destroy_sysfs(struct mhi_controller *mhi_cntrl);
 int mhi_early_notify_device(struct device *dev, void *data);
+void mhi_write_reg_offload(struct mhi_controller *mhi_cntrl,
+			void __iomem *base, u32 offset, u32 val);
 
 /* timesync log support */
 static inline void mhi_timesync_log(struct mhi_controller *mhi_cntrl)
@@ -908,8 +934,11 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
 int mhi_dtr_init(void);
 void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
 		      struct image_info *img_info);
+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
 			struct mhi_chan *mhi_chan);
+void mhi_reset_reg_write_q(struct mhi_controller *mhi_cntrl);
+void mhi_force_reg_write(struct mhi_controller *mhi_cntrl);
 
 /* isr handlers */
 irqreturn_t mhi_msi_handlr(int irq_number, void *dev);
@@ -919,16 +948,16 @@ void mhi_ev_task(unsigned long data);
 
 #ifdef CONFIG_MHI_DEBUG
 
-#define MHI_ASSERT(cond, msg) do { \
+#define MHI_ASSERT(cond, fmt, ...) do { \
 	if (cond) \
-		panic(msg); \
+		panic(fmt, ##__VA_ARGS__); \
 } while (0)
 
 #else
 
-#define MHI_ASSERT(cond, msg) do { \
+#define MHI_ASSERT(cond, fmt, ...) do { \
 	if (cond) { \
-		MHI_ERR(msg); \
+		MHI_ERR(fmt, ##__VA_ARGS__); \
 		WARN_ON(cond); \
 	} \
 } while (0)
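
Note: a worked example of the new SFR_CFG TRE encoding: the command carries the
buffer length minus one in dword[0] (mhi_send_cmd() passes sfr_info->len - 1)
and command type 73 in bits 16-23 of dword[1]:

#include <stdio.h>

#define MHI_CMD_TYPE_SFR_CFG		73
#define MHI_TRE_CMD_SFR_CFG_DWORD0(len)	(len)
#define MHI_TRE_CMD_SFR_CFG_DWORD1	(MHI_CMD_TYPE_SFR_CFG << 16)

int main(void)
{
	unsigned int len = 256;	/* MHI_MAX_SFR_LEN */

	printf("dword0=0x%x dword1=0x%x\n",
	       MHI_TRE_CMD_SFR_CFG_DWORD0(len - 1),	/* 0xff     */
	       MHI_TRE_CMD_SFR_CFG_DWORD1);		/* 0x490000 */
	return 0;
}
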
diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c
index 414fd85..e943dd7 100644
--- a/drivers/bus/mhi/core/mhi_main.c
+++ b/drivers/bus/mhi/core/mhi_main.c
@@ -14,6 +14,8 @@
 #include <linux/mhi.h>
 #include "mhi_internal.h"
 
+static char *mhi_generic_sfr = "unknown reason";
+
 static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
 				    struct mhi_chan *mhi_chan);
 
@@ -89,6 +91,45 @@ int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl,
 	return -ENXIO;
 }
 
+void mhi_force_reg_write(struct mhi_controller *mhi_cntrl)
+{
+	if (mhi_cntrl->offload_wq)
+		flush_work(&mhi_cntrl->reg_write_work);
+}
+
+void mhi_reset_reg_write_q(struct mhi_controller *mhi_cntrl)
+{
+	cancel_work_sync(&mhi_cntrl->reg_write_work);
+	memset(mhi_cntrl->reg_write_q, 0,
+	       sizeof(struct reg_write_info) * REG_WRITE_QUEUE_LEN);
+	mhi_cntrl->read_idx = 0;
+	atomic_set(&mhi_cntrl->write_idx, -1);
+}
+
+static void mhi_reg_write_enqueue(struct mhi_controller *mhi_cntrl,
+	void __iomem *reg_addr, u32 val)
+{
+	u32 q_index = atomic_inc_return(&mhi_cntrl->write_idx);
+
+	q_index = q_index & (REG_WRITE_QUEUE_LEN - 1);
+
+	MHI_ASSERT(mhi_cntrl->reg_write_q[q_index].valid, "queue full idx %d",
+			q_index);
+
+	mhi_cntrl->reg_write_q[q_index].reg_addr = reg_addr;
+	mhi_cntrl->reg_write_q[q_index].val = val;
+	mhi_cntrl->reg_write_q[q_index].valid = true;
+}
+
+void mhi_write_reg_offload(struct mhi_controller *mhi_cntrl,
+		   void __iomem *base,
+		   u32 offset,
+		   u32 val)
+{
+	mhi_reg_write_enqueue(mhi_cntrl, base + offset, val);
+	queue_work(mhi_cntrl->offload_wq, &mhi_cntrl->reg_write_work);
+}
+
 void mhi_write_reg(struct mhi_controller *mhi_cntrl,
 		   void __iomem *base,
 		   u32 offset,
@@ -113,15 +154,15 @@ void mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
 
 	tmp &= ~mask;
 	tmp |= (val << shift);
-	mhi_write_reg(mhi_cntrl, base, offset, tmp);
+	mhi_cntrl->write_reg(mhi_cntrl, base, offset, tmp);
 }
 
 void mhi_write_db(struct mhi_controller *mhi_cntrl,
 		  void __iomem *db_addr,
 		  dma_addr_t wp)
 {
-	mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(wp));
-	mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(wp));
+	mhi_cntrl->write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(wp));
+	mhi_cntrl->write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(wp));
 }
 
 void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
@@ -437,6 +478,8 @@ int mhi_queue_dma(struct mhi_device *mhi_dev,
 	struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
 	struct mhi_buf_info *buf_info;
 	struct mhi_tre *mhi_tre;
+	bool ring_db = true;
+	int n_free_tre, n_queued_tre;
 
 	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
 		return -ENOMEM;
@@ -476,6 +519,18 @@ int mhi_queue_dma(struct mhi_device *mhi_dev,
 		mhi_tre->dword[0] =
 			MHI_RSCTRE_DATA_DWORD0(buf_ring->wp - buf_ring->base);
 		mhi_tre->dword[1] = MHI_RSCTRE_DATA_DWORD1;
+		/*
+		 * On an RSC channel, the IPA HW has a minimum credit
+		 * requirement before switching to DB mode.
+		 */
+		n_free_tre = mhi_get_no_free_descriptors(mhi_dev,
+				DMA_FROM_DEVICE);
+		n_queued_tre = tre_ring->elements - n_free_tre;
+		read_lock_bh(&mhi_chan->lock);
+		if (mhi_chan->db_cfg.db_mode &&
+				n_queued_tre < MHI_RSC_MIN_CREDITS)
+			ring_db = false;
+		read_unlock_bh(&mhi_chan->lock);
 	} else {
 		mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
 		mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
@@ -493,7 +548,7 @@ int mhi_queue_dma(struct mhi_device *mhi_dev,
 	if (mhi_chan->dir == DMA_TO_DEVICE)
 		atomic_inc(&mhi_cntrl->pending_pkts);
 
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && ring_db) {
 		read_lock_bh(&mhi_chan->lock);
 		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
 		read_unlock_bh(&mhi_chan->lock);
@@ -759,10 +814,12 @@ static const struct attribute_group mhi_tsync_group = {
 void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl)
 {
 	if (mhi_cntrl->mhi_tsync) {
+		mutex_lock(&mhi_cntrl->tsync_mutex);
 		sysfs_remove_group(&mhi_cntrl->mhi_dev->dev.kobj,
 				   &mhi_tsync_group);
 		kfree(mhi_cntrl->mhi_tsync);
 		mhi_cntrl->mhi_tsync = NULL;
+		mutex_unlock(&mhi_cntrl->tsync_mutex);
 	}
 }
 
@@ -908,6 +965,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
 	u32 ev_code;
 	struct mhi_result result;
 	unsigned long flags = 0;
+	bool ring_db = true;
+	int n_free_tre, n_queued_tre;
 
 	ev_code = MHI_TRE_GET_EV_CODE(event);
 	buf_ring = &mhi_chan->buf_ring;
@@ -1002,9 +1061,22 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
 
 		MHI_VERB("DB_MODE/OOB Detected chan %d.\n", mhi_chan->chan);
 		mhi_chan->db_cfg.db_mode = true;
+
+		/*
+		 * On an RSC channel, the IPA HW has a minimum credit
+		 * requirement before switching to DB mode.
+		 */
+		if (mhi_chan->xfer_type == MHI_XFER_RSC_DMA) {
+			n_free_tre = mhi_get_no_free_descriptors(
+					mhi_chan->mhi_dev, DMA_FROM_DEVICE);
+			n_queued_tre = tre_ring->elements - n_free_tre;
+			if (n_queued_tre < MHI_RSC_MIN_CREDITS)
+				ring_db = false;
+		}
+
 		read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
 		if (tre_ring->wp != tre_ring->rp &&
-		    MHI_DB_ACCESS_VALID(mhi_cntrl)) {
+		    MHI_DB_ACCESS_VALID(mhi_cntrl) && ring_db) {
 			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
 		}
 		read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
@@ -1047,7 +1119,12 @@ static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
 	xfer_len = MHI_TRE_GET_EV_LEN(event);
 
 	/* received out of bound cookie */
-	MHI_ASSERT(cookie >= buf_ring->len, "Invalid Cookie\n");
+	if (cookie >= buf_ring->len) {
+		MHI_ERR("cookie 0x%08x bufring_len %zu", cookie, buf_ring->len);
+		MHI_ERR("Processing Event:0x%llx 0x%08x 0x%08x\n",
+			event->ptr, event->dword[0], event->dword[1]);
+		panic("invalid cookie");
+	}
 
 	buf_info = buf_ring->base + cookie;
 
@@ -1099,6 +1176,7 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
 	struct mhi_tre *cmd_pkt;
 	struct mhi_chan *mhi_chan;
 	struct mhi_timesync *mhi_tsync;
+	struct mhi_sfr_info *sfr_info;
 	enum mhi_cmd_type type;
 	u32 chan;
 
@@ -1109,17 +1187,25 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
 
 	type = MHI_TRE_GET_CMD_TYPE(cmd_pkt);
 
-	if (type == MHI_CMD_TYPE_TSYNC) {
+	switch (type) {
+	case MHI_CMD_TYPE_TSYNC:
 		mhi_tsync = mhi_cntrl->mhi_tsync;
 		mhi_tsync->ccs = MHI_TRE_GET_EV_CODE(tre);
 		complete(&mhi_tsync->completion);
-	} else {
+		break;
+	case MHI_CMD_TYPE_SFR_CFG:
+		sfr_info = mhi_cntrl->mhi_sfr;
+		sfr_info->ccs = MHI_TRE_GET_EV_CODE(tre);
+		complete(&sfr_info->completion);
+		break;
+	default:
 		chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
 		mhi_chan = &mhi_cntrl->mhi_chan[chan];
 		write_lock_bh(&mhi_chan->lock);
 		mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
 		complete(&mhi_chan->completion);
 		write_unlock_bh(&mhi_chan->lock);
+		break;
 	}
 
 	mhi_del_ring_element(mhi_cntrl, mhi_ring);
@@ -1413,6 +1499,9 @@ int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
 		goto exit_no_lock;
 	}
 
+	if (mhi_cntrl->need_force_m3 && !mhi_cntrl->force_m3_done)
+		goto exit_no_lock;
+
 	ret = __mhi_device_get_sync(mhi_cntrl);
 	if (ret)
 		goto exit_no_lock;
@@ -1469,7 +1558,7 @@ int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
 
 	read_lock_bh(&mhi_cntrl->pm_lock);
 	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
-		mhi_write_reg(mhi_cntrl, mhi_cntrl->bw_scale_db, 0,
+		mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->bw_scale_db, 0,
 			      MHI_BW_SCALE_RESULT(result,
 						  link_info.sequence_num));
 
@@ -1657,7 +1746,9 @@ int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
 	struct mhi_tre *cmd_tre = NULL;
 	struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
 	struct mhi_ring *ring = &mhi_cmd->ring;
-	int chan = 0;
+	struct mhi_sfr_info *sfr_info;
+	int chan = 0, ret = 0;
+	bool cmd_db_not_set = false;
 
 	MHI_VERB("Entered, MHI pm_state:%s dev_state:%s ee:%s\n",
 		 to_mhi_pm_state_str(mhi_cntrl->pm_state),
@@ -1697,6 +1788,14 @@ int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
 		cmd_tre->dword[1] = MHI_TRE_CMD_TSYNC_CFG_DWORD1
 			(mhi_cntrl->mhi_tsync->er_index);
 		break;
+	case MHI_CMD_SFR_CFG:
+		sfr_info = mhi_cntrl->mhi_sfr;
+		cmd_tre->ptr = MHI_TRE_CMD_SFR_CFG_PTR
+						(sfr_info->dma_addr);
+		cmd_tre->dword[0] = MHI_TRE_CMD_SFR_CFG_DWORD0
+						(sfr_info->len - 1);
+		cmd_tre->dword[1] = MHI_TRE_CMD_SFR_CFG_DWORD1;
+		break;
 	}
 
 
@@ -1707,11 +1806,33 @@ int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
 	/* queue to hardware */
 	mhi_add_ring_element(mhi_cntrl, ring);
 	read_lock_bh(&mhi_cntrl->pm_lock);
+	/*
+	 * If elements are queued to the command ring while MHI is not in
+	 * M0 state (it is suspended or still transitioning to M0), the DB
+	 * will not be rung. In that case, allow enough time for the host
+	 * to resume so it can ring the DB.
+	 */
 	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
 		mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
+	else
+		cmd_db_not_set = true;
 	read_unlock_bh(&mhi_cntrl->pm_lock);
 	spin_unlock_bh(&mhi_cmd->lock);
 
+	if (cmd_db_not_set) {
+		ret = wait_event_timeout(mhi_cntrl->state_event,
+			MHI_DB_ACCESS_VALID(mhi_cntrl) ||
+			MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+			msecs_to_jiffies(MHI_RESUME_TIME));
+		if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+			MHI_ERR(
+				"Did not enter M0, cur_state:%s pm_state:%s\n",
+				TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+				to_mhi_pm_state_str(mhi_cntrl->pm_state));
+			return -EIO;
+		}
+	}
+
 	return 0;
 }
 
@@ -2348,16 +2469,17 @@ int mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
 	struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync;
 	int ret;
 
+	mutex_lock(&mhi_cntrl->tsync_mutex);
 	/* not all devices support time feature */
-	if (!mhi_tsync)
-		return -EIO;
+	if (!mhi_tsync) {
+		ret = -EIO;
+		goto err_unlock;
+	}
 
 	/* bring to M0 state */
 	ret = __mhi_device_get_sync(mhi_cntrl);
 	if (ret)
-		return ret;
-
-	mutex_lock(&mhi_tsync->lpm_mutex);
+		goto err_unlock;
 
 	read_lock_bh(&mhi_cntrl->pm_lock);
 	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
@@ -2391,8 +2513,8 @@ int mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
 error_invalid_state:
 	mhi_cntrl->wake_put(mhi_cntrl, false);
 	read_unlock_bh(&mhi_cntrl->pm_lock);
-	mutex_unlock(&mhi_tsync->lpm_mutex);
-
+err_unlock:
+	mutex_unlock(&mhi_cntrl->tsync_mutex);
 	return ret;
 }
 EXPORT_SYMBOL(mhi_get_remote_time_sync);
@@ -2420,13 +2542,16 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev,
 	int ret;
 
 	/* not all devices support time feature */
-	if (!mhi_tsync)
-		return -EIO;
+	mutex_lock(&mhi_cntrl->tsync_mutex);
+	if (!mhi_tsync) {
+		ret = -EIO;
+		goto err_unlock;
+	}
 
 	/* tsync db can only be rung in M0 state */
 	ret = __mhi_device_get_sync(mhi_cntrl);
 	if (ret)
-		return ret;
+		goto err_unlock;
 
 	/*
 	 * technically we can use GFP_KERNEL, but we want to avoid
@@ -2485,7 +2610,8 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev,
 	read_lock_bh(&mhi_cntrl->pm_lock);
 	mhi_cntrl->wake_put(mhi_cntrl, false);
 	read_unlock_bh(&mhi_cntrl->pm_lock);
-
+err_unlock:
+	mutex_unlock(&mhi_cntrl->tsync_mutex);
 	return ret;
 }
 EXPORT_SYMBOL(mhi_get_remote_time);
@@ -2540,3 +2666,20 @@ void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl)
 	}
 }
 EXPORT_SYMBOL(mhi_debug_reg_dump);
+
+char *mhi_get_restart_reason(const char *name)
+{
+	struct mhi_controller *mhi_cntrl;
+	struct mhi_sfr_info *sfr_info;
+
+	mhi_cntrl = find_mhi_controller_by_name(name);
+	if (!mhi_cntrl)
+		return ERR_PTR(-ENODEV);
+
+	sfr_info = mhi_cntrl->mhi_sfr;
+	if (!sfr_info)
+		return ERR_PTR(-EINVAL);
+
+	return strlen(sfr_info->str) ? sfr_info->str : mhi_generic_sfr;
+}
+EXPORT_SYMBOL(mhi_get_restart_reason);
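
Note: on the producer side, mhi_reg_write_enqueue() fills reg_addr and val
before setting valid, and the consumer clears valid only after issuing the
write, so each slot is handed off whole; ordering between the two sides comes
from the queue_work()/flush_work() pairing in the real driver. A distilled
sketch of that handshake:

struct reg_write_slot {
	void *reg_addr;
	unsigned int val;
	int valid;
};

static void produce(struct reg_write_slot *slot, void *addr, unsigned int v)
{
	slot->reg_addr = addr;
	slot->val = v;
	slot->valid = 1;		/* publish the payload last */
}

static void consume(struct reg_write_slot *slot)
{
	if (!slot->valid)
		return;
	/* writel_relaxed(slot->val, slot->reg_addr) in the real driver */
	slot->valid = 0;		/* release the slot last */
}
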
diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c
index 638174f..5a2cbd8 100644
--- a/drivers/bus/mhi/core/mhi_pm.c
+++ b/drivers/bus/mhi/core/mhi_pm.c
@@ -166,8 +166,8 @@ void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
 		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
 				    MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1);
 	} else {
-		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
-			MHICTRL_MHISTATE_MASK, MHICTRL_MHISTATE_SHIFT, state);
+		mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+				(state << MHICTRL_MHISTATE_SHIFT));
 	}
 }
 
@@ -478,6 +478,12 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
 
 	wake_up_all(&mhi_cntrl->state_event);
 
+	/* offload register write if supported */
+	if (mhi_cntrl->offload_wq) {
+		mhi_reset_reg_write_q(mhi_cntrl);
+		mhi_cntrl->write_reg = mhi_write_reg_offload;
+	}
+
 	/* force MHI to be in M0 state before continuing */
 	ret = __mhi_device_get_sync(mhi_cntrl);
 	if (ret)
@@ -512,7 +518,8 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
 
 	read_unlock_bh(&mhi_cntrl->pm_lock);
 
-	/* setup support for time sync */
+	/* setup support for additional features (SFR, timesync, etc.) */
+	mhi_init_sfr(mhi_cntrl);
 	mhi_init_timesync(mhi_cntrl);
 
 	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
@@ -546,6 +553,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 	struct mhi_cmd_ctxt *cmd_ctxt;
 	struct mhi_cmd *mhi_cmd;
 	struct mhi_event_ctxt *er_ctxt;
+	struct mhi_sfr_info *sfr_info = mhi_cntrl->mhi_sfr;
 	int ret, i;
 
 	MHI_LOG("Enter with from pm_state:%s MHI_STATE:%s to pm_state:%s\n",
@@ -553,6 +561,12 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 		TO_MHI_STATE_STR(mhi_cntrl->dev_state),
 		to_mhi_pm_state_str(transition_state));
 
+	/* restore the default synchronous register write callback */
+	mhi_cntrl->write_reg = mhi_write_reg;
+
+	if (mhi_cntrl->offload_wq)
+		mhi_reset_reg_write_q(mhi_cntrl);
+
 	/* We must notify MHI control driver so it can clean up first */
 	if (transition_state == MHI_PM_SYS_ERR_PROCESS)
 		mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
@@ -606,7 +620,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 		 * device clears INTVEC as part of RESET processing,
 		 * re-program it
 		 */
-		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+		mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
 	}
 
 	MHI_LOG("Waiting for all pending event ring processing to complete\n");
@@ -629,9 +643,19 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 
 	MHI_LOG("Waiting for all pending threads to complete\n");
 	wake_up_all(&mhi_cntrl->state_event);
-	flush_work(&mhi_cntrl->fw_worker);
 	flush_work(&mhi_cntrl->low_priority_worker);
 
+	mhi_cntrl->force_m3_done = false;
+
+	if (sfr_info && sfr_info->buf_addr) {
+		mhi_free_coherent(mhi_cntrl, sfr_info->len, sfr_info->buf_addr,
+				  sfr_info->dma_addr);
+		sfr_info->buf_addr = NULL;
+	}
+
+	/* remove support for time sync */
+	mhi_destroy_timesync(mhi_cntrl);
+
 	mutex_lock(&mhi_cntrl->pm_mutex);
 
 	MHI_ASSERT(atomic_read(&mhi_cntrl->dev_wake), "dev_wake != 0");
@@ -666,9 +690,6 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 		er_ctxt->wp = er_ctxt->rbase;
 	}
 
-	/* remove support for time sync */
-	mhi_destroy_timesync(mhi_cntrl);
-
 	if (cur_state == MHI_PM_SYS_ERR_PROCESS) {
 		mhi_ready_state_transition(mhi_cntrl);
 	} else {
@@ -837,7 +858,7 @@ void mhi_pm_st_worker(struct work_struct *work)
 				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
 			write_unlock_irq(&mhi_cntrl->pm_lock);
 			if (MHI_IN_PBL(mhi_cntrl->ee))
-				wake_up_all(&mhi_cntrl->state_event);
+				mhi_fw_load_handler(mhi_cntrl);
 			break;
 		case MHI_ST_TRANSITION_SBL:
 			write_lock_irq(&mhi_cntrl->pm_lock);
@@ -929,7 +950,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 		mhi_cntrl->bhie = mhi_cntrl->regs + val;
 	}
 
-	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+	mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
 	mhi_cntrl->pm_state = MHI_PM_POR;
 	mhi_cntrl->ee = MHI_EE_MAX;
 	current_ee = mhi_get_exec_env(mhi_cntrl);
@@ -946,9 +967,6 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 	next_state = MHI_IN_PBL(current_ee) ?
 		MHI_ST_TRANSITION_PBL : MHI_ST_TRANSITION_READY;
 
-	if (next_state == MHI_ST_TRANSITION_PBL)
-		schedule_work(&mhi_cntrl->fw_worker);
-
 	mhi_queue_state_transition(mhi_cntrl, next_state);
 
 	mhi_init_debugfs(mhi_cntrl);
@@ -977,11 +995,19 @@ EXPORT_SYMBOL(mhi_async_power_up);
 void mhi_control_error(struct mhi_controller *mhi_cntrl)
 {
 	enum MHI_PM_STATE cur_state, transition_state;
+	struct mhi_sfr_info *sfr_info = mhi_cntrl->mhi_sfr;
 
 	MHI_LOG("Enter with pm_state:%s MHI_STATE:%s\n",
 		to_mhi_pm_state_str(mhi_cntrl->pm_state),
 		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
 
+	/* copy subsystem failure reason string if supported */
+	if (sfr_info && sfr_info->buf_addr) {
+		memcpy(sfr_info->str, sfr_info->buf_addr, sfr_info->len);
+		pr_err("mhi: %s sfr: %s\n", mhi_cntrl->name,
+		       sfr_info->buf_addr);
+	}
+
 	/* link is not down if device is in RDDM */
 	transition_state = (mhi_cntrl->ee == MHI_EE_RDDM) ?
 		MHI_PM_DEVICE_ERR_DETECT : MHI_PM_LD_ERR_FATAL_DETECT;
@@ -998,6 +1024,8 @@ void mhi_control_error(struct mhi_controller *mhi_cntrl)
 		goto exit_control_error;
 	}
 
+	mhi_cntrl->dev_state = MHI_STATE_SYS_ERR;
+
 	/* notify waiters to bail out early since MHI has entered ERROR state */
 	wake_up_all(&mhi_cntrl->state_event);
 
@@ -1439,6 +1467,8 @@ int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
 		mhi_trigger_resume(mhi_cntrl);
 	read_unlock_bh(&mhi_cntrl->pm_lock);
 
+	mhi_force_reg_write(mhi_cntrl);
+
 	ret = wait_event_timeout(mhi_cntrl->state_event,
 				 mhi_cntrl->pm_state == MHI_PM_M0 ||
 				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
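
Note: the writer life cycle across this file: the synchronous mhi_write_reg()
is the default from registration, the mission-mode transition resets the ring
and installs the offloaded writer, and the disable path restores the
synchronous writer before teardown so nothing is queued behind a dying
workqueue. A small sketch of the selection logic; the enum and helper names
are illustrative, not driver API:

enum mhi_writer { MHI_WRITER_SYNC, MHI_WRITER_OFFLOAD };

static enum mhi_writer pick_writer(int mission_mode, int offload_wq_ready)
{
	/* mission mode with a ready offload queue: writes are deferred */
	if (mission_mode && offload_wq_ready)
		return MHI_WRITER_OFFLOAD;

	/* boot, firmware load, SYS_ERR and power-down: write synchronously */
	return MHI_WRITER_SYNC;
}
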
diff --git a/drivers/bus/mhi/devices/mhi_netdev.c b/drivers/bus/mhi/devices/mhi_netdev.c
index 1749f48..275e4e2b 100644
--- a/drivers/bus/mhi/devices/mhi_netdev.c
+++ b/drivers/bus/mhi/devices/mhi_netdev.c
@@ -475,7 +475,7 @@ static int mhi_netdev_poll(struct napi_struct *napi, int budget)
 	struct mhi_net_chain *chain = mhi_netdev->chain;
 	int rx_work = 0;
 
-	MSG_VERB("Entered\n");
+	MSG_VERB("Enter:%d\n", budget);
 
 	rx_work = mhi_poll(mhi_dev, budget);
 
@@ -501,7 +501,7 @@ static int mhi_netdev_poll(struct napi_struct *napi, int budget)
 	if (rx_work < budget)
 		napi_complete(napi);
 
-	MSG_VERB("polled %d pkts\n", rx_work);
+	MSG_VERB("polled %d\n", rx_work);
 
 	return rx_work;
 }
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 4a56171..4385e26 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -101,6 +101,10 @@
 #define SDSP_DOMAIN_ID (2)
 #define CDSP_DOMAIN_ID (3)
 
+/*
+ * The ctxid of every message is OR-ed with fl->pd (0/1/2) before it is
+ * sent to the DSP, so mask the 2 LSBs to retrieve the actual context.
+ */
+#define CONTEXT_PD_CHECK (3)
+
 #define RH_CID ADSP_DOMAIN_ID
 
 #define PERF_KEYS \
@@ -111,6 +115,9 @@
 #define FASTRPC_STATIC_HANDLE_MAX (20)
 #define FASTRPC_LATENCY_CTRL_ENB  (1)
 
+/* Maximum PM timeout that can be voted through fastrpc */
+#define MAX_PM_TIMEOUT_MS 50
+
 /* timeout in us for busy polling after early response from remote processor */
 #define FASTRPC_POLL_TIME (4000)
 
@@ -285,7 +292,6 @@ struct smq_invoke_ctx {
 	uint32_t earlyWakeTime;
 	/* work done status flag */
 	bool isWorkDone;
-	bool pm_awake_voted;
 };
 
 struct fastrpc_ctx_lst {
@@ -354,6 +360,8 @@ struct fastrpc_channel_ctx {
 	/* cpu capabilities shared to DSP */
 	uint64_t cpuinfo_todsp;
 	bool cpuinfo_status;
+	struct smq_invoke_ctx *ctxtable[FASTRPC_CTX_MAX];
+	spinlock_t ctxlock;
 };
 
 struct fastrpc_apps {
@@ -370,8 +378,6 @@ struct fastrpc_apps {
 	struct device *dev;
 	unsigned int latency;
 	int rpmsg_register;
-	spinlock_t ctxlock;
-	struct smq_invoke_ctx *ctxtable[FASTRPC_CTX_MAX];
 	bool legacy_remote_heap;
 	/* Unique job id for each message */
 	uint64_t jobid[NUM_CHANNELS];
@@ -460,6 +466,8 @@ struct fastrpc_file {
 	char *debug_buf;
 	/* Flag to enable PM wake/relax voting for every remote invoke */
 	int wake_enable;
+	struct wakeup_source *wake_source;
+	uint32_t ws_timeout;
 };
 
 static struct fastrpc_apps gfa;
@@ -532,8 +540,7 @@ static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
 static int hlosvm[1] = {VMID_HLOS};
 static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
 
-static void fastrpc_pm_awake(int fl_wake_enable, bool *pm_awake_voted);
-static void fastrpc_pm_relax(bool *pm_awake_voted);
+static void fastrpc_pm_awake(struct fastrpc_file *fl);
 
 static inline int64_t getnstimediff(struct timespec *start)
 {
@@ -924,7 +931,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
 	int err = 0, vmid, sgl_index = 0;
 	struct scatterlist *sgl = NULL;
 
-	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
+	VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
 	if (err)
 		goto bail;
 	chan = &apps->channel[cid];
@@ -1303,11 +1310,12 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
 			 struct smq_invoke_ctx **po)
 {
 	struct fastrpc_apps *me = &gfa;
-	int err = 0, bufs, ii, size = 0;
+	int err = 0, bufs, ii, size = 0, cid = -1;
 	struct smq_invoke_ctx *ctx = NULL;
 	struct fastrpc_ctx_lst *clst = &fl->clst;
 	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;
-	int cid;
+	struct fastrpc_channel_ctx *chan = 0;
+	unsigned long irq_flags = 0;
 
 	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
 	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
@@ -1364,23 +1372,24 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
 	ctx->magic = FASTRPC_CTX_MAGIC;
 	ctx->rspFlags = NORMAL_RESPONSE;
 	ctx->isWorkDone = false;
-	ctx->pm_awake_voted = false;
 
 	spin_lock(&fl->hlock);
 	hlist_add_head(&ctx->hn, &clst->pending);
+	cid = (fl->cid >= ADSP_DOMAIN_ID && fl->cid < NUM_CHANNELS)
+			? fl->cid : 0;
+	chan = &me->channel[cid];
 	spin_unlock(&fl->hlock);
 
-	spin_lock(&me->ctxlock);
-	cid = (fl->cid >= 0 && fl->cid < NUM_CHANNELS) ? fl->cid : 0;
+	spin_lock_irqsave(&chan->ctxlock, irq_flags);
 	me->jobid[cid]++;
 	for (ii = 0; ii < FASTRPC_CTX_MAX; ii++) {
-		if (!me->ctxtable[ii]) {
-			me->ctxtable[ii] = ctx;
+		if (!chan->ctxtable[ii]) {
+			chan->ctxtable[ii] = ctx;
 			ctx->ctxid = (me->jobid[cid] << 12) | (ii << 4);
 			break;
 		}
 	}
-	spin_unlock(&me->ctxlock);
+	spin_unlock_irqrestore(&chan->ctxlock, irq_flags);
 	VERIFY(err, ii < FASTRPC_CTX_MAX);
 	if (err) {
 		pr_err("adsprpc: out of context memory\n");
@@ -1411,15 +1420,18 @@ static void context_free(struct smq_invoke_ctx *ctx)
 	struct fastrpc_apps *me = &gfa;
 	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
 		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
+	int cid = ctx->fl->cid;
+	struct fastrpc_channel_ctx *chan = &me->channel[cid];
+	unsigned long irq_flags = 0;
 
-	spin_lock(&me->ctxlock);
+	spin_lock_irqsave(&chan->ctxlock, irq_flags);
 	for (i = 0; i < FASTRPC_CTX_MAX; i++) {
-		if (me->ctxtable[i] == ctx) {
-			me->ctxtable[i] = NULL;
+		if (chan->ctxtable[i] == ctx) {
+			chan->ctxtable[i] = NULL;
 			break;
 		}
 	}
-	spin_unlock(&me->ctxlock);
+	spin_unlock_irqrestore(&chan->ctxlock, irq_flags);
 
 	spin_lock(&ctx->fl->hlock);
 	hlist_del_init(&ctx->hn);
@@ -1444,7 +1456,7 @@ static void context_free(struct smq_invoke_ctx *ctx)
 static void context_notify_user(struct smq_invoke_ctx *ctx,
 		int retval, uint32_t rspFlags, uint32_t earlyWakeTime)
 {
-	fastrpc_pm_awake(ctx->fl->wake_enable, &ctx->pm_awake_voted);
+	fastrpc_pm_awake(ctx->fl);
 	ctx->retval = retval;
 	switch (rspFlags) {
 	case NORMAL_RESPONSE:
@@ -1647,10 +1659,12 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 
 		mutex_lock(&ctx->fl->map_mutex);
 		if (ctx->fds && (ctx->fds[i] != -1))
-			fastrpc_mmap_create(ctx->fl, ctx->fds[i],
+			err = fastrpc_mmap_create(ctx->fl, ctx->fds[i],
 					ctx->attrs[i], buf, len,
 					mflags, &ctx->maps[i]);
 		mutex_unlock(&ctx->fl->map_mutex);
+		if (err)
+			goto bail;
 		ipage += 1;
 	}
 	PERF_END);
@@ -1661,9 +1675,10 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 
 		if (ctx->attrs && (ctx->attrs[i] & FASTRPC_ATTR_NOMAP))
 			dmaflags = FASTRPC_DMAHANDLE_NOMAP;
-		VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
-				FASTRPC_ATTR_NOVA, 0, 0, dmaflags,
-				&ctx->maps[i]));
+		if (ctx->fds && (ctx->fds[i] != -1))
+			err = fastrpc_mmap_create(ctx->fl, ctx->fds[i],
+					FASTRPC_ATTR_NOVA, 0, 0, dmaflags,
+					&ctx->maps[i]);
 		if (err) {
 			mutex_unlock(&ctx->fl->map_mutex);
 			goto bail;
@@ -1838,6 +1853,8 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 			continue;
 		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
 			continue;
+		if (map && (map->attr & FASTRPC_ATTR_FORCE_NOFLUSH))
+			continue;
 
 		if (rpra && rpra[i].buf.len &&
 			ctx->overps[oix]->mstart) {
@@ -1943,6 +1960,8 @@ static void inv_args_pre(struct smq_invoke_ctx *ctx)
 			continue;
 		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
 			continue;
+		if (map && (map->attr & FASTRPC_ATTR_FORCE_NOINVALIDATE))
+			continue;
 
 		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
 				buf_page_start(rpra[i].buf.pv))
@@ -1997,6 +2016,8 @@ static void inv_args(struct smq_invoke_ctx *ctx)
 			continue;
 		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
 			continue;
+		if (map && (map->attr & FASTRPC_ATTR_FORCE_NOINVALIDATE))
+			continue;
 
 		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
 				buf_page_start(rpra[i].buf.pv)) {
@@ -2069,7 +2090,6 @@ static void fastrpc_init(struct fastrpc_apps *me)
 	INIT_HLIST_HEAD(&me->drivers);
 	INIT_HLIST_HEAD(&me->maps);
 	spin_lock_init(&me->hlock);
-	spin_lock_init(&me->ctxlock);
 	me->channel = &gcinfo[0];
 	for (i = 0; i < NUM_CHANNELS; i++) {
 		init_completion(&me->channel[i].work);
@@ -2079,29 +2099,21 @@ static void fastrpc_init(struct fastrpc_apps *me)
 		me->channel[i].secure = SECURE_CHANNEL;
 		mutex_init(&me->channel[i].smd_mutex);
 		mutex_init(&me->channel[i].rpmsg_mutex);
+		spin_lock_init(&me->channel[i].ctxlock);
 	}
 	/* Set CDSP channel to non secure */
 	me->channel[CDSP_DOMAIN_ID].secure = NON_SECURE_CHANNEL;
 }
 
-static inline void fastrpc_pm_awake(int fl_wake_enable, bool *pm_awake_voted)
+static inline void fastrpc_pm_awake(struct fastrpc_file *fl)
 {
-	struct fastrpc_apps *me = &gfa;
-
-	if (!fl_wake_enable || *pm_awake_voted)
+	if (!fl->wake_enable || !fl->wake_source)
 		return;
-	__pm_stay_awake(me->wake_source);
-	*pm_awake_voted = true;
-}
-
-static inline void fastrpc_pm_relax(bool *pm_awake_voted)
-{
-	struct fastrpc_apps *me = &gfa;
-
-	if (!(*pm_awake_voted))
-		return;
-	__pm_relax(me->wake_source);
-	*pm_awake_voted = false;
+	/*
+	 * Vote with PM to abort any suspend in progress and
+	 * keep the system awake for the specified timeout
+	 */
+	pm_wakeup_ws_event(fl->wake_source, fl->ws_timeout, true);
 }
 
 static inline int fastrpc_wait_for_response(struct smq_invoke_ctx *ctx,
@@ -2223,9 +2235,7 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 	int err = 0, interrupted = 0, cid = fl->cid;
 	struct timespec invoket = {0};
 	int64_t *perf_counter = NULL;
-	bool pm_awake_voted = false;
 
-	fastrpc_pm_awake(fl->wake_enable, &pm_awake_voted);
 	if (fl->profile) {
 		perf_counter = getperfcounter(fl, PERF_COUNT);
 		getnstimeofday(&invoket);
@@ -2243,7 +2253,8 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 		}
 	}
 
-	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS && fl->sctx != NULL);
+	VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS &&
+		fl->sctx != NULL);
 	if (err) {
 		pr_err("adsprpc: ERROR: %s: kernel session not initialized yet for %s\n",
 			__func__, current->comm);
@@ -2294,9 +2305,7 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 	if (err)
 		goto bail;
  wait:
-	fastrpc_pm_relax(&pm_awake_voted);
 	fastrpc_wait_for_completion(ctx, &interrupted, kernel);
-	pm_awake_voted = ctx->pm_awake_voted;
 	VERIFY(err, 0 == (err = interrupted));
 	if (err)
 		goto bail;
@@ -2335,7 +2344,6 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 	if (fl->profile && !interrupted)
 		fastrpc_update_invoke_count(invoke->handle, perf_counter,
 						&invoket);
-	fastrpc_pm_relax(&pm_awake_voted);
 	return err;
 }
 
@@ -2658,7 +2666,7 @@ static int fastrpc_send_cpuinfo_to_dsp(struct fastrpc_file *fl)
 	struct fastrpc_ioctl_invoke_crc ioctl;
 	remote_arg_t ra[2];
 
-	VERIFY(err, fl && fl->cid >= 0 && fl->cid < NUM_CHANNELS);
+	VERIFY(err, fl && fl->cid >= ADSP_DOMAIN_ID && fl->cid < NUM_CHANNELS);
 	if (err)
 		goto bail;
 
@@ -2808,7 +2816,7 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
 	remote_arg_t ra[1];
 	int tgid = 0;
 
-	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
+	VERIFY(err, fl->cid >= ADSP_DOMAIN_ID && fl->cid < NUM_CHANNELS);
 	if (err)
 		goto bail;
 	VERIFY(err, fl->sctx != NULL);
@@ -3348,7 +3356,7 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
 		return -EINVAL;
 
 	cid = get_cid_from_rpdev(rpdev);
-	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
+	VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
 	if (err)
 		goto bail;
 	mutex_lock(&gcinfo[cid].rpmsg_mutex);
@@ -3387,7 +3395,7 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
 		return;
 
 	cid = get_cid_from_rpdev(rpdev);
-	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
+	VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
 	if (err)
 		goto bail;
 	mutex_lock(&gcinfo[cid].rpmsg_mutex);
@@ -3409,8 +3417,17 @@ static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
 	struct smq_invoke_rspv2 *rspv2 = NULL;
 	struct fastrpc_apps *me = &gfa;
 	uint32_t index, rspFlags = 0, earlyWakeTime = 0;
-	int err = 0, cid = -1;
+	int err = 0, cid;
+	struct fastrpc_channel_ctx *chan = 0;
+	unsigned long irq_flags = 0;
+	bool is_ctxtable_locked = false;
 
+	cid = get_cid_from_rpdev(rpdev);
+	VERIFY(err, (cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS));
+	if (err)
+		goto bail;
+
+	chan = &me->channel[cid];
 	VERIFY(err, (rsp && len >= sizeof(*rsp)));
 	if (err)
 		goto bail;
@@ -3422,11 +3439,10 @@ static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
 		earlyWakeTime = rspv2->earlyWakeTime;
 		rspFlags = rspv2->flags;
 	}
-	cid = get_cid_from_rpdev(rpdev);
 	trace_fastrpc_rpmsg_response(cid, rsp->ctx,
 		rsp->retval, rspFlags, earlyWakeTime);
 #if IS_ENABLED(CONFIG_ADSPRPC_DEBUG)
-	if (cid >= 0 && cid < NUM_CHANNELS) {
+	if (cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS) {
 		LOG_FASTRPC_GLINK_MSG(gcinfo[cid].ipc_log_ctx,
 		"recvd pkt %pK (sz %d): ctx 0x%llx, retVal %d, flags %u, earlyWake %u",
 		data, len, rsp->ctx, rsp->retval, rspFlags, earlyWakeTime);
@@ -3438,12 +3454,18 @@ static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
 	if (err)
 		goto bail;
 
-	VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
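+	/*
+	 * For a completion signal, hold the channel ctxlock across the
+	 * ctxtable lookup and notification so that context_free() cannot
+	 * free the context underneath us.
+	 */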
+	if (rspFlags == COMPLETE_SIGNAL) {
+		spin_lock_irqsave(&chan->ctxlock, irq_flags);
+		is_ctxtable_locked = true;
+	}
+	VERIFY(err, !IS_ERR_OR_NULL(chan->ctxtable[index]));
 	if (err)
 		goto bail;
 
-	VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp->ctx & ~3)) &&
-		me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
+	VERIFY(err, ((chan->ctxtable[index]->ctxid ==
+		(rsp->ctx & ~CONTEXT_PD_CHECK)) &&
+			chan->ctxtable[index]->magic ==
+				FASTRPC_CTX_MAGIC));
 	if (err)
 		goto bail;
 
@@ -3452,12 +3474,15 @@ static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
 		if (err)
 			goto bail;
 	}
-	context_notify_user(me->ctxtable[index], rsp->retval,
+	context_notify_user(chan->ctxtable[index], rsp->retval,
 				 rspFlags, earlyWakeTime);
 bail:
+	if (rspFlags == COMPLETE_SIGNAL && is_ctxtable_locked)
+		spin_unlock_irqrestore(&chan->ctxlock, irq_flags);
 	if (err)
 		pr_err("adsprpc: ERROR: %s: invalid response (data %pK, len %d) from remote subsystem (err %d)\n",
 				__func__, data, len, err);
+
 	return err;
 }
 
@@ -3497,6 +3522,8 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
 	spin_lock(&fl->apps->hlock);
 	hlist_del_init(&fl->hn);
 	spin_unlock(&fl->apps->hlock);
+	if (fl->wake_source)
+		wakeup_source_unregister(fl->wake_source);
 	kfree(fl->debug_buf);
 
 	if (!fl->sctx) {
@@ -3884,6 +3911,11 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
 	debugfs_file = debugfs_create_file(fl->debug_buf, 0644, debugfs_root,
 						fl, &debugfs_fops);
 
+	fl->wake_source = wakeup_source_register(fl->debug_buf);
+	if (IS_ERR_OR_NULL(fl->wake_source)) {
+		pr_err("adsprpc: Error: %s: %s: wakeup_source_register failed with err %ld\n",
+			current->comm, __func__, PTR_ERR(fl->wake_source));
+	}
 	context_list_ctor(&fl->clst);
 	spin_lock_init(&fl->hlock);
 	INIT_HLIST_HEAD(&fl->maps);
@@ -4000,8 +4032,26 @@ static int fastrpc_internal_control(struct fastrpc_file *fl,
 		cp->kalloc.kalloc_support = 1;
 		break;
 	case FASTRPC_CONTROL_WAKELOCK:
+		if (fl->dev_minor != MINOR_NUM_SECURE_DEV) {
+			pr_err("adsprpc: %s: %s: PM voting not allowed for non-secure device node %d\n",
+				current->comm, __func__, fl->dev_minor);
+			err = -EPERM;
+			goto bail;
+		}
 		fl->wake_enable = cp->wp.enable;
 		break;
+	case FASTRPC_CONTROL_PM:
+		if (!fl->wake_enable) {
+			/* Kernel PM voting not requested by this application */
+			err = -EACCES;
+			goto bail;
+		}
+		if (cp->pm.timeout > MAX_PM_TIMEOUT_MS)
+			fl->ws_timeout = MAX_PM_TIMEOUT_MS;
+		else
+			fl->ws_timeout = cp->pm.timeout;
+		fastrpc_pm_awake(fl);
+		break;
 	default:
 		err = -EBADRQC;
 		break;
@@ -4086,8 +4136,9 @@ static int fastrpc_getperf(struct fastrpc_ioctl_perf *ioctl_perf,
 		mutex_unlock(&fl->perf_mutex);
 
 		if (fperf) {
-			K_COPY_TO_USER(err, 0, (void *)ioctl_perf->data,
-				fperf, sizeof(*fperf));
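+			/*
+			 * Copy everything except the trailing hlist_node to
+			 * avoid leaking kernel pointers to userspace.
+			 */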
+			K_COPY_TO_USER(err, 0,
+				(void *)ioctl_perf->data, fperf,
+				sizeof(*fperf) - sizeof(struct hlist_node));
 		}
 	}
 	K_COPY_TO_USER(err, 0, param, ioctl_perf, sizeof(*ioctl_perf));
diff --git a/drivers/char/adsprpc_compat.c b/drivers/char/adsprpc_compat.c
index 8c01385..6ce6630 100644
--- a/drivers/char/adsprpc_compat.c
+++ b/drivers/char/adsprpc_compat.c
@@ -130,11 +130,21 @@ struct compat_fastrpc_ctrl_kalloc {
 	compat_uint_t kalloc_support; /* Remote memory allocation from kernel */
 };
 
+struct compat_fastrpc_ctrl_wakelock {
+	compat_uint_t enable;	/* wakelock control enable */
+};
+
+struct compat_fastrpc_ctrl_pm {
+	compat_uint_t timeout;	/* timeout (in ms) for PM to keep system awake */
+};
+
 struct compat_fastrpc_ioctl_control {
 	compat_uint_t req;
 	union {
 		struct compat_fastrpc_ctrl_latency lp;
 		struct compat_fastrpc_ctrl_kalloc kalloc;
+		struct compat_fastrpc_ctrl_wakelock wp;
+		struct compat_fastrpc_ctrl_pm pm;
 	};
 };
 
@@ -336,6 +346,12 @@ static int compat_get_fastrpc_ioctl_control(
 		err |= put_user(p, &ctrl->lp.enable);
 		err |= get_user(p, &ctrl32->lp.latency);
 		err |= put_user(p, &ctrl->lp.latency);
+	} else if (p == FASTRPC_CONTROL_WAKELOCK) {
+		err |= get_user(p, &ctrl32->wp.enable);
+		err |= put_user(p, &ctrl->wp.enable);
+	} else if (p == FASTRPC_CONTROL_PM) {
+		err |= get_user(p, &ctrl32->pm.timeout);
+		err |= put_user(p, &ctrl->pm.timeout);
 	}
 
 	return err;
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index c1e5af9..bcc63c8 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -46,6 +46,16 @@
 /* Fastrpc attribute for no mapping of fd  */
 #define FASTRPC_ATTR_NOMAP (16)
 
+/*
+ * Fastrpc attribute to skip flush by fastrpc
+ */
+#define FASTRPC_ATTR_FORCE_NOFLUSH  (32)
+
+/*
+ * Fastrpc attribute to skip invalidate by fastrpc
+ */
+#define FASTRPC_ATTR_FORCE_NOINVALIDATE (64)
+
 /* Driver should operate in parallel with the co-processor */
 #define FASTRPC_MODE_PARALLEL    0
 
@@ -239,6 +249,7 @@ enum fastrpc_control_type {
 	FASTRPC_CONTROL_SMMU		=	2,
 	FASTRPC_CONTROL_KALLOC		=	3,
 	FASTRPC_CONTROL_WAKELOCK	=	4,
+	FASTRPC_CONTROL_PM		=	5,
 };
 
 struct fastrpc_ctrl_latency {
@@ -254,12 +265,17 @@ struct fastrpc_ctrl_wakelock {
 	uint32_t enable;	/* wakelock control enable */
 };
 
+struct fastrpc_ctrl_pm {
+	uint32_t timeout;	/* timeout (in ms) for PM to keep system awake */
+};
+
 struct fastrpc_ioctl_control {
 	uint32_t req;
 	union {
 		struct fastrpc_ctrl_latency lp;
 		struct fastrpc_ctrl_kalloc kalloc;
 		struct fastrpc_ctrl_wakelock wp;
+		struct fastrpc_ctrl_pm pm;
 	};
 };
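For reference, the intended userspace sequence for the new PM control is to
enable wakelock voting on the secure device node first (FASTRPC_CONTROL_WAKELOCK)
and then vote a bounded timeout (FASTRPC_CONTROL_PM). A minimal sketch; the
FASTRPC_IOCTL_CONTROL request name and the device node path are assumptions and
do not appear in this patch:

	struct fastrpc_ioctl_control ctrl;
	int fd = open("/dev/adsprpc-smd-secure", O_RDWR); /* assumed path */

	ctrl.req = FASTRPC_CONTROL_WAKELOCK;
	ctrl.wp.enable = 1;	/* else FASTRPC_CONTROL_PM fails with -EACCES */
	ioctl(fd, FASTRPC_IOCTL_CONTROL, &ctrl);	/* assumed request name */

	ctrl.req = FASTRPC_CONTROL_PM;
	ctrl.pm.timeout = 40;	/* in ms; clamped to MAX_PM_TIMEOUT_MS (50) */
	ioctl(fd, FASTRPC_IOCTL_CONTROL, &ctrl);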
 
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index dba4802..968772a 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -3503,6 +3503,7 @@ static int __diag_multisim_mask_init(struct diag_mask_info *mask_info,
 		int mask_len, int subid_index)
 {
 	struct diag_multisim_masks *temp = NULL;
+	struct diag_multisim_masks *ms_ptr = NULL;
 
 	if (!mask_info || mask_len <= 0 || subid_index < 0)
 		return -EINVAL;
@@ -3522,9 +3523,10 @@ static int __diag_multisim_mask_init(struct diag_mask_info *mask_info,
 		temp->next = NULL;
 
 		if (mask_info->ms_ptr) {
-			while (mask_info->ms_ptr->next)
-				mask_info->ms_ptr = mask_info->ms_ptr->next;
-			mask_info->ms_ptr->next = temp;
+			ms_ptr = mask_info->ms_ptr;
+			while (ms_ptr->next)
+				ms_ptr = ms_ptr->next;
+			ms_ptr->next = temp;
 		} else {
 			mask_info->ms_ptr = temp;
 		}
diff --git a/drivers/char/diag/diag_masks.h b/drivers/char/diag/diag_masks.h
index 1036ab7..5259451 100644
--- a/drivers/char/diag/diag_masks.h
+++ b/drivers/char/diag/diag_masks.h
@@ -163,7 +163,7 @@ extern struct diag_mask_info msg_bt_mask;
 extern struct diag_mask_info log_mask;
 extern struct diag_mask_info event_mask;
 
-#define MAX_SIM_NUM 7
+#define MAX_SIM_NUM 2
 #define INVALID_INDEX -1
 #define LEGACY_MASK_CMD 0
 #define SUBID_CMD 1
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index a0e664c..110e916 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -222,6 +222,9 @@
 #define DEFAULT_LOW_WM_VAL	15
 #define DEFAULT_HIGH_WM_VAL	85
 
+#define HDLC_CTXT		1
+#define NON_HDLC_CTXT	2
+
 #define TYPE_DATA		0
 #define TYPE_CNTL		1
 #define TYPE_DCI		2
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 31769e7..2237736 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -253,18 +253,20 @@ void check_drain_timer(void)
 	}
 }
 
-void diag_add_client(int i, struct file *file)
+static int diag_add_client(int i, struct file *file)
 {
-	struct diagchar_priv *diagpriv_data;
+	struct diagchar_priv *diagpriv_data = NULL;
 
-	driver->client_map[i].pid = current->tgid;
 	diagpriv_data = kmalloc(sizeof(struct diagchar_priv),
 							GFP_KERNEL);
-	if (diagpriv_data)
-		diagpriv_data->pid = current->tgid;
+	if (!diagpriv_data)
+		return -ENOMEM;
+	driver->client_map[i].pid = current->tgid;
+	diagpriv_data->pid = current->tgid;
 	file->private_data = diagpriv_data;
 	strlcpy(driver->client_map[i].name, current->comm, 20);
 	driver->client_map[i].name[19] = '\0';
+	return 0;
 }
 
 static void diag_mempool_init(void)
@@ -300,18 +302,31 @@ static void diag_mempool_exit(void)
 
 static int diagchar_open(struct inode *inode, struct file *file)
 {
-	int i = 0;
+	int i = 0, ret = 0;
 	void *temp;
 
 	if (driver) {
 		mutex_lock(&driver->diagchar_mutex);
 
+		for (i = 0; i < driver->num_clients; i++) {
+			if (driver->client_map[i].pid == current->tgid) {
+				pr_err_ratelimited("diag: Client already present current->tgid: %d\n",
+					current->tgid);
+				mutex_unlock(&driver->diagchar_mutex);
+				return -EEXIST;
+			}
+		}
 		for (i = 0; i < driver->num_clients; i++)
 			if (driver->client_map[i].pid == 0)
 				break;
 
 		if (i < driver->num_clients) {
-			diag_add_client(i, file);
+			ret = diag_add_client(i, file);
+			if (ret < 0) {
+				mutex_unlock(&driver->diagchar_mutex);
+				pr_err_ratelimited("diag: Insufficient memory for adding new client\n");
+				return ret;
+			}
 		} else {
 			if (i < THRESHOLD_CLIENT_LIMIT) {
 				driver->num_clients++;
@@ -329,7 +344,9 @@ static int diagchar_open(struct inode *inode, struct file *file)
 					goto fail;
 				else
 					driver->data_ready = temp;
-				diag_add_client(i, file);
+				ret = diag_add_client(i, file);
+				if (ret < 0)
+					goto fail;
 			} else {
 				mutex_unlock(&driver->diagchar_mutex);
 				pr_err_ratelimited("diag: Max client limit for DIAG reached\n");
@@ -366,7 +383,7 @@ static int diagchar_open(struct inode *inode, struct file *file)
 fail:
 	driver->num_clients--;
 	mutex_unlock(&driver->diagchar_mutex);
-	pr_err_ratelimited("diag: Insufficient memory for new client");
+	pr_err_ratelimited("diag: Insufficient memory for new client\n");
 	return -ENOMEM;
 }
 
@@ -520,7 +537,19 @@ static int diag_remove_client_entry(struct file *file)
 	}
 
 	diagpriv_data = file->private_data;
-
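+	/* Look up the client by the pid recorded at open time */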
+	for (i = 0; i < driver->num_clients; i++)
+		if (diagpriv_data && diagpriv_data->pid ==
+			driver->client_map[i].pid)
+			break;
+	if (i == driver->num_clients) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			"pid %d, not present in client map\n",
+			diagpriv_data ? diagpriv_data->pid : -1);
+		mutex_unlock(&driver->diag_file_mutex);
+		return -EINVAL;
+	}
+	DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: %s process exit with pid = %d\n",
+		driver->client_map[i].name, diagpriv_data->pid);
 	/*
 	 * clean up any DCI registrations, if this is a DCI client
 	 * This will specially help in case of ungraceful exit of any DCI client
@@ -528,32 +557,27 @@ static int diag_remove_client_entry(struct file *file)
 	 */
 	mutex_lock(&driver->dci_mutex);
 	do {
-		dci_entry = dci_lookup_client_entry_pid(current->tgid);
+		dci_entry = dci_lookup_client_entry_pid(diagpriv_data->pid);
 		if (dci_entry)
 			diag_dci_deinit_client(dci_entry);
 	} while (dci_entry);
 	mutex_unlock(&driver->dci_mutex);
 
-	diag_close_logging_process(current->tgid);
+	diag_close_logging_process(diagpriv_data->pid);
 
 	/* Delete the pkt response table entry for the exiting process */
-	diag_cmd_remove_reg_by_pid(current->tgid);
+	diag_cmd_remove_reg_by_pid(diagpriv_data->pid);
 
 	mutex_lock(&driver->diagchar_mutex);
 	driver->ref_count--;
 	if (driver->ref_count == 0)
 		diag_mempool_exit();
 
-	for (i = 0; i < driver->num_clients; i++) {
-		if (diagpriv_data && diagpriv_data->pid ==
-						driver->client_map[i].pid) {
-			driver->client_map[i].pid = 0;
-			kfree(diagpriv_data);
-			diagpriv_data = NULL;
-			file->private_data = 0;
-			break;
-		}
-	}
+	driver->client_map[i].pid = 0;
+	kfree(diagpriv_data);
+	diagpriv_data = NULL;
+	file->private_data = 0;
+
 	mutex_unlock(&driver->diagchar_mutex);
 	mutex_unlock(&driver->diag_file_mutex);
 	return 0;
@@ -562,8 +586,6 @@ static int diagchar_close(struct inode *inode, struct file *file)
 {
 	int ret;
 
-	DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: %s process exit with pid = %d\n",
-		current->comm, current->tgid);
 	ret = diag_remove_client_entry(file);
 
 	return ret;
@@ -3623,9 +3645,12 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
 	int proc = 0;
 
 	mutex_lock(&driver->diagchar_mutex);
-	for (i = 0; i < driver->num_clients; i++)
-		if (driver->client_map[i].pid == current->tgid)
+	for (i = 0; i < driver->num_clients; i++) {
+		if (driver->client_map[i].pid == current->tgid) {
 			index = i;
+			break;
+		}
+	}
 	mutex_unlock(&driver->diagchar_mutex);
 
 	if (index == -1) {
@@ -4390,10 +4415,12 @@ static int __init diagchar_init(void)
 	driver->in_busy_dcipktdata = 0;
 	driver->rsp_buf_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, TYPE_CMD);
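+	/* Tag apps data buffers with their HDLC type for write-done matching */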
 	hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
+	hdlc_data.ctxt |= SET_HDLC_CTXT(HDLC_CTXT);
 	hdlc_data.len = 0;
 	hdlc_data.allocated = 0;
 	hdlc_data.flushed = 0;
 	non_hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
+	non_hdlc_data.ctxt |= SET_HDLC_CTXT(NON_HDLC_CTXT);
 	non_hdlc_data.len = 0;
 	non_hdlc_data.allocated = 0;
 	non_hdlc_data.flushed = 0;
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index fe216df..a337b63 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1915,9 +1915,8 @@ static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
 				  int ctxt)
 {
 	unsigned long flags;
-	int peripheral = -1;
-	int type = -1;
-	int num = -1;
+	int peripheral = -1, type = -1;
+	int num = -1, hdlc_ctxt = -1;
 	struct diag_apps_data_t *temp = NULL;
 
 	if (!buf || len < 0)
@@ -1937,16 +1936,19 @@ static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
 			diag_ws_on_copy(DIAG_WS_MUX);
 		} else if (peripheral == APPS_DATA) {
 			spin_lock_irqsave(&driver->diagmem_lock, flags);
-			if (hdlc_data.allocated)
+			hdlc_ctxt = GET_HDLC_CTXT(buf_ctxt);
+			if ((hdlc_ctxt == HDLC_CTXT) && hdlc_data.allocated)
 				temp = &hdlc_data;
-			else if (non_hdlc_data.allocated)
+			else if ((hdlc_ctxt == NON_HDLC_CTXT) &&
+				non_hdlc_data.allocated)
 				temp = &non_hdlc_data;
 			else
 				DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
 				"No apps data buffer is allocated to be freed\n");
 			if (temp) {
 				DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
-				"Freeing Apps data buffer after write done hdlc.allocated: %d, non_hdlc.allocated: %d\n",
+				"Freeing Apps data buffer after write done hdlc_ctxt: %d, hdlc.allocated: %d, non_hdlc.allocated: %d\n",
+				hdlc_ctxt,
 				hdlc_data.allocated, non_hdlc_data.allocated);
 				diagmem_free(driver, temp->buf, POOL_TYPE_HDLC);
 				temp->buf = NULL;
diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h
index 502be0f..fd79491 100644
--- a/drivers/char/diag/diagfwd.h
+++ b/drivers/char/diag/diagfwd.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef DIAGFWD_H
@@ -17,6 +17,9 @@
 #define GET_BUF_NUM(n)		((n & 0x0000FF))
 #define GET_PD_CTXT(u)		((u & 0xFF000000) >> 24)
 
+#define SET_HDLC_CTXT(u)	((u & 0xFF) << 24)
+#define GET_HDLC_CTXT(u)	((u & 0xFF000000) >> 24)
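+/*
+ * For APPS_DATA buffers the top byte of the buffer context carries the
+ * HDLC encoding type (HDLC_CTXT/NON_HDLC_CTXT); diagfwd_mux_write_done()
+ * uses it to pick the matching apps data buffer to free.
+ */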
+
 #define CHK_OVERFLOW(bufStart, start, end, length) \
 	((((bufStart) <= (start)) && ((end) - (start) >= (length))) ? 1 : 0)
 
diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c
index 4937d0b..63def16 100644
--- a/drivers/char/diag/diagfwd_socket.c
+++ b/drivers/char/diag/diagfwd_socket.c
@@ -596,6 +596,7 @@ static void socket_read_work_fn(struct work_struct *work)
 	struct diag_socket_info *info = container_of(work,
 						     struct diag_socket_info,
 						     read_work);
+	struct diagfwd_info *fwd_info;
 
 	if (!info) {
 		diag_ws_release();
@@ -616,8 +617,9 @@ static void socket_read_work_fn(struct work_struct *work)
 		diag_ws_release();
 		return;
 	}
-
-	if (!info->fwd_ctxt && info->port_type == PORT_TYPE_SERVER)
+	fwd_info = info->fwd_ctxt;
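+	/* Drop incoming server data if forwarding is missing or not opened */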
+	if (info->port_type == PORT_TYPE_SERVER &&
+		(!fwd_info || !atomic_read(&fwd_info->opened)))
 		diag_socket_drop_data(info);
 
 	if (!atomic_read(&info->opened) && info->port_type == PORT_TYPE_SERVER)
diff --git a/drivers/char/rdbg.c b/drivers/char/rdbg.c
index 826260e..cdc4575 100644
--- a/drivers/char/rdbg.c
+++ b/drivers/char/rdbg.c
@@ -137,11 +137,10 @@ struct rdbg_device {
 
 int registers[32] = {0};
 static struct rdbg_device g_rdbg_instance = {
-	{ {0} },
-	NULL,
-	0,
-	SMP2P_NUM_PROCS,
-	NULL
+	.class = NULL,
+	.dev_no = 0,
+	.num_devices = SMP2P_NUM_PROCS,
+	.rdbg_data = NULL,
 };
 
 struct processor_specific_info {
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index cd46b1d..1def686 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -441,3 +441,22 @@
 	  Support for the debug clock controller on Qualcomm Technologies, Inc
 	  BENGAL devices.
 	  Say Y if you want to support the clock measurement functionality.
+
+config SDM_GCC_LAGOON
+	tristate "LAGOON Global Clock Controller"
+	depends on COMMON_CLK_QCOM
+	select QCOM_GDSC
+	help
+	  Support for the global clock controller on Qualcomm Technologies, Inc
+	  LAGOON devices.
+	  Say Y if you want to use peripheral devices such as UART, SPI, I2C,
+	  USB, UFS, SD/eMMC, etc.
+
+config SDM_VIDEOCC_LAGOON
+	tristate "LAGOON Video Clock Controller"
+	select SDM_GCC_LAGOON
+	help
+	  Support for the video clock controller on Qualcomm Technologies, Inc.
+	  LAGOON devices.
+	  Say Y if you want to support video devices and functionality such as
+	  video encode/decode.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index dbf60b9..3dc03c5 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -51,7 +51,9 @@
 obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
 obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
 obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
+obj-$(CONFIG_SDM_GCC_LAGOON) += gcc-lagoon.o
 obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
+obj-$(CONFIG_SDM_VIDEOCC_LAGOON) += videocc-lagoon.o
 obj-$(CONFIG_SM_CAMCC_LITO) += camcc-lito.o
 obj-$(CONFIG_SM_DEBUGCC_BENGAL) += debugcc-bengal.o
 obj-$(CONFIG_SM_DEBUGCC_LITO) += debugcc-lito.o
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 4ac844f..201685c 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -94,6 +94,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
 	},
 	[CLK_ALPHA_PLL_TYPE_FABIA] =  {
 		[PLL_OFF_L_VAL] = 0x04,
+		[PLL_OFF_CAL_L_VAL] = 0x08,
 		[PLL_OFF_USER_CTL] = 0x0c,
 		[PLL_OFF_USER_CTL_U] = 0x10,
 		[PLL_OFF_CONFIG_CTL] = 0x14,
@@ -133,7 +134,16 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
 		[PLL_OFF_OPMODE] = 0x28,
 		[PLL_OFF_STATUS] = 0x38,
 	},
-
+	[CLK_ALPHA_PLL_TYPE_AGERA] =  {
+		[PLL_OFF_L_VAL] = 0x04,
+		[PLL_OFF_ALPHA_VAL] = 0x08,
+		[PLL_OFF_USER_CTL] = 0x0c,
+		[PLL_OFF_CONFIG_CTL] = 0x10,
+		[PLL_OFF_CONFIG_CTL_U] = 0x14,
+		[PLL_OFF_TEST_CTL] = 0x18,
+		[PLL_OFF_TEST_CTL_U] = 0x1c,
+		[PLL_OFF_STATUS] = 0x2c,
+	},
 };
 EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
 
@@ -167,6 +177,9 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
 #define ZONDA_STAY_IN_CFA	BIT(16)
 #define ZONDA_PLL_FREQ_LOCK_DET	BIT(29)
 
+/* FABIA PLL specific settings */
+#define FABIA_PLL_CAL_VAL	0x3F
+
 #define pll_alpha_width(p)					\
 		((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ?	\
 				 ALPHA_REG_BITWIDTH : ALPHA_REG_16BIT_WIDTH)
@@ -1315,14 +1328,24 @@ const struct clk_ops clk_alpha_pll_postdiv_ro_ops = {
 };
 EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ro_ops);
 
-void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+int clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
 			     const struct alpha_pll_config *config)
 {
 	u32 val, mask;
 
+	if (!config) {
+		pr_err("PLL configuration missing.\n");
+		return -EINVAL;
+	}
+
 	if (config->l)
 		regmap_write(regmap, PLL_L_VAL(pll), config->l);
 
+	if (config->cal_l)
+		regmap_write(regmap, PLL_CAL_L_VAL(pll), config->cal_l);
+	else
+		regmap_write(regmap, PLL_CAL_L_VAL(pll), FABIA_PLL_CAL_VAL);
+
 	if (config->alpha)
 		regmap_write(regmap, PLL_FRAC(pll), config->alpha);
 
@@ -1330,6 +1353,26 @@ void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
 		regmap_write(regmap, PLL_CONFIG_CTL(pll),
 						config->config_ctl_val);
 
+	if (config->config_ctl_hi_val)
+		regmap_write(regmap, PLL_CONFIG_CTL_U(pll),
+					config->config_ctl_hi_val);
+
+	if (config->user_ctl_val)
+		regmap_write(regmap, PLL_USER_CTL(pll),
+					config->user_ctl_val);
+
+	if (config->user_ctl_hi_val)
+		regmap_write(regmap, PLL_USER_CTL_U(pll),
+					config->user_ctl_hi_val);
+
+	if (config->test_ctl_val)
+		regmap_write(regmap, PLL_TEST_CTL(pll),
+					config->test_ctl_val);
+
+	if (config->test_ctl_hi_val)
+		regmap_write(regmap, PLL_TEST_CTL_U(pll),
+					config->test_ctl_hi_val);
+
 	if (config->post_div_mask) {
 		mask = config->post_div_mask;
 		val = config->post_div_val;
@@ -1340,6 +1383,7 @@ void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
 							PLL_UPDATE_BYPASS);
 
 	regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(clk_fabia_pll_configure);
 
@@ -1347,7 +1391,7 @@ static int alpha_pll_fabia_enable(struct clk_hw *hw)
 {
 	int ret;
 	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
-	u32 val, opmode_val;
+	u32 val, opmode_val, l_val, cal_val;
 	struct regmap *regmap = pll->clkr.regmap;
 
 	ret = regmap_read(regmap, PLL_MODE(pll), &val);
@@ -1370,6 +1414,24 @@ static int alpha_pll_fabia_enable(struct clk_hw *hw)
 	if ((opmode_val & PLL_OPMODE_RUN) && (val & PLL_OUTCTRL))
 		return 0;
 
+	ret = regmap_read(regmap, PLL_L_VAL(pll), &l_val);
+	if (ret)
+		return ret;
+
+	ret = regmap_read(regmap, PLL_CAL_L_VAL(pll), &cal_val);
+	if (ret)
+		return ret;
+
+	/* PLL has lost its L or CAL value and needs reconfiguration */
+	if (!l_val || !cal_val) {
+		ret = clk_fabia_pll_configure(pll, regmap, pll->config);
+		if (ret) {
+			pr_err("Failed to configure %s\n", clk_hw_get_name(hw));
+			return ret;
+		}
+		pr_warn("PLL configuration lost, reconfiguration of PLL done.\n");
+	}
+
 	ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
 	if (ret)
 		return ret;
@@ -1447,15 +1509,27 @@ static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate,
 						unsigned long prate)
 {
 	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
-	u32 val, l, alpha_width = pll_alpha_width(pll);
+	u32 l, cal_val, alpha_width = pll_alpha_width(pll);
 	u64 a;
 	unsigned long rrate;
 	int ret = 0;
 
-	ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+	ret = regmap_read(pll->clkr.regmap, PLL_CAL_L_VAL(pll), &cal_val);
 	if (ret)
 		return ret;
 
+	/* PLL has lost its CAL value and needs reconfiguration */
+	if (!cal_val) {
+		ret = clk_fabia_pll_configure(pll, pll->clkr.regmap,
+				pll->config);
+		if (ret) {
+			pr_err("Failed to configure %s\n", clk_hw_get_name(hw));
+			return ret;
+		}
+		pr_warn("%s: PLL configuration lost, reconfiguration of PLL done.\n",
+				clk_hw_get_name(hw));
+	}
+
 	rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
 
 	/*
@@ -1473,13 +1547,121 @@ static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate,
 	return __clk_alpha_pll_update_latch(pll);
 }
 
+/*
+ * Fabia PLLs require a power-on self-calibration, which happens when the
+ * PLL comes out of reset. The calibration frequency is derived from the
+ * VCO range as:
+ *
+ * calibration freq = (vco_min_freq + vco_max_freq) * 0.54
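+ *
+ * e.g. with a hypothetical VCO spanning 500-1000 MHz, the calibration
+ * target works out to (500 + 1000) * 0.54 = 810 MHz.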
+ */
+static int alpha_pll_fabia_prepare(struct clk_hw *hw)
+{
+	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+	const struct pll_vco *vco;
+	struct clk_hw *parent;
+	unsigned long calibration_freq, freq_hz;
+	u32 l, alpha_width = pll_alpha_width(pll), regval;
+	u64 a;
+	int ret;
+
+	/* Check if calibration needs to be done, i.e. the PLL is in reset */
+	ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &regval);
+	if (ret)
+		return ret;
+
+	/* Return early if calibration is not needed. */
+	if (regval & PLL_RESET_N)
+		return 0;
+
+	vco = alpha_pll_find_vco(pll, clk_hw_get_rate(hw));
+	if (!vco) {
+		pr_err("alpha pll: not in a valid vco range\n");
+		return -EINVAL;
+	}
+
+	calibration_freq = ((pll->vco_table[0].min_freq +
+				pll->vco_table[0].max_freq) * 54)/100;
+
+	parent = clk_hw_get_parent(hw);
+	if (!parent)
+		return -EINVAL;
+
+	freq_hz = alpha_pll_round_rate(calibration_freq,
+			clk_hw_get_rate(parent), &l, &a, alpha_width);
+	/*
+	 * Due to a limited number of bits for fractional rate programming, the
+	 * rounded up rate could be marginally higher than the requested rate.
+	 */
+	if (freq_hz > (calibration_freq + PLL_OUT_RATE_MARGIN) ||
+						freq_hz < calibration_freq)
+		return -EINVAL;
+
+	/* Set up the PLL for the calibration frequency */
+	regmap_write(pll->clkr.regmap, PLL_CAL_L_VAL(pll), FABIA_PLL_CAL_VAL);
+
+	/* Bring up the PLL at the calibration frequency */
+	ret = alpha_pll_fabia_enable(hw);
+	if (ret) {
+		pr_err("alpha pll calibration failed\n");
+		return ret;
+	}
+
+	alpha_pll_fabia_disable(hw);
+	return 0;
+}
+
+static void clk_fabia_pll_list_registers(struct seq_file *f, struct clk_hw *hw)
+{
+	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+	int size, i, val;
+
+	static struct clk_register_data data[] = {
+		{"PLL_MODE", PLL_OFF_MODE},
+		{"PLL_L_VAL", PLL_OFF_L_VAL},
+		{"PLL_CAL_L_VAL", PLL_OFF_CAL_L_VAL},
+		{"PLL_USER_CTL", PLL_OFF_USER_CTL},
+		{"PLL_USER_CTL_U", PLL_OFF_USER_CTL_U},
+		{"PLL_CONFIG_CTL", PLL_OFF_CONFIG_CTL},
+		{"PLL_CONFIG_CTL_U", PLL_OFF_CONFIG_CTL_U},
+		{"PLL_TEST_CTL", PLL_OFF_TEST_CTL},
+		{"PLL_TEST_CTL_U", PLL_OFF_TEST_CTL_U},
+		{"PLL_STATUS", PLL_OFF_STATUS},
+		{"PLL_OPMODE", PLL_OFF_MODE},
+		{"PLL_FRAC", PLL_OFF_FRAC},
+	};
+
+	static struct clk_register_data data1[] = {
+		{"APSS_PLL_VOTE", 0x0},
+	};
+
+	size = ARRAY_SIZE(data);
+
+	for (i = 0; i < size; i++) {
+		regmap_read(pll->clkr.regmap, pll->offset +
+					pll->regs[data[i].offset], &val);
+		clock_debug_output(f, false,
+				"%20s: 0x%.8x\n", data[i].name, val);
+	}
+
+	regmap_read(pll->clkr.regmap, pll->offset + pll->regs[data[0].offset],
+								&val);
+
+	if (val & PLL_FSM_ENA) {
+		regmap_read(pll->clkr.regmap, pll->clkr.enable_reg +
+				data1[0].offset, &val);
+		clock_debug_output(f, false,
+				"%20s: 0x%.8x\n", data1[0].name, val);
+	}
+}
+
 const struct clk_ops clk_alpha_pll_fabia_ops = {
+	.prepare = alpha_pll_fabia_prepare,
 	.enable = alpha_pll_fabia_enable,
 	.disable = alpha_pll_fabia_disable,
 	.is_enabled = clk_alpha_pll_is_enabled,
 	.set_rate = alpha_pll_fabia_set_rate,
 	.recalc_rate = alpha_pll_fabia_recalc_rate,
 	.round_rate = clk_alpha_pll_round_rate,
+	.list_registers = clk_fabia_pll_list_registers,
 	.bus_vote = clk_debug_bus_vote,
 };
 EXPORT_SYMBOL_GPL(clk_alpha_pll_fabia_ops);
@@ -1490,6 +1672,7 @@ const struct clk_ops clk_alpha_pll_fixed_fabia_ops = {
 	.is_enabled = clk_alpha_pll_is_enabled,
 	.recalc_rate = alpha_pll_fabia_recalc_rate,
 	.round_rate = clk_alpha_pll_round_rate,
+	.list_registers = clk_fabia_pll_list_registers,
 	.bus_vote = clk_debug_bus_vote,
 };
 EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_fabia_ops);
@@ -1971,3 +2154,162 @@ const struct clk_ops clk_alpha_pll_postdiv_lucid_ops = {
 	.bus_vote = clk_debug_bus_vote,
 };
 EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_ops);
+
+int clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+				const struct alpha_pll_config *config)
+{
+	u32 val, mask;
+
+	if (!config) {
+		pr_err("PLL configuration missing.\n");
+		return -EINVAL;
+	}
+
+	if (config->l)
+		regmap_write(regmap, PLL_L_VAL(pll), config->l);
+
+	if (config->alpha)
+		regmap_write(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+
+	if (config->post_div_mask) {
+		mask = config->post_div_mask;
+		val = config->post_div_val;
+		regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
+	}
+
+	if (config->main_output_mask || config->aux_output_mask ||
+		config->aux2_output_mask || config->early_output_mask) {
+
+		val = config->main_output_mask;
+		val |= config->aux_output_mask;
+		val |= config->aux2_output_mask;
+		val |= config->early_output_mask;
+
+		mask = config->main_output_mask;
+		mask |= config->aux_output_mask;
+		mask |= config->aux2_output_mask;
+		mask |= config->early_output_mask;
+
+		regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
+	}
+
+	if (config->config_ctl_val)
+		regmap_write(regmap, PLL_CONFIG_CTL(pll),
+					config->config_ctl_val);
+
+	if (config->config_ctl_hi_val)
+		regmap_write(regmap, PLL_CONFIG_CTL_U(pll),
+					config->config_ctl_hi_val);
+
+	if (config->test_ctl_val)
+		regmap_write(regmap, PLL_TEST_CTL(pll), config->test_ctl_val);
+
+	if (config->test_ctl_hi_val)
+		regmap_write(regmap, PLL_TEST_CTL_U(pll),
+					config->test_ctl_hi_val);
+
+	return 0;
+}
+
+static unsigned long
+clk_agera_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+	u32 l, a, alpha_width = pll_alpha_width(pll);
+	u64 prate = parent_rate;
+
+	regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+	regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &a);
+
+	return alpha_pll_calc_rate(prate, l, a, alpha_width);
+}
+
+static int clk_agera_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+				  unsigned long prate)
+{
+	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+	unsigned long rrate;
+	int ret;
+	u32 l, alpha_width = pll_alpha_width(pll);
+	u64 a;
+
+	rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
+	/*
+	 * Due to a limited number of bits for fractional rate programming, the
+	 * rounded up rate could be marginally higher than the requested rate.
+	 */
+	if (rrate > (rate + PLL_OUT_RATE_MARGIN) || rrate < rate) {
+		pr_err("Call set rate on the PLL with rounded rates!\n");
+		return -EINVAL;
+	}
+
+	/* change L_VAL without having to go through the power on sequence */
+	regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+	regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
+
+	/* Ensure that the write above goes through before proceeding. */
+	mb();
+
+	if (clk_hw_is_enabled(hw)) {
+		ret = wait_for_pll_enable_lock(pll);
+		if (ret) {
+			pr_err("Failed to lock after L_VAL update\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void clk_agera_pll_list_registers(struct seq_file *f, struct clk_hw *hw)
+{
+	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+	int size, i, val;
+
+	static struct clk_register_data data[] = {
+		{"PLL_MODE", PLL_OFF_MODE},
+		{"PLL_L_VAL", PLL_OFF_L_VAL},
+		{"PLL_ALPHA_VAL", PLL_OFF_ALPHA_VAL},
+		{"PLL_USER_CTL", PLL_OFF_USER_CTL},
+		{"PLL_CONFIG_CTL", PLL_OFF_CONFIG_CTL},
+		{"PLL_CONFIG_CTL_U", PLL_OFF_CONFIG_CTL_U},
+		{"PLL_TEST_CTL", PLL_OFF_TEST_CTL},
+		{"PLL_TEST_CTL_U", PLL_OFF_TEST_CTL_U},
+		{"PLL_STATUS", PLL_OFF_STATUS},
+	};
+
+	static struct clk_register_data data1 = {
+		"APSS_PLL_VOTE", 0x0
+	};
+
+	size = ARRAY_SIZE(data);
+
+	for (i = 0; i < size; i++) {
+		regmap_read(pll->clkr.regmap, pll->offset +
+					pll->regs[data[i].offset], &val);
+		clock_debug_output(f, false,
+				"%20s: 0x%.8x\n", data[i].name, val);
+	}
+
+	regmap_read(pll->clkr.regmap, pll->offset +
+					pll->regs[data[0].offset], &val);
+
+	if (val & PLL_FSM_ENA) {
+		regmap_read(pll->clkr.regmap, pll->clkr.enable_reg +
+					data1.offset, &val);
+		clock_debug_output(f, false,
+				"%20s: 0x%.8x\n", data1.name, val);
+	}
+}
+
+const struct clk_ops clk_alpha_pll_agera_ops = {
+	.enable = clk_alpha_pll_enable,
+	.disable = clk_alpha_pll_disable,
+	.is_enabled = clk_alpha_pll_is_enabled,
+	.recalc_rate = clk_agera_pll_recalc_rate,
+	.round_rate = clk_alpha_pll_round_rate,
+	.set_rate = clk_agera_pll_set_rate,
+	.list_registers = clk_agera_pll_list_registers,
+	.bus_vote = clk_debug_bus_vote,
+};
+EXPORT_SYMBOL(clk_alpha_pll_agera_ops);
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 9c5a800..2b3de02 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -15,6 +15,7 @@ enum {
 	CLK_ALPHA_PLL_TYPE_FABIA,
 	CLK_ALPHA_PLL_TYPE_LUCID,
 	CLK_ALPHA_PLL_TYPE_ZONDA,
+	CLK_ALPHA_PLL_TYPE_AGERA,
 	CLK_ALPHA_PLL_TYPE_MAX,
 };
 
@@ -139,13 +140,16 @@ extern const struct clk_ops clk_alpha_pll_postdiv_lucid_ops;
 extern const struct clk_ops clk_alpha_pll_zonda_ops;
 extern const struct clk_ops clk_alpha_pll_postdiv_zonda_ops;
 
+extern const struct clk_ops clk_alpha_pll_agera_ops;
+
 void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
 			     const struct alpha_pll_config *config);
-void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+int clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
 				const struct alpha_pll_config *config);
 void clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
 				const struct alpha_pll_config *config);
 void clk_zonda_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
 				const struct alpha_pll_config *config);
-
+int clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+				const struct alpha_pll_config *config);
 #endif
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 55f6a3b..4b76012 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -282,6 +282,7 @@ static const struct clk_rpmh_desc clk_rpmh_kona = {
 };
 
 DEFINE_CLK_RPMH_ARC(lito, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 2);
+DEFINE_CLK_RPMH_VRM(lito, ln_bb_clk2, ln_bb_clk2_ao, "lnbclka2", 2);
 DEFINE_CLK_RPMH_VRM(lito, ln_bb_clk3, ln_bb_clk3_ao, "lnbclka3", 2);
 DEFINE_CLK_RPMH_VRM(lito, rf_clk1, rf_clk1_ao, "rfclkd1", 1);
 DEFINE_CLK_RPMH_VRM(lito, rf_clk2, rf_clk2_ao, "rfclkd2", 1);
@@ -291,6 +292,8 @@ DEFINE_CLK_RPMH_VRM(lito, rf_clk4, rf_clk4_ao, "rfclkd4", 1);
 static struct clk_hw *lito_rpmh_clocks[] = {
 	[RPMH_CXO_CLK]		= &lito_bi_tcxo.hw,
 	[RPMH_CXO_CLK_A]	= &lito_bi_tcxo_ao.hw,
+	[RPMH_LN_BB_CLK2]	= &lito_ln_bb_clk2.hw,
+	[RPMH_LN_BB_CLK2_A]	= &lito_ln_bb_clk2_ao.hw,
 	[RPMH_LN_BB_CLK3]	= &lito_ln_bb_clk3.hw,
 	[RPMH_LN_BB_CLK3_A]	= &lito_ln_bb_clk3_ao.hw,
 	[RPMH_RF_CLK1]		= &lito_rf_clk1.hw,
diff --git a/drivers/clk/qcom/debugcc-bengal.c b/drivers/clk/qcom/debugcc-bengal.c
index c1cb3dc..0cd08b5 100644
--- a/drivers/clk/qcom/debugcc-bengal.c
+++ b/drivers/clk/qcom/debugcc-bengal.c
@@ -35,7 +35,7 @@ static int cpu_cc_debug_mux_sels[] = {
 
 static int apss_cc_debug_mux_pre_divs[] = {
 	0x8,		/* perfcl_clk */
-	0x4,		/* pwrcl_clk */
+	0x8,		/* pwrcl_clk */
 };
 
 static struct clk_debug_mux cpu_cc_debug_mux = {
@@ -115,7 +115,6 @@ static const char *const gcc_debug_mux_parent_names[] = {
 	"disp_cc_debug_mux",
 	"gcc_ahb2phy_csi_clk",
 	"gcc_ahb2phy_usb_clk",
-	"gcc_apc_vs_clk",
 	"gcc_bimc_gpu_axi_clk",
 	"gcc_boot_rom_ahb_clk",
 	"gcc_cam_throttle_nrt_clk",
@@ -151,10 +150,7 @@ static const char *const gcc_debug_mux_parent_names[] = {
 	"gcc_camss_tfe_2_csid_clk",
 	"gcc_camss_top_ahb_clk",
 	"gcc_cfg_noc_usb3_prim_axi_clk",
-	"gcc_cpuss_ahb_clk",
 	"gcc_cpuss_gnoc_clk",
-	"gcc_cpuss_throttle_core_clk",
-	"gcc_cpuss_throttle_xo_clk",
 	"gcc_disp_ahb_clk",
 	"gcc_disp_gpll0_div_clk_src",
 	"gcc_disp_hf_axi_clk",
@@ -170,14 +166,12 @@ static const char *const gcc_debug_mux_parent_names[] = {
 	"gcc_gpu_snoc_dvm_gfx_clk",
 	"gcc_gpu_throttle_core_clk",
 	"gcc_gpu_throttle_xo_clk",
-	"gcc_mss_vs_clk",
 	"gcc_pdm2_clk",
 	"gcc_pdm_ahb_clk",
 	"gcc_pdm_xo4_clk",
 	"gcc_prng_ahb_clk",
 	"gcc_qmip_camera_nrt_ahb_clk",
 	"gcc_qmip_camera_rt_ahb_clk",
-	"gcc_qmip_cpuss_cfg_ahb_clk",
 	"gcc_qmip_disp_ahb_clk",
 	"gcc_qmip_gpu_cfg_ahb_clk",
 	"gcc_qmip_video_vcodec_ahb_clk",
@@ -206,9 +200,6 @@ static const char *const gcc_debug_mux_parent_names[] = {
 	"gcc_usb3_prim_phy_com_aux_clk",
 	"gcc_usb3_prim_phy_pipe_clk",
 	"gcc_vcodec0_axi_clk",
-	"gcc_vdda_vs_clk",
-	"gcc_vddcx_vs_clk",
-	"gcc_vddmx_vs_clk",
 	"gcc_venus_ahb_clk",
 	"gcc_venus_ctl_axi_clk",
 	"gcc_video_ahb_clk",
@@ -217,11 +208,10 @@ static const char *const gcc_debug_mux_parent_names[] = {
 	"gcc_video_vcodec0_sys_clk",
 	"gcc_video_venus_ctl_clk",
 	"gcc_video_xo_clk",
-	"gcc_vs_ctrl_ahb_clk",
-	"gcc_vs_ctrl_clk",
-	"gcc_wcss_vs_clk",
 	"gpu_cc_debug_mux",
 	"mc_cc_debug_mux",
+	"measure_only_cnoc_clk",
+	"measure_only_snoc_clk",
 };
 
 static int gcc_debug_mux_sels[] = {
@@ -229,7 +219,6 @@ static int gcc_debug_mux_sels[] = {
 	0x42,		/* disp_cc_debug_mux */
 	0x63,		/* gcc_ahb2phy_csi_clk */
 	0x64,		/* gcc_ahb2phy_usb_clk */
-	0xC3,		/* gcc_apc_vs_clk */
 	0x90,		/* gcc_bimc_gpu_axi_clk */
 	0x76,		/* gcc_boot_rom_ahb_clk */
 	0x4C,		/* gcc_cam_throttle_nrt_clk */
@@ -265,10 +254,7 @@ static int gcc_debug_mux_sels[] = {
 	0x12F,		/* gcc_camss_tfe_2_csid_clk */
 	0x135,		/* gcc_camss_top_ahb_clk */
 	0x1D,		/* gcc_cfg_noc_usb3_prim_axi_clk */
-	0xA9,		/* gcc_cpuss_ahb_clk */
 	0xAA,		/* gcc_cpuss_gnoc_clk */
-	0xB2,		/* gcc_cpuss_throttle_core_clk */
-	0xB1,		/* gcc_cpuss_throttle_xo_clk */
 	0x38,		/* gcc_disp_ahb_clk */
 	0x47,		/* gcc_disp_gpll0_div_clk_src */
 	0x3D,		/* gcc_disp_hf_axi_clk */
@@ -284,14 +270,12 @@ static int gcc_debug_mux_sels[] = {
 	0xEA,		/* gcc_gpu_snoc_dvm_gfx_clk */
 	0xEF,		/* gcc_gpu_throttle_core_clk */
 	0xEE,		/* gcc_gpu_throttle_xo_clk */
-	0xC2,		/* gcc_mss_vs_clk */
 	0x73,		/* gcc_pdm2_clk */
 	0x71,		/* gcc_pdm_ahb_clk */
 	0x72,		/* gcc_pdm_xo4_clk */
 	0x74,		/* gcc_prng_ahb_clk */
 	0x3A,		/* gcc_qmip_camera_nrt_ahb_clk */
 	0x48,		/* gcc_qmip_camera_rt_ahb_clk */
-	0xB0,		/* gcc_qmip_cpuss_cfg_ahb_clk */
 	0x3B,		/* gcc_qmip_disp_ahb_clk */
 	0xED,		/* gcc_qmip_gpu_cfg_ahb_clk */
 	0x39,		/* gcc_qmip_video_vcodec_ahb_clk */
@@ -320,9 +304,6 @@ static int gcc_debug_mux_sels[] = {
 	0x5F,		/* gcc_usb3_prim_phy_com_aux_clk */
 	0x60,		/* gcc_usb3_prim_phy_pipe_clk */
 	0x142,		/* gcc_vcodec0_axi_clk */
-	0xBF,		/* gcc_vdda_vs_clk */
-	0xBD,		/* gcc_vddcx_vs_clk */
-	0xBE,		/* gcc_vddmx_vs_clk */
 	0x143,		/* gcc_venus_ahb_clk */
 	0x141,		/* gcc_venus_ctl_axi_clk */
 	0x36,		/* gcc_video_ahb_clk */
@@ -331,11 +312,10 @@ static int gcc_debug_mux_sels[] = {
 	0x13F,		/* gcc_video_vcodec0_sys_clk */
 	0x13D,		/* gcc_video_venus_ctl_clk */
 	0x3E,		/* gcc_video_xo_clk */
-	0xC1,		/* gcc_vs_ctrl_ahb_clk */
-	0xC0,		/* gcc_vs_ctrl_clk */
-	0xC4,		/* gcc_wcss_vs_clk */
 	0xE7,		/* gpu_cc_debug_mux */
 	0x9E,           /* mc_cc_debug_mux */
+	0x1A,		/* measure_only_cnoc_clk */
+	0x7,		/* measure_only_snoc_clk */
 };
 
 static struct clk_debug_mux gcc_debug_mux = {
@@ -362,7 +342,6 @@ static const char *const gpu_cc_debug_mux_parent_names[] = {
 	"gpu_cc_ahb_clk",
 	"gpu_cc_crc_ahb_clk",
 	"gpu_cc_cx_gfx3d_clk",
-	"gpu_cc_cx_gfx3d_slv_clk",
 	"gpu_cc_cx_gmu_clk",
 	"gpu_cc_cx_snoc_dvm_clk",
 	"gpu_cc_cxo_aon_clk",
@@ -376,7 +355,6 @@ static int gpu_cc_debug_mux_sels[] = {
 	0x10,		/* gpu_cc_ahb_clk */
 	0x11,		/* gpu_cc_crc_ahb_clk */
 	0x1A,		/* gpu_cc_cx_gfx3d_clk */
-	0x1B,		/* gpu_cc_cx_gfx3d_slv_clk */
 	0x18,		/* gpu_cc_cx_gmu_clk */
 	0x15,		/* gpu_cc_cx_snoc_dvm_clk */
 	0xA,		/* gpu_cc_cxo_aon_clk */
@@ -411,7 +389,7 @@ static const char *const mc_cc_debug_mux_parent_names[] = {
 };
 
 static struct clk_debug_mux mc_cc_debug_mux = {
-	.period_offset = 0x50,
+	.period_offset = 0x20,
 	.hw.init = &(struct clk_init_data){
 		.name = "mc_cc_debug_mux",
 		.ops = &clk_debug_mux_ops,
@@ -437,6 +415,22 @@ static struct clk_dummy measure_only_mccc_clk = {
 	},
 };
 
+static struct clk_dummy measure_only_cnoc_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "measure_only_cnoc_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
+static struct clk_dummy measure_only_snoc_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "measure_only_snoc_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
 static struct clk_dummy perfcl_clk = {
 	.rrate = 1000,
 	.hw.init = &(struct clk_init_data){
@@ -454,7 +448,9 @@ static struct clk_dummy pwrcl_clk = {
 };
 
 struct clk_hw *debugcc_bengal_hws[] = {
+	&measure_only_cnoc_clk.hw,
 	&measure_only_mccc_clk.hw,
+	&measure_only_snoc_clk.hw,
 	&perfcl_clk.hw,
 	&pwrcl_clk.hw,
 };
diff --git a/drivers/clk/qcom/dispcc-bengal.c b/drivers/clk/qcom/dispcc-bengal.c
index 8c1d82c..f1e5cda 100644
--- a/drivers/clk/qcom/dispcc-bengal.c
+++ b/drivers/clk/qcom/dispcc-bengal.c
@@ -70,6 +70,11 @@ static const char * const disp_cc_parent_names_2[] = {
 	"core_bi_pll_test_se",
 };
 
+static const char * const disp_cc_parent_names_2_ao[] = {
+	"bi_tcxo_ao",
+	"core_bi_pll_test_se",
+};
+
 static const struct parent_map disp_cc_parent_map_3[] = {
 	{ P_BI_TCXO, 0 },
 	{ P_GPLL0_OUT_MAIN, 4 },
@@ -387,13 +392,9 @@ static struct clk_rcg2 disp_cc_xo_clk_src = {
 	.freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_xo_clk_src",
-		.parent_names = disp_cc_parent_names_2,
+		.parent_names = disp_cc_parent_names_2_ao,
 		.num_parents = 2,
 		.ops = &clk_rcg2_ops,
-		.vdd_class = &vdd_cx,
-		.num_rate_max = VDD_NUM,
-		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_MIN] = 19200000},
 	},
 };
 
diff --git a/drivers/clk/qcom/gcc-bengal.c b/drivers/clk/qcom/gcc-bengal.c
index ba18207..304b146 100644
--- a/drivers/clk/qcom/gcc-bengal.c
+++ b/drivers/clk/qcom/gcc-bengal.c
@@ -754,7 +754,6 @@ static struct clk_alpha_pll_postdiv gpll8_out_main = {
 		.name = "gpll8_out_main",
 		.parent_names = (const char *[]){ "gpll8" },
 		.num_parents = 1,
-		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_alpha_pll_postdiv_ro_ops,
 	},
 };
@@ -961,6 +960,7 @@ static struct clk_rcg2 gcc_camss_csi2phytimer_clk_src = {
 };
 
 static const struct freq_tbl ftbl_gcc_camss_mclk0_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(24000000, P_GPLL9_OUT_MAIN, 1, 1, 24),
 	F(64000000, P_GPLL9_OUT_MAIN, 1, 1, 9),
 	{ }
@@ -1879,7 +1879,7 @@ static struct clk_rcg2 gcc_video_venus_clk_src = {
 		.vdd_class = &vdd_cx,
 		.num_rate_max = VDD_NUM,
 		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_LOWER] = 133000000,
+			[VDD_LOWER] = 133333333,
 			[VDD_LOW] = 240000000,
 			[VDD_LOW_L1] = 300000000,
 			[VDD_NOMINAL] = 384000000},
@@ -2514,22 +2514,6 @@ static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
 	},
 };
 
-static struct clk_branch gcc_cpuss_ahb_clk = {
-	.halt_reg = 0x2b000,
-	.halt_check = BRANCH_HALT_VOTED,
-	.hwcg_reg = 0x2b000,
-	.hwcg_bit = 1,
-	.clkr = {
-		.enable_reg = 0x79004,
-		.enable_mask = BIT(21),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_cpuss_ahb_clk",
-			.flags = CLK_IS_CRITICAL,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch gcc_cpuss_gnoc_clk = {
 	.halt_reg = 0x2b004,
 	.halt_check = BRANCH_HALT_VOTED,
@@ -2546,34 +2530,6 @@ static struct clk_branch gcc_cpuss_gnoc_clk = {
 	},
 };
 
-static struct clk_branch gcc_cpuss_throttle_core_clk = {
-	.halt_reg = 0x2b180,
-	.halt_check = BRANCH_HALT_VOTED,
-	.hwcg_reg = 0x2b180,
-	.hwcg_bit = 1,
-	.clkr = {
-		.enable_reg = 0x79004,
-		.enable_mask = BIT(30),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_cpuss_throttle_core_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch gcc_cpuss_throttle_xo_clk = {
-	.halt_reg = 0x2b17c,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x2b17c,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_cpuss_throttle_xo_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch gcc_disp_ahb_clk = {
 	.halt_reg = 0x1700c,
 	.halt_check = BRANCH_HALT,
@@ -2783,7 +2739,7 @@ static struct clk_branch gcc_gpu_iref_clk = {
 
 static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
 	.halt_reg = 0x3600c,
-	.halt_check = BRANCH_HALT,
+	.halt_check = BRANCH_VOTED,
 	.hwcg_reg = 0x3600c,
 	.hwcg_bit = 1,
 	.clkr = {
@@ -2928,21 +2884,6 @@ static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
 	},
 };
 
-static struct clk_branch gcc_qmip_cpuss_cfg_ahb_clk = {
-	.halt_reg = 0x2b178,
-	.halt_check = BRANCH_HALT_VOTED,
-	.hwcg_reg = 0x2b178,
-	.hwcg_bit = 1,
-	.clkr = {
-		.enable_reg = 0x79004,
-		.enable_mask = BIT(18),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_qmip_cpuss_cfg_ahb_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch gcc_qmip_disp_ahb_clk = {
 	.halt_reg = 0x17018,
 	.halt_check = BRANCH_HALT_VOTED,
@@ -3709,10 +3650,7 @@ static struct clk_regmap *gcc_bengal_clocks[] = {
 	[GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
 	[GCC_CAMSS_TOP_AHB_CLK_SRC] = &gcc_camss_top_ahb_clk_src.clkr,
 	[GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
-	[GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
 	[GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
-	[GCC_CPUSS_THROTTLE_CORE_CLK] = &gcc_cpuss_throttle_core_clk.clkr,
-	[GCC_CPUSS_THROTTLE_XO_CLK] = &gcc_cpuss_throttle_xo_clk.clkr,
 	[GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
 	[GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
 	[GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
@@ -3740,7 +3678,6 @@ static struct clk_regmap *gcc_bengal_clocks[] = {
 	[GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
 	[GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
 	[GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
-	[GCC_QMIP_CPUSS_CFG_AHB_CLK] = &gcc_qmip_cpuss_cfg_ahb_clk.clkr,
 	[GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
 	[GCC_QMIP_GPU_CFG_AHB_CLK] = &gcc_qmip_gpu_cfg_ahb_clk.clkr,
 	[GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
diff --git a/drivers/clk/qcom/gcc-lagoon.c b/drivers/clk/qcom/gcc-lagoon.c
new file mode 100644
index 0000000..ca54500
--- /dev/null
+++ b/drivers/clk/qcom/gcc-lagoon.c
@@ -0,0 +1,2760 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-lagoon.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "reset.h"
+#include "vdd-level-lito.h"
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_cx_ao, VDD_NUM, 1, vdd_corner);
+
+#define GCC_NPU_MISC		0x4cf00
+#define GCC_GPU_MISC		0x45f00
+
+enum {
+	P_BI_TCXO,
+	P_CHIP_SLEEP_CLK,
+	P_CORE_BI_PLL_TEST_SE,
+	P_GPLL0_OUT_EVEN,
+	P_GPLL0_OUT_MAIN,
+	P_GPLL0_OUT_ODD,
+	P_GPLL6_OUT_EVEN,
+	P_GPLL7_OUT_MAIN,
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_MAIN, 1 },
+	{ P_GPLL6_OUT_EVEN, 2 },
+	{ P_GPLL0_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_0[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll6_out_even",
+	"gpll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_1[] = {
+	"bi_tcxo",
+	"gpll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_ODD, 2 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_2[] = {
+	"bi_tcxo",
+	"gpll0_out_odd",
+	"core_bi_pll_test_se",
+};
+
+static const char * const gcc_parent_names_2_ao[] = {
+	"bi_tcxo_ao",
+	"gpll0_out_odd",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_3[] = {
+	"bi_tcxo",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_MAIN, 1 },
+	{ P_GPLL0_OUT_ODD, 2 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_4[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll0_out_odd",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_ODD, 2 },
+	{ P_CHIP_SLEEP_CLK, 5 },
+	{ P_GPLL0_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_5[] = {
+	"bi_tcxo",
+	"gpll0_out_odd",
+	"chip_sleep_clk",
+	"gpll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CHIP_SLEEP_CLK, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_6[] = {
+	"bi_tcxo",
+	"chip_sleep_clk",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL6_OUT_EVEN, 2 },
+	{ P_GPLL0_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_7[] = {
+	"bi_tcxo",
+	"gpll6_out_even",
+	"gpll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_ODD, 2 },
+	{ P_GPLL7_OUT_MAIN, 3 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_8[] = {
+	"bi_tcxo",
+	"gpll0_out_odd",
+	"gpll7",
+	"core_bi_pll_test_se",
+};
+
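+/*
+ * The GPLLs use the fixed-rate Fabia ops: their rates are not changed at
+ * runtime, and the kernel only enables/disables them through the vote
+ * register at 0x52010.
+ */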
+static struct clk_alpha_pll gpll0 = {
+	.offset = 0x0,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr = {
+		.enable_reg = 0x52010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll0",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_fixed_fabia_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_even[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_gpll0_out_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll0_out_even",
+		.parent_names = (const char *[]){ "gpll0" },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+	},
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_odd[] = {
+	{ 0x3, 3 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_odd = {
+	.offset = 0x0,
+	.post_div_shift = 12,
+	.post_div_table = post_div_table_gpll0_out_odd,
+	.num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_odd),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll0_out_odd",
+		.parent_names = (const char *[]){ "gpll0" },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+	},
+};
+
+static struct clk_alpha_pll gpll6 = {
+	.offset = 0x6000,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr = {
+		.enable_reg = 0x52010,
+		.enable_mask = BIT(6),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll6",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_fixed_fabia_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_gpll6_out_even[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv gpll6_out_even = {
+	.offset = 0x6000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_gpll6_out_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_gpll6_out_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll6_out_even",
+		.parent_names = (const char *[]){ "gpll6" },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+	},
+};
+
+static struct clk_alpha_pll gpll7 = {
+	.offset = 0x7000,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr = {
+		.enable_reg = 0x52010,
+		.enable_mask = BIT(7),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll7",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_fixed_fabia_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
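+/* Read-only divider: clk_regmap_div_ro_ops only reflects the HW state. */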
+static struct clk_regmap_div gcc_gpu_gpll0_main_div_clk_src = {
+	.reg = 0x4514c,
+	.shift = 0,
+	.width = 2,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "gcc_gpu_gpll0_main_div_clk_src",
+		.parent_names =
+			(const char *[]){ "gpll0" },
+		.num_parents = 1,
+		.ops = &clk_regmap_div_ro_ops,
+	},
+};
+
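+/*
+ * Frequency tables use the common qcom F(freq, src, pre_div, m, n)
+ * macro; half-integer pre-dividers (e.g. 2.5) are supported, and m = 0
+ * disables the MND counter for that entry.
+ */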
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+	.cmd_rcgr = 0x30014,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_2,
+	.freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_cpuss_ahb_clk_src",
+		.parent_names = gcc_parent_names_2_ao,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx_ao,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+		},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+	F(50000000, P_GPLL0_OUT_ODD, 4, 0, 0),
+	F(100000000, P_GPLL0_OUT_ODD, 2, 0, 0),
+	F(200000000, P_GPLL0_OUT_ODD, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+	.cmd_rcgr = 0x37004,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_5,
+	.freq_tbl = ftbl_gcc_gp1_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_gp1_clk_src",
+		.parent_names = gcc_parent_names_5,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 50000000,
+			[VDD_LOW] = 100000000,
+			[VDD_NOMINAL] = 200000000},
+	},
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+	.cmd_rcgr = 0x38004,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_5,
+	.freq_tbl = ftbl_gcc_gp1_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_gp2_clk_src",
+		.parent_names = gcc_parent_names_5,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 50000000,
+			[VDD_LOW] = 100000000,
+			[VDD_NOMINAL] = 200000000},
+	},
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+	.cmd_rcgr = 0x39004,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_5,
+	.freq_tbl = ftbl_gcc_gp1_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_gp3_clk_src",
+		.parent_names = gcc_parent_names_5,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 50000000,
+			[VDD_LOW] = 100000000,
+			[VDD_NOMINAL] = 200000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(60000000, P_GPLL0_OUT_EVEN, 5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+	.cmd_rcgr = 0x23010,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_1,
+	.freq_tbl = ftbl_gcc_pdm2_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_pdm2_clk_src",
+		.parent_names = gcc_parent_names_1,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 60000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+	F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+	F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+	F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+	F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+	F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+	F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+	F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+	F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+	F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+	F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375),
+	F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75),
+	F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625),
+	F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0),
+	F(128000000, P_GPLL6_OUT_EVEN, 3, 0, 0),
+	{ }
+};
+
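+/*
+ * All QUP serial-engine RCGs share ftbl_gcc_qupv3_wrap0_s0_clk_src. Each
+ * keeps its clk_init_data in a named struct rather than an anonymous
+ * literal so the init data can also be referenced on its own, e.g. for
+ * QUP DFS setup at probe (an assumption; the probe code is outside this
+ * hunk).
+ */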
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+	.name = "gcc_qupv3_wrap0_s0_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 5,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+	.cmd_rcgr = 0x21148,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+	.name = "gcc_qupv3_wrap0_s1_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 5,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+	.cmd_rcgr = 0x21278,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+	.name = "gcc_qupv3_wrap0_s2_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 5,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+	.cmd_rcgr = 0x213a8,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+	.name = "gcc_qupv3_wrap0_s3_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 5,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+	.cmd_rcgr = 0x214d8,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+	.name = "gcc_qupv3_wrap0_s4_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 5,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+	.cmd_rcgr = 0x21608,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+	.name = "gcc_qupv3_wrap0_s5_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 5,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+	.cmd_rcgr = 0x21738,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+	.name = "gcc_qupv3_wrap1_s0_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 5,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+	.cmd_rcgr = 0x22018,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+	.name = "gcc_qupv3_wrap1_s1_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 5,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+	.cmd_rcgr = 0x22148,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+	.name = "gcc_qupv3_wrap1_s2_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 5,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+	.cmd_rcgr = 0x22278,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+	.name = "gcc_qupv3_wrap1_s3_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 5,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+	.cmd_rcgr = 0x223a8,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+	.name = "gcc_qupv3_wrap1_s4_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 5,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+	.cmd_rcgr = 0x224d8,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+	.name = "gcc_qupv3_wrap1_s5_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 5,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+	.cmd_rcgr = 0x22608,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+	F(144000, P_BI_TCXO, 16, 3, 25),
+	F(400000, P_BI_TCXO, 12, 1, 4),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(20000000, P_GPLL0_OUT_EVEN, 5, 1, 3),
+	F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+	F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+	F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+	F(192000000, P_GPLL6_OUT_EVEN, 2, 0, 0),
+	F(384000000, P_GPLL6_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
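+/*
+ * enable_safe_config is a downstream clk_rcg2 flag: the RCG is parked at
+ * a safe XO configuration while disabled, so it can be re-enabled even
+ * if its last-programmed source is off.
+ */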
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+	.cmd_rcgr = 0x4b024,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_7,
+	.freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_sdcc1_apps_clk_src",
+		.parent_names = gcc_parent_names_7,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 100000000,
+			[VDD_LOW_L1] = 384000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+	F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+	F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+	F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+	.cmd_rcgr = 0x4b00c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_1,
+	.freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_sdcc1_ice_core_clk_src",
+		.parent_names = gcc_parent_names_1,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 100000000,
+			[VDD_LOW] = 150000000,
+			[VDD_LOW_L1] = 300000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+	F(400000, P_BI_TCXO, 12, 1, 4),
+	F(9600000, P_BI_TCXO, 2, 0, 0),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(25000000, P_GPLL0_OUT_ODD, 8, 0, 0),
+	F(100000000, P_GPLL0_OUT_ODD, 2, 0, 0),
+	F(202000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+	.cmd_rcgr = 0x2000c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_8,
+	.freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_sdcc2_apps_clk_src",
+		.parent_names = gcc_parent_names_8,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 100000000,
+			[VDD_LOW_L1] = 202000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+	F(25000000, P_GPLL0_OUT_ODD, 8, 0, 0),
+	F(50000000, P_GPLL0_OUT_ODD, 4, 0, 0),
+	F(100000000, P_GPLL0_OUT_ODD, 2, 0, 0),
+	F(200000000, P_GPLL0_OUT_ODD, 1, 0, 0),
+	F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+	.cmd_rcgr = 0x3a01c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_4,
+	.freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_ufs_phy_axi_clk_src",
+		.parent_names = gcc_parent_names_4,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 50000000,
+			[VDD_LOW] = 100000000,
+			[VDD_NOMINAL] = 200000000,
+			[VDD_HIGH] = 240000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+	F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+	F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+	F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+	F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+	.cmd_rcgr = 0x3a048,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_1,
+	.freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_ufs_phy_ice_core_clk_src",
+		.parent_names = gcc_parent_names_1,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 75000000,
+			[VDD_LOW] = 150000000,
+			[VDD_NOMINAL] = 300000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+	F(9600000, P_BI_TCXO, 2, 0, 0),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+	.cmd_rcgr = 0x3a0b0,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_3,
+	.freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_ufs_phy_phy_aux_clk_src",
+		.parent_names = gcc_parent_names_3,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+	F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+	F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+	F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+	.cmd_rcgr = 0x3a060,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_1,
+	.freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_ufs_phy_unipro_core_clk_src",
+		.parent_names = gcc_parent_names_1,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 37500000,
+			[VDD_LOW] = 75000000,
+			[VDD_NOMINAL] = 150000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+	F(66666667, P_GPLL0_OUT_ODD, 3, 0, 0),
+	F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+	F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+	F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+	.cmd_rcgr = 0x1a01c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_4,
+	.freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_usb30_prim_master_clk_src",
+		.parent_names = gcc_parent_names_4,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 66666667,
+			[VDD_LOW] = 133333333,
+			[VDD_NOMINAL] = 200000000,
+			[VDD_HIGH] = 240000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+	.cmd_rcgr = 0x1a034,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_3,
+	.freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_usb30_prim_mock_utmi_clk_src",
+		.parent_names = gcc_parent_names_3,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000},
+	},
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+	.cmd_rcgr = 0x1a060,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_6,
+	.freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_usb3_prim_phy_aux_clk_src",
+		.parent_names = gcc_parent_names_6,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000},
+	},
+};
+
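+/*
+ * Branch clocks below differ mainly in how they are enabled and checked:
+ * BRANCH_HALT polls the halt bit, BRANCH_HALT_DELAY/_SKIP avoid polling
+ * where the status bit is unusable, and the *_VOTED variants are enabled
+ * through the shared vote registers (0x52000/0x52008), so the halt check
+ * is skipped on disable since other voters may keep the clock running.
+ */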
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+	.halt_reg = 0x3e014,
+	.halt_check = BRANCH_HALT_DELAY,
+	.hwcg_reg = 0x3e014,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x3e014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_aggre_ufs_phy_axi_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
+	.halt_reg = 0x3e014,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x3e014,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x3e014,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = {
+	.halt_reg = 0x3e014,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x3e014,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x3e014,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_axi_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+	.halt_reg = 0x3e010,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x3e010,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x3e010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_aggre_usb3_prim_axi_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb30_prim_master_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+	.halt_reg = 0x26004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x26004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(28),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_boot_rom_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camera_ahb_clk = {
+	.halt_reg = 0x17008,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x17008,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x17008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camera_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camera_axi_clk = {
+	.halt_reg = 0x17018,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x17018,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x17018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camera_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camera_throttle_nrt_axi_clk = {
+	.halt_reg = 0x17078,
+	.halt_check = BRANCH_VOTED,
+	.hwcg_reg = 0x17078,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x17078,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camera_throttle_nrt_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camera_throttle_rt_axi_clk = {
+	.halt_reg = 0x17024,
+	.halt_check = BRANCH_VOTED,
+	.hwcg_reg = 0x17024,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x17024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camera_throttle_rt_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camera_xo_clk = {
+	.halt_reg = 0x17030,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x17030,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camera_xo_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+	.halt_reg = 0x2b00c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x2b00c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(3),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ce1_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+	.halt_reg = 0x2b008,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(2),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ce1_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ce1_clk = {
+	.halt_reg = 0x2b004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ce1_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+	.halt_reg = 0x1101c,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x1101c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x1101c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cfg_noc_usb3_prim_axi_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb30_prim_master_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_cpuss_ahb_clk = {
+	.halt_reg = 0x30000,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x30000,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cpuss_ahb_clk",
+			.parent_names = (const char *[]){
+				"gcc_cpuss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_cpuss_gnoc_clk = {
+	.halt_reg = 0x30004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x30004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(5),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cpuss_gnoc_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_cpuss_rbcpr_clk = {
+	.halt_reg = 0x30008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x30008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cpuss_rbcpr_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+	.halt_reg = 0x2d038,
+	.halt_check = BRANCH_VOTED,
+	.hwcg_reg = 0x2d038,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x2d038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ddrss_gpu_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_disp_ahb_clk = {
+	.halt_reg = 0x1700c,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x1700c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x1700c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_disp_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_disp_axi_clk = {
+	.halt_reg = 0x1701c,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x1701c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x1701c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_disp_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_disp_cc_sleep_clk = {
+	.halt_reg = 0x17074,
+	.halt_check = BRANCH_HALT_DELAY,
+	.hwcg_reg = 0x17074,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x17074,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_disp_cc_sleep_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_disp_cc_xo_clk = {
+	.halt_reg = 0x17070,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x17070,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x17070,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_disp_cc_xo_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_disp_gpll0_clk = {
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(2),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_disp_gpll0_clk",
+			.parent_names = (const char *[]){
+				"gpll0",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_disp_throttle_axi_clk = {
+	.halt_reg = 0x17028,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x17028,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x17028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_disp_throttle_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_disp_xo_clk = {
+	.halt_reg = 0x17034,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x17034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_disp_xo_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gp1_clk = {
+	.halt_reg = 0x37000,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x37000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gp1_clk",
+			.parent_names = (const char *[]){
+				"gcc_gp1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gp2_clk = {
+	.halt_reg = 0x38000,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x38000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gp2_clk",
+			.parent_names = (const char *[]){
+				"gcc_gp2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gp3_clk = {
+	.halt_reg = 0x39000,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x39000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gp3_clk",
+			.parent_names = (const char *[]){
+				"gcc_gp3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+	.halt_reg = 0x45004,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x45004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x45004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_cfg_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk = {
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(7),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_gpll0_clk",
+			.parent_names = (const char *[]){
+				"gpll0",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk = {
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(8),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_gpll0_div_clk",
+			.parent_names = (const char *[]){
+				"gcc_gpu_gpll0_main_div_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+	.halt_reg = 0x4500c,
+	.halt_check = BRANCH_VOTED,
+	.hwcg_reg = 0x4500c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x4500c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_memnoc_gfx_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+	.halt_reg = 0x45014,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x45014,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x45014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_snoc_dvm_gfx_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_npu_axi_clk = {
+	.halt_reg = 0x4c008,
+	.halt_check = BRANCH_VOTED,
+	.hwcg_reg = 0x4c008,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x4c008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_npu_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_npu_bwmon_axi_clk = {
+	.halt_reg = 0x4d004,
+	.halt_check = BRANCH_HALT_DELAY,
+	.hwcg_reg = 0x4d004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x4d004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_npu_bwmon_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_npu_bwmon_dma_cfg_ahb_clk = {
+	.halt_reg = 0x4d008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x4d008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_npu_bwmon_dma_cfg_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_npu_bwmon_dsp_cfg_ahb_clk = {
+	.halt_reg = 0x4d00c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x4d00c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_npu_bwmon_dsp_cfg_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_npu_cfg_ahb_clk = {
+	.halt_reg = 0x4c004,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x4c004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x4c004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_npu_cfg_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_npu_dma_clk = {
+	.halt_reg = 0x4c140,
+	.halt_check = BRANCH_VOTED,
+	.hwcg_reg = 0x4c140,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x4c140,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_npu_dma_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_npu_gpll0_clk = {
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_npu_gpll0_clk",
+			.parent_names = (const char *[]){
+				"gpll0",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_npu_gpll0_div_clk = {
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(10),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_npu_gpll0_div_clk",
+			.parent_names = (const char *[]){
+				"gpll0",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+	.halt_reg = 0x2300c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2300c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pdm2_clk",
+			.parent_names = (const char *[]){
+				"gcc_pdm2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+	.halt_reg = 0x23004,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x23004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x23004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pdm_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+	.halt_reg = 0x23008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x23008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pdm_xo4_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+	.halt_reg = 0x24004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x24004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(26),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_prng_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+	.halt_reg = 0x21014,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_core_2x_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+	.halt_reg = 0x2100c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(8),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_core_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+	.halt_reg = 0x21144,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(10),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_s0_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap0_s0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+	.halt_reg = 0x21274,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_s1_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap0_s1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+	.halt_reg = 0x213a4,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(12),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_s2_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap0_s2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+	.halt_reg = 0x214d4,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(13),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_s3_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap0_s3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+	.halt_reg = 0x21604,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(14),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_s4_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap0_s4_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+	.halt_reg = 0x21734,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(15),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_s5_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap0_s5_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+	.halt_reg = 0x22004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(16),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap1_core_2x_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+	.halt_reg = 0x22008,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(17),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap1_core_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+	.halt_reg = 0x22014,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(20),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap1_s0_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap1_s0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+	.halt_reg = 0x22144,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(21),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap1_s1_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap1_s1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+	.halt_reg = 0x22274,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(22),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap1_s2_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap1_s2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+	.halt_reg = 0x223a4,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(23),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap1_s3_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap1_s3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+	.halt_reg = 0x224d4,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(24),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap1_s4_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap1_s4_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+	.halt_reg = 0x22604,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(25),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap1_s5_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap1_s5_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+	.halt_reg = 0x21004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x21004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(6),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap_0_m_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+	.halt_reg = 0x21008,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x21008,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(7),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap_0_s_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+	.halt_reg = 0x2200c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x2200c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(18),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap_1_m_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+	.halt_reg = 0x22010,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x22010,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(19),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap_1_s_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+	.halt_reg = 0x4b004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x4b004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc1_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+	.halt_reg = 0x4b008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x4b008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc1_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_sdcc1_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+	.halt_reg = 0x4b03c,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x4b03c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x4b03c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc1_ice_core_clk",
+			.parent_names = (const char *[]){
+				"gcc_sdcc1_ice_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+	.halt_reg = 0x20008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x20008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc2_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+	.halt_reg = 0x20004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x20004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc2_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_sdcc2_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+	.halt_reg = 0x10140,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x10140,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sys_noc_cpuss_ahb_clk",
+			.parent_names = (const char *[]){
+				"gcc_cpuss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_mem_clkref_clk = {
+	.halt_reg = 0x8c000,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8c000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_mem_clkref_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+	.halt_reg = 0x3a00c,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x3a00c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x3a00c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+	.halt_reg = 0x3a034,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x3a034,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x3a034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_axi_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+	.halt_reg = 0x3a0a4,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x3a0a4,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x3a0a4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_ice_core_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_ice_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = {
+	.halt_reg = 0x3a0a4,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x3a0a4,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x3a0a4,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_ice_core_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_ice_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+	.halt_reg = 0x3a0ac,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x3a0ac,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x3a0ac,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_phy_aux_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_phy_aux_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
+	.halt_reg = 0x3a0ac,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x3a0ac,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x3a0ac,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_phy_aux_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_phy_aux_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+	.halt_reg = 0x3a014,
+	.halt_check = BRANCH_HALT_SKIP,
+	.clkr = {
+		.enable_reg = 0x3a014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_rx_symbol_0_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+	.halt_reg = 0x3a018,
+	.halt_check = BRANCH_HALT_SKIP,
+	.clkr = {
+		.enable_reg = 0x3a018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_rx_symbol_1_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+	.halt_reg = 0x3a010,
+	.halt_check = BRANCH_HALT_SKIP,
+	.clkr = {
+		.enable_reg = 0x3a010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_tx_symbol_0_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+	.halt_reg = 0x3a09c,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x3a09c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x3a09c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_unipro_core_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_unipro_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = {
+	.halt_reg = 0x3a09c,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x3a09c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x3a09c,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_unipro_core_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_unipro_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+	.halt_reg = 0x1a00c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1a00c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb30_prim_master_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb30_prim_master_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+	.halt_reg = 0x1a018,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1a018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb30_prim_mock_utmi_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb30_prim_mock_utmi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+	.halt_reg = 0x1a014,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1a014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb30_prim_sleep_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+	.halt_reg = 0x8c010,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8c010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb3_prim_clkref_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+	.halt_reg = 0x1a050,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1a050,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb3_prim_phy_aux_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb3_prim_phy_aux_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+	.halt_reg = 0x1a054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1a054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb3_prim_phy_com_aux_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb3_prim_phy_aux_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+	.halt_reg = 0x1a058,
+	.halt_check = BRANCH_HALT_SKIP,
+	.hwcg_reg = 0x1a058,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x1a058,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb3_prim_phy_pipe_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_video_ahb_clk = {
+	.halt_reg = 0x17004,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x17004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x17004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_video_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_video_axi_clk = {
+	.halt_reg = 0x17014,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x17014,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x17014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_video_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_video_throttle_axi_clk = {
+	.halt_reg = 0x17020,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x17020,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x17020,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_video_throttle_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_video_xo_clk = {
+	.halt_reg = 0x1702c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1702c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_video_xo_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap *gcc_lagoon_clocks[] = {
+	[GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+	[GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+	[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+	[GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
+	[GCC_CAMERA_AXI_CLK] = &gcc_camera_axi_clk.clkr,
+	[GCC_CAMERA_THROTTLE_NRT_AXI_CLK] =
+		&gcc_camera_throttle_nrt_axi_clk.clkr,
+	[GCC_CAMERA_THROTTLE_RT_AXI_CLK] = &gcc_camera_throttle_rt_axi_clk.clkr,
+	[GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+	[GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+	[GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+	[GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+	[GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+	[GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+	[GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+	[GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
+	[GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
+	[GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+	[GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
+	[GCC_DISP_AXI_CLK] = &gcc_disp_axi_clk.clkr,
+	[GCC_DISP_CC_SLEEP_CLK] = &gcc_disp_cc_sleep_clk.clkr,
+	[GCC_DISP_CC_XO_CLK] = &gcc_disp_cc_xo_clk.clkr,
+	[GCC_DISP_GPLL0_CLK] = &gcc_disp_gpll0_clk.clkr,
+	[GCC_DISP_THROTTLE_AXI_CLK] = &gcc_disp_throttle_axi_clk.clkr,
+	[GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+	[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+	[GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+	[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+	[GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+	[GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+	[GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+	[GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+	[GCC_GPU_GPLL0_CLK] = &gcc_gpu_gpll0_clk.clkr,
+	[GCC_GPU_GPLL0_DIV_CLK] = &gcc_gpu_gpll0_div_clk.clkr,
+	[GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+	[GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+	[GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr,
+	[GCC_NPU_BWMON_AXI_CLK] = &gcc_npu_bwmon_axi_clk.clkr,
+	[GCC_NPU_BWMON_DMA_CFG_AHB_CLK] = &gcc_npu_bwmon_dma_cfg_ahb_clk.clkr,
+	[GCC_NPU_BWMON_DSP_CFG_AHB_CLK] = &gcc_npu_bwmon_dsp_cfg_ahb_clk.clkr,
+	[GCC_NPU_CFG_AHB_CLK] = &gcc_npu_cfg_ahb_clk.clkr,
+	[GCC_NPU_DMA_CLK] = &gcc_npu_dma_clk.clkr,
+	[GCC_NPU_GPLL0_CLK] = &gcc_npu_gpll0_clk.clkr,
+	[GCC_NPU_GPLL0_DIV_CLK] = &gcc_npu_gpll0_div_clk.clkr,
+	[GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+	[GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+	[GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+	[GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+	[GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+	[GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+	[GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+	[GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+	[GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+	[GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+	[GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+	[GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+	[GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+	[GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+	[GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+	[GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+	[GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+	[GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+	[GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+	[GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+	[GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+	[GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+	[GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+	[GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+	[GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+	[GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+	[GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+	[GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+	[GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+	[GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+	[GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+	[GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+	[GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+	[GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+	[GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+	[GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+	[GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+	[GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+	[GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+	[GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+	[GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+	[GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+	[GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+	[GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+	[GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+	[GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+	[GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
+	[GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+	[GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+	[GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+	[GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+	[GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+	[GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+	[GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+	[GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+	[GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+	[GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+	[GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+	[GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
+		&gcc_ufs_phy_unipro_core_clk_src.clkr,
+	[GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+	[GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+	[GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+	[GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+		&gcc_usb30_prim_mock_utmi_clk_src.clkr,
+	[GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+	[GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+	[GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+	[GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+	[GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+	[GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+	[GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
+	[GCC_VIDEO_AXI_CLK] = &gcc_video_axi_clk.clkr,
+	[GCC_VIDEO_THROTTLE_AXI_CLK] = &gcc_video_throttle_axi_clk.clkr,
+	[GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+	[GPLL0] = &gpll0.clkr,
+	[GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+	[GPLL0_OUT_ODD] = &gpll0_out_odd.clkr,
+	[GPLL6] = &gpll6.clkr,
+	[GPLL6_OUT_EVEN] = &gpll6_out_even.clkr,
+	[GPLL7] = &gpll7.clkr,
+	[GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr,
+	[GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr,
+	[GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] =
+				&gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr,
+	[GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] =
+				&gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr,
+	[GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] =
+				&gcc_ufs_phy_ice_core_hw_ctl_clk.clkr,
+	[GCC_GPU_GPLL0_MAIN_DIV_CLK_SRC] = &gcc_gpu_gpll0_main_div_clk_src.clkr,
+};
+
+static const struct qcom_reset_map gcc_lagoon_resets[] = {
+	[GCC_QUSB2PHY_PRIM_BCR] = { 0x1d000 },
+	[GCC_QUSB2PHY_SEC_BCR] = { 0x1e000 },
+	[GCC_SDCC1_BCR] = { 0x4b000 },
+	[GCC_SDCC2_BCR] = { 0x20000 },
+	[GCC_UFS_PHY_BCR] = { 0x3a000 },
+	[GCC_USB30_PRIM_BCR] = { 0x1a000 },
+	[GCC_USB3_PHY_PRIM_BCR] = { 0x1c000 },
+	[GCC_USB3_DP_PHY_PRIM_BCR] = { 0x1c008 },
+};
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+};
+
+static const struct regmap_config gcc_lagoon_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0xbf030,
+	.fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_lagoon_desc = {
+	.config = &gcc_lagoon_regmap_config,
+	.clks = gcc_lagoon_clocks,
+	.num_clks = ARRAY_SIZE(gcc_lagoon_clocks),
+	.resets = gcc_lagoon_resets,
+	.num_resets = ARRAY_SIZE(gcc_lagoon_resets),
+};
+
+static const struct of_device_id gcc_lagoon_match_table[] = {
+	{ .compatible = "qcom,lagoon-gcc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, gcc_lagoon_match_table);
+
+static int gcc_lagoon_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	int ret;
+
+	regmap = qcom_cc_map(pdev, &gcc_lagoon_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx regulator\n");
+		return PTR_ERR(vdd_cx.regulator[0]);
+	}
+
+	vdd_cx_ao.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx_ao");
+	if (IS_ERR(vdd_cx_ao.regulator[0])) {
+		if (PTR_ERR(vdd_cx_ao.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx_ao regulator\n");
+		return PTR_ERR(vdd_cx_ao.regulator[0]);
+	}
+
+	ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+			ARRAY_SIZE(gcc_dfs_clocks));
+	if (ret)
+		return ret;
+
+	/* Disable the GPLL0 active input to NPU and GPU via MISC registers */
+	regmap_update_bits(regmap, GCC_NPU_MISC, 0x3, 0x3);
+	regmap_update_bits(regmap, GCC_GPU_MISC, 0x3, 0x3);
+
+	ret = qcom_cc_really_probe(pdev, &gcc_lagoon_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register GCC clocks\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered GCC clocks\n");
+	return ret;
+}
+
+static struct platform_driver gcc_lagoon_driver = {
+	.probe = gcc_lagoon_probe,
+	.driver = {
+		.name = "gcc-lagoon",
+		.of_match_table = gcc_lagoon_match_table,
+	},
+};
+
+static int __init gcc_lagoon_init(void)
+{
+	return platform_driver_register(&gcc_lagoon_driver);
+}
+core_initcall(gcc_lagoon_init);
+
+static void __exit gcc_lagoon_exit(void)
+{
+	platform_driver_unregister(&gcc_lagoon_driver);
+}
+module_exit(gcc_lagoon_exit);
+
+MODULE_DESCRIPTION("QTI GCC LAGOON Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gdsc-regulator.c b/drivers/clk/qcom/gdsc-regulator.c
index efc17a7..1b95e82 100644
--- a/drivers/clk/qcom/gdsc-regulator.c
+++ b/drivers/clk/qcom/gdsc-regulator.c
@@ -371,7 +371,8 @@ static int gdsc_enable(struct regulator_dev *rdev)
 		msm_bus_scale_client_update_request(sc->bus_handle, 0);
 		sc->is_bus_enabled = false;
 	}
-	if (sc->parent_regulator)
+
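+	/* Drop the parent regulator vote only if the enable failed */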
+	if (ret && sc->parent_regulator)
 		regulator_set_voltage(sc->parent_regulator, 0, INT_MAX);
 
 	return ret;
@@ -383,13 +384,6 @@ static int gdsc_disable(struct regulator_dev *rdev)
 	uint32_t regval;
 	int i, ret = 0;
 
-	if (sc->parent_regulator) {
-		ret = regulator_set_voltage(sc->parent_regulator,
-				RPMH_REGULATOR_LEVEL_LOW_SVS, INT_MAX);
-		if (ret)
-			return ret;
-	}
-
 	if (sc->force_root_en)
 		clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
 
diff --git a/drivers/clk/qcom/npucc-lito.c b/drivers/clk/qcom/npucc-lito.c
index 5a56ca9..4133f2d 100644
--- a/drivers/clk/qcom/npucc-lito.c
+++ b/drivers/clk/qcom/npucc-lito.c
@@ -11,6 +11,7 @@
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/nvmem-consumer.h>
 #include <linux/of_device.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
@@ -116,6 +117,10 @@ static const u32 crc_reg_val[] = {
 	CRC_MND_CFG_SETTING, CRC_SID_FSM_CTRL_SETTING,
 };
 
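+/* All-zero CRC register settings for parts fused to run without CRC */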
+static const u32 no_crc_reg_val[] = {
+	0x0, 0x0,
+};
+
 static struct alpha_pll_config npu_cc_pll0_config = {
 	.l = 0x14,
 	.cal_l = 0x49,
@@ -177,9 +182,9 @@ static struct clk_alpha_pll_postdiv npu_cc_pll0_out_even = {
 };
 
 static struct alpha_pll_config npu_cc_pll1_config = {
-	.l = 0xF,
+	.l = 0x4E,
 	.cal_l = 0x44,
-	.alpha = 0xA000,
+	.alpha = 0x2000,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x329A699C,
@@ -228,7 +233,6 @@ static struct clk_alpha_pll_postdiv npu_cc_pll1_out_even = {
 		.name = "npu_cc_pll1_out_even",
 		.parent_names = (const char *[]){ "npu_cc_pll1" },
 		.num_parents = 1,
-		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_alpha_pll_postdiv_lucid_ops,
 	},
 };
@@ -292,6 +296,17 @@ static const struct freq_tbl ftbl_npu_cc_cal_hm0_clk_src[] = {
 	{ }
 };
 
+static const struct freq_tbl ftbl_npu_cc_cal_hm0_clk_src_no_crc[] = {
+	F(200000000, P_NPU_CC_CRC_DIV, 2, 0, 0),
+	F(230000000, P_NPU_CC_CRC_DIV, 2, 0, 0),
+	F(422000000, P_NPU_CC_CRC_DIV, 2, 0, 0),
+	F(557000000, P_NPU_CC_CRC_DIV, 2, 0, 0),
+	F(729000000, P_NPU_CC_CRC_DIV, 2, 0, 0),
+	F(844000000, P_NPU_CC_CRC_DIV, 2, 0, 0),
+	F(1000000000, P_NPU_CC_CRC_DIV, 2, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 npu_cc_cal_hm0_clk_src = {
 	.cmd_rcgr = 0x1100,
 	.mnd_width = 0,
@@ -328,6 +343,16 @@ static const struct freq_tbl ftbl_npu_cc_core_clk_src[] = {
 	{ }
 };
 
+static const struct freq_tbl ftbl_npu_cc_core_clk_src_v2[] = {
+	F(60000000, P_GCC_NPU_GPLL0_DIV_CLK, 5, 0, 0),
+	F(100000000, P_GCC_NPU_GPLL0_DIV_CLK, 3, 0, 0),
+	F(200000000, P_GCC_NPU_GPLL0_CLK, 3, 0, 0),
+	F(333333333, P_NPU_CC_PLL1_OUT_EVEN, 3, 0, 0),
+	F(400000000, P_NPU_CC_PLL1_OUT_EVEN, 2.5, 0, 0),
+	F(500000000, P_NPU_CC_PLL1_OUT_EVEN, 2, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 npu_cc_core_clk_src = {
 	.cmd_rcgr = 0x1010,
 	.mnd_width = 0,
@@ -339,7 +364,6 @@ static struct clk_rcg2 npu_cc_core_clk_src = {
 		.name = "npu_cc_core_clk_src",
 		.parent_names = npu_cc_parent_names_0,
 		.num_parents = 6,
-		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_rcg2_ops,
 		.vdd_class = &vdd_cx,
 		.num_rate_max = VDD_NUM,
@@ -362,6 +386,14 @@ static const struct freq_tbl ftbl_npu_cc_lmh_clk_src[] = {
 	{ }
 };
 
+static const struct freq_tbl ftbl_npu_cc_lmh_clk_src_v2[] = {
+	F(60000000, P_GCC_NPU_GPLL0_DIV_CLK, 5, 0, 0),
+	F(100000000, P_GCC_NPU_GPLL0_DIV_CLK, 3, 0, 0),
+	F(200000000, P_GCC_NPU_GPLL0_CLK, 3, 0, 0),
+	F(285714286, P_NPU_CC_PLL1_OUT_EVEN, 3.5, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 npu_cc_lmh_clk_src = {
 	.cmd_rcgr = 0x1060,
 	.mnd_width = 0,
@@ -373,7 +405,6 @@ static struct clk_rcg2 npu_cc_lmh_clk_src = {
 		.name = "npu_cc_lmh_clk_src",
 		.parent_names = npu_cc_parent_names_0,
 		.num_parents = 6,
-		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_rcg2_ops,
 		.vdd_class = &vdd_cx,
 		.num_rate_max = VDD_NUM,
@@ -1018,10 +1049,46 @@ static const struct qcom_cc_desc npu_qdsp6ss_pll_lito_desc = {
 
 static const struct of_device_id npu_cc_lito_match_table[] = {
 	{ .compatible = "qcom,lito-npucc" },
+	{ .compatible = "qcom,lito-npucc-v2" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, npu_cc_lito_match_table);
 
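+/*
+ * Adjust static clock data before registration: parts fused with a
+ * non-zero NPU speed bin (bits 17:10 of the "npu-bin" nvmem cell) run
+ * without CRC, and v2 parts take different PLL1 settings and fmax tables.
+ */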
+static int npu_cc_lito_fixup(struct platform_device *pdev)
+{
+	u32 val;
+	int ret;
+
+	ret = nvmem_cell_read_u32(&pdev->dev, "npu-bin", &val);
+	if (ret)
+		return ret;
+
+	val = val & GENMASK(17, 10);
+	if (val) {
+		npu_cc_pll0_config.custom_reg_val = no_crc_reg_val;
+		npu_cc_crc_div.div = 1;
+		npu_cc_cal_hm0_clk_src.freq_tbl =
+					ftbl_npu_cc_cal_hm0_clk_src_no_crc;
+	}
+
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,lito-npucc-v2")) {
+		npu_cc_pll1_config.l = 0x34;
+		npu_cc_pll1_config.alpha = 0x1555;
+
+		npu_cc_lmh_clk_src.freq_tbl = ftbl_npu_cc_lmh_clk_src_v2;
+		npu_cc_lmh_clk_src.clkr.hw.init->rate_max[VDD_LOW_L1] =
+								200000000;
+		npu_cc_lmh_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] =
+								285714286;
+
+		npu_cc_core_clk_src.freq_tbl = ftbl_npu_cc_core_clk_src_v2;
+		npu_cc_core_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] =
+								400000000;
+	}
+
+	return 0;
+}
+
 static int npu_clocks_lito_probe(struct platform_device *pdev,
 					const struct qcom_cc_desc *desc)
 {
@@ -1074,6 +1141,10 @@ static int npu_cc_lito_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	ret = npu_cc_lito_fixup(pdev);
+	if (ret)
+		return ret;
+
 	ret = npu_clocks_lito_probe(pdev, &npu_cc_lito_desc);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "npu_cc clock registration failed, ret=%d\n",
diff --git a/drivers/clk/qcom/videocc-lagoon.c b/drivers/clk/qcom/videocc-lagoon.c
new file mode 100644
index 0000000..5ddb669
--- /dev/null
+++ b/drivers/clk/qcom/videocc-lagoon.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,videocc-lagoon.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "reset.h"
+#include "vdd-level-lito.h"
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
+
+enum {
+	P_BI_TCXO,
+	P_CHIP_SLEEP_CLK,
+	P_CORE_BI_PLL_TEST_SE,
+	P_VIDEO_PLL0_OUT_EVEN,
+};
+
+static const struct parent_map video_cc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_VIDEO_PLL0_OUT_EVEN, 3 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const video_cc_parent_names_0[] = {
+	"bi_tcxo",
+	"video_pll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+	{ P_CHIP_SLEEP_CLK, 0 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const video_cc_parent_names_1[] = {
+	"chip_sleep_clk",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map video_cc_parent_map_2[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const video_cc_parent_names_2[] = {
+	"bi_tcxo",
+	"core_bi_pll_test_se",
+};
+
+static const struct pll_vco fabia_vco[] = {
+	{ 125000000, 1000000000, 1 },
+};
+
+/* 600 MHz */
+static const struct alpha_pll_config video_pll0_config = {
+	.l = 0x1F,
+	.cal_l = 0x29,
+	.alpha = 0x4000,
+	.config_ctl_val = 0x20485699,
+	.config_ctl_hi_val = 0x00002067,
+	.test_ctl_val = 0x40000000,
+	.test_ctl_hi_val = 0x00000000,
+	.user_ctl_val = 0x00000101,
+	.user_ctl_hi_val = 0x00004005,
+};
+
+static struct clk_alpha_pll video_pll0 = {
+	.offset = 0x0,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "video_pll0",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_fabia_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_video_pll0_out_even[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv video_pll0_out_even = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_video_pll0_out_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_video_pll0_out_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "video_pll0_out_even",
+		.parent_names = (const char *[]){ "video_pll0" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_video_cc_iris_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(133250000, P_VIDEO_PLL0_OUT_EVEN, 2, 0, 0),
+	F(240000000, P_VIDEO_PLL0_OUT_EVEN, 1.5, 0, 0),
+	F(300000000, P_VIDEO_PLL0_OUT_EVEN, 1, 0, 0),
+	F(380000000, P_VIDEO_PLL0_OUT_EVEN, 1, 0, 0),
+	F(460000000, P_VIDEO_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 video_cc_iris_clk_src = {
+	.cmd_rcgr = 0x1000,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = video_cc_parent_map_0,
+	.freq_tbl = ftbl_video_cc_iris_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "video_cc_iris_clk_src",
+		.parent_names = video_cc_parent_names_0,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 133250000,
+			[VDD_LOW] = 240000000,
+			[VDD_LOW_L1] = 300000000,
+			[VDD_NOMINAL] = 380000000,
+			[VDD_HIGH] = 460000000},
+	},
+};
+
+static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = {
+	F(32764, P_CHIP_SLEEP_CLK, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 video_cc_sleep_clk_src = {
+	.cmd_rcgr = 0x701c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = video_cc_parent_map_1,
+	.freq_tbl = ftbl_video_cc_sleep_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "video_cc_sleep_clk_src",
+		.parent_names = video_cc_parent_names_1,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 32764},
+	},
+};
+
+static struct clk_branch video_cc_iris_ahb_clk = {
+	.halt_reg = 0x5004,
+	.halt_check = BRANCH_VOTED,
+	.clkr = {
+		.enable_reg = 0x5004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "video_cc_iris_ahb_clk",
+			.parent_names = (const char *[]){
+				"video_cc_iris_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch video_cc_mvs0_axi_clk = {
+	.halt_reg = 0x800c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x800c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "video_cc_mvs0_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch video_cc_mvs0_core_clk = {
+	.halt_reg = 0x3010,
+	.halt_check = BRANCH_VOTED,
+	.hwcg_reg = 0x3010,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x3010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "video_cc_mvs0_core_clk",
+			.parent_names = (const char *[]){
+				"video_cc_iris_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch video_cc_mvsc_core_clk = {
+	.halt_reg = 0x2014,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "video_cc_mvsc_core_clk",
+			.parent_names = (const char *[]){
+				"video_cc_iris_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch video_cc_mvsc_ctl_axi_clk = {
+	.halt_reg = 0x8004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "video_cc_mvsc_ctl_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch video_cc_sleep_clk = {
+	.halt_reg = 0x7034,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x7034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "video_cc_sleep_clk",
+			.parent_names = (const char *[]){
+				"video_cc_sleep_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch video_cc_venus_ahb_clk = {
+	.halt_reg = 0x801c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x801c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "video_cc_venus_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch video_cc_xo_clk = {
+	.halt_reg = 0x7018,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x7018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "video_cc_xo_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap *video_cc_lagoon_clocks[] = {
+	[VIDEO_CC_IRIS_AHB_CLK] = &video_cc_iris_ahb_clk.clkr,
+	[VIDEO_CC_IRIS_CLK_SRC] = &video_cc_iris_clk_src.clkr,
+	[VIDEO_CC_MVS0_AXI_CLK] = &video_cc_mvs0_axi_clk.clkr,
+	[VIDEO_CC_MVS0_CORE_CLK] = &video_cc_mvs0_core_clk.clkr,
+	[VIDEO_CC_MVSC_CORE_CLK] = &video_cc_mvsc_core_clk.clkr,
+	[VIDEO_CC_MVSC_CTL_AXI_CLK] = &video_cc_mvsc_ctl_axi_clk.clkr,
+	[VIDEO_CC_SLEEP_CLK] = &video_cc_sleep_clk.clkr,
+	[VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr,
+	[VIDEO_CC_VENUS_AHB_CLK] = &video_cc_venus_ahb_clk.clkr,
+	[VIDEO_CC_XO_CLK] = &video_cc_xo_clk.clkr,
+	[VIDEO_PLL0] = &video_pll0.clkr,
+	[VIDEO_PLL0_OUT_EVEN] = &video_pll0_out_even.clkr,
+};
+
+static const struct qcom_reset_map video_cc_lito_resets[] = {
+	[VCODEC_VIDEO_CC_INTERFACE_BCR] = { 0x8000 },
+	[VCODEC_VIDEO_CC_MVS0_BCR] = { 0x3000 },
+	[VCODEC_VIDEO_CC_MVSC_BCR] = { 0x2000 },
+};
+
+static const struct regmap_config video_cc_lagoon_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0xb000,
+	.fast_io = true,
+};
+
+static const struct qcom_cc_desc video_cc_lagoon_desc = {
+	.config = &video_cc_lagoon_regmap_config,
+	.clks = video_cc_lagoon_clocks,
+	.num_clks = ARRAY_SIZE(video_cc_lagoon_clocks),
+};
+
+static const struct of_device_id video_cc_lagoon_match_table[] = {
+	{ .compatible = "qcom,lagoon-videocc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, video_cc_lagoon_match_table);
+
+static int video_cc_lagoon_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	struct clk *clk;
+	int ret;
+
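+	/* The AHB clock is looked up only to defer probe until it exists */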
+	clk = devm_clk_get(&pdev->dev, "cfg_ahb_clk");
+	if (IS_ERR(clk)) {
+		if (PTR_ERR(clk) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get ahb clock handle\n");
+		return PTR_ERR(clk);
+	}
+	devm_clk_put(&pdev->dev, clk);
+
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get vdd_cx regulator\n");
+		return PTR_ERR(vdd_cx.regulator[0]);
+	}
+
+	regmap = qcom_cc_map(pdev, &video_cc_lagoon_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	clk_fabia_pll_configure(&video_pll0, regmap, &video_pll0_config);
+
+	ret = qcom_cc_really_probe(pdev, &video_cc_lagoon_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register VIDEO CC clocks\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered VIDEO CC clocks\n");
+
+	return ret;
+}
+
+static struct platform_driver video_cc_lagoon_driver = {
+	.probe = video_cc_lagoon_probe,
+	.driver = {
+		.name = "video_cc-lagoon",
+		.of_match_table = video_cc_lagoon_match_table,
+	},
+};
+
+static int __init video_cc_lagoon_init(void)
+{
+	return platform_driver_register(&video_cc_lagoon_driver);
+}
+core_initcall(video_cc_lagoon_init);
+
+static void __exit video_cc_lagoon_exit(void)
+{
+	platform_driver_unregister(&video_cc_lagoon_driver);
+}
+module_exit(video_cc_lagoon_exit);
+
+MODULE_DESCRIPTION("QTI VIDEO_CC LAGOON Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c
index bc674b7..f8d7b5e 100644
--- a/drivers/cpuidle/lpm-levels-of.c
+++ b/drivers/cpuidle/lpm-levels-of.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 
 /*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME
@@ -565,6 +565,9 @@ static int parse_cpu_levels(struct device_node *dn, struct lpm_cluster *c)
 	if (ret)
 		return ret;
 
+	cpu->ipi_prediction = !(of_property_read_bool(dn,
+					"qcom,disable-ipi-prediction"));
+
 	cpu->lpm_prediction = !(of_property_read_bool(dn,
 					"qcom,disable-prediction"));
 
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index e7801c2..01f4f31 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -82,6 +82,9 @@ struct lpm_cluster *lpm_root_node;
 static bool lpm_prediction = true;
 module_param_named(lpm_prediction, lpm_prediction, bool, 0664);
 
+static bool lpm_ipi_prediction = true;
+module_param_named(lpm_ipi_prediction, lpm_ipi_prediction, bool, 0664);
+
 struct lpm_history {
 	uint32_t resi[MAXSAMPLES];
 	int mode[MAXSAMPLES];
@@ -92,8 +95,14 @@ struct lpm_history {
 	int64_t stime;
 };
 
-static DEFINE_PER_CPU(struct lpm_history, hist);
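+/* Per-CPU ring buffer of recent IPI inter-arrival times */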
+struct ipi_history {
+	uint32_t interval[MAXSAMPLES];
+	uint32_t current_ptr;
+	ktime_t cpu_idle_resched_ts;
+};
 
+static DEFINE_PER_CPU(struct lpm_history, hist);
+static DEFINE_PER_CPU(struct ipi_history, cpu_ipi_history);
 static DEFINE_PER_CPU(struct lpm_cpu*, cpu_lpm);
 static bool suspend_in_progress;
 static struct hrtimer lpm_hrtimer;
@@ -453,14 +462,63 @@ static void biastimer_start(uint32_t time_ns)
 	hrtimer_start(cpu_biastimer, bias_ktime, HRTIMER_MODE_REL_PINNED);
 }
 
-static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
-		struct lpm_cpu *cpu, int *idx_restrict,
-		uint32_t *idx_restrict_time)
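+/*
+ * find_deviation - average the MAXSAMPLES entries in @interval, repeatedly
+ * discarding the largest sample until the remaining ones are consistent
+ * (standard deviation within @ref_stddev, or dwarfed by the average).
+ * Returns the average and stores the projected event time in @stime, or
+ * returns 0 if no consistent average could be found.
+ */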
+static uint64_t find_deviation(int *interval, uint32_t ref_stddev,
+				int64_t *stime)
 {
-	int i, j, divisor;
+	int divisor, i;
 	uint64_t max, avg, stddev;
 	int64_t thresh = LLONG_MAX;
+
+	do {
+		max = avg = divisor = stddev = 0;
+		for (i = 0; i < MAXSAMPLES; i++) {
+			int64_t value = interval[i];
+
+			if (value <= thresh) {
+				avg += value;
+				divisor++;
+				if (value > max)
+					max = value;
+			}
+		}
+		do_div(avg, divisor);
+
+		for (i = 0; i < MAXSAMPLES; i++) {
+			int64_t value = interval[i];
+
+			if (value <= thresh) {
+				int64_t diff = value - avg;
+
+				stddev += diff * diff;
+			}
+		}
+		do_div(stddev, divisor);
+		stddev = int_sqrt(stddev);
+
+		/*
+		 * If the deviation is small enough, return the average;
+		 * otherwise drop the largest sample and retry.
+		 */
+		if (((avg > stddev * 6) && (divisor >= (MAXSAMPLES - 1)))
+					|| stddev <= ref_stddev) {
+			*stime = ktime_to_us(ktime_get()) + avg;
+			return avg;
+		}
+		thresh = max - 1;
+
+	} while (divisor > (MAXSAMPLES - 1));
+
+	return 0;
+}
+
+static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
+		struct lpm_cpu *cpu, int *idx_restrict,
+		uint32_t *idx_restrict_time, uint32_t *ipi_predicted)
+{
+	int i, j;
+	uint64_t avg;
 	struct lpm_history *history = &per_cpu(hist, dev->cpu);
+	struct ipi_history *ipi_history = &per_cpu(cpu_ipi_history, dev->cpu);
 
 	if (!lpm_prediction || !cpu->lpm_prediction)
 		return 0;
@@ -491,44 +549,9 @@ static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
 	 * that mode.
 	 */
 
-again:
-	max = avg = divisor = stddev = 0;
-	for (i = 0; i < MAXSAMPLES; i++) {
-		int64_t value = history->resi[i];
-
-		if (value <= thresh) {
-			avg += value;
-			divisor++;
-			if (value > max)
-				max = value;
-		}
-	}
-	do_div(avg, divisor);
-
-	for (i = 0; i < MAXSAMPLES; i++) {
-		int64_t value = history->resi[i];
-
-		if (value <= thresh) {
-			int64_t diff = value - avg;
-
-			stddev += diff * diff;
-		}
-	}
-	do_div(stddev, divisor);
-	stddev = int_sqrt(stddev);
-
-	/*
-	 * If the deviation is less, return the average, else
-	 * ignore one maximum sample and retry
-	 */
-	if (((avg > stddev * 6) && (divisor >= (MAXSAMPLES - 1)))
-					|| stddev <= cpu->ref_stddev) {
-		history->stime = ktime_to_us(ktime_get()) + avg;
+	avg = find_deviation(history->resi, cpu->ref_stddev, &(history->stime));
+	if (avg)
 		return avg;
-	} else if (divisor  > (MAXSAMPLES - 1)) {
-		thresh = max - 1;
-		goto again;
-	}
 
 	/*
 	 * Find the number of premature exits for each of the mode,
@@ -571,6 +594,18 @@ static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
 			}
 		}
 	}
+
+	if (*idx_restrict_time || !cpu->ipi_prediction || !lpm_ipi_prediction)
+		return 0;
+
+	avg = find_deviation(ipi_history->interval, cpu->ref_stddev
+						+ DEFAULT_IPI_STDDEV,
+						&(history->stime));
+	if (avg) {
+		*ipi_predicted = 1;
+		return avg;
+	}
+
 	return 0;
 }
 
@@ -636,6 +671,21 @@ static inline bool lpm_disallowed(s64 sleep_us, int cpu, struct lpm_cpu *pm_cpu)
 	return false;
 }
 
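+/*
+ * If a timer event is due before the requested sleep would end, pull the
+ * next wakeup in so it covers the level's entry/exit latency. Leaves
+ * *next_wakeup_us untouched when no event is pending or the event is
+ * closer than the latency itself.
+ */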
+static void calculate_next_wakeup(uint32_t *next_wakeup_us,
+				  uint32_t next_event_us,
+				  uint32_t lvl_latency_us,
+				  s64 sleep_us)
+{
+	if (!next_event_us)
+		return;
+
+	if (next_event_us < lvl_latency_us)
+		return;
+
+	if (next_event_us < sleep_us)
+		*next_wakeup_us = next_event_us - lvl_latency_us;
+}
+
 static int cpu_power_select(struct cpuidle_device *dev,
 		struct lpm_cpu *cpu)
 {
@@ -649,7 +699,7 @@ static int cpu_power_select(struct cpuidle_device *dev,
 	int i, idx_restrict;
 	uint32_t lvl_latency_us = 0;
 	uint64_t predicted = 0;
-	uint32_t htime = 0, idx_restrict_time = 0;
+	uint32_t htime = 0, idx_restrict_time = 0, ipi_predicted = 0;
 	uint32_t next_wakeup_us = (uint32_t)sleep_us;
 	uint32_t min_residency, max_residency;
 	struct power_params *pwr_params;
@@ -672,13 +722,8 @@ static int cpu_power_select(struct cpuidle_device *dev,
 		if (latency_us < lvl_latency_us)
 			break;
 
-		if (next_event_us) {
-			if (next_event_us < lvl_latency_us)
-				break;
-
-			if (next_event_us < sleep_us)
-				next_wakeup_us = next_event_us - lvl_latency_us;
-		}
+		calculate_next_wakeup(&next_wakeup_us, next_event_us,
+				      lvl_latency_us, sleep_us);
 
 		if (!i && !cpu_isolated(dev->cpu)) {
 			/*
@@ -688,7 +733,8 @@ static int cpu_power_select(struct cpuidle_device *dev,
 			 */
 			if (next_wakeup_us > max_residency) {
 				predicted = lpm_cpuidle_predict(dev, cpu,
-					&idx_restrict, &idx_restrict_time);
+					&idx_restrict, &idx_restrict_time,
+					&ipi_predicted);
 				if (predicted && (predicted < min_residency))
 					predicted = min_residency;
 			} else
@@ -725,7 +771,9 @@ static int cpu_power_select(struct cpuidle_device *dev,
 	if ((predicted || (idx_restrict != cpu->nlevels + 1)) &&
 	    (best_level < (cpu->nlevels-1))) {
 		htime = predicted + cpu->tmr_add;
-		if (htime == cpu->tmr_add)
+		if (lpm_ipi_prediction && cpu->ipi_prediction)
+			htime += DEFAULT_IPI_TIMER_ADD;
+		if (!predicted)
 			htime = idx_restrict_time;
 		else if (htime > max_residency)
 			htime = max_residency;
@@ -738,8 +786,8 @@ static int cpu_power_select(struct cpuidle_device *dev,
 done_select:
 	trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us);
 
-	trace_cpu_pred_select(idx_restrict_time ? 2 : (predicted ? 1 : 0),
-			predicted, htime);
+	trace_cpu_pred_select(idx_restrict_time ? 2 : (ipi_predicted ?
+				3 : (predicted ? 1 : 0)), predicted, htime);
 
 	return best_level;
 }
@@ -1365,6 +1413,20 @@ static int lpm_cpuidle_select(struct cpuidle_driver *drv,
 	return cpu_power_select(dev, cpu);
 }
 
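+/*
+ * Record the time since the previous IPI to @cpu in a per-CPU ring
+ * buffer; these intervals feed the IPI prediction heuristic.
+ */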
+void update_ipi_history(int cpu)
+{
+	struct ipi_history *history = &per_cpu(cpu_ipi_history, cpu);
+	ktime_t now = ktime_get();
+
+	history->interval[history->current_ptr] =
+			ktime_to_us(ktime_sub(now,
+			history->cpu_idle_resched_ts));
+	(history->current_ptr)++;
+	if (history->current_ptr >= MAXSAMPLES)
+		history->current_ptr = 0;
+	history->cpu_idle_resched_ts = now;
+}
+
 static void update_history(struct cpuidle_device *dev, int idx)
 {
 	struct lpm_history *history = &per_cpu(hist, dev->cpu);
diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h
index 7f3d6f0..7610677 100644
--- a/drivers/cpuidle/lpm-levels.h
+++ b/drivers/cpuidle/lpm-levels.h
@@ -11,7 +11,9 @@
 #define CLUST_SMPL_INVLD_TIME 40000
 #define DEFAULT_PREMATURE_CNT 3
 #define DEFAULT_STDDEV 100
+#define DEFAULT_IPI_STDDEV 400
 #define DEFAULT_TIMER_ADD 100
+#define DEFAULT_IPI_TIMER_ADD 900
 #define TIMER_ADD_LOW 100
 #define TIMER_ADD_HIGH 1500
 #define STDDEV_LOW 100
@@ -46,6 +48,7 @@ struct lpm_cpu {
 	uint32_t ref_premature_cnt;
 	uint32_t tmr_add;
 	bool lpm_prediction;
+	bool ipi_prediction;
 	uint64_t bias;
 	struct cpuidle_driver *drv;
 	struct lpm_cluster *parent;
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index dac0203..097e871 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -20,7 +20,8 @@
 #include <soc/qcom/qseecomi.h>
 #include "iceregs.h"
 #include <linux/pfk.h>
-
+#include <linux/atomic.h>
+#include <linux/wait.h>
 
 #define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
 	((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))
@@ -800,6 +801,28 @@ static int qcom_ice_remove(struct platform_device *pdev)
 
 static int  qcom_ice_suspend(struct platform_device *pdev)
 {
+	struct ice_device *ice_dev;
+	int ret = 0;
+
+	ice_dev = (struct ice_device *)platform_get_drvdata(pdev);
+
+	if (!ice_dev)
+		return -EINVAL;
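+
+	/* Give any in-flight ICE operation up to a second to finish */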
+	if (atomic_read(&ice_dev->is_ice_busy) != 0) {
+		ret = wait_event_interruptible_timeout(
+			ice_dev->block_suspend_ice_queue,
+			atomic_read(&ice_dev->is_ice_busy) == 0,
+			msecs_to_jiffies(1000));
+
+		if (!ret) {
+			pr_err("%s: timed out suspending ICE during an ongoing operation\n",
+				__func__);
+			atomic_set(&ice_dev->is_ice_suspended, 0);
+			return -ETIME;
+		}
+	}
+
+	atomic_set(&ice_dev->is_ice_suspended, 1);
 	return 0;
 }
 
@@ -1030,7 +1053,7 @@ static int qcom_ice_finish_init(struct ice_device *ice_dev)
 		err = -EFAULT;
 		goto out;
 	}
-
+	init_waitqueue_head(&ice_dev->block_suspend_ice_queue);
 	qcom_ice_low_power_mode_enable(ice_dev);
 	qcom_ice_optimization_enable(ice_dev);
 	qcom_ice_config_proc_ignore(ice_dev);
@@ -1038,7 +1061,8 @@ static int qcom_ice_finish_init(struct ice_device *ice_dev)
 	qcom_ice_enable(ice_dev);
 	ice_dev->is_ice_enabled = true;
 	qcom_ice_enable_intr(ice_dev);
-
+	atomic_set(&ice_dev->is_ice_suspended, 0);
+	atomic_set(&ice_dev->is_ice_busy, 0);
 out:
 	return err;
 }
@@ -1145,7 +1169,7 @@ static int qcom_ice_resume(struct platform_device *pdev)
 		 */
 		qcom_ice_enable(ice_dev);
 	}
-
+	atomic_set(&ice_dev->is_ice_suspended, 0);
 	return 0;
 }
 
@@ -1415,8 +1439,20 @@ static int qcom_ice_config_start(struct platform_device *pdev,
 		return 0;
 	}
 
+	if (atomic_read(&ice_dev->is_ice_suspended) == 1)
+		return -EINVAL;
+
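+	/* Mark ICE busy so a concurrent suspend waits for this request */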
+	if (async)
+		atomic_set(&ice_dev->is_ice_busy, 1);
+
 	ret = pfk_load_key_start(req->bio, ice_dev, &pfk_crypto_data,
 			&is_pfe, async);
+
+	if (async) {
+		atomic_set(&ice_dev->is_ice_busy, 0);
+		wake_up_interruptible(&ice_dev->block_suspend_ice_queue);
+	}
+
 	if (is_pfe) {
 		if (ret) {
 			if (ret != -EBUSY && ret != -EAGAIN)
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index cfc50a6..95d1c12 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -71,6 +71,10 @@ extern int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
 
 static inline int devfreq_update_stats(struct devfreq *df)
 {
-	return df->profile->get_dev_status(df->dev.parent, &df->last_status);
+	if (!df->profile->get_dev_status)
+		return -ENODEV;
+
+	return df->profile->get_dev_status(df->dev.parent,
+					   &df->last_status);
 }
 #endif /* _GOVERNOR_H */
diff --git a/drivers/devfreq/governor_bw_vbif.c b/drivers/devfreq/governor_bw_vbif.c
index db9b36f..34b4348 100644
--- a/drivers/devfreq/governor_bw_vbif.c
+++ b/drivers/devfreq/governor_bw_vbif.c
@@ -115,6 +115,8 @@ static int devfreq_vbif_ev_handler(struct devfreq *devfreq,
 
 static struct devfreq_governor devfreq_vbif = {
 	.name = "bw_vbif",
+	/* Restrict this governor to GPU devfreq devices only */
+	.immutable = 1,
 	.get_target_freq = devfreq_vbif_get_freq,
 	.event_handler = devfreq_vbif_ev_handler,
 };
diff --git a/drivers/devfreq/governor_gpubw_mon.c b/drivers/devfreq/governor_gpubw_mon.c
index f305c75..ab3d71d 100644
--- a/drivers/devfreq/governor_gpubw_mon.c
+++ b/drivers/devfreq/governor_gpubw_mon.c
@@ -6,6 +6,7 @@
 #include <linux/devfreq.h>
 #include <linux/module.h>
 #include <linux/msm_adreno_devfreq.h>
+#include <linux/of_platform.h>
 #include <linux/slab.h>
 
 #include "devfreq_trace.h"
@@ -208,6 +209,14 @@ static int devfreq_gpubw_event_handler(struct devfreq *devfreq,
 {
 	int result = 0;
 	unsigned long freq;
+	struct device_node *node = devfreq->dev.parent->of_node;
+
+	/*
+	 * Restrict this governor so that it can only be set on
+	 * GPU devfreq devices.
+	 */
+	if (!of_device_is_compatible(node, "qcom,kgsl-busmon"))
+		return -EINVAL;
 
 	mutex_lock(&devfreq->lock);
 	freq = devfreq->previous_freq;
diff --git a/drivers/devfreq/governor_msm_adreno_tz.c b/drivers/devfreq/governor_msm_adreno_tz.c
index fabdc81..39a58ac 100644
--- a/drivers/devfreq/governor_msm_adreno_tz.c
+++ b/drivers/devfreq/governor_msm_adreno_tz.c
@@ -15,6 +15,7 @@
 #include <asm/cacheflush.h>
 #include <soc/qcom/scm.h>
 #include <soc/qcom/qtee_shmbridge.h>
+#include <linux/of_platform.h>
 #include "governor.h"
 
 static DEFINE_SPINLOCK(tz_lock);
@@ -552,11 +553,18 @@ static int tz_suspend(struct devfreq *devfreq)
 static int tz_handler(struct devfreq *devfreq, unsigned int event, void *data)
 {
 	int result;
+	struct msm_adreno_extended_profile *gpu_profile;
+	struct device_node *node = devfreq->dev.parent->of_node;
 
-	struct msm_adreno_extended_profile *gpu_profile = container_of(
-					(devfreq->profile),
-					struct msm_adreno_extended_profile,
-					profile);
+	/*
+	 * Restrict this governor so that it can only be set on
+	 * GPU devfreq devices.
+	 */
+	if (!of_device_is_compatible(node, "qcom,kgsl-3d0"))
+		return -EINVAL;
+
+	gpu_profile = container_of((devfreq->profile),
+		struct msm_adreno_extended_profile, profile);
 
 	switch (event) {
 	case DEVFREQ_GOV_START:
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 8835b5f..7794ad2 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -823,8 +823,9 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
 			list_del(&txmsg->next);
 		}
 
-		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
-		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
+		if ((txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
+			txmsg->state == DRM_DP_SIDEBAND_TX_SENT) &&
+			txmsg->seqno != -1) {
 			mstb->tx_slots[txmsg->seqno] = NULL;
 		}
 	}
@@ -1638,7 +1639,8 @@ static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
 	if (ret != 1)
 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
 
-	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
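+	/* A seqno of -1 means no tx slot was claimed for this message */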
+	if (txmsg->seqno != -1)
+		txmsg->dst->tx_slots[txmsg->seqno] = NULL;
 }
 
 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 50f9507..ac783b4 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -1195,7 +1195,7 @@ static const struct adreno_reglist a650_hwcg_regs[] = {
 };
 
 static const struct adreno_a6xx_core adreno_gpu_core_a650 = {
-	{
+	.base = {
 		DEFINE_ADRENO_REV(ADRENO_REV_A650, 6, 5, 0, 0),
 		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU |
 			ADRENO_IOCOHERENT | ADRENO_CONTENT_PROTECTION |
@@ -1213,6 +1213,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a650 = {
 	.sqefw_name = "a650_sqe.fw",
 	.gmufw_name = "a650_gmu.bin",
 	.zap_name = "a650_zap",
+	.hwcg = a650_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a650_hwcg_regs),
 	.vbif = a650_gbif_regs,
 	.vbif_count = ARRAY_SIZE(a650_gbif_regs),
 	.veto_fal10 = true,
@@ -1223,7 +1225,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a650 = {
 };
 
 static const struct adreno_a6xx_core adreno_gpu_core_a650v2 = {
-	{
+	.base = {
 		DEFINE_ADRENO_REV(ADRENO_REV_A650, 6, 5, 0, ANY_ID),
 		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU |
 			ADRENO_IOCOHERENT | ADRENO_CONTENT_PROTECTION |
@@ -1242,6 +1244,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a650v2 = {
 	.sqefw_name = "a650_sqe.fw",
 	.gmufw_name = "a650_gmu.bin",
 	.zap_name = "a650_zap",
+	.hwcg = a650_hwcg_regs,
+	.hwcg_count = ARRAY_SIZE(a650_hwcg_regs),
 	.vbif = a650_gbif_regs,
 	.vbif_count = ARRAY_SIZE(a650_gbif_regs),
 	.veto_fal10 = true,
@@ -1327,7 +1331,7 @@ static const struct adreno_reglist a612_hwcg_regs[] = {
 };
 
 static const struct adreno_a6xx_core adreno_gpu_core_a612 = {
-	{
+	.base = {
 		DEFINE_ADRENO_REV(ADRENO_REV_A612, 6, 1, 2, ANY_ID),
 		.features = ADRENO_64BIT | ADRENO_CONTENT_PROTECTION |
 			ADRENO_IOCOHERENT | ADRENO_PREEMPTION | ADRENO_GPMU |
@@ -1352,7 +1356,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a612 = {
 };
 
 static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
-	{
+	.base = {
 		DEFINE_ADRENO_REV(ADRENO_REV_A616, 6, 1, 6, ANY_ID),
 		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_PREEMPTION |
 			ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IFPC |
@@ -1379,7 +1383,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
 };
 
 static const struct adreno_a6xx_core adreno_gpu_core_a610 = {
-	{
+	.base = {
 		DEFINE_ADRENO_REV(ADRENO_REV_A610, 6, 1, 0, ANY_ID),
 		.features = ADRENO_64BIT | ADRENO_CONTENT_PROTECTION |
 			ADRENO_PREEMPTION,
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index f9bf55f..18d78f8 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -10,6 +10,7 @@
 #include <linux/of_fdt.h>
 #include <linux/module.h>
 #include <linux/msm_kgsl.h>
+#include <linux/msm-bus.h>
 #include <linux/regulator/consumer.h>
 #include <linux/nvmem-consumer.h>
 #include <soc/qcom/scm.h>
@@ -1654,6 +1655,60 @@ static void adreno_fault_detect_init(struct adreno_device *adreno_dev)
 	adreno_fault_detect_start(adreno_dev);
 }
 
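+/*
+ * Write @mask into @halt_reg and poll @ack_reg until the halt request is
+ * acknowledged, resuming any stalled SMMU pagefault along the way so the
+ * halt can complete.
+ */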
+static void do_gbif_halt(struct adreno_device *adreno_dev,
+	u32 halt_reg, u32 ack_reg, u32 mask, const char *client)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	unsigned long t;
+	u32 val;
+
+	adreno_writereg(adreno_dev, halt_reg, mask);
+
+	t = jiffies + msecs_to_jiffies(VBIF_RESET_ACK_TIMEOUT);
+	do {
+		adreno_readreg(adreno_dev, ack_reg, &val);
+		if ((val & mask) == mask)
+			return;
+
+		/*
+		 * If we are attempting a GBIF halt during stall-on-fault,
+		 * the halt sequence will not complete as long as the SMMU
+		 * is stalled.
+		 */
+		kgsl_mmu_pagefault_resume(&device->mmu);
+		usleep_range(10, 100);
+	} while (!time_after(jiffies, t));
+
+	/* Check one last time */
+	kgsl_mmu_pagefault_resume(&device->mmu);
+
+	adreno_readreg(adreno_dev, ack_reg, &val);
+	if ((val & mask) == mask)
+		return;
+
+	dev_err(device->dev, "%s GBIF Halt ack timed out\n", client);
+}
+
+/**
+ * adreno_smmu_resume - Clears stalled/pending transactions in GBIF pipe
+ * and resumes stalled SMMU
+ * @adreno_dev: Pointer to the adreno device
+ */
+void adreno_smmu_resume(struct adreno_device *adreno_dev)
+{
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+
+	/* Halt GBIF GX traffic */
+	if (gmu_core_dev_gx_is_on(KGSL_DEVICE(adreno_dev)))
+		do_gbif_halt(adreno_dev, ADRENO_REG_RBBM_GBIF_HALT,
+			ADRENO_REG_RBBM_GBIF_HALT_ACK,
+			gpudev->gbif_gx_halt_mask, "GX");
+
+	/* Halt all CX traffic */
+	do_gbif_halt(adreno_dev, ADRENO_REG_GBIF_HALT,
+		ADRENO_REG_GBIF_HALT_ACK, gpudev->gbif_arb_halt_mask, "CX");
+}
+
 /**
  * adreno_clear_pending_transactions() - Clear transactions in GBIF/VBIF pipe
  * @device: Pointer to the device whose GBIF/VBIF pipe is to be cleared
@@ -1942,6 +1997,14 @@ static int _adreno_start(struct adreno_device *adreno_dev)
 	/* Clear any GPU faults that might have been left over */
 	adreno_clear_gpu_fault(adreno_dev);
 
+	/*
+	 * Keep the bus vote high to reduce AHB latency during
+	 * firmware loading and wakeup.
+	 */
+	if (device->pwrctrl.gpu_cfg)
+		msm_bus_scale_client_update_request(device->pwrctrl.gpu_cfg,
+			KGSL_GPU_CFG_PATH_HIGH);
+
 	/* Put the GPU in a responsive state */
 	status = kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
 	if (status)
@@ -2160,6 +2223,15 @@ static int _adreno_start(struct adreno_device *adreno_dev)
 	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
 		gmu_core_dev_oob_clear(device, oob_boot_slumber);
 
+	/*
+	 * A low vote is enough once wakeup completes; it ensures the
+	 * CPU-to-GPU AHB infrastructure clocks keep running at least
+	 * at their minimum frequency.
+	 */
+	if (device->pwrctrl.gpu_cfg)
+		msm_bus_scale_client_update_request(device->pwrctrl.gpu_cfg,
+			KGSL_GPU_CFG_PATH_LOW);
+
 	return 0;
 
 error_oob_clear:
@@ -2180,6 +2252,9 @@ static int _adreno_start(struct adreno_device *adreno_dev)
 		pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
 				pmqos_active_vote);
 
+	if (device->pwrctrl.gpu_cfg)
+		msm_bus_scale_client_update_request(device->pwrctrl.gpu_cfg,
+			KGSL_GPU_CFG_PATH_OFF);
 	return status;
 }
 
@@ -2288,6 +2363,10 @@ static int adreno_stop(struct kgsl_device *device)
 	 */
 	adreno_set_active_ctxs_null(adreno_dev);
 
+	if (device->pwrctrl.gpu_cfg)
+		msm_bus_scale_client_update_request(device->pwrctrl.gpu_cfg,
+			KGSL_GPU_CFG_PATH_OFF);
+
 	clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
 
 	return error;
@@ -2384,10 +2463,8 @@ int adreno_reset(struct kgsl_device *device, int fault)
 static int copy_prop(struct kgsl_device_getproperty *param,
 		void *src, size_t size)
 {
-	if (param->sizebytes != size)
-		return -EINVAL;
-
-	if (copy_to_user(param->value, src, param->sizebytes))
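+	/* Copy at most the smaller of the property size and user buffer */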
+	if (copy_to_user(param->value, src,
+		min_t(u32, size, param->sizebytes)))
 		return -EFAULT;
 
 	return 0;
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 4d0569c..87d7e80 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -1820,4 +1820,5 @@ int adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
 	unsigned int fence_mask);
 int adreno_clear_pending_transactions(struct kgsl_device *device);
 void adreno_gmu_send_nmi(struct adreno_device *adreno_dev);
+void adreno_smmu_resume(struct adreno_device *adreno_dev);
 #endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index bf5ff88..69ca42f 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -50,7 +50,6 @@ static u32 a6xx_pwrup_reglist[] = {
 
 /* IFPC only static powerup restore list */
 static u32 a6xx_ifpc_pwrup_reglist[] = {
-	A6XX_RBBM_VBIF_CLIENT_QOS_CNTL,
 	A6XX_CP_CHICKEN_DBG,
 	A6XX_CP_DBG_ECO_CNTL,
 	A6XX_CP_PROTECT_CNTL,
@@ -91,14 +90,29 @@ static u32 a6xx_ifpc_pwrup_reglist[] = {
 
 /* a620 and a650 need to program A6XX_CP_PROTECT_REG_47 for the infinite span */
 static u32 a650_pwrup_reglist[] = {
+	A6XX_RBBM_GBIF_CLIENT_QOS_CNTL,
 	A6XX_CP_PROTECT_REG + 47,
 };
 
+/* Applicable to a640 and a680 */
+static u32 a640_pwrup_reglist[] = {
+	A6XX_RBBM_GBIF_CLIENT_QOS_CNTL,
+};
+
+/* Applicable to a630 */
+static u32 a630_pwrup_reglist[] = {
+	A6XX_RBBM_VBIF_CLIENT_QOS_CNTL,
+};
+
+/* Applicable to a615 family */
 static u32 a615_pwrup_reglist[] = {
+	A6XX_RBBM_VBIF_CLIENT_QOS_CNTL,
 	A6XX_UCHE_GBIF_GX_CONFIG,
 };
 
+/* Applicable to a612 */
 static u32 a612_pwrup_reglist[] = {
+	A6XX_RBBM_GBIF_CLIENT_QOS_CNTL,
 	A6XX_RBBM_PERFCTR_CNTL,
 };
 
@@ -334,6 +348,10 @@ static void a6xx_patch_pwrup_reglist(struct adreno_device *adreno_dev)
 		reglist[items++] = REGLIST(a612_pwrup_reglist);
 	else if (adreno_is_a615_family(adreno_dev))
 		reglist[items++] = REGLIST(a615_pwrup_reglist);
+	else if (adreno_is_a630(adreno_dev))
+		reglist[items++] = REGLIST(a630_pwrup_reglist);
+	else if (adreno_is_a640(adreno_dev) || adreno_is_a680(adreno_dev))
+		reglist[items++] = REGLIST(a640_pwrup_reglist);
 	else if (adreno_is_a650(adreno_dev) || adreno_is_a620(adreno_dev))
 		reglist[items++] = REGLIST(a650_pwrup_reglist);
 
@@ -579,24 +597,15 @@ static void a6xx_start(struct adreno_device *adreno_dev)
 }
 
 /*
- * a6xx_microcode_load() - Load microcode
+ * a6xx_zap_load() - Load zap shader
  * @adreno_dev: Pointer to adreno device
  */
-static int a6xx_microcode_load(struct adreno_device *adreno_dev)
+static int a6xx_zap_load(struct adreno_device *adreno_dev)
 {
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
 	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
-	uint64_t gpuaddr;
 	void *zap;
 	int ret = 0;
 
-	gpuaddr = fw->memdesc.gpuaddr;
-	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_LO,
-				lower_32_bits(gpuaddr));
-	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_HI,
-				upper_32_bits(gpuaddr));
-
 	/* Load the zap shader firmware through PIL if its available */
 	if (a6xx_core->zap_name && !adreno_dev->zap_loaded) {
 		zap = subsystem_get(a6xx_core->zap_name);
@@ -821,6 +830,7 @@ static int a6xx_rb_start(struct adreno_device *adreno_dev)
 {
 	struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
 	struct kgsl_device *device = &adreno_dev->dev;
+	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
 	uint64_t addr;
 	int ret;
 
@@ -839,13 +849,16 @@ static int a6xx_rb_start(struct adreno_device *adreno_dev)
 	adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
 			ADRENO_REG_CP_RB_BASE_HI, rb->buffer_desc.gpuaddr);
 
-	ret = a6xx_microcode_load(adreno_dev);
-	if (ret)
-		return ret;
-
 	if (ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
 		kgsl_regwrite(device, A6XX_CP_APRIV_CNTL, A6XX_APRIV_DEFAULT);
 
+	/* Program the ucode base for CP */
+	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_LO,
+			lower_32_bits(fw->memdesc.gpuaddr));
+
+	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_HI,
+			upper_32_bits(fw->memdesc.gpuaddr));
+
 	/* Clear the SQE_HALT to start the CP engine */
 	kgsl_regwrite(device, A6XX_CP_SQE_CNTL, 1);
 
@@ -853,6 +866,10 @@ static int a6xx_rb_start(struct adreno_device *adreno_dev)
 	if (ret)
 		return ret;
 
+	ret = a6xx_zap_load(adreno_dev);
+	if (ret)
+		return ret;
+
 	/* GPU comes up in secured mode, make it unsecured by default */
 	ret = adreno_set_unsecured_mode(adreno_dev, rb);
 	if (ret)
@@ -2278,7 +2295,7 @@ static void a6xx_platform_setup(struct adreno_device *adreno_dev)
 	adreno_dev->perfctr_pwr_lo = A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L;
 
 	/* Set the counter for IFPC */
-	if (gmu_core_isenabled(KGSL_DEVICE(adreno_dev)))
+	if (ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
 		adreno_dev->perfctr_ifpc_lo =
 			A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L;
 
diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h
index b35334c..d9b104e 100644
--- a/drivers/gpu/msm/adreno_a6xx.h
+++ b/drivers/gpu/msm/adreno_a6xx.h
@@ -149,8 +149,6 @@ struct cpu_gpu_lock {
 };
 
 #define A6XX_CP_CTXRECORD_MAGIC_REF     0xAE399D6EUL
-/* Size of each CP preemption record */
-#define A6XX_CP_CTXRECORD_SIZE_IN_BYTES     (2112 * 1024)
 /* Size of the user context record block (in bytes) */
 #define A6XX_CP_CTXRECORD_USER_RESTORE_SIZE (192 * 1024)
 /* Size of the performance counter save/restore block (in bytes) */
diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c
index f535544..af13818 100644
--- a/drivers/gpu/msm/adreno_a6xx_gmu.c
+++ b/drivers/gpu/msm/adreno_a6xx_gmu.c
@@ -914,12 +914,9 @@ static int a6xx_gmu_wait_for_lowest_idle(struct kgsl_device *device)
 
 	/* Collect abort data to help with debugging */
 	gmu_core_regread(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &reg2);
-	kgsl_regread(device, A6XX_CP_STATUS_1, &reg3);
-	gmu_core_regread(device, A6XX_GMU_RBBM_INT_UNMASKED_STATUS, &reg4);
-	gmu_core_regread(device, A6XX_GMU_GMU_PWR_COL_KEEPALIVE, &reg5);
-	kgsl_regread(device, A6XX_CP_CP2GMU_STATUS, &reg6);
-	kgsl_regread(device, A6XX_CP_CONTEXT_SWITCH_CNTL, &reg7);
-	gmu_core_regread(device, A6XX_GMU_AO_SPARE_CNTL, &reg8);
+	gmu_core_regread(device, A6XX_GMU_RBBM_INT_UNMASKED_STATUS, &reg3);
+	gmu_core_regread(device, A6XX_GMU_GMU_PWR_COL_KEEPALIVE, &reg4);
+	gmu_core_regread(device, A6XX_GMU_AO_SPARE_CNTL, &reg5);
 
 	dev_err(&gmu->pdev->dev,
 		"----------------------[ GMU error ]----------------------\n");
@@ -933,14 +930,23 @@ static int a6xx_gmu_wait_for_lowest_idle(struct kgsl_device *device)
 		ts3-ts2);
 	dev_err(&gmu->pdev->dev,
 		"RPMH_POWER_STATE=%x SPTPRAC_PWR_CLK_STATUS=%x\n", reg, reg1);
-	dev_err(&gmu->pdev->dev,
-		"CX_BUSY_STATUS=%x CP_STATUS_1=%x\n", reg2, reg3);
+	dev_err(&gmu->pdev->dev, "CX_BUSY_STATUS=%x\n", reg2);
 	dev_err(&gmu->pdev->dev,
 		"RBBM_INT_UNMASKED_STATUS=%x PWR_COL_KEEPALIVE=%x\n",
-		reg4, reg5);
-	dev_err(&gmu->pdev->dev,
-		"CP2GMU_STATUS=%x CONTEXT_SWITCH_CNTL=%x AO_SPARE_CNTL=%x\n",
-		reg6, reg7, reg8);
+		reg3, reg4);
+	dev_err(&gmu->pdev->dev, "A6XX_GMU_AO_SPARE_CNTL=%x\n", reg5);
+
+	/* Access GX registers only when GX is ON */
+	if (is_on(reg1)) {
+		kgsl_regread(device, A6XX_CP_STATUS_1, &reg6);
+		kgsl_regread(device, A6XX_CP_CP2GMU_STATUS, &reg7);
+		kgsl_regread(device, A6XX_CP_CONTEXT_SWITCH_CNTL, &reg8);
+
+		dev_err(&gmu->pdev->dev, "A6XX_CP_STATUS_1=%x\n", reg6);
+		dev_err(&gmu->pdev->dev,
+			"CP2GMU_STATUS=%x CONTEXT_SWITCH_CNTL=%x\n",
+			reg7, reg8);
+	}
 
 	WARN_ON(1);
 	return -ETIMEDOUT;
@@ -1165,40 +1171,6 @@ static int a6xx_gmu_load_firmware(struct kgsl_device *device)
 
 #define A6XX_VBIF_XIN_HALT_CTRL1_ACKS   (BIT(0) | BIT(1) | BIT(2) | BIT(3))
 
-static void do_gbif_halt(struct kgsl_device *device, u32 reg, u32 ack_reg,
-	u32 mask, const char *client)
-{
-	u32 ack;
-	unsigned long t;
-
-	kgsl_regwrite(device, reg, mask);
-
-	t = jiffies + msecs_to_jiffies(100);
-	do {
-		kgsl_regread(device, ack_reg, &ack);
-		if ((ack & mask) == mask)
-			return;
-
-		/*
-		 * If we are attempting recovery in case of stall-on-fault
-		 * then the halt sequence will not complete as long as SMMU
-		 * is stalled.
-		 */
-		kgsl_mmu_pagefault_resume(&device->mmu);
-
-		usleep_range(10, 100);
-	} while (!time_after(jiffies, t));
-
-	/* Check one last time */
-	kgsl_mmu_pagefault_resume(&device->mmu);
-
-	kgsl_regread(device, ack_reg, &ack);
-	if ((ack & mask) == mask)
-		return;
-
-	dev_err(device->dev, "%s GBIF halt timed out\n", client);
-}
-
 static int a6xx_gmu_suspend(struct kgsl_device *device)
 {
 	int ret = 0;
@@ -1222,21 +1194,8 @@ static int a6xx_gmu_suspend(struct kgsl_device *device)
 
 	gmu_core_regwrite(device, A6XX_GMU_CM3_SYSRESET, 1);
 
-	if (adreno_has_gbif(adreno_dev)) {
-		struct adreno_gpudev *gpudev =
-			ADRENO_GPU_DEVICE(adreno_dev);
-
-		/* Halt GX traffic */
-		if (a6xx_gmu_gx_is_on(device))
-			do_gbif_halt(device, A6XX_RBBM_GBIF_HALT,
-				A6XX_RBBM_GBIF_HALT_ACK,
-				gpudev->gbif_gx_halt_mask,
-				"GX");
-
-		/* Halt CX traffic */
-		do_gbif_halt(device, A6XX_GBIF_HALT, A6XX_GBIF_HALT_ACK,
-			gpudev->gbif_arb_halt_mask, "CX");
-	}
+	if (adreno_has_gbif(adreno_dev))
+		adreno_smmu_resume(adreno_dev);
 
 	if (a6xx_gmu_gx_is_on(device))
 		kgsl_regwrite(device, A6XX_RBBM_SW_RESET_CMD, 0x1);
diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c
index 46a84ab..8c44c4a 100644
--- a/drivers/gpu/msm/adreno_a6xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a6xx_preempt.c
@@ -611,14 +611,23 @@ static int a6xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev,
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	int ret;
 
+	/*
+	 * Reserve the CP context record size as
+	 * GMEM size + GPU HW state size (0x110000)
+	 */
 	ret = kgsl_allocate_global(device, &rb->preemption_desc,
-		A6XX_CP_CTXRECORD_SIZE_IN_BYTES, 0, KGSL_MEMDESC_PRIVILEGED,
+		adreno_dev->gpucore->gmem_size + 0x110000,
+		0, KGSL_MEMDESC_PRIVILEGED,
 		"preemption_desc");
 	if (ret)
 		return ret;
 
+	/*
+	 * Reserve the CP context record size as
+	 * GMEM size + GPU HW state size (0x110000)
+	 */
 	ret = kgsl_allocate_user(device, &rb->secure_preemption_desc,
-		A6XX_CP_CTXRECORD_SIZE_IN_BYTES,
+		adreno_dev->gpucore->gmem_size + 0x110000,
 		KGSL_MEMFLAGS_SECURE | KGSL_MEMDESC_PRIVILEGED);
 	if (ret)
 		return ret;
@@ -757,7 +766,7 @@ void a6xx_preemption_context_destroy(struct kgsl_context *context)
 	gpumem_free_entry(context->user_ctxt_record);
 
 	/* Put the extra ref from gpumem_alloc_entry() */
-	kgsl_mem_entry_put(context->user_ctxt_record);
+	kgsl_mem_entry_put_deferred(context->user_ctxt_record);
 }
 
 int a6xx_preemption_context_init(struct kgsl_context *context)
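The dropped A6XX_CP_CTXRECORD_SIZE_IN_BYTES constant and the new formula agree for a 1 MB-GMEM part: 2112 KB is 0x210000 bytes, which decomposes exactly into 0x100000 bytes (1 MB) of GMEM plus the 0x110000-byte GPU HW state block, so targets with smaller or larger GMEM now reserve a proportionally sized record. A sketch of the rule; the helper and macro names here are hypothetical:

/* Hypothetical sizing helper; gmem_size comes from the target's gpucore. */
#define CP_HW_STATE_SIZE	0x110000	/* GPU HW state block */

static u64 a6xx_ctxrecord_size(u64 gmem_size)
{
	/* Old fixed size: 0x210000 == SZ_1M of GMEM + CP_HW_STATE_SIZE */
	return gmem_size + CP_HW_STATE_SIZE;
}
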
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 88fc29b..b561214 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -2092,7 +2092,7 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
 	int ret, i;
 	int fault;
 	int halt;
-	bool gx_on;
+	bool gx_on, smmu_stalled = false;
 
 	fault = atomic_xchg(&dispatcher->fault, 0);
 	if (fault == 0)
@@ -2133,18 +2133,20 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
 	 * proceed if the fault handler has already run in the IRQ thread,
 	 * else return early to give the fault handler a chance to run.
 	 */
-	if (!(fault & ADRENO_IOMMU_PAGE_FAULT) &&
-		(adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev)) &&
-		gx_on) {
+	if (gx_on) {
 		unsigned int val;
 
 		adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS3, &val);
-		if (val & BIT(24)) {
-			mutex_unlock(&device->mutex);
-			dev_err(device->dev,
-				"SMMU is stalled without a pagefault\n");
-			return -EBUSY;
-		}
+		if (val & BIT(24))
+			smmu_stalled = true;
+	}
+
+	if (!(fault & ADRENO_IOMMU_PAGE_FAULT) &&
+		(adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev)) &&
+		smmu_stalled) {
+		mutex_unlock(&device->mutex);
+		dev_err(device->dev, "SMMU is stalled without a pagefault\n");
+		return -EBUSY;
 	}
 
 	/* Turn off all the timers */
@@ -2211,8 +2213,20 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
 		gpudev->gpu_keepalive(adreno_dev, false);
 
 	/* Terminate the stalled transaction and resume the IOMMU */
-	if (fault & ADRENO_IOMMU_PAGE_FAULT)
-		kgsl_mmu_pagefault_resume(&device->mmu);
+	if (fault & ADRENO_IOMMU_PAGE_FAULT) {
+		/*
+		 * Trigger this only if GBIF is supported, the GMU is
+		 * disabled and the SMMU is stalled: the halt sequence
+		 * is valid only for GPUs with GBIF, GMU-enabled targets
+		 * run it during GMU suspend instead, and it is needed
+		 * only while the SMMU is stalled.
+		 */
+		if (adreno_has_gbif(adreno_dev) &&
+			!gmu_core_isenabled(device) && smmu_stalled)
+			adreno_smmu_resume(adreno_dev);
+		else
+			kgsl_mmu_pagefault_resume(&device->mmu);
+	}
 
 	/* Reset the dispatcher queue */
 	dispatcher->inflight = 0;
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index d1b2ad0..2fe694f 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -3,6 +3,8 @@
  * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
+#include <linux/msm-bus.h>
+
 #include "adreno.h"
 #include "adreno_cp_parser.h"
 #include "adreno_pm4types.h"
@@ -840,6 +842,14 @@ void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot,
 
 	snapshot_frozen_objsize = 0;
 
+	/*
+	 * Lots of registers are read during a GPU snapshot. Keep
+	 * the bus vote high to reduce AHB latency.
+	 */
+	if (device->pwrctrl.gpu_cfg)
+		msm_bus_scale_client_update_request(device->pwrctrl.gpu_cfg,
+			KGSL_GPU_CFG_PATH_HIGH);
+
 	/* Add GPU specific sections - registers mainly, but other stuff too */
 	if (gpudev->snapshot)
 		gpudev->snapshot(adreno_dev, snapshot);
@@ -945,6 +955,9 @@ void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot,
 			"GPU snapshot froze %zdKb of GPU buffers\n",
 			snapshot_frozen_objsize / 1024);
 
+	if (device->pwrctrl.gpu_cfg)
+		msm_bus_scale_client_update_request(device->pwrctrl.gpu_cfg,
+			KGSL_GPU_CFG_PATH_LOW);
 }
 
 /*
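The snapshot path now brackets its long run of register reads between a high and a low vote on the cpu-to-gpu-cfg AHB path. Since the same guard is repeated at both call sites, the pairing can be made explicit with a tiny helper; this is a sketch, not part of the patch:

/* Sketch only: gpu_cfg and the path levels are from this patch. */
static void snapshot_ahb_vote(struct kgsl_device *device, unsigned int level)
{
	if (device->pwrctrl.gpu_cfg)
		msm_bus_scale_client_update_request(device->pwrctrl.gpu_cfg,
				level);
}

Any early return later added between the HIGH and LOW votes in adreno_snapshot() must keep the pair balanced, or the AHB path stays voted high.
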
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 57dca4a..8fe0ccb 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -223,15 +223,6 @@ int kgsl_readtimestamp(struct kgsl_device *device, void *priv,
 }
 EXPORT_SYMBOL(kgsl_readtimestamp);
 
-/* Scheduled by kgsl_mem_entry_put_deferred() */
-static void _deferred_put(struct work_struct *work)
-{
-	struct kgsl_mem_entry *entry =
-		container_of(work, struct kgsl_mem_entry, work);
-
-	kgsl_mem_entry_put(entry);
-}
-
 static struct kgsl_mem_entry *kgsl_mem_entry_create(void)
 {
 	struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
@@ -317,17 +308,10 @@ static void kgsl_destroy_ion(struct kgsl_dma_buf_meta *meta)
 }
 #endif
 
-void
-kgsl_mem_entry_destroy(struct kref *kref)
+static void mem_entry_destroy(struct kgsl_mem_entry *entry)
 {
-	struct kgsl_mem_entry *entry = container_of(kref,
-						    struct kgsl_mem_entry,
-						    refcount);
 	unsigned int memtype;
 
-	if (entry == NULL)
-		return;
-
 	/* pull out the memtype before the flags get cleared */
 	memtype = kgsl_memdesc_usermem_type(&entry->memdesc);
 
@@ -379,8 +363,34 @@ kgsl_mem_entry_destroy(struct kref *kref)
 
 	kfree(entry);
 }
+
+static void _deferred_destroy(struct work_struct *work)
+{
+	struct kgsl_mem_entry *entry =
+		container_of(work, struct kgsl_mem_entry, work);
+
+	mem_entry_destroy(entry);
+}
+
+void kgsl_mem_entry_destroy(struct kref *kref)
+{
+	struct kgsl_mem_entry *entry =
+		container_of(kref, struct kgsl_mem_entry, refcount);
+
+	mem_entry_destroy(entry);
+}
 EXPORT_SYMBOL(kgsl_mem_entry_destroy);
 
+void kgsl_mem_entry_destroy_deferred(struct kref *kref)
+{
+	struct kgsl_mem_entry *entry =
+		container_of(kref, struct kgsl_mem_entry, refcount);
+
+	INIT_WORK(&entry->work, _deferred_destroy);
+	queue_work(kgsl_driver.mem_workqueue, &entry->work);
+}
+EXPORT_SYMBOL(kgsl_mem_entry_destroy_deferred);
+
 /* Allocate a IOVA for memory objects that don't use SVM */
 static int kgsl_mem_entry_track_gpuaddr(struct kgsl_device *device,
 		struct kgsl_process_private *process,
@@ -478,8 +488,6 @@ static int kgsl_mem_entry_attach_process(struct kgsl_device *device,
 /* Detach a memory entry from a process and unmap it from the MMU */
 static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
 {
-	unsigned int type;
-
 	if (entry == NULL)
 		return;
 
@@ -492,10 +500,8 @@ static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
 		idr_remove(&entry->priv->mem_idr, entry->id);
 	entry->id = 0;
 
-	type = kgsl_memdesc_usermem_type(&entry->memdesc);
-
-	if (type != KGSL_MEM_ENTRY_ION)
-		entry->priv->gpumem_mapped -= entry->memdesc.mapsize;
+	atomic_long_sub(atomic_long_read(&entry->memdesc.mapsize),
+			&entry->priv->gpumem_mapped);
 
 	spin_unlock(&entry->priv->mem_lock);
 
@@ -940,6 +946,13 @@ static struct kgsl_process_private *kgsl_process_private_new(
 	struct kgsl_process_private *private;
 	pid_t tgid = task_tgid_nr(current);
 
+	/*
+	 * Flush mem_workqueue to make sure that any lingering
+	 * structs (process pagetables, etc.) are released before
+	 * starting over again.
+	 */
+	flush_workqueue(kgsl_driver.mem_workqueue);
+
 	/* Search in the process list */
 	list_for_each_entry(private, &kgsl_driver.process_list, list) {
 		if (private->pid == tgid) {
@@ -1002,7 +1015,7 @@ static void process_release_memory(struct kgsl_process_private *private)
 		if (!entry->pending_free) {
 			entry->pending_free = 1;
 			spin_unlock(&private->mem_lock);
-			kgsl_mem_entry_put(entry);
+			kgsl_mem_entry_put_deferred(entry);
 		} else {
 			spin_unlock(&private->mem_lock);
 		}
@@ -2201,7 +2214,7 @@ long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
 		return -EINVAL;
 
 	ret = gpumem_free_entry(entry);
-	kgsl_mem_entry_put(entry);
+	kgsl_mem_entry_put_deferred(entry);
 
 	return ret;
 }
@@ -2219,7 +2232,7 @@ long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
 		return -EINVAL;
 
 	ret = gpumem_free_entry(entry);
-	kgsl_mem_entry_put(entry);
+	kgsl_mem_entry_put_deferred(entry);
 
 	return ret;
 }
@@ -2263,8 +2276,7 @@ static bool gpuobj_free_fence_func(void *priv)
 			entry->memdesc.gpuaddr, entry->memdesc.size,
 			entry->memdesc.flags);
 
-	INIT_WORK(&entry->work, _deferred_put);
-	queue_work(kgsl_driver.mem_workqueue, &entry->work);
+	kgsl_mem_entry_put_deferred(entry);
 	return true;
 }
 
@@ -2329,7 +2341,7 @@ long kgsl_ioctl_gpuobj_free(struct kgsl_device_private *dev_priv,
 	else
 		ret = -EINVAL;
 
-	kgsl_mem_entry_put(entry);
+	kgsl_mem_entry_put_deferred(entry);
 	return ret;
 }
 
@@ -2359,7 +2371,7 @@ long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(
 	ret = gpumem_free_entry_on_timestamp(dev_priv->device, entry,
 		context, param->timestamp);
 
-	kgsl_mem_entry_put(entry);
+	kgsl_mem_entry_put_deferred(entry);
 	kgsl_context_put(context);
 
 	return ret;
@@ -3744,7 +3756,7 @@ long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
 
 	/* One put for find_id(), one put for the kgsl_mem_entry_create() */
 	kgsl_mem_entry_put(entry);
-	kgsl_mem_entry_put(entry);
+	kgsl_mem_entry_put_deferred(entry);
 
 	return 0;
 }
@@ -3828,7 +3840,7 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
 
 	/* One put for find_id(), one put for the kgsl_mem_entry_create() */
 	kgsl_mem_entry_put(entry);
-	kgsl_mem_entry_put(entry);
+	kgsl_mem_entry_put_deferred(entry);
 
 	return 0;
 }
@@ -4476,7 +4488,7 @@ kgsl_gpumem_vm_fault(struct vm_fault *vmf)
 
 	ret = entry->memdesc.ops->vmfault(&entry->memdesc, vmf->vma, vmf);
 	if ((ret == 0) || (ret == VM_FAULT_NOPAGE))
-		entry->priv->gpumem_mapped += PAGE_SIZE;
+		atomic_long_add(PAGE_SIZE, &entry->priv->gpumem_mapped);
 
 	return ret;
 }
@@ -4490,7 +4502,7 @@ kgsl_gpumem_vm_close(struct vm_area_struct *vma)
 		return;
 
 	entry->memdesc.useraddr = 0;
-	kgsl_mem_entry_put(entry);
+	kgsl_mem_entry_put_deferred(entry);
 }
 
 static const struct vm_operations_struct kgsl_gpumem_vm_ops = {
@@ -4858,6 +4870,8 @@ static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
 			vm_insert_page(vma, addr, page);
 			addr += PAGE_SIZE;
 		}
+		atomic_long_add(m->size, &m->mapsize);
+		atomic_long_add(m->size, &entry->priv->gpumem_mapped);
 	}
 
 	vma->vm_file = file;
@@ -5165,6 +5179,12 @@ void kgsl_device_platform_remove(struct kgsl_device *device)
 }
 EXPORT_SYMBOL(kgsl_device_platform_remove);
 
+static void
+_flush_mem_workqueue(struct work_struct *work)
+{
+	flush_workqueue(kgsl_driver.mem_workqueue);
+}
+
 static void kgsl_core_exit(void)
 {
 	kgsl_events_exit();
@@ -5266,6 +5286,8 @@ static int __init kgsl_core_init(void)
 	kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry",
 		WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
 
+	INIT_WORK(&kgsl_driver.mem_work, _flush_mem_workqueue);
+
 	kthread_init_worker(&kgsl_driver.worker);
 
 	kgsl_driver.worker_thread = kthread_run(kthread_worker_fn,
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 0900100..e263c31 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -120,6 +120,7 @@ struct kgsl_context;
  * @full_cache_threshold: the threshold that triggers a full cache flush
  * @workqueue: Pointer to a single threaded workqueue
  * @mem_workqueue: Pointer to a workqueue for deferring memory entries
+ * @mem_work: Work struct to schedule mem_workqueue flush
  */
 struct kgsl_driver {
 	struct cdev cdev;
@@ -150,6 +151,7 @@ struct kgsl_driver {
 	unsigned int full_cache_threshold;
 	struct workqueue_struct *workqueue;
 	struct workqueue_struct *mem_workqueue;
+	struct work_struct mem_work;
 	struct kthread_worker worker;
 	struct task_struct *worker_thread;
 };
@@ -218,7 +220,7 @@ struct kgsl_memdesc {
 	uint64_t gpuaddr;
 	phys_addr_t physaddr;
 	uint64_t size;
-	uint64_t mapsize;
+	atomic_long_t mapsize;
 	unsigned int priv;
 	struct sg_table *sgt;
 	struct kgsl_memdesc_ops *ops;
@@ -425,6 +427,7 @@ long kgsl_ioctl_gpu_sparse_command(struct kgsl_device_private *dev_priv,
 					unsigned int cmd, void *data);
 
 void kgsl_mem_entry_destroy(struct kref *kref);
+void kgsl_mem_entry_destroy_deferred(struct kref *kref);
 
 void kgsl_get_egl_counts(struct kgsl_mem_entry *entry,
 			int *egl_surface_count, int *egl_image_count);
@@ -542,6 +545,21 @@ kgsl_mem_entry_put(struct kgsl_mem_entry *entry)
 		kref_put(&entry->refcount, kgsl_mem_entry_destroy);
 }
 
+/**
+ * kgsl_mem_entry_put_deferred - Put the refcount and defer the
+ *  mem_entry destroy to a workqueue when the refcount hits zero.
+ * @entry: memory entry to be put.
+ *
+ * Use this to put a memory entry when the caller must not block
+ * while the memory entry is destroyed.
+ */
+static inline void
+kgsl_mem_entry_put_deferred(struct kgsl_mem_entry *entry)
+{
+	if (entry)
+		kref_put(&entry->refcount, kgsl_mem_entry_destroy_deferred);
+}
+
 /*
  * kgsl_addr_range_overlap() - Checks if 2 ranges overlap
  * @gpuaddr1: Start of first address range
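The two put helpers drop the same reference; they differ only in where the zero-refcount teardown runs. A hypothetical caller-side contrast, using only the helpers from this patch:

/* Hypothetical example: choosing between the two put variants. */
static void example_release(struct kgsl_mem_entry *entry, bool can_block)
{
	if (can_block)
		kgsl_mem_entry_put(entry);	/* teardown runs inline */
	else
		/*
		 * The (possibly heavy) unmap and page frees are queued
		 * on the kgsl-mementry workqueue, which suits paths
		 * that should not block, e.g. fence callbacks or
		 * vm_close.
		 */
		kgsl_mem_entry_put_deferred(entry);
}
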
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index 43ba370..6943c09 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -158,12 +158,13 @@ static int print_mem_entry(void *data, void *ptr)
 		kgsl_get_egl_counts(entry, &egl_surface_count,
 						&egl_image_count);
 
-	seq_printf(s, "%pK %pK %16llu %5d %9s %10s %16s %5d %16llu %6d %6d",
+	seq_printf(s, "%pK %pK %16llu %5d %9s %10s %16s %5d %16ld %6d %6d",
 			(uint64_t *)(uintptr_t) m->gpuaddr,
 			(unsigned long *) m->useraddr,
 			m->size, entry->id, flags,
 			memtype_str(usermem_type),
-			usage, (m->sgt ? m->sgt->nents : 0), m->mapsize,
+			usage, (m->sgt ? m->sgt->nents : 0),
+			atomic_long_read(&m->mapsize),
 			egl_surface_count, egl_image_count);
 
 	if (entry->metadata[0] != 0)
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 927418e..97746b8 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -451,10 +451,10 @@ struct kgsl_process_private {
 	struct kobject kobj;
 	struct dentry *debug_root;
 	struct {
-		uint64_t cur;
+		atomic_long_t cur;
 		uint64_t max;
 	} stats[KGSL_MEM_ENTRY_MAX];
-	uint64_t gpumem_mapped;
+	atomic_long_t gpumem_mapped;
 	struct idr syncsource_idr;
 	spinlock_t syncsource_lock;
 	int fd_count;
@@ -549,9 +549,10 @@ struct kgsl_device *kgsl_get_device(int dev_idx);
 static inline void kgsl_process_add_stats(struct kgsl_process_private *priv,
 	unsigned int type, uint64_t size)
 {
-	priv->stats[type].cur += size;
-	if (priv->stats[type].max < priv->stats[type].cur)
-		priv->stats[type].max = priv->stats[type].cur;
+	u64 ret = atomic_long_add_return(size, &priv->stats[type].cur);
+
+	if (ret > priv->stats[type].max)
+		priv->stats[type].max = ret;
 	add_mm_counter(current->mm, MM_UNRECLAIMABLE, (size >> PAGE_SHIFT));
 }
 
@@ -562,7 +563,7 @@ static inline void kgsl_process_sub_stats(struct kgsl_process_private *priv,
 	struct task_struct *task;
 	struct mm_struct *mm;
 
-	priv->stats[type].cur -= size;
+	atomic_long_sub(size, &priv->stats[type].cur);
 	pid_struct = find_get_pid(priv->pid);
 	if (pid_struct) {
 		task = get_pid_task(pid_struct, PIDTYPE_PID);
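The converted counters are now atomic, but the max-watermark update in kgsl_process_add_stats() is still a plain compare-and-store, so two racing CPUs can record a slightly stale maximum; for a statistic this is benign. If an exact high-water mark were ever needed, @max would also become atomic_long_t and a cmpxchg loop would close the window; a sketch only:

/* Sketch: lock-free monotonic max over an atomic_long_t. */
static void stat_update_max(atomic_long_t *max, long val)
{
	long old = atomic_long_read(max);

	while (old < val) {
		long prev = atomic_long_cmpxchg(max, old, val);

		if (prev == old)
			break;		/* val installed */
		old = prev;		/* lost the race, re-check */
	}
}
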
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index a818997..435f325 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -239,7 +239,7 @@ static struct gmu_memdesc *allocate_gmu_kmem(struct gmu_device *gmu,
 
 	case GMU_NONCACHED_USER:
 		/* Set start address for first uncached user alloc */
-		if (next_uncached_kernel_alloc == 0)
+		if (next_uncached_user_alloc == 0)
 			next_uncached_user_alloc = gmu->vma[mem_type].start;
 
 		if (addr == 0)
diff --git a/drivers/gpu/msm/kgsl_gmu_core.c b/drivers/gpu/msm/kgsl_gmu_core.c
index 7e38399..26a283a 100644
--- a/drivers/gpu/msm/kgsl_gmu_core.c
+++ b/drivers/gpu/msm/kgsl_gmu_core.c
@@ -302,7 +302,7 @@ int gmu_core_dev_wait_for_lowest_idle(struct kgsl_device *device)
 	struct gmu_dev_ops *ops = GMU_DEVICE_OPS(device);
 
 	if (ops && ops->wait_for_lowest_idle)
-		ops->wait_for_lowest_idle(device);
+		return ops->wait_for_lowest_idle(device);
 
 	return 0;
 }
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index 864d103..cf82890 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -265,7 +265,12 @@ static int poll_adreno_gmu_reg(struct adreno_device *adreno_dev,
 	unsigned int mask, unsigned int timeout_ms)
 {
 	unsigned int val;
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
 	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+	u64 ao_pre_poll, ao_post_poll;
+
+	ao_pre_poll = gmu_core_dev_read_ao_counter(device);
 
 	while (time_is_after_jiffies(timeout)) {
 		adreno_read_gmureg(adreno_dev, offset_name, &val);
@@ -274,11 +279,16 @@ static int poll_adreno_gmu_reg(struct adreno_device *adreno_dev,
 		usleep_range(10, 100);
 	}
 
+	ao_post_poll = gmu_core_dev_read_ao_counter(device);
+
 	/* Check one last time */
 	adreno_read_gmureg(adreno_dev, offset_name, &val);
 	if ((val & mask) == expected_val)
 		return 0;
 
+	dev_err(&gmu->pdev->dev, "kgsl hfi poll timeout: always on: %lld ms\n",
+		div_u64((ao_post_poll - ao_pre_poll) * 52, USEC_PER_SEC));
+
 	return -ETIMEDOUT;
 }
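The timeout message converts always-on counter ticks to milliseconds as (ticks * 52) / USEC_PER_SEC: assuming the GMU always-on counter ticks at 19.2 MHz (~52 ns per tick), ticks * 52 is elapsed nanoseconds, and dividing by 1,000,000 (numerically USEC_PER_SEC, although the quantity being scaled is ns to ms) yields milliseconds. As a standalone sketch:

/* Sketch: AO ticks to ms, assuming a 19.2 MHz (~52 ns) AO tick. */
static u64 ao_ticks_to_ms(u64 ticks)
{
	return div_u64(ticks * 52, 1000000);	/* ns -> ms */
}

For example, a poll that spins for 96,000,000 ticks reports 4992 ms.
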
 
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 977884d..61c6eb0e 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1581,8 +1581,6 @@ static int _setup_user_context(struct kgsl_mmu *mmu)
 
 	ctx->default_pt = mmu->defaultpagetable;
 
-	kgsl_iommu_enable_clk(mmu);
-
 	sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);
 
 	/*
@@ -1605,7 +1603,6 @@ static int _setup_user_context(struct kgsl_mmu *mmu)
 		sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
 	}
 	KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
-	kgsl_iommu_disable_clk(mmu);
 
 	return 0;
 }
@@ -1654,6 +1651,21 @@ static int kgsl_iommu_start(struct kgsl_mmu *mmu)
 	int status;
 	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
 
+	kgsl_iommu_enable_clk(mmu);
+
+	status = _setup_user_context(mmu);
+	if (status) {
+		kgsl_iommu_disable_clk(mmu);
+		return status;
+	}
+
+	status = _setup_secure_context(mmu);
+	if (status) {
+		_detach_context(&iommu->ctx[KGSL_IOMMU_CONTEXT_USER]);
+		kgsl_iommu_disable_clk(mmu);
+		return status;
+	}
+
 	/* Set the following registers only when the MMU type is QSMMU */
 	if (mmu->subtype != KGSL_IOMMU_SMMU_V500) {
 		/* Enable hazard check from GPU_SMMU_HUM_CFG */
@@ -1666,18 +1678,9 @@ static int kgsl_iommu_start(struct kgsl_mmu *mmu)
 		wmb();
 	}
 
-	status = _setup_user_context(mmu);
-	if (status)
-		return status;
-
-	status = _setup_secure_context(mmu);
-	if (status) {
-		_detach_context(&iommu->ctx[KGSL_IOMMU_CONTEXT_USER]);
-		return status;
-	}
-
 	/* Make sure the hardware is programmed to the default pagetable */
 	kgsl_iommu_set_pt(mmu, mmu->defaultpagetable);
+	kgsl_iommu_disable_clk(mmu);
 	set_bit(KGSL_MMU_STARTED, &mmu->flags);
 	return 0;
 }
@@ -2443,13 +2446,37 @@ static int kgsl_iommu_set_svm_region(struct kgsl_pagetable *pagetable,
 	return ret;
 }
 
+static int get_gpuaddr(struct kgsl_pagetable *pagetable,
+		struct kgsl_memdesc *memdesc, u64 start, u64 end,
+		u64 size, unsigned int align)
+{
+	u64 addr;
+	int ret;
+
+	spin_lock(&pagetable->lock);
+	addr = _get_unmapped_area(pagetable, start, end, size, align);
+	if (addr == (u64) -ENOMEM) {
+		spin_unlock(&pagetable->lock);
+		return -ENOMEM;
+	}
+
+	ret = _insert_gpuaddr(pagetable, addr, size);
+	spin_unlock(&pagetable->lock);
+
+	if (ret == 0) {
+		memdesc->gpuaddr = addr;
+		memdesc->pagetable = pagetable;
+	}
+
+	return ret;
+}
 
 static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
 		struct kgsl_memdesc *memdesc)
 {
 	struct kgsl_iommu_pt *pt = pagetable->priv;
 	int ret = 0;
-	uint64_t addr, start, end, size;
+	u64 start, end, size;
 	unsigned int align;
 
 	if (WARN_ON(kgsl_memdesc_use_cpu_map(memdesc)))
@@ -2479,23 +2506,13 @@ static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
 	if (kgsl_memdesc_is_secured(memdesc))
 		start += secure_global_size;
 
-	spin_lock(&pagetable->lock);
-
-	addr = _get_unmapped_area(pagetable, start, end, size, align);
-
-	if (addr == (uint64_t) -ENOMEM) {
-		ret = -ENOMEM;
-		goto out;
+	ret = get_gpuaddr(pagetable, memdesc, start, end, size, align);
+	/* if OoM, retry once after flushing mem_wq */
+	if (ret == -ENOMEM) {
+		flush_workqueue(kgsl_driver.mem_workqueue);
+		ret = get_gpuaddr(pagetable, memdesc, start, end, size, align);
 	}
 
-	ret = _insert_gpuaddr(pagetable, addr, size);
-	if (ret == 0) {
-		memdesc->gpuaddr = addr;
-		memdesc->pagetable = pagetable;
-	}
-
-out:
-	spin_unlock(&pagetable->lock);
 	return ret;
 }
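Both this GPU VA allocator and kgsl_sharedmem_page_alloc_user() below adopt the same recovery idiom: on -ENOMEM, flush the deferred-free workqueue once, since queued mem_entry destroys may still be holding address space or pages, and then retry. Generically, as a sketch:

/* Sketch of the retry-once-after-flush idiom used in this patch. */
static int alloc_with_flush_retry(int (*try_alloc)(void *data), void *data)
{
	int ret = try_alloc(data);

	if (ret == -ENOMEM) {
		flush_workqueue(kgsl_driver.mem_workqueue);
		ret = try_alloc(data);
	}

	return ret;
}
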
 
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index 7077d5d..d5a7f5e 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -7,6 +7,7 @@
 #include <linux/highmem.h>
 #include <linux/of.h>
 #include <linux/scatterlist.h>
+#include <linux/swap.h>
 
 #include "kgsl_device.h"
 #include "kgsl_pool.h"
@@ -23,6 +24,7 @@
  * @reserved_pages: Number of pages reserved at init for the pool
  * @allocation_allowed: Tells if reserved pool gets exhausted, can we allocate
  * from system memory
+ * @max_pages: Limit on number of pages this pool can hold
  * @list_lock: Spinlock for page list in the pool
  * @page_list: List of pages held/reserved in this pool
  */
@@ -31,6 +33,7 @@ struct kgsl_page_pool {
 	int page_count;
 	unsigned int reserved_pages;
 	bool allocation_allowed;
+	unsigned int max_pages;
 	spinlock_t list_lock;
 	struct list_head page_list;
 };
@@ -435,7 +438,7 @@ void kgsl_pool_free_page(struct page *page)
 	if (!kgsl_pool_max_pages ||
 			(kgsl_pool_size_total() < kgsl_pool_max_pages)) {
 		pool = _kgsl_get_pool_from_order(page_order);
-		if (pool != NULL) {
+		if (pool && (pool->page_count < pool->max_pages)) {
 			_kgsl_pool_add_page(pool, page);
 			return;
 		}
@@ -466,11 +469,14 @@ bool kgsl_pool_avaialable(int page_size)
 static void kgsl_pool_reserve_pages(void)
 {
 	int i, j;
+	unsigned int page_count;
 
 	for (i = 0; i < kgsl_num_pools; i++) {
 		struct page *page;
 
-		for (j = 0; j < kgsl_pools[i].reserved_pages; j++) {
+		page_count = min_t(unsigned int, kgsl_pools[i].max_pages,
+				kgsl_pools[i].reserved_pages);
+		for (j = 0; j < page_count; j++) {
 			int order = kgsl_pools[i].pool_order;
 			gfp_t gfp_mask = kgsl_gfp_mask(order);
 
@@ -502,6 +508,9 @@ static unsigned long
 kgsl_pool_shrink_count_objects(struct shrinker *shrinker,
 					struct shrink_control *sc)
 {
+	/* Trigger mem_workqueue flush to free memory */
+	kgsl_schedule_work(&kgsl_driver.mem_work);
+
 	/* Return total pool size as everything in pool can be freed */
 	return kgsl_pool_size_total();
 }
@@ -515,7 +524,7 @@ static struct shrinker kgsl_pool_shrinker = {
 };
 
 static void kgsl_pool_config(unsigned int order, unsigned int reserved_pages,
-		bool allocation_allowed)
+		bool allocation_allowed, unsigned int max_pages)
 {
 #ifdef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
 	if (order > 0) {
@@ -530,6 +539,7 @@ static void kgsl_pool_config(unsigned int order, unsigned int reserved_pages,
 	kgsl_pools[kgsl_num_pools].pool_order = order;
 	kgsl_pools[kgsl_num_pools].reserved_pages = reserved_pages;
 	kgsl_pools[kgsl_num_pools].allocation_allowed = allocation_allowed;
+	kgsl_pools[kgsl_num_pools].max_pages = max_pages;
 	spin_lock_init(&kgsl_pools[kgsl_num_pools].list_lock);
 	INIT_LIST_HEAD(&kgsl_pools[kgsl_num_pools].page_list);
 	kgsl_num_pools++;
@@ -538,7 +548,7 @@ static void kgsl_pool_config(unsigned int order, unsigned int reserved_pages,
 static void kgsl_of_parse_mempools(struct device_node *node)
 {
 	struct device_node *child;
-	unsigned int page_size, reserved_pages = 0;
+	unsigned int page_size, reserved_pages = 0, max_pages = UINT_MAX;
 	bool allocation_allowed;
 
 	for_each_child_of_node(node, child) {
@@ -560,16 +570,32 @@ static void kgsl_of_parse_mempools(struct device_node *node)
 		allocation_allowed = of_property_read_bool(child,
 				"qcom,mempool-allocate");
 
+		of_property_read_u32(child, "qcom,mempool-max-pages",
+				&max_pages);
+
 		kgsl_pool_config(ilog2(page_size >> PAGE_SHIFT), reserved_pages,
-				allocation_allowed);
+				allocation_allowed, max_pages);
 	}
 }
 
 static void kgsl_of_get_mempools(struct device_node *parent)
 {
-	struct device_node *node;
+	struct device_node *node = NULL;
 
-	node = of_find_compatible_node(parent, NULL, "qcom,gpu-mempools");
+	/*
+	 * If available memory is less than 2GB, first check
+	 * for a low-memory pool configuration. If no
+	 * low-memory configuration is specified, fall back
+	 * to the default pool configuration.
+	 */
+	if (totalram_pages < (SZ_2G >> PAGE_SHIFT))
+		node = of_find_compatible_node(parent, NULL,
+				"qcom,gpu-mempools-lowmem");
+
+	if (node == NULL)
+		node = of_find_compatible_node(parent, NULL,
+				"qcom,gpu-mempools");
+
 	if (node != NULL) {
 		/* Get Max pages limit for mempool */
 		of_property_read_u32(node, "qcom,mempool-max-pages",
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index ddf61de..e60d1a5 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -311,6 +311,151 @@ void kgsl_pwrctrl_buslevel_update(struct kgsl_device *device,
 }
 EXPORT_SYMBOL(kgsl_pwrctrl_buslevel_update);
 
+#if IS_ENABLED(CONFIG_QCOM_CX_IPEAK)
+static int kgsl_pwrctrl_cx_ipeak_vote(struct kgsl_device *device,
+		u64 old_freq, u64 new_freq)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int i, ret;
+
+	for (i = 0; i < ARRAY_SIZE(pwr->gpu_ipeak_client); i++) {
+		struct gpu_cx_ipeak_client *ipeak_client =
+				&pwr->gpu_ipeak_client[i];
+
+		/*
+		 * Set the CX Ipeak vote for the GPU if it is about to
+		 * cross this client's threshold frequency.
+		 */
+		if (old_freq < ipeak_client->freq &&
+				new_freq >= ipeak_client->freq) {
+			ret = cx_ipeak_update(ipeak_client->client, true);
+			/*
+			 * Hardware damage is possible at peak current
+			 * if mitigation is not done to limit peak power.
+			 */
+			if (ret) {
+				dev_err(device->dev,
+					"ipeak voting failed for client%d: %d\n",
+						i, ret);
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void kgsl_pwrctrl_cx_ipeak_unvote(struct kgsl_device *device,
+		u64 old_freq, u64 new_freq)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int i, ret;
+
+	for (i = 0; i < ARRAY_SIZE(pwr->gpu_ipeak_client); i++) {
+		struct gpu_cx_ipeak_client *ipeak_client =
+				&pwr->gpu_ipeak_client[i];
+
+		/*
+		 * Remove the CX Ipeak vote for the GPU if it drops
+		 * below this client's threshold frequency.
+		 */
+		if (old_freq >= ipeak_client->freq &&
+				new_freq < ipeak_client->freq) {
+			ret = cx_ipeak_update(ipeak_client->client, false);
+
+			/* Failed to withdraw the voting from ipeak driver */
+			if (ret)
+				dev_err(device->dev,
+					"Failed to withdraw ipeak vote for client%d: %d\n",
+					i, ret);
+		}
+	}
+}
+
+static int kgsl_pwrctrl_cx_ipeak_init(struct kgsl_device *device)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	struct device_node *node, *child;
+	struct gpu_cx_ipeak_client *cx_ipeak_client;
+	int i = 0, ret = 0;
+
+	node = of_get_child_by_name(device->pdev->dev.of_node,
+				"qcom,gpu-cx-ipeak");
+
+	if (node == NULL)
+		return 0;
+
+	for_each_child_of_node(node, child) {
+		if (i >= ARRAY_SIZE(pwr->gpu_ipeak_client)) {
+			dev_err(device->dev,
+				"dt: too many CX ipeak clients defined (%d)\n",
+					i);
+			ret = -EINVAL;
+			of_node_put(child);
+			goto error;
+		}
+
+		cx_ipeak_client = &pwr->gpu_ipeak_client[i];
+
+		if (!of_property_read_u32(child, "qcom,gpu-cx-ipeak-freq",
+				&cx_ipeak_client->freq)) {
+			cx_ipeak_client->client =
+				cx_ipeak_register(child, "qcom,gpu-cx-ipeak");
+
+			if (IS_ERR_OR_NULL(cx_ipeak_client->client)) {
+				ret = IS_ERR(cx_ipeak_client->client) ?
+				PTR_ERR(cx_ipeak_client->client) : -EINVAL;
+				dev_err(device->dev,
+					"Failed to register client%d with CX Ipeak %d\n",
+					i, ret);
+			}
+		} else {
+			ret = -EINVAL;
+			dev_err(device->dev,
+				"Failed to get GPU-CX-Ipeak client%d frequency\n",
+				i);
+		}
+
+		if (ret) {
+			of_node_put(child);
+			goto error;
+		}
+
+		++i;
+	}
+
+	of_node_put(node);
+	return 0;
+
+error:
+	for (i = 0; i < ARRAY_SIZE(pwr->gpu_ipeak_client); i++) {
+		if (!IS_ERR_OR_NULL(pwr->gpu_ipeak_client[i].client)) {
+			cx_ipeak_unregister(pwr->gpu_ipeak_client[i].client);
+			pwr->gpu_ipeak_client[i].client = NULL;
+		}
+	}
+
+	of_node_put(node);
+	return ret;
+}
+#else
+static int kgsl_pwrctrl_cx_ipeak_vote(struct kgsl_device *device,
+		u64 old_freq, u64 new_freq)
+{
+	return 0;
+}
+
+static void kgsl_pwrctrl_cx_ipeak_unvote(struct kgsl_device *device,
+		u64 old_freq, u64 new_freq)
+{
+}
+
+static int kgsl_pwrctrl_cx_ipeak_init(struct kgsl_device *device)
+{
+	return 0;
+}
+#endif
+
 /**
  * kgsl_pwrctrl_pwrlevel_change_settings() - Program h/w during powerlevel
  * transitions
@@ -425,6 +570,17 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
 		!test_bit(GMU_DCVS_REPLAY, &device->gmu_core.flags))
 		return;
 
+	/*
+	 * If the new freq is at or above the CX Ipeak threshold, set
+	 * the vote before switching so that the CX Ipeak driver can
+	 * trigger any mitigation needed for a safe switch to the new
+	 * GPU freq.
+	 */
+	if (kgsl_pwrctrl_cx_ipeak_vote(device,
+			pwr->pwrlevels[old_level].gpu_freq,
+			pwr->pwrlevels[new_level].gpu_freq))
+		return;
+
 	kgsl_pwrscale_update_stats(device);
 
 	/*
@@ -490,6 +646,15 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
 	/* Timestamp the frequency change */
 	device->pwrscale.freq_change_time = ktime_to_ms(ktime_get());
 
+	/*
+	 * If the new freq is below the CX Ipeak threshold, remove the
+	 * GPU vote here, after switching to the new freq. Doing it
+	 * after the switch ensures we are below the CX Ipeak threshold
+	 * before the vote is removed.
+	 */
+	kgsl_pwrctrl_cx_ipeak_unvote(device,
+			pwr->pwrlevels[old_level].gpu_freq,
+			pwr->pwrlevels[new_level].gpu_freq);
 }
 EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
 
@@ -1959,6 +2124,14 @@ static inline void _close_pcl(struct kgsl_pwrctrl *pwr)
 	pwr->pcl = 0;
 }
 
+static void _close_gpu_cfg(struct kgsl_pwrctrl *pwr)
+{
+	if (pwr->gpu_cfg)
+		msm_bus_scale_unregister_client(pwr->gpu_cfg);
+
+	pwr->gpu_cfg = 0;
+}
+
 static inline void _close_regulators(struct kgsl_pwrctrl *pwr)
 {
 	int i;
@@ -2032,7 +2205,9 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
 	int i, k, m, n = 0, result, freq;
 	struct platform_device *pdev = device->pdev;
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	struct device_node *gpu_cfg_node;
 	struct msm_bus_scale_pdata *bus_scale_table;
+	struct msm_bus_scale_pdata *gpu_cfg_table;
 	struct device_node *gpubw_dev_node = NULL;
 	struct platform_device *p2dev;
 
@@ -2109,6 +2284,23 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
 
 	pm_runtime_enable(&pdev->dev);
 
+	gpu_cfg_node =
+		of_find_node_by_name(device->pdev->dev.of_node,
+			"qcom,cpu-to-gpu-cfg-path");
+	if (gpu_cfg_node) {
+		gpu_cfg_table =
+			msm_bus_pdata_from_node(device->pdev, gpu_cfg_node);
+		if (gpu_cfg_table)
+			pwr->gpu_cfg =
+				msm_bus_scale_register_client(gpu_cfg_table);
+
+		if (!pwr->gpu_cfg) {
+			result = -EINVAL;
+			goto error_disable_pm;
+		}
+	}
+
+
 	/* Check if gpu bandwidth vote device is defined in dts */
 	if (pwr->bus_control)
 		/* Check if gpu bandwidth vote device is defined in dts */
@@ -2133,7 +2325,7 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
 		pwr->pcl = msm_bus_scale_register_client(bus_scale_table);
 		if (pwr->pcl == 0) {
 			result = -EINVAL;
-			goto error_disable_pm;
+			goto error_cleanup_gpu_cfg;
 		}
 	}
 
@@ -2184,6 +2376,10 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
 		}
 	}
 
+	result = kgsl_pwrctrl_cx_ipeak_init(device);
+	if (result)
+		goto error_cleanup_bus_ib;
+
 	INIT_WORK(&pwr->thermal_cycle_ws, kgsl_thermal_cycle);
 	timer_setup(&pwr->thermal_timer, kgsl_thermal_timer, 0);
 
@@ -2199,8 +2395,12 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
 
 	return result;
 
+error_cleanup_bus_ib:
+	kfree(pwr->bus_ib);
 error_cleanup_pcl:
 	_close_pcl(pwr);
+error_cleanup_gpu_cfg:
+	_close_gpu_cfg(pwr);
 error_disable_pm:
 	pm_runtime_disable(&pdev->dev);
 error_cleanup_regulators:
@@ -2213,6 +2413,14 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
 void kgsl_pwrctrl_close(struct kgsl_device *device)
 {
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pwr->gpu_ipeak_client); i++) {
+		if (!IS_ERR_OR_NULL(pwr->gpu_ipeak_client[i].client)) {
+			cx_ipeak_unregister(pwr->gpu_ipeak_client[i].client);
+			pwr->gpu_ipeak_client[i].client = NULL;
+		}
+	}
 
 	pwr->power_flags = 0;
 
@@ -2225,6 +2433,8 @@ void kgsl_pwrctrl_close(struct kgsl_device *device)
 
 	_close_pcl(pwr);
 
+	_close_gpu_cfg(pwr);
+
 	pm_runtime_disable(&device->pdev->dev);
 
 	_close_regulators(pwr);
@@ -2251,8 +2461,9 @@ void kgsl_idle_check(struct work_struct *work)
 
 	requested_state = device->requested_state;
 
-	if (device->state == KGSL_STATE_ACTIVE
-		   || device->state ==  KGSL_STATE_NAP) {
+	if ((requested_state != KGSL_STATE_NONE) &&
+		(device->state == KGSL_STATE_ACTIVE
+			|| device->state ==  KGSL_STATE_NAP)) {
 
 		if (!atomic_read(&device->active_cnt)) {
 			spin_lock(&device->submit_lock);
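The vote and unvote placement around a level change is deliberately asymmetric: climbing past a client's threshold takes the vote before the frequency moves, while dropping below it releases the vote only after the frequency has moved, so the GPU never runs at or above a threshold without an active vote. The shape, sketched with set_gpu_freq() as a hypothetical stand-in for the body of kgsl_pwrctrl_pwrlevel_change():

/* Sketch: the vote window always covers the time at/above threshold. */
static int change_level_sketch(struct kgsl_device *device,
		u64 old_freq, u64 new_freq)
{
	/* Climbing: vote first; abort the switch if voting fails */
	if (kgsl_pwrctrl_cx_ipeak_vote(device, old_freq, new_freq))
		return -EINVAL;

	set_gpu_freq(device, new_freq);		/* hypothetical helper */

	/* Dropping: release only after the new freq is in effect */
	kgsl_pwrctrl_cx_ipeak_unvote(device, old_freq, new_freq);

	return 0;
}
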
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 0f4dc72..15addf7 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -7,6 +7,7 @@
 
 #include <linux/clk.h>
 #include <linux/pm_qos.h>
+#include <soc/qcom/cx_ipeak.h>
 
 /*****************************************************************************
  * power flags
@@ -18,6 +19,10 @@
 
 #define KGSL_PWR_ON	0xFFFF
 
+#define KGSL_GPU_CFG_PATH_OFF	0
+#define KGSL_GPU_CFG_PATH_LOW	1
+#define KGSL_GPU_CFG_PATH_HIGH	2
+
 #define KGSL_MAX_CLKS 17
 #define KGSL_MAX_REGULATORS 2
 
@@ -114,6 +119,16 @@ struct kgsl_regulator {
 };
 
 /**
+ * struct gpu_cx_ipeak_client - Struct holding CX Ipeak client info.
+ * @client:    Client handle used for CX Ipeak vote
+ * @freq:      GPU frequency threshold for which this client needs to vote.
+ */
+struct gpu_cx_ipeak_client {
+	struct cx_ipeak_client *client;
+	unsigned int freq;
+};
+
+/**
  * struct kgsl_pwrctrl - Power control settings for a KGSL device
  * @interrupt_num - The interrupt number for the device
  * @grp_clks - Array of clocks structures that we control
@@ -133,6 +148,7 @@ struct kgsl_regulator {
  * @clock_times - Each GPU frequency's accumulated active time in us
  * @regulators - array of pointers to kgsl_regulator structs
  * @pcl - bus scale identifier
+ * @gpu_cfg - CPU to GPU AHB path bus scale identifier
  * @irq_name - resource name for the IRQ
  * @clk_stats - structure of clock statistics
  * @l2pc_cpus_mask - mask to avoid L2PC on masked CPUs
@@ -164,6 +180,7 @@ struct kgsl_regulator {
  * isense_clk_indx - index of isense clock, 0 if no isense
  * isense_clk_on_level - isense clock rate is XO rate below this level.
  * tzone_name - pointer to thermal zone name of GPU temperature sensor
+ * gpu_cx_ipeak_client - CX Ipeak clients used by GPU
  */
 
 struct kgsl_pwrctrl {
@@ -190,6 +207,7 @@ struct kgsl_pwrctrl {
 	u64 clock_times[KGSL_MAX_PWRLEVELS];
 	struct kgsl_regulator regulators[KGSL_MAX_REGULATORS];
 	uint32_t pcl;
+	uint32_t gpu_cfg;
 	const char *irq_name;
 	struct kgsl_clk_stats clk_stats;
 	unsigned int l2pc_cpus_mask;
@@ -221,6 +239,7 @@ struct kgsl_pwrctrl {
 	unsigned int gpu_bimc_int_clk_freq;
 	bool gpu_bimc_interface_enabled;
 	const char *tzone_name;
+	struct gpu_cx_ipeak_client gpu_ipeak_client[2];
 };
 
 int kgsl_pwrctrl_init(struct kgsl_device *device);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 0a22259..e93d565 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -166,18 +166,21 @@ static ssize_t
 gpumem_mapped_show(struct kgsl_process_private *priv,
 				int type, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%llu\n",
-			priv->gpumem_mapped);
+	return scnprintf(buf, PAGE_SIZE, "%ld\n",
+			atomic_long_read(&priv->gpumem_mapped));
 }
 
 static ssize_t
 gpumem_unmapped_show(struct kgsl_process_private *priv, int type, char *buf)
 {
-	if (priv->gpumem_mapped > priv->stats[type].cur)
+	u64 gpumem_total = atomic_long_read(&priv->stats[type].cur);
+	u64 gpumem_mapped = atomic_long_read(&priv->gpumem_mapped);
+
+	if (gpumem_mapped > gpumem_total)
 		return -EIO;
 
 	return scnprintf(buf, PAGE_SIZE, "%llu\n",
-			priv->stats[type].cur - priv->gpumem_mapped);
+			gpumem_total - gpumem_mapped);
 }
 
 static struct kgsl_mem_entry_attribute debug_memstats[] = {
@@ -194,7 +197,8 @@ static struct kgsl_mem_entry_attribute debug_memstats[] = {
 static ssize_t
 mem_entry_show(struct kgsl_process_private *priv, int type, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%llu\n", priv->stats[type].cur);
+	return scnprintf(buf, PAGE_SIZE, "%ld\n",
+			atomic_long_read(&priv->stats[type].cur));
 }
 
 /**
@@ -465,7 +469,7 @@ static int kgsl_page_alloc_vmfault(struct kgsl_memdesc *memdesc,
 		get_page(page);
 		vmf->page = page;
 
-		memdesc->mapsize += PAGE_SIZE;
+		atomic_long_add(PAGE_SIZE, &memdesc->mapsize);
 
 		return 0;
 	}
@@ -647,7 +651,7 @@ static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
 	else if (ret == -EFAULT)
 		return VM_FAULT_SIGBUS;
 
-	memdesc->mapsize += PAGE_SIZE;
+	atomic_long_add(PAGE_SIZE, &memdesc->mapsize);
 
 	return VM_FAULT_NOPAGE;
 }
@@ -885,6 +889,7 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
 	unsigned int pcount = 0;
 	size_t len;
 	unsigned int align;
+	bool memwq_flush_done = false;
 
 	static DEFINE_RATELIMIT_STATE(_rs,
 					DEFAULT_RATELIMIT_INTERVAL,
@@ -960,6 +965,13 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
 			if (page_count == -EAGAIN)
 				continue;
 
+			/* if OoM, retry once after flushing mem_wq */
+			if (page_count == -ENOMEM && !memwq_flush_done) {
+				flush_workqueue(kgsl_driver.mem_workqueue);
+				memwq_flush_done = true;
+				continue;
+			}
+
 			/*
 			 * Update sglen and memdesc size,as requested allocation
 			 * not served fully. So that they can be correctly freed
diff --git a/drivers/hid/hid-qvr.c b/drivers/hid/hid-qvr.c
index 5741e4d..6df6010 100644
--- a/drivers/hid/hid-qvr.c
+++ b/drivers/hid/hid-qvr.c
@@ -105,6 +105,7 @@ struct qvr_external_sensor {
 
 static DECLARE_WAIT_QUEUE_HEAD(wq);
 static struct qvr_external_sensor qvr_external_sensor;
+static uint8_t debug_orientation;
 
 static int read_calibration_len(void)
 {
@@ -237,7 +238,6 @@ static int control_imu_stream(bool status)
 	return -ETIME;
 }
 
-
 static int qvr_send_package_wrap(u8 *message, int msize, struct hid_device *hid)
 {
 	struct qvr_external_sensor *sensor = &qvr_external_sensor;
@@ -254,7 +254,32 @@ static int qvr_send_package_wrap(u8 *message, int msize, struct hid_device *hid)
 
 	memcpy((void *)&imuData, (void *)message,
 		sizeof(struct external_imu_format));
-
+	if (!sensor->ts_base) {
+		if (imuData.gNumerator == 1 && imuData.aNumerator == 1)
+			debug_orientation = 1;
+		else
+			debug_orientation = 0;
+		pr_debug("qvr msize = %d reportID=%d padding=%d\n"
+			"qvr version=%d numImu=%d nspip=%d pSize=%d\n"
+			"qvr imuID=%d sampleID=%d temp=%d\n",
+			msize, imuData.reportID, imuData.padding,
+			imuData.version, imuData.numIMUs,
+			imuData.numSamplesPerImuPacket,
+			imuData.totalPayloadSize, imuData.imuID,
+			imuData.sampleID, imuData.temperature);
+		pr_debug("qvr gts0=%llu num=%d denom=%d\n"
+			"qvr gx0=%d gy0=%d gz0=%d\n",
+			imuData.gts0, imuData.gNumerator, imuData.gDenominator,
+			imuData.gx0, imuData.gy0, imuData.gz0);
+		pr_debug("qvr ats0=%llu num=%d denom=%d\n"
+			"qvr ax0=%d ay0=%d az0=%d\n",
+			imuData.ats0, imuData.aNumerator, imuData.aDenominator,
+			imuData.ax0, imuData.ay0, imuData.az0);
+		pr_debug("qvr mts0=%llu num=%d denom=%d\n"
+			"mx0=%d my0=%d mz0=%d\n",
+			imuData.mts0, imuData.mNumerator, imuData.mDenominator,
+			imuData.mx0, imuData.my0, imuData.mz0);
+	}
 	if (!sensor->ts_base)
 		sensor->ts_base = ktime_to_ns(ktime_get_boottime());
 	if (!sensor->ts_offset)
@@ -276,15 +301,28 @@ static int qvr_send_package_wrap(u8 *message, int msize, struct hid_device *hid)
 	else
 		data->mts = data->ats;
 	data->gts = data->ats;
-	data->ax = -imuData.ax0;
-	data->ay = imuData.ay0;
-	data->az = -imuData.az0;
-	data->gx = -imuData.gx0;
-	data->gy = imuData.gy0;
-	data->gz = -imuData.gz0;
-	data->mx = -imuData.my0;
-	data->my = -imuData.mx0;
-	data->mz = -imuData.mz0;
+
+	if (debug_orientation == 1) {
+		data->ax = -imuData.ax0;
+		data->ay = imuData.ay0;
+		data->az = -imuData.az0;
+		data->gx = -imuData.gx0;
+		data->gy = imuData.gy0;
+		data->gz = -imuData.gz0;
+		data->mx = -imuData.my0;
+		data->my = -imuData.mx0;
+		data->mz = -imuData.mz0;
+	} else {
+		data->ax = -imuData.ay0;
+		data->ay = -imuData.ax0;
+		data->az = -imuData.az0;
+		data->gx = -imuData.gy0;
+		data->gy = -imuData.gx0;
+		data->gz = -imuData.gz0;
+		data->mx = -imuData.my0;
+		data->my = -imuData.mx0;
+		data->mz = -imuData.mz0;
+	}
 
 	trace_qvr_recv_sensor("gyro", data->gts, data->gx, data->gy, data->gz);
 	trace_qvr_recv_sensor("accel", data->ats, data->ax, data->ay, data->az);
diff --git a/drivers/hwtracing/coresight/coresight-csr.c b/drivers/hwtracing/coresight/coresight-csr.c
index e0d023719..775f363 100644
--- a/drivers/hwtracing/coresight/coresight-csr.c
+++ b/drivers/hwtracing/coresight/coresight-csr.c
@@ -83,6 +83,7 @@ struct csr_drvdata {
 	bool			set_byte_cntr_support;
 	bool			timestamp_support;
 	bool			enable_flush;
+	bool			aodbg_csr_support;
 };
 
 static LIST_HEAD(csr_list);
@@ -297,6 +298,7 @@ static ssize_t timestamp_show(struct device *dev,
 	uint32_t val, time_val0, time_val1;
 	int ret;
 	unsigned long flags;
+	unsigned long csr_ts_offset = 0;
 
 	struct csr_drvdata *drvdata = dev_get_drvdata(dev->parent);
 
@@ -305,6 +307,9 @@ static ssize_t timestamp_show(struct device *dev,
 		return 0;
 	}
 
+	if (drvdata->aodbg_csr_support)
+		csr_ts_offset = 0x14;
+
 	ret = clk_prepare_enable(drvdata->clk);
 	if (ret)
 		return ret;
@@ -312,16 +317,16 @@ static ssize_t timestamp_show(struct device *dev,
 	spin_lock_irqsave(&drvdata->spin_lock, flags);
 	CSR_UNLOCK(drvdata);
 
-	val = csr_readl(drvdata, CSR_TIMESTAMPCTRL);
+	val = csr_readl(drvdata, CSR_TIMESTAMPCTRL - csr_ts_offset);
 
 	val  = val & ~BIT(0);
-	csr_writel(drvdata, val, CSR_TIMESTAMPCTRL);
+	csr_writel(drvdata, val, CSR_TIMESTAMPCTRL - csr_ts_offset);
 
 	val  = val | BIT(0);
-	csr_writel(drvdata, val, CSR_TIMESTAMPCTRL);
+	csr_writel(drvdata, val, CSR_TIMESTAMPCTRL - csr_ts_offset);
 
-	time_val0 = csr_readl(drvdata, CSR_QDSSTIMEVAL0);
-	time_val1 = csr_readl(drvdata, CSR_QDSSTIMEVAL1);
+	time_val0 = csr_readl(drvdata, CSR_QDSSTIMEVAL0 - csr_ts_offset);
+	time_val1 = csr_readl(drvdata, CSR_QDSSTIMEVAL1 - csr_ts_offset);
 
 	CSR_LOCK(drvdata);
 	spin_unlock_irqrestore(&drvdata->spin_lock, flags);
@@ -468,6 +473,13 @@ static int csr_probe(struct platform_device *pdev)
 	else
 		dev_dbg(dev, "timestamp_support operation supported\n");
 
+	drvdata->aodbg_csr_support = of_property_read_bool(pdev->dev.of_node,
+						"qcom,aodbg-csr-support");
+	if (!drvdata->aodbg_csr_support)
+		dev_dbg(dev, "aodbg_csr_support operation not supported\n");
+	else
+		dev_dbg(dev, "aodbg_csr_support operation supported\n");
+
 	if (drvdata->usb_bam_support)
 		drvdata->flushperiod = FLUSHPERIOD_2048;
 
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index a0365e2..a1213c0 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -2091,16 +2091,16 @@ static u32 etmv4_cross_read(const struct device *dev, u32 offset)
 	coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read,	\
 			      name, offset)
 
-coresight_etm4x_reg(trcpdcr, TRCPDCR);
-coresight_etm4x_reg(trcpdsr, TRCPDSR);
-coresight_etm4x_reg(trclsr, TRCLSR);
-coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
-coresight_etm4x_reg(trcdevid, TRCDEVID);
-coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
-coresight_etm4x_reg(trcpidr0, TRCPIDR0);
-coresight_etm4x_reg(trcpidr1, TRCPIDR1);
-coresight_etm4x_reg(trcpidr2, TRCPIDR2);
-coresight_etm4x_reg(trcpidr3, TRCPIDR3);
+coresight_etm4x_cross_read(trcpdcr, TRCPDCR);
+coresight_etm4x_cross_read(trcpdsr, TRCPDSR);
+coresight_etm4x_cross_read(trclsr, TRCLSR);
+coresight_etm4x_cross_read(trcauthstatus, TRCAUTHSTATUS);
+coresight_etm4x_cross_read(trcdevid, TRCDEVID);
+coresight_etm4x_cross_read(trcdevtype, TRCDEVTYPE);
+coresight_etm4x_cross_read(trcpidr0, TRCPIDR0);
+coresight_etm4x_cross_read(trcpidr1, TRCPIDR1);
+coresight_etm4x_cross_read(trcpidr2, TRCPIDR2);
+coresight_etm4x_cross_read(trcpidr3, TRCPIDR3);
 coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
 coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
 coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
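These management registers live in each CPU's own ETM instance, so reading them from whichever CPU happens to run the sysfs handler (what coresight_etm4x_reg amounts to) can return another unit's state; the cross-read variant forwards the access to the owning CPU. Assuming etmv4_cross_read() is built on smp_call_function_single(), the underlying pattern is roughly:

/* Sketch of a cross-CPU MMIO read; these names are illustrative. */
struct cross_read_arg {
	void __iomem *base;
	u32 offset;
	u32 val;
};

static void cross_read_fn(void *data)
{
	struct cross_read_arg *arg = data;

	arg->val = readl_relaxed(arg->base + arg->offset);
}

static u32 cross_read(int cpu, void __iomem *base, u32 offset)
{
	struct cross_read_arg arg = { .base = base, .offset = offset };

	smp_call_function_single(cpu, cross_read_fn, &arg, 1);
	return arg.val;
}
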
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index f74f00e..a7adfd7 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -1029,12 +1029,11 @@ static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
 	tmc_sync_etr_buf(drvdata);
 }
 
-void tmc_etr_disable_hw(struct tmc_drvdata *drvdata, bool flush)
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
 {
 	CS_UNLOCK(drvdata->base);
 
-	if (flush)
-		tmc_flush_and_stop(drvdata);
+	tmc_flush_and_stop(drvdata);
 	/*
 	 * When operating in sysFS mode the content of the buffer needs to be
 	 * read before the TMC is disabled.
@@ -1458,7 +1457,7 @@ static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
 	return -EINVAL;
 }
 
-static void _tmc_disable_etr_sink(struct coresight_device *csdev, bool flush)
+static void _tmc_disable_etr_sink(struct coresight_device *csdev)
 {
 	unsigned long flags;
 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -1482,10 +1481,10 @@ static void _tmc_disable_etr_sink(struct coresight_device *csdev, bool flush)
 				goto out;
 			} else {
 				usb_qdss_close(drvdata->usbch);
-				tmc_etr_disable_hw(drvdata, flush);
+				tmc_etr_disable_hw(drvdata);
 			}
 		} else {
-			tmc_etr_disable_hw(drvdata, flush);
+			tmc_etr_disable_hw(drvdata);
 		}
 		drvdata->mode = CS_MODE_DISABLED;
 	}
@@ -1524,7 +1523,7 @@ static void tmc_disable_etr_sink(struct coresight_device *csdev)
 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
 	mutex_lock(&drvdata->mem_lock);
-	_tmc_disable_etr_sink(csdev, true);
+	_tmc_disable_etr_sink(csdev);
 	mutex_unlock(&drvdata->mem_lock);
 }
 
@@ -1553,12 +1552,14 @@ int tmc_etr_switch_mode(struct tmc_drvdata *drvdata, const char *out_mode)
 		return 0;
 	}
 
-	_tmc_disable_etr_sink(drvdata->csdev, false);
+	coresight_disable_all_source_link();
+	_tmc_disable_etr_sink(drvdata->csdev);
 	old_mode = drvdata->out_mode;
 	drvdata->out_mode = new_mode;
 	if (tmc_enable_etr_sink_sysfs(drvdata->csdev)) {
 		drvdata->out_mode = old_mode;
 		tmc_enable_etr_sink_sysfs(drvdata->csdev);
+		coresight_enable_all_source_link();
 		dev_err(drvdata->dev, "Switch to %s failed. Fall back to %s.\n",
 			str_tmc_etr_out_mode[new_mode],
 			str_tmc_etr_out_mode[old_mode]);
@@ -1566,6 +1567,7 @@ int tmc_etr_switch_mode(struct tmc_drvdata *drvdata, const char *out_mode)
 		return -EINVAL;
 	}
 
+	coresight_enable_all_source_link();
 	mutex_unlock(&drvdata->mem_lock);
 	return 0;
 }
@@ -1622,7 +1624,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
 		coresight_disable_all_source_link();
 		spin_lock_irqsave(&drvdata->spinlock, flags);
 
-		tmc_etr_disable_hw(drvdata, true);
+		tmc_etr_disable_hw(drvdata);
 	}
 	drvdata->reading = true;
 out:
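Switching the ETR out-mode now quiesces every enabled trace source across the sink swap and restores them on both the success and the fallback path. Collapsed to its control flow, as a sketch:

/* Sketch: source links are restored on success and fallback alike. */
static int etr_switch_mode_sketch(struct tmc_drvdata *drvdata)
{
	int ret = 0;

	coresight_disable_all_source_link();
	_tmc_disable_etr_sink(drvdata->csdev);

	if (tmc_enable_etr_sink_sysfs(drvdata->csdev))
		ret = -EINVAL;	/* the caller restores the old mode */

	coresight_enable_all_source_link();

	return ret;
}
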
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 22b9fbc..0f93cfe 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -312,7 +312,7 @@ void tmc_free_etr_buf(struct etr_buf *etr_buf);
 void __tmc_etr_disable_to_bam(struct tmc_drvdata *drvdata);
 void tmc_etr_bam_disable(struct tmc_drvdata *drvdata);
 void tmc_etr_enable_hw(struct tmc_drvdata *drvdata);
-void tmc_etr_disable_hw(struct tmc_drvdata *drvdata, bool flush);
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata);
 void usb_notifier(void *priv, unsigned int event, struct qdss_request *d_req,
 		  struct usb_qdss_ch *ch);
 int tmc_etr_bam_init(struct amba_device *adev,
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index c9ef2cc..e22df19 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -21,6 +21,7 @@
 #include <linux/msm_gpi.h>
 #include <linux/ioctl.h>
 #include <linux/pinctrl/consumer.h>
+#include <linux/slab.h>
 
 #define SE_I2C_TX_TRANS_LEN		(0x26C)
 #define SE_I2C_RX_TRANS_LEN		(0x270)
@@ -74,12 +75,19 @@
 
 #define I2C_TIMEOUT_MIN_USEC	500000
 
+#define MAX_SE	20
+
 enum i2c_se_mode {
 	UNINITIALIZED,
 	FIFO_SE_DMA,
 	GSI_ONLY,
 };
 
+struct dbg_buf_ctxt {
+	void *virt_buf;
+	void *map_buf;
+};
+
 struct geni_i2c_dev {
 	struct device *dev;
 	void __iomem *base;
@@ -118,8 +126,13 @@ struct geni_i2c_dev {
 	enum i2c_se_mode se_mode;
 	bool cmd_done;
 	bool is_shared;
+	u32 dbg_num;
+	struct dbg_buf_ctxt *dbg_buf_ptr;
 };
 
+static struct geni_i2c_dev *gi2c_dev_dbg[MAX_SE];
+static int arr_idx;
+
 struct geni_i2c_err_log {
 	int err;
 	const char *msg;
@@ -209,12 +222,6 @@ static inline void qcom_geni_i2c_calc_timeout(struct geni_i2c_dev *gi2c)
 
 static void geni_i2c_err(struct geni_i2c_dev *gi2c, int err)
 {
-	if (gi2c->cur)
-		GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
-			"len:%d, slv-addr:0x%x, RD/WR:%d timeout:%u\n",
-			gi2c->cur->len, gi2c->cur->addr, gi2c->cur->flags,
-			gi2c->xfer_timeout);
-
 	if (err == I2C_NACK || err == GENI_ABORT_DONE) {
 		GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev, "%s\n",
 			     gi2c_log[err].msg);
@@ -223,8 +230,6 @@ static void geni_i2c_err(struct geni_i2c_dev *gi2c, int err)
 		GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev, "%s\n",
 			     gi2c_log[err].msg);
 	}
-	GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev, "%s: se-mode:%d\n", __func__,
-							gi2c->se_mode);
 	geni_se_dump_dbg_regs(&gi2c->i2c_rsc, gi2c->base, gi2c->ipcl);
 err_ret:
 	gi2c->err = gi2c_log[err].err;
@@ -241,7 +246,13 @@ static irqreturn_t geni_i2c_irq(int irq, void *dev)
 	u32 dma = readl_relaxed(gi2c->base + SE_GENI_DMA_MODE_EN);
 	struct i2c_msg *cur = gi2c->cur;
 
-	if (!cur || (m_stat & M_CMD_FAILURE_EN) ||
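+	/* No transfer in flight: log the spurious interrupt and bail out. */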
+	if (!cur) {
+		geni_se_dump_dbg_regs(&gi2c->i2c_rsc, gi2c->base, gi2c->ipcl);
+		GENI_SE_ERR(gi2c->ipcl, false, gi2c->dev, "Spurious irq\n");
+		goto irqret;
+	}
+
+	if ((m_stat & M_CMD_FAILURE_EN) ||
 		    (dm_rx_st & (DM_I2C_CB_ERR)) ||
 		    (m_stat & M_CMD_CANCEL_EN) ||
 		    (m_stat & M_CMD_ABORT_EN)) {
@@ -268,12 +279,6 @@ static irqreturn_t geni_i2c_irq(int irq, void *dev)
 		goto irqret;
 	}
 
-	if (dma) {
-		dev_dbg(gi2c->dev, "i2c dma tx:0x%x, dma rx:0x%x\n", dm_tx_st,
-			dm_rx_st);
-		goto irqret;
-	}
-
 	if (((m_stat & M_RX_FIFO_WATERMARK_EN) ||
 		(m_stat & M_RX_FIFO_LAST_EN)) && (cur->flags & I2C_M_RD)) {
 		u32 rxcnt = rx_st & RX_FIFO_WC_MSK;
@@ -444,6 +449,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 			goto geni_i2c_gsi_xfer_out;
 		}
 	}
+
 	if (!gi2c->rx_c) {
 		gi2c->rx_c = dma_request_slave_channel(gi2c->dev, "rx");
 		if (!gi2c->rx_c) {
@@ -559,6 +565,8 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 						  sizeof(gi2c->go_t));
 
 		if (msgs[i].flags & I2C_M_RD) {
+			GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
+				"msg[%d].len:%d R\n", i, gi2c->cur->len);
 			sg_init_table(&gi2c->rx_sg, 1);
 			ret = geni_se_iommu_map_buf(rx_dev, &gi2c->rx_ph,
 						dma_buf, msgs[i].len,
@@ -571,6 +579,11 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 								false);
 				goto geni_i2c_gsi_xfer_out;
 
+			} else if (gi2c->dbg_buf_ptr) {
+				gi2c->dbg_buf_ptr[i].virt_buf =
+							(void *)dma_buf;
+				gi2c->dbg_buf_ptr[i].map_buf =
+							(void *)&gi2c->rx_ph;
 			}
 			gi2c->rx_t.dword[0] =
 				MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(gi2c->rx_ph);
@@ -601,6 +614,8 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 			rx_cookie = dmaengine_submit(gi2c->rx_desc);
 			dma_async_issue_pending(gi2c->rx_c);
 		} else {
+			GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
+				"msg[%d].len:%d W\n", i, gi2c->cur->len);
 			ret = geni_se_iommu_map_buf(tx_dev, &gi2c->tx_ph,
 						dma_buf, msgs[i].len,
 						DMA_TO_DEVICE);
@@ -612,7 +627,13 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 								false);
 				goto geni_i2c_gsi_xfer_out;
 
+			} else if (gi2c->dbg_buf_ptr) {
+				gi2c->dbg_buf_ptr[i].virt_buf =
+							(void *)dma_buf;
+				gi2c->dbg_buf_ptr[i].map_buf =
+							(void *)&gi2c->tx_ph;
 			}
+
 			gi2c->tx_t.dword[0] =
 				MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(gi2c->tx_ph);
 			gi2c->tx_t.dword[1] =
@@ -656,9 +677,9 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 						gi2c->xfer_timeout);
 		if (!timeout) {
 			GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
-				    "GSI Txn timed out: %u len: %d slv:addr: 0x%x R/W: %d\n",
-					gi2c->xfer_timeout, gi2c->cur->len,
-					gi2c->cur->addr, gi2c->cur->flags);
+				"I2C gsi xfer timeout:%u flags:%d addr:0x%x\n",
+				gi2c->xfer_timeout, gi2c->cur->flags,
+				gi2c->cur->addr);
 			geni_se_dump_dbg_regs(&gi2c->i2c_rsc, gi2c->base,
 						gi2c->ipcl);
 			gi2c->err = -ETIMEDOUT;
@@ -693,8 +714,15 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 	int i, ret = 0, timeout = 0;
 
 	gi2c->err = 0;
-	gi2c->cur = &msgs[0];
 	reinit_completion(&gi2c->xfer);
+
+	/* Reject transfers while runtime PM is disabled so clients respect system suspend */
+	if (!pm_runtime_enabled(gi2c->dev)) {
+		GENI_SE_ERR(gi2c->ipcl, false, gi2c->dev,
+			"%s: System suspended\n", __func__);
+		return -EACCES;
+	}
+
 	ret = pm_runtime_get_sync(gi2c->dev);
 	if (ret < 0) {
 		GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
@@ -704,6 +732,18 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 		pm_runtime_set_suspended(gi2c->dev);
 		return ret;
 	}
+
+	GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
+		"n:%d addr:0x%x\n", num, msgs[0].addr);
+
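+	/*
+	 * Refresh the per-transfer debug context. Allocation failure only
+	 * disables buffer logging; the transfer itself still proceeds.
+	 */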
+	gi2c->dbg_num = num;
+	kfree(gi2c->dbg_buf_ptr);
+	gi2c->dbg_buf_ptr =
+		kcalloc(num, sizeof(struct dbg_buf_ctxt), GFP_KERNEL);
+	if (!gi2c->dbg_buf_ptr)
+		GENI_SE_ERR(gi2c->ipcl, false, gi2c->dev,
+			"Failed to allocate buf logging context\n");
+
 	if (gi2c->se_mode == GSI_ONLY) {
 		ret = geni_i2c_gsi_xfer(adap, msgs, num);
 		goto geni_i2c_txn_ret;
@@ -712,9 +752,6 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 		gi2c->is_shared = false;
 	}
 
-	qcom_geni_i2c_conf(gi2c, 0);
-	dev_dbg(gi2c->dev, "i2c xfer:num:%d, msgs:len:%d,flg:%d\n",
-				num, msgs[0].len, msgs[0].flags);
 	for (i = 0; i < num; i++) {
 		int stretch = (i < (num - 1));
 		u32 m_param = 0;
@@ -748,9 +785,8 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 		}
 
 		if (msgs[i].flags & I2C_M_RD) {
-			dev_dbg(gi2c->dev,
-				"READ,n:%d,i:%d len:%d, stretch:%d\n",
-					num, i, msgs[i].len, stretch);
+			GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
+				"msgs[%d].len:%d R\n", i, msgs[i].len);
 			geni_write_reg(msgs[i].len,
 				       gi2c->base, SE_I2C_RX_TRANS_LEN);
 			m_cmd = I2C_READ;
@@ -765,12 +801,16 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 					mode = FIFO_MODE;
 					ret = geni_se_select_mode(gi2c->base,
 								  mode);
+				} else if (gi2c->dbg_buf_ptr) {
+					gi2c->dbg_buf_ptr[i].virt_buf =
+								(void *)dma_buf;
+					gi2c->dbg_buf_ptr[i].map_buf =
+								(void *)&rx_dma;
 				}
 			}
 		} else {
-			dev_dbg(gi2c->dev,
-				"WRITE:n:%d,i:%d len:%d, stretch:%d, m_param:0x%x\n",
-					num, i, msgs[i].len, stretch, m_param);
+			GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
+				"msgs[%d].len:%d W\n", i, msgs[i].len);
 			geni_write_reg(msgs[i].len, gi2c->base,
 						SE_I2C_TX_TRANS_LEN);
 			m_cmd = I2C_WRITE;
@@ -785,6 +825,11 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 					mode = FIFO_MODE;
 					ret = geni_se_select_mode(gi2c->base,
 								  mode);
+				} else if (gi2c->dbg_buf_ptr) {
+					gi2c->dbg_buf_ptr[i].virt_buf =
+								(void *)dma_buf;
+					gi2c->dbg_buf_ptr[i].map_buf =
+								(void *)&tx_dma;
 				}
 			}
 			if (mode == FIFO_MODE) /* Get FIFO IRQ */
@@ -795,19 +840,23 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 		mb();
 		timeout = wait_for_completion_timeout(&gi2c->xfer,
 						gi2c->xfer_timeout);
-		if (!timeout)
+		if (!timeout) {
+			GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
+				"I2C xfer timeout:%u\n", gi2c->xfer_timeout);
 			geni_i2c_err(gi2c, GENI_TIMEOUT);
+		}
 
 		if (gi2c->err) {
 			reinit_completion(&gi2c->xfer);
-			gi2c->cur = NULL;
 			geni_cancel_m_cmd(gi2c->base);
 			timeout = wait_for_completion_timeout(&gi2c->xfer, HZ);
-			if (!timeout)
+			if (!timeout) {
+				GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
+					"Abort\n");
 				geni_abort_m_cmd(gi2c->base);
+			}
 		}
-		gi2c->cur_wr = 0;
-		gi2c->cur_rd = 0;
+
 		if (mode == SE_DMA) {
 			if (gi2c->err) {
 				reinit_completion(&gi2c->xfer);
@@ -825,9 +874,11 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 					      msgs[i].len);
 			i2c_put_dma_safe_msg_buf(dma_buf, &msgs[i], !gi2c->err);
 		}
+
 		ret = gi2c->err;
 		if (gi2c->err) {
-			dev_err(gi2c->dev, "i2c error :%d\n", gi2c->err);
+			GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
+				"i2c error :%d\n", gi2c->err);
 			break;
 		}
 	}
@@ -837,9 +888,12 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 
 	pm_runtime_mark_last_busy(gi2c->dev);
 	pm_runtime_put_autosuspend(gi2c->dev);
+	gi2c->cur_wr = 0;
+	gi2c->cur_rd = 0;
 	gi2c->cur = NULL;
 	gi2c->err = 0;
-	dev_dbg(gi2c->dev, "i2c txn ret:%d\n", ret);
+	GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
+			"i2c txn ret:%d\n", ret);
 	return ret;
 }
 
@@ -865,6 +919,10 @@ static int geni_i2c_probe(struct platform_device *pdev)
 	if (!gi2c)
 		return -ENOMEM;
 
+	/* Record this controller for debug access; guard against overflow */
+	if (arr_idx < MAX_SE)
+		gi2c_dev_dbg[arr_idx++] = gi2c;
+
 	gi2c->dev = &pdev->dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -901,14 +959,12 @@ static int geni_i2c_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
 		return ret;
 	}
-
 	gi2c->i2c_rsc.m_ahb_clk = devm_clk_get(&pdev->dev, "m-ahb");
 	if (IS_ERR(gi2c->i2c_rsc.m_ahb_clk)) {
 		ret = PTR_ERR(gi2c->i2c_rsc.m_ahb_clk);
 		dev_err(&pdev->dev, "Err getting M AHB clk %d\n", ret);
 		return ret;
 	}
-
 	gi2c->i2c_rsc.s_ahb_clk = devm_clk_get(&pdev->dev, "s-ahb");
 	if (IS_ERR(gi2c->i2c_rsc.s_ahb_clk)) {
 		ret = PTR_ERR(gi2c->i2c_rsc.s_ahb_clk);
@@ -987,6 +1043,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
 				   gi2c->irq, ret);
 		return ret;
 	}
+
 	disable_irq(gi2c->irq);
 	i2c_set_adapdata(&gi2c->adap, gi2c);
 	gi2c->adap.dev.parent = gi2c->dev;
@@ -998,7 +1055,11 @@ static int geni_i2c_probe(struct platform_device *pdev)
 	pm_runtime_set_autosuspend_delay(gi2c->dev, I2C_AUTO_SUSPEND_DELAY);
 	pm_runtime_use_autosuspend(gi2c->dev);
 	pm_runtime_enable(gi2c->dev);
-	i2c_add_adapter(&gi2c->adap);
+	ret = i2c_add_adapter(&gi2c->adap);
+	if (ret) {
+		dev_err(gi2c->dev, "Add adapter failed\n");
+		return ret;
+	}
 
 	dev_dbg(gi2c->dev, "I2C probed\n");
 	return 0;
@@ -1017,6 +1078,9 @@ static int geni_i2c_remove(struct platform_device *pdev)
 
 static int geni_i2c_resume_noirq(struct device *device)
 {
+	struct geni_i2c_dev *gi2c = dev_get_drvdata(device);
+
+	GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev, "%s\n", __func__);
 	return 0;
 }
 
@@ -1034,6 +1098,7 @@ static int geni_i2c_runtime_suspend(struct device *dev)
 	} else {
 		se_geni_resources_off(&gi2c->i2c_rsc);
 	}
+	GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev, "%s\n", __func__);
 	return 0;
 }
 
@@ -1050,7 +1115,6 @@ static int geni_i2c_runtime_resume(struct device *dev)
 	}
 
 	ret = se_geni_resources_on(&gi2c->i2c_rsc);
-
 	if (ret)
 		return ret;
 
@@ -1070,23 +1134,27 @@ static int geni_i2c_runtime_resume(struct device *dev)
 			gi2c->se_mode = GSI_ONLY;
 			geni_se_select_mode(gi2c->base, GSI_DMA);
 			GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
-				    "i2c in GSI ONLY mode\n");
+					"i2c GSI mode\n");
 		} else {
 			int gi2c_tx_depth = get_tx_fifo_depth(gi2c->base);
 
 			gi2c->se_mode = FIFO_SE_DMA;
-
 			gi2c->tx_wm = gi2c_tx_depth - 1;
 			geni_se_init(gi2c->base, gi2c->tx_wm, gi2c_tx_depth);
 			se_config_packing(gi2c->base, 8, 4, true);
+			qcom_geni_i2c_conf(gi2c, 0);
 			GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
 				    "i2c fifo/se-dma mode. fifo depth:%d\n",
 				    gi2c_tx_depth);
 		}
+		GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev, "i2c-%d: %s\n",
+			gi2c->adap.nr, dev_name(gi2c->dev));
 	}
+
 	if (gi2c->se_mode == FIFO_SE_DMA)
 		enable_irq(gi2c->irq);
 
+	GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev, "%s\n", __func__);
 	return 0;
 }
 
@@ -1103,6 +1171,8 @@ static int geni_i2c_suspend_noirq(struct device *device)
 		return -EBUSY;
 	}
 	if (!pm_runtime_status_suspended(device)) {
+		GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
+			"%s\n", __func__);
 		geni_i2c_runtime_suspend(device);
 		pm_runtime_disable(device);
 		pm_runtime_set_suspended(device);
diff --git a/drivers/i3c/master/i3c-master-qcom-geni.c b/drivers/i3c/master/i3c-master-qcom-geni.c
index 7d3e7c5..d36280e 100644
--- a/drivers/i3c/master/i3c-master-qcom-geni.c
+++ b/drivers/i3c/master/i3c-master-qcom-geni.c
@@ -836,18 +836,14 @@ static int i3c_geni_execute_write_command
 static void geni_i3c_perform_daa(struct geni_i3c_dev *gi3c)
 {
 	struct i3c_master_controller *m = &gi3c->ctrlr;
-	struct i3c_bus *bus = i3c_master_get_bus(m);
-	u8 last_dyn_addr = 0;
 	int ret;
 
 	while (1) {
 		u8 rx_buf[8], tx_buf[8];
 		struct i3c_xfer_params xfer = { FIFO_MODE };
-		struct i3c_device_info info = { 0 };
-		struct i3c_dev_desc *i3cdev;
-		bool new_device = true;
+		struct i3c_dev_boardinfo *i3cboardinfo = NULL, *tmp;
 		u64 pid;
-		u8 bcr, dcr, addr;
+		u8 bcr, dcr, init_dyn_addr = 0, addr = 0;
 
 		GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
 			"i3c entdaa read\n");
@@ -869,31 +865,38 @@ static void geni_i3c_perform_daa(struct geni_i3c_dev *gi3c)
 			((u64)rx_buf[4] <<  8) |
 			((u64)rx_buf[5]);
 
-		i3c_bus_for_each_i3cdev(bus, i3cdev) {
-
-			if (!i3cdev->dev)
-				continue;
-
-			i3c_device_get_info(i3cdev->dev, &info);
-			if (pid == info.pid &&
-				dcr == info.dcr &&
-				bcr == info.bcr) {
-				new_device = false;
-				addr = (info.dyn_addr) ? info.dyn_addr :
-					info.static_addr;
+		list_for_each_entry(tmp, &m->boardinfo.i3c, node) {
+			if (pid == tmp->pid) {
+				GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
+					"PID 0x%llx matched with boardinfo\n",
+					pid);
+				i3cboardinfo = tmp;
+				break;
 			}
 		}
 
-		if (new_device) {
-			ret = i3c_master_get_free_addr(m,
-						last_dyn_addr + 1);
-			if (ret < 0)
-				goto daa_err;
-			addr = last_dyn_addr = (u8)ret;
-			set_new_addr_slot(gi3c->newaddrslots, addr);
+		if (i3cboardinfo)
+			addr = init_dyn_addr = i3cboardinfo->init_dyn_addr;
+
+		addr = ret = i3c_master_get_free_addr(m, addr);
+
+		if (ret < 0) {
+			GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
+				"error during get_free_addr ret:%d for pid:0x%llx\n",
+				ret, pid);
+			goto daa_err;
+		} else if (ret == init_dyn_addr) {
+			GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
+				"assigning requested addr:0x%x for pid:0x%llx\n",
+				ret, pid);
+		} else if (init_dyn_addr) {
+			GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
+				"Can't assign req addr:0x%x for pid:0x%llx assigning avl addr:0x%x\n",
+				init_dyn_addr, pid, addr);
+		} else {
+			GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
+				"assigning addr:0x%x for pid:0x%llx\n",
+				ret, pid);
 		}
 
+		set_new_addr_slot(gi3c->newaddrslots, addr);
 		tx_buf[0] = (addr & I3C_ADDR_MASK) << 1;
 		tx_buf[0] |= ~(hweight8(addr & I3C_ADDR_MASK) & 1);
 
@@ -1138,6 +1141,7 @@ static int geni_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
 	struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
 	struct geni_i3c_i2c_dev_data *data;
+	struct i3c_dev_boardinfo *i3cboardinfo;
 
 	data = devm_kzalloc(gi3c->se.dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
@@ -1145,6 +1149,12 @@ static int geni_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
 
 	data->ibi = -1;
 	i3c_dev_set_master_data(dev, data);
+	if (!dev->boardinfo) {
+		list_for_each_entry(i3cboardinfo, &m->boardinfo.i3c, node) {
+			if (dev->info.pid == i3cboardinfo->pid)
+				dev->boardinfo = i3cboardinfo;
+		}
+	}
 
 	return 0;
 }
@@ -1155,6 +1165,16 @@ static int geni_i3c_master_reattach_i3c_dev
 	u8 old_dyn_addr
 )
 {
+	struct i3c_master_controller *m = i3c_dev_get_master(dev);
+	struct i3c_dev_boardinfo *i3cboardinfo;
+
+	if (!dev->boardinfo) {
+		list_for_each_entry(i3cboardinfo, &m->boardinfo.i3c, node) {
+			if (dev->info.pid == i3cboardinfo->pid)
+				dev->boardinfo = i3cboardinfo;
+		}
+	}
+
 	return 0;
 }
 
diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
index 283b968..2accf6f 100644
--- a/drivers/iio/adc/qcom-spmi-adc5.c
+++ b/drivers/iio/adc/qcom-spmi-adc5.c
@@ -78,10 +78,10 @@
  * clock rate, fast average samples with no measurement in queue.
  * Set the timeout to a max of 100ms.
  */
-#define ADC_CONV_TIME_MIN_US			263
-#define ADC_CONV_TIME_MAX_US			264
-#define ADC_CONV_TIME_RETRY_POLL		570
-#define ADC_CONV_TIME_RETRY				190
+#define ADC_POLL_DELAY_MIN_US			10000
+#define ADC_POLL_DELAY_MAX_US			10001
+#define ADC_CONV_TIME_RETRY_POLL		40
+#define ADC_CONV_TIME_RETRY			30
 #define ADC_CONV_TIMEOUT			msecs_to_jiffies(100)
 
 /* CAL peripheral */
@@ -288,7 +288,7 @@ static int adc_poll_wait_eoc(struct adc_chip *adc, bool poll_only)
 		status1 &= ADC_USR_STATUS1_REQ_STS_EOC_MASK;
 		if (status1 == ADC_USR_STATUS1_EOC)
 			return 0;
-		usleep_range(ADC_CONV_TIME_MIN_US, ADC_CONV_TIME_MAX_US);
+		usleep_range(ADC_POLL_DELAY_MIN_US, ADC_POLL_DELAY_MAX_US);
 	}
 
 	return -ETIMEDOUT;
diff --git a/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_core.c b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_core.c
index 9c965a2..858fa15 100644
--- a/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_core.c
+++ b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_core.c
@@ -3354,10 +3354,30 @@ static int syna_tcm_probe(struct platform_device *pdev)
 		goto err_get_regulator;
 	}
 
-	retval = synaptics_tcm_pinctrl_init(tcm_hcd);
+	retval = syna_tcm_enable_regulator(tcm_hcd, true);
 	if (retval < 0) {
-		LOGE(tcm_hcd->pdev->dev.parent, "Failed to init pinctrl\n");
-		goto err_pinctrl_init;
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to enable regulators\n");
+		goto err_enable_regulator;
+	}
+
+	retval = syna_tcm_config_gpio(tcm_hcd);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to configure GPIO's\n");
+		goto err_config_gpio;
+	}
+
+	retval = synaptics_tcm_pinctrl_init(tcm_hcd);
+	if (!retval && tcm_hcd->ts_pinctrl) {
+		retval = pinctrl_select_state(
+				tcm_hcd->ts_pinctrl,
+				tcm_hcd->pinctrl_state_active);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"%s: Failed to select %s pinstate %d\n",
+					__func__, PINCTRL_STATE_ACTIVE, retval);
+		}
 	}
 
 	sysfs_dir = kobject_create_and_add(PLATFORM_DRIVER_NAME,
@@ -3481,7 +3501,21 @@ static int syna_tcm_probe(struct platform_device *pdev)
 	kobject_put(tcm_hcd->sysfs_dir);
 
 err_sysfs_create_dir:
-err_pinctrl_init:
+	if (bdata->irq_gpio >= 0)
+		syna_tcm_set_gpio(tcm_hcd, bdata->irq_gpio, false, 0, 0);
+
+	if (bdata->power_gpio >= 0)
+		syna_tcm_set_gpio(tcm_hcd, bdata->power_gpio, false, 0, 0);
+
+	if (bdata->reset_gpio >= 0)
+		syna_tcm_set_gpio(tcm_hcd, bdata->reset_gpio, false, 0, 0);
+
+err_config_gpio:
+	syna_tcm_enable_regulator(tcm_hcd, false);
+
+err_enable_regulator:
+	syna_tcm_get_regulator(tcm_hcd, false);
+
 err_get_regulator:
 	device_init_wakeup(&pdev->dev, 0);
 
@@ -3502,33 +3536,8 @@ static int syna_tcm_probe(struct platform_device *pdev)
 static int syna_tcm_deferred_probe(struct device *dev)
 {
 	int retval;
-	const struct syna_tcm_board_data *bdata;
 	struct syna_tcm_hcd *tcm_hcd = dev_get_drvdata(dev);
 
-	retval = pinctrl_select_state(
-			tcm_hcd->ts_pinctrl,
-			tcm_hcd->pinctrl_state_active);
-
-	if (retval < 0) {
-		LOGE(tcm_hcd->pdev->dev.parent,
-				"Failed to pinctrl_select_state\n");
-		goto err_pinctrl_select_state;
-	}
-
-	retval = syna_tcm_enable_regulator(tcm_hcd, true);
-	if (retval < 0) {
-		LOGE(tcm_hcd->pdev->dev.parent,
-				"Failed to enable regulators\n");
-		goto err_enable_regulator;
-	}
-
-	retval = syna_tcm_config_gpio(tcm_hcd);
-	if (retval < 0) {
-		LOGE(tcm_hcd->pdev->dev.parent,
-				"Failed to configure GPIO's\n");
-		goto err_config_gpio;
-	}
-
 	retval = tcm_hcd->enable_irq(tcm_hcd, true, NULL);
 	if (retval < 0) {
 		LOGE(tcm_hcd->pdev->dev.parent,
@@ -3559,27 +3568,6 @@ static int syna_tcm_deferred_probe(struct device *dev)
 #endif
 err_enable_irq:
 
-err_config_gpio:
-	syna_tcm_enable_regulator(tcm_hcd, false);
-
-err_enable_regulator:
-	syna_tcm_get_regulator(tcm_hcd, false);
-
-err_pinctrl_select_state:
-	if (!tcm_hcd->hw_if || !tcm_hcd->hw_if->bdata)
-		return -EINVAL;
-
-	bdata = tcm_hcd->hw_if->bdata;
-
-	if (bdata->irq_gpio >= 0)
-		syna_tcm_set_gpio(tcm_hcd, bdata->irq_gpio, false, 0, 0);
-
-	if (bdata->power_gpio >= 0)
-		syna_tcm_set_gpio(tcm_hcd, bdata->power_gpio, false, 0, 0);
-
-	if (bdata->reset_gpio >= 0)
-		syna_tcm_set_gpio(tcm_hcd, bdata->reset_gpio, false, 0, 0);
-
 	return retval;
 }
 
diff --git a/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_i2c.c b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_i2c.c
index 88af3f8..809d9a8f2 100644
--- a/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_i2c.c
+++ b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_i2c.c
@@ -86,8 +86,8 @@ static int parse_dt(struct device *dev, struct syna_tcm_board_data *bdata)
 	retval = of_property_read_u32(np, "synaptics,power-on-state",
 			&bdata->power_on_state);
 	if (retval < 0) {
-		LOGE(dev, "Failed to read synaptics,power-on-state\n");
-		return retval;
+		LOGD(dev, "Failed to read synaptics,power-on-state\n");
+		bdata->power_on_state = 0;
 	}
 
 	retval = of_property_read_u32(np, "synaptics,power-delay-ms",
@@ -101,7 +101,7 @@ static int parse_dt(struct device *dev, struct syna_tcm_board_data *bdata)
 			"synaptics,reset-gpio", 0, NULL);
 	if (!gpio_is_valid(retval)) {
 		if (retval != -EPROBE_DEFER)
-			dev_err(dev, "Error getting irq_gpio\n");
+			dev_err(dev, "Error getting reset gpio\n");
 		return retval;
 	}
 	bdata->reset_gpio = retval;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index ef2e6a2..fbdbdae 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -86,7 +86,7 @@
 #define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)
 
 #define TLB_LOOP_TIMEOUT		500000	/* 500ms */
-#define TLB_SPIN_COUNT			10
+#define TLB_LOOP_INC_MAX		1000	/* 1ms */
 
 #define ARM_SMMU_IMPL_DEF0(smmu) \
 	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
@@ -1280,16 +1280,17 @@ static void __arm_smmu_tlb_sync_timeout(struct arm_smmu_device *smmu)
 static int __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
 			void __iomem *sync, void __iomem *status)
 {
-	unsigned int spin_cnt, delay;
+	unsigned int inc, delay;
 
 	writel_relaxed(QCOM_DUMMY_VAL, sync);
-	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
-		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
-			if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
-				return 0;
-			cpu_relax();
-		}
-		udelay(delay);
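+	/* Poll with exponential backoff, capping each step at TLB_LOOP_INC_MAX us. */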
+	for (delay = 1, inc = 1; delay < TLB_LOOP_TIMEOUT; delay += inc) {
+		if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
+			return 0;
+
+		cpu_relax();
+		udelay(inc);
+		if (inc < TLB_LOOP_INC_MAX)
+			inc *= 2;
 	}
 	trace_tlbsync_timeout(smmu->dev, 0);
 	__arm_smmu_tlb_sync_timeout(smmu);
@@ -3255,8 +3256,13 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 				  prot, &size);
 		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 
-
 		if (ret == -ENOMEM) {
+			/* unmap any partially mapped iova */
+			if (size) {
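+				/*
+				 * arm_smmu_unmap() manages the secure-domain
+				 * lock itself, so drop it around the call
+				 * (mirrors the out: path below).
+				 */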
+				arm_smmu_secure_domain_unlock(smmu_domain);
+				arm_smmu_unmap(domain, iova, size);
+				arm_smmu_secure_domain_lock(smmu_domain);
+			}
 			arm_smmu_prealloc_memory(smmu_domain,
 						 batch_size, &nonsecure_pool);
 			spin_lock_irqsave(&smmu_domain->cb_lock, flags);
@@ -3271,8 +3277,8 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 							 &nonsecure_pool);
 		}
 
-		/* Returns 0 on error */
-		if (!ret) {
+		/* Returns a negative value on error */
+		if (ret < 0) {
 			size_to_unmap = iova + size - __saved_iova_start;
 			goto out;
 		}
@@ -3280,16 +3286,17 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 		iova += batch_size;
 		idx_start = idx_end;
 		sg_start = sg_end;
+		size = 0;
 	}
 
 out:
 	arm_smmu_assign_table(smmu_domain);
+	arm_smmu_secure_domain_unlock(smmu_domain);
 
 	if (size_to_unmap) {
 		arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
 		iova = __saved_iova_start;
 	}
-	arm_smmu_secure_domain_unlock(smmu_domain);
 	return iova - __saved_iova_start;
 }
 
@@ -3881,6 +3888,12 @@ static int __arm_smmu_domain_set_attr(struct iommu_domain *domain,
 		break;
 	}
 	case DOMAIN_ATTR_SECURE_VMID:
+		/* can't be changed while attached */
+		if (smmu_domain->smmu != NULL) {
+			ret = -EBUSY;
+			break;
+		}
+
 		if (smmu_domain->secure_vmid != VMID_INVAL) {
 			ret = -ENODEV;
 			WARN(1, "secure vmid already set!");
@@ -3894,6 +3907,12 @@ static int __arm_smmu_domain_set_attr(struct iommu_domain *domain,
 		 * force DOMAIN_ATTR_ATOMIC to be set.
 		 */
 	case DOMAIN_ATTR_FAST:
+		/* can't be changed while attached */
+		if (smmu_domain->smmu != NULL) {
+			ret = -EBUSY;
+			break;
+		}
+
 		if (*((int *)data)) {
 			smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
 			smmu_domain->attributes |= 1 << DOMAIN_ATTR_ATOMIC;
@@ -5971,6 +5990,9 @@ static ssize_t arm_smmu_debug_tcu_testbus_sel_write(struct file *file,
 	if (kstrtou64(buf, 0, &sel))
 		goto invalid_format;
 
+	if (sel != 1 && sel != 2)
+		goto invalid_format;
+
 	if (kstrtou64(comma + 1, 0, &val))
 		goto invalid_format;
 
@@ -5982,8 +6004,6 @@ static ssize_t arm_smmu_debug_tcu_testbus_sel_write(struct file *file,
 	else if (sel == 2)
 		arm_smmu_debug_tcu_testbus_select(base,
 				tcu_base, PTW_AND_CACHE_TESTBUS, WRITE, val);
-	else
-		goto invalid_format;
 
 	arm_smmu_power_off(smmu->pwr);
 
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 7b65083..f9d3df9 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -316,6 +316,48 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 }
 EXPORT_SYMBOL(iommu_dma_init_domain);
 
+/*
+ * Reserve an IOVA range so the DMA API never allocates from it.
+ * Should be called prior to using dma-apis.
+ */
+int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
+			   u64 size)
+{
+	struct iommu_domain *domain;
+	struct iova_domain *iovad;
+	unsigned long pfn_lo, pfn_hi;
+
+	domain = iommu_get_domain_for_dev(dev);
+	if (!domain || !domain->iova_cookie)
+		return -EINVAL;
+
+	iovad = &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
+
+	/* iova will be freed automatically by put_iova_domain() */
+	pfn_lo = iova_pfn(iovad, base);
+	pfn_hi = iova_pfn(iovad, base + size - 1);
+	if (!reserve_iova(iovad, pfn_lo, pfn_hi))
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Switch the device's IOVA allocator to the best-fit policy.
+ * Should be called prior to using dma-apis.
+ */
+int iommu_dma_enable_best_fit_algo(struct device *dev)
+{
+	struct iommu_domain *domain;
+	struct iova_domain *iovad;
+
+	domain = iommu_get_domain_for_dev(dev);
+	if (!domain || !domain->iova_cookie)
+		return -EINVAL;
+
+	iovad = &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
+	iovad->best_fit = true;
+	return 0;
+}
+
 /**
  * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
  *                    page flags.
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 2fcc75f..53ff42a 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -75,6 +75,8 @@ static phys_addr_t __atomic_get_phys(void *addr)
 
 static bool __in_atomic_pool(void *start, size_t size)
 {
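+	/* The pool is only created by atomic_pool_init(), which is ARM64-only. */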
+	if (!atomic_pool)
+		return false;
 	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
 }
 
@@ -301,7 +303,7 @@ static bool __bit_is_sooner(unsigned long candidate,
 	return true;
 }
 
-
+#ifdef CONFIG_ARM64
 static int __init atomic_pool_init(void)
 {
 	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
@@ -363,6 +365,7 @@ static int __init atomic_pool_init(void)
 	return -ENOMEM;
 }
 arch_initcall(atomic_pool_init);
+#endif
 
 static void __fast_smmu_free_iova(struct dma_fast_smmu_mapping *mapping,
 				  dma_addr_t iova, size_t size)
@@ -769,7 +772,7 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
 	*handle = DMA_ERROR_CODE;
 	size = ALIGN(size, SZ_4K);
 
-	if (!gfpflags_allow_blocking(gfp))
+	if (atomic_pool && !gfpflags_allow_blocking(gfp))
 		return fast_smmu_alloc_atomic(mapping, size, gfp, attrs, handle,
 					      is_coherent);
 	else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 84b2706..aeccbf3b 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -634,7 +634,8 @@ static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
 	arm_lpae_iopte prot;
 	struct scatterlist *s;
 	size_t mapped = 0;
-	int i, ret;
+	int i;
+	int ret = -EINVAL;
 	unsigned int min_pagesz;
 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 	struct map_state ms;
@@ -705,7 +706,7 @@ static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
 out_err:
 	/* Return the size of the partial mapping so that they can be undone */
 	*size = mapped;
-	return 0;
+	return ret;
 }
 
 static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index 6c62822..7f8c864 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -154,8 +154,8 @@ struct io_pgtable_cfg {
  *
  * @map:		Map a physically contiguous memory region.
  * @map_sg:		Map a scatterlist.  Returns the number of bytes mapped,
- *			or 0 on failure.  The size parameter contains the size
- *			of the partial mapping in case of failure.
+ *			or a negative value on failure.  The size parameter
+ *			contains the size of the partial mapping in case of
+ *			failure.
  * @unmap:		Unmap a physically contiguous memory region.
  * @iova_to_phys:	Translate iova to physical address.
  * @is_iova_coherent:	Checks coherency of given IOVA. Returns True if coherent
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 74f0dbf..a27debb 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -61,6 +61,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
 	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
 	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
+	iovad->best_fit = false;
 	init_iova_rcaches(iovad);
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);
@@ -248,6 +249,69 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	return 0;
 }
 
+static int __alloc_and_insert_iova_best_fit(struct iova_domain *iovad,
+		unsigned long size, unsigned long limit_pfn,
+			struct iova *new, bool size_aligned)
+{
+	struct rb_node *curr, *prev;
+	struct iova *curr_iova, *prev_iova;
+	unsigned long flags;
+	unsigned long align_mask = ~0UL;
+	struct rb_node *candidate_rb_parent;
+	unsigned long new_pfn, candidate_pfn = ~0UL;
+	unsigned long gap, candidate_gap = ~0UL;
+
+	if (size_aligned)
+		align_mask <<= limit_align(iovad, fls_long(size - 1));
+
+	/* Walk the tree backwards */
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	curr = &iovad->anchor.node;
+	prev = rb_prev(curr);
+	for (; prev; curr = prev, prev = rb_prev(curr)) {
+		curr_iova = rb_entry(curr, struct iova, node);
+		prev_iova = rb_entry(prev, struct iova, node);
+
+		limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
+		new_pfn = (limit_pfn - size) & align_mask;
+		gap = curr_iova->pfn_lo - prev_iova->pfn_hi - 1;
+		if ((limit_pfn >= size) && (new_pfn > prev_iova->pfn_hi)
+				&& (gap < candidate_gap)) {
+			candidate_gap = gap;
+			candidate_pfn = new_pfn;
+			candidate_rb_parent = curr;
+			if (gap == size)
+				goto insert;
+		}
+	}
+
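+	/* Finally consider the gap between start_pfn and the lowest entry. */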
+	curr_iova = rb_entry(curr, struct iova, node);
+	limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
+	new_pfn = (limit_pfn - size) & align_mask;
+	gap = curr_iova->pfn_lo - iovad->start_pfn;
+	if (limit_pfn >= size && new_pfn >= iovad->start_pfn &&
+			gap < candidate_gap) {
+		candidate_gap = gap;
+		candidate_pfn = new_pfn;
+		candidate_rb_parent = curr;
+	}
+
+insert:
+	if (candidate_pfn == ~0UL) {
+		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+		return -ENOMEM;
+	}
+
+	/* pfn_lo will point to size aligned address if size_aligned is set */
+	new->pfn_lo = candidate_pfn;
+	new->pfn_hi = new->pfn_lo + size - 1;
+
+	/* candidate_rb_parent marks a valid place to start the insertion. */
+	iova_insert_rbtree(&iovad->rbroot, new, candidate_rb_parent);
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	return 0;
+}
+
 static struct kmem_cache *iova_cache;
 static unsigned int iova_cache_users;
 static DEFINE_MUTEX(iova_cache_mutex);
@@ -323,8 +387,13 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	if (!new_iova)
 		return NULL;
 
-	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
-			new_iova, size_aligned);
+	if (iovad->best_fit) {
+		ret = __alloc_and_insert_iova_best_fit(iovad, size,
+				limit_pfn + 1, new_iova, size_aligned);
+	} else {
+		ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
+				new_iova, size_aligned);
+	}
 
 	if (ret) {
 		free_iova_mem(new_iova);
diff --git a/drivers/irqchip/qcom-mpm-bengal.c b/drivers/irqchip/qcom-mpm-bengal.c
index 9250a7a..1ea828f 100644
--- a/drivers/irqchip/qcom-mpm-bengal.c
+++ b/drivers/irqchip/qcom-mpm-bengal.c
@@ -11,5 +11,7 @@ const struct mpm_pin mpm_bengal_gic_chip_data[] = {
 	{86, 215}, /* mpm_wake,spmi_m */
 	{90, 292}, /* eud_p0_dpse_int_mx */
 	{91, 292}, /* eud_p0_dmse_int_mx */
+	{5, 328}, /* lpass_irq_out_sdc */
+	{24, 111}, /* bi_px_lpi_1_aoss_mx */
 	{-1},
 };
diff --git a/drivers/media/platform/msm/cvp/msm_cvp.c b/drivers/media/platform/msm/cvp/msm_cvp.c
index d90ca95..c984d88 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp.c
@@ -391,7 +391,7 @@ static int msm_cvp_map_buf_user_persist(struct msm_cvp_inst *inst,
 		return -EINVAL;
 	}
 
-	if (in_buf->fd > 0) {
+	if (in_buf->fd >= 0) {
 		dma_buf = msm_cvp_smem_get_dma_buf(in_buf->fd);
 		if (!dma_buf) {
 			dprintk(CVP_ERR, "%s: Invalid fd=%d", __func__,
@@ -462,7 +462,7 @@ static int msm_cvp_map_buf_cpu(struct msm_cvp_inst *inst,
 		return -EINVAL;
 	}
 
-	if (in_buf->fd > 0) {
+	if (in_buf->fd >= 0) {
 		dma_buf = msm_cvp_smem_get_dma_buf(in_buf->fd);
 		if (!dma_buf) {
 			dprintk(CVP_ERR, "%s: Invalid fd=%d", __func__,
@@ -738,7 +738,7 @@ static int msm_cvp_map_user_persist(struct msm_cvp_inst *inst,
 			return -EINVAL;
 		}
 
-		if (new_buf->fd <= 0 && !new_buf->dbuf)
+		if ((new_buf->fd < 0 || new_buf->size == 0) && !new_buf->dbuf)
 			continue;
 
 		rc = msm_cvp_map_buf_user_persist(inst, new_buf, &iova);
@@ -817,7 +817,8 @@ static int msm_cvp_map_buf(struct msm_cvp_inst *inst,
 				return -EINVAL;
 			}
 
-			if (new_buf->fd <= 0 && !new_buf->dbuf)
+			if ((new_buf->fd < 0 || new_buf->size == 0) &&
+				!new_buf->dbuf)
 				continue;
 
 			rc = msm_cvp_map_buf_cpu(inst, new_buf, &iova, frame);
diff --git a/drivers/media/platform/msm/cvp/msm_smem.c b/drivers/media/platform/msm/cvp/msm_smem.c
index d3ab2dd..a0a8496 100644
--- a/drivers/media/platform/msm/cvp/msm_smem.c
+++ b/drivers/media/platform/msm/cvp/msm_smem.c
@@ -205,7 +205,7 @@ int msm_cvp_smem_map_dma_buf(struct msm_cvp_inst *inst,
 		return rc;
 	}
 
-	if (smem->fd > 0) {
+	if (smem->fd >= 0) {
 		dbuf = msm_cvp_smem_get_dma_buf(smem->fd);
 		if (!dbuf) {
 			rc = -EINVAL;
diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c
index bf90c3b..4b8866f 100644
--- a/drivers/media/platform/msm/npu/npu_dev.c
+++ b/drivers/media/platform/msm/npu/npu_dev.c
@@ -7,6 +7,7 @@
  * Includes
  * -------------------------------------------------------------------------
  */
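+/* msm-bus-ids.h supplies MSM_BUS_SLAVE_CLK_CTL used in npu_set_bw() */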
+#include <dt-bindings/msm/msm-bus-ids.h>
 #include <linux/clk.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
@@ -1583,8 +1584,9 @@ static int npu_parse_dt_clock(struct npu_device *npu_dev)
 			sizeof(core_clks[i].clk_name));
 		core_clks[i].clk = devm_clk_get(&pdev->dev, clock_name);
 		if (IS_ERR(core_clks[i].clk)) {
-			NPU_ERR("unable to get clk: %s\n", clock_name);
-			rc = -EINVAL;
+			if (PTR_ERR(core_clks[i].clk) != -EPROBE_DEFER)
+				NPU_ERR("unable to get clk: %s\n", clock_name);
+			rc = PTR_ERR(core_clks[i].clk);
 			break;
 		}
 
@@ -1645,15 +1647,15 @@ static int npu_parse_dt_regulator(struct npu_device *npu_dev)
 
 static int npu_parse_dt_bw(struct npu_device *npu_dev)
 {
-	int ret, len;
-	uint32_t ports[2];
+	int ret, len, num_paths, i;
+	uint32_t ports[MAX_PATHS * 2];
 	struct platform_device *pdev = npu_dev->pdev;
 	struct npu_bwctrl *bwctrl = &npu_dev->bwctrl;
 
 	if (of_find_property(pdev->dev.of_node, "qcom,src-dst-ports", &len)) {
 		len /= sizeof(ports[0]);
-		if (len != 2) {
-			NPU_ERR("Unexpected number of ports\n");
+		if (len % 2 || len > ARRAY_SIZE(ports)) {
+			NPU_ERR("Unexpected number of ports %d\n", len);
 			return -EINVAL;
 		}
 
@@ -1663,6 +1665,7 @@ static int npu_parse_dt_bw(struct npu_device *npu_dev)
 			NPU_ERR("Failed to read bw property\n");
 			return ret;
 		}
+		num_paths = len / 2;
 	} else {
 		NPU_ERR("can't find bw property\n");
 		return -EINVAL;
@@ -1675,13 +1678,15 @@ static int npu_parse_dt_bw(struct npu_device *npu_dev)
 	bwctrl->bw_data.name = dev_name(&pdev->dev);
 	bwctrl->bw_data.active_only = false;
 
-	bwctrl->bw_levels[0].vectors[0].src = ports[0];
-	bwctrl->bw_levels[0].vectors[0].dst = ports[1];
-	bwctrl->bw_levels[1].vectors[0].src = ports[0];
-	bwctrl->bw_levels[1].vectors[0].dst = ports[1];
-	bwctrl->bw_levels[0].num_paths = 1;
-	bwctrl->bw_levels[1].num_paths = 1;
-	bwctrl->num_paths = 1;
+	for (i = 0; i < num_paths; i++) {
+		bwctrl->bw_levels[0].vectors[i].src = ports[2 * i];
+		bwctrl->bw_levels[0].vectors[i].dst = ports[2 * i + 1];
+		bwctrl->bw_levels[1].vectors[i].src = ports[2 * i];
+		bwctrl->bw_levels[1].vectors[i].dst = ports[2 * i + 1];
+	}
+	bwctrl->bw_levels[0].num_paths = num_paths;
+	bwctrl->bw_levels[1].num_paths = num_paths;
+	bwctrl->num_paths = num_paths;
 
 	bwctrl->bus_client = msm_bus_scale_register_client(&bwctrl->bw_data);
 	if (!bwctrl->bus_client) {
@@ -1696,7 +1701,7 @@ static int npu_parse_dt_bw(struct npu_device *npu_dev)
 
 int npu_set_bw(struct npu_device *npu_dev, int new_ib, int new_ab)
 {
-	int i, ret;
+	int i, j, ret;
 	struct npu_bwctrl *bwctrl = &npu_dev->bwctrl;
 
 	if (!bwctrl->bus_client) {
@@ -1709,10 +1714,17 @@ int npu_set_bw(struct npu_device *npu_dev, int new_ib, int new_ab)
 
 	i = (bwctrl->cur_idx + 1) % DBL_BUF;
 
-	bwctrl->bw_levels[i].vectors[0].ib = new_ib * MBYTE;
-	bwctrl->bw_levels[i].vectors[0].ab = new_ab / bwctrl->num_paths * MBYTE;
-	bwctrl->bw_levels[i].vectors[1].ib = new_ib * MBYTE;
-	bwctrl->bw_levels[i].vectors[1].ab = new_ab / bwctrl->num_paths * MBYTE;
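+	/*
+	 * The clock-control slave only needs a token vote; real bandwidth is
+	 * split evenly across the remaining paths.
+	 */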
+	for (j = 0; j < bwctrl->num_paths; j++) {
+		if ((bwctrl->bw_levels[i].vectors[j].dst ==
+			MSM_BUS_SLAVE_CLK_CTL) && (new_ib > 0)) {
+			bwctrl->bw_levels[i].vectors[j].ib = 1;
+			bwctrl->bw_levels[i].vectors[j].ab = 1;
+		} else {
+			bwctrl->bw_levels[i].vectors[j].ib = new_ib * MBYTE;
+			bwctrl->bw_levels[i].vectors[j].ab =
+				new_ab * MBYTE / bwctrl->num_paths;
+		}
+	}
 
 	ret = msm_bus_scale_client_update_request(bwctrl->bus_client, i);
 	if (ret) {
@@ -1801,13 +1813,12 @@ static int npu_of_parse_pwrlevels(struct npu_device *npu_dev,
 		NPU_DBG("fmax %x\n", fmax);
 
 		switch (fmax) {
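+		/* Fuse encodings per the updated QFPROM map in npu_hw_access.h */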
-		case 1:
-		case 2:
-			fmax_pwrlvl = NPU_PWRLEVEL_NOM;
-			break;
-		case 3:
+		case 0x34:
 			fmax_pwrlvl = NPU_PWRLEVEL_SVS_L1;
 			break;
+		case 0x48:
+			fmax_pwrlvl = NPU_PWRLEVEL_NOM;
+			break;
 		default:
 			fmax_pwrlvl = pwr->max_pwrlevel;
 			break;
diff --git a/drivers/media/platform/msm/npu/npu_hw_access.c b/drivers/media/platform/msm/npu/npu_hw_access.c
index cef5a96..15883db 100644
--- a/drivers/media/platform/msm/npu/npu_hw_access.c
+++ b/drivers/media/platform/msm/npu/npu_hw_access.c
@@ -144,6 +144,8 @@ void npu_mem_write(struct npu_device *npu_dev, void *dst, void *src,
 			dst_off += 1;
 		}
 	}
+
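+	/* Ensure the IPC payload reaches the device before any doorbell ring. */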
+	__iowmb();
 }
 
 int32_t npu_mem_read(struct npu_device *npu_dev, void *src, void *dst,
diff --git a/drivers/media/platform/msm/npu/npu_hw_access.h b/drivers/media/platform/msm/npu/npu_hw_access.h
index 4e988b8..1d5f520 100644
--- a/drivers/media/platform/msm/npu/npu_hw_access.h
+++ b/drivers/media/platform/msm/npu/npu_hw_access.h
@@ -19,9 +19,9 @@
 #define IPC_MEM_OFFSET_FROM_SSTCM 0x00018000
 #define SYS_CACHE_SCID 23
 
-#define QFPROM_FMAX_REG_OFFSET 0x000001C8
-#define QFPROM_FMAX_BITS_MASK  0x0000000C
-#define QFPROM_FMAX_BITS_SHIFT 2
+#define QFPROM_FMAX_REG_OFFSET 0x00006010
+#define QFPROM_FMAX_BITS_MASK  0x0003FC00
+#define QFPROM_FMAX_BITS_SHIFT 10
 
 #define REGW(npu_dev, off, val) npu_core_reg_write(npu_dev, off, val)
 #define REGR(npu_dev, off) npu_core_reg_read(npu_dev, off)
diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c
index 9941ce5..1b666e1 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.c
+++ b/drivers/media/platform/msm/npu/npu_mgr.c
@@ -66,7 +66,7 @@ static int npu_notify_aop(struct npu_device *npu_dev, bool on);
 static int npu_notify_fw_pwr_state(struct npu_device *npu_dev,
 	uint32_t pwr_level, bool post);
 static int load_fw_nolock(struct npu_device *npu_dev, bool enable);
-static void disable_fw_nolock(struct npu_device *npu_dev);
+static int disable_fw_nolock(struct npu_device *npu_dev);
 static int update_dcvs_activity(struct npu_device *npu_dev, uint32_t activity);
 static void npu_queue_network_cmd(struct npu_network *network,
 	struct npu_network_cmd *cmd);
@@ -97,6 +97,7 @@ static int wait_npu_cpc_power_off(struct npu_device *npu_dev)
 {
 	uint32_t reg_val = NPU_CPC_PWR_ON;
 	uint32_t wait_cnt = 0, max_wait_ms;
+	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 
 	max_wait_ms = NPU_FW_TIMEOUT_MS;
 
@@ -107,10 +108,16 @@ static int wait_npu_cpc_power_off(struct npu_device *npu_dev)
 			break;
 		}
 
+		if ((host_ctx->wdg_irq_sts != 0) ||
+			(host_ctx->err_irq_sts != 0)) {
+			NPU_WARN("fw is in bad state, skip wait\n");
+			return -EIO;
+		}
+
 		wait_cnt += NPU_FW_TIMEOUT_POLL_INTERVAL_MS;
 		if (wait_cnt > max_wait_ms) {
 			NPU_ERR("timeout wait for cpc power off\n");
-			return -EPERM;
+			return -ETIMEDOUT;
 		}
 		msleep(NPU_FW_TIMEOUT_POLL_INTERVAL_MS);
 	} while (1);
@@ -139,7 +146,7 @@ static int load_fw_nolock(struct npu_device *npu_dev, bool enable)
 	}
 
 	ret = wait_for_completion_timeout(
-		&host_ctx->npu_power_up_done, NW_CMD_TIMEOUT);
+		&host_ctx->npu_power_up_done, NW_PWR_UP_TIMEOUT);
 	if (!ret) {
 		NPU_ERR("Wait for npu power-up timed out\n");
 		ret = -ETIMEDOUT;
@@ -181,7 +188,7 @@ static int load_fw_nolock(struct npu_device *npu_dev, bool enable)
 	}
 
 	ret = wait_for_completion_timeout(
-		&host_ctx->fw_shutdown_done, NW_CMD_TIMEOUT);
+		&host_ctx->fw_shutdown_done, NW_RSC_TIMEOUT);
 	if (!ret) {
 		NPU_ERR("Wait for fw shutdown timed out\n");
 		ret = -ETIMEDOUT;
@@ -327,17 +334,20 @@ static int enable_fw_nolock(struct npu_device *npu_dev)
 	reinit_completion(&host_ctx->fw_bringup_done);
 	ret = npu_notify_fw_pwr_state(npu_dev, npu_dev->pwrctrl.active_pwrlevel,
 		true);
-	if (ret) {
+	if (ret == -ETIMEDOUT) {
+		NPU_ERR("notify fw power state timed out\n");
+		goto enable_pw_fail;
+	} else if (ret) {
 		NPU_ERR("notify fw power state failed\n");
 		goto notify_fw_pwr_fail;
 	}
 
 	ret = wait_for_completion_timeout(
-		&host_ctx->fw_bringup_done, NW_CMD_TIMEOUT);
+		&host_ctx->fw_bringup_done, NW_RSC_TIMEOUT);
 	if (!ret) {
 		NPU_ERR("Wait for fw bringup timed out\n");
 		ret = -ETIMEDOUT;
-		goto notify_fw_pwr_fail;
+		goto enable_pw_fail;
 	} else {
 		ret = 0;
 	}
@@ -375,17 +385,21 @@ int enable_fw(struct npu_device *npu_dev)
 	ret = enable_fw_nolock(npu_dev);
 	mutex_unlock(&host_ctx->lock);
 
+	if (ret == -ETIMEDOUT) {
+		NPU_ERR("Enable fw timed out, force SSR\n");
+		host_error_hdlr(npu_dev, true);
+	}
 	return ret;
 }
 
-static void disable_fw_nolock(struct npu_device *npu_dev)
+static int disable_fw_nolock(struct npu_device *npu_dev)
 {
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 	int ret = 0;
 
 	if (!host_ctx->fw_ref_cnt) {
 		NPU_WARN("fw_ref_cnt is 0\n");
-		return;
+		return ret;
 	}
 
 	host_ctx->fw_ref_cnt--;
@@ -393,27 +407,36 @@ static void disable_fw_nolock(struct npu_device *npu_dev)
 
 	if (host_ctx->fw_state != FW_ENABLED) {
 		NPU_ERR("fw is not enabled\n");
-		return;
+		return ret;
 	}
 
 	if (host_ctx->fw_ref_cnt > 0)
-		return;
+		return ret;
 
 	/* turn on auto ACK for warm shuts down */
 	npu_cc_reg_write(npu_dev, NPU_CC_NPU_CPC_RSC_CTRL, 3);
 	reinit_completion(&host_ctx->fw_shutdown_done);
-	if (npu_notify_fw_pwr_state(npu_dev, NPU_PWRLEVEL_OFF, false)) {
+	ret = npu_notify_fw_pwr_state(npu_dev, NPU_PWRLEVEL_OFF, false);
+	if (ret == -ETIMEDOUT) {
+		NPU_ERR("notify fw pwr off timed out\n");
+		goto fail;
+	} else if (ret) {
 		NPU_WARN("notify fw pwr off failed\n");
 		msleep(500);
 	}
 
 	if (!host_ctx->auto_pil_disable) {
 		ret = wait_for_completion_timeout(
-			&host_ctx->fw_shutdown_done, NW_CMD_TIMEOUT);
-		if (!ret)
+			&host_ctx->fw_shutdown_done, NW_RSC_TIMEOUT);
+		if (!ret) {
 			NPU_ERR("Wait for fw shutdown timed out\n");
-		else
+			ret = -ETIMEDOUT;
+			goto fail;
+		} else {
 			ret = wait_npu_cpc_power_off(npu_dev);
+			if (ret)
+				goto fail;
+		}
 	}
 
 	npu_disable_irq(npu_dev);
@@ -430,15 +453,24 @@ static void disable_fw_nolock(struct npu_device *npu_dev)
 		host_ctx->fw_state = FW_UNLOADED;
 		NPU_DBG("fw is unloaded\n");
 	}
+
+fail:
+	return ret;
 }
 
 void disable_fw(struct npu_device *npu_dev)
 {
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+	int ret = 0;
 
 	mutex_lock(&host_ctx->lock);
-	disable_fw_nolock(npu_dev);
+	ret = disable_fw_nolock(npu_dev);
 	mutex_unlock(&host_ctx->lock);
+
+	if (ret == -ETIMEDOUT) {
+		NPU_ERR("disable fw timed out, force SSR\n");
+		host_error_hdlr(npu_dev, true);
+	}
 }
 
 /* notify fw current power level */
@@ -844,10 +876,11 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
 
 	if (host_ctx->wdg_irq_sts) {
 		NPU_INFO("watchdog irq triggered\n");
-		npu_dump_debug_info(npu_dev);
 		fw_alive = false;
 	}
 
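+	/* Capture debug state on every error-handler entry, not just watchdog. */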
+	npu_dump_debug_info(npu_dev);
+
 	/*
 	 * if fw is still alive, notify fw before power off
 	 * otherwise if ssr happens or notify fw returns failure
@@ -874,13 +907,16 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
 	NPU_INFO("npu subsystem is restarted\n");
 
 	ret = wait_for_completion_timeout(
-		&host_ctx->npu_power_up_done, NW_CMD_TIMEOUT);
+		&host_ctx->npu_power_up_done, NW_PWR_UP_TIMEOUT);
 	if (!ret) {
 		NPU_ERR("Wait for npu power-up timed out\n");
 		ret = -ETIMEDOUT;
 		goto fw_start_done;
 	}
 
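+	/*
+	 * Clear stale watchdog/error status so the status polls below do not
+	 * bail out early.
+	 */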
+	host_ctx->wdg_irq_sts = 0;
+	host_ctx->err_irq_sts = 0;
+
 	/* Keep reading ctrl status until NPU is ready */
 	if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
 		FW_CTRL_STATUS_MAIN_THREAD_READY_VAL, false)) {
@@ -907,8 +943,6 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
 	}
 
 	complete(&host_ctx->fw_deinit_done);
-	host_ctx->wdg_irq_sts = 0;
-	host_ctx->err_irq_sts = 0;
 
 	/* flush all pending npu cmds */
 	for (i = 0; i < MAX_LOADED_NETWORK; i++) {
@@ -1116,7 +1150,13 @@ static int wait_for_status_ready(struct npu_device *npu_dev,
 		if (!wait_cnt) {
 			NPU_ERR("timeout wait for status %x[%x] in reg %x\n",
 				status_bits, ctrl_sts, status_reg);
-			return -EPERM;
+			return -ETIMEDOUT;
+		}
+
+		if ((host_ctx->wdg_irq_sts != 0) ||
+			(host_ctx->err_irq_sts != 0)) {
+			NPU_WARN("fw is in bad state, skip wait\n");
+			return -EIO;
 		}
 
 		if (poll)
@@ -2211,12 +2251,11 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 			struct msm_npu_load_network_ioctl_v2 *load_ioctl,
 			struct msm_npu_patch_info_v2 *patch_info)
 {
-	int ret = 0, i;
+	int ret = 0, retry_cnt = 1, i;
 	struct npu_device *npu_dev = client->npu_dev;
 	struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
 	struct npu_network *network;
 	struct ipc_cmd_load_pkt_v2 *load_packet = NULL;
-	struct ipc_cmd_unload_pkt unload_packet;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 	struct npu_network_cmd *load_cmd = NULL;
 	uint32_t num_patch_params, pkt_size;
@@ -2305,6 +2344,7 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 
 	mutex_unlock(&host_ctx->lock);
 
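+	/* On timeout, drain the IPC queue once in case the response was missed. */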
+retry:
 	ret = wait_for_completion_timeout(
 		&load_cmd->cmd_done,
 		(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
@@ -2321,9 +2361,16 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 	if (!ret) {
 		NPU_ERR("npu: NPU_IPC_CMD_LOAD time out %lld:%d\n",
 			network->id, load_cmd->trans_id);
-		npu_dump_debug_info(npu_dev);
+		if (retry_cnt > 0) {
+			NPU_WARN("Retry IPC queue\n");
+			retry_cnt--;
+			mutex_unlock(&host_ctx->lock);
+			host_session_msg_hdlr(npu_dev);
+			goto retry;
+		}
+
 		ret = -ETIMEDOUT;
-		goto error_load_network;
+		goto free_load_cmd;
 	}
 
 	ret = load_cmd->ret_status;
@@ -2342,18 +2389,6 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 
 	return ret;
 
-error_load_network:
-	NPU_DBG("Unload network %lld\n", network->id);
-	/* send NPU_IPC_CMD_UNLOAD command to fw */
-	unload_packet.header.cmd_type = NPU_IPC_CMD_UNLOAD;
-	unload_packet.header.size = sizeof(struct ipc_cmd_unload_pkt);
-	unload_packet.header.trans_id =
-		atomic_add_return(1, &host_ctx->ipc_trans_id);
-	unload_packet.header.flags = 0;
-	unload_packet.network_hdl = (uint32_t)network->network_hdl;
-	npu_send_network_cmd(npu_dev, network, &unload_packet, NULL);
-	/* wait 200 ms to make sure fw has processed this command */
-	msleep(200);
 free_load_cmd:
 	npu_dequeue_network_cmd(network, load_cmd);
 	npu_free_network_cmd(host_ctx, load_cmd);
@@ -2363,6 +2398,16 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 	free_network(host_ctx, client, network->id);
 err_deinit_fw:
 	mutex_unlock(&host_ctx->lock);
+
+	/*
+	 * Treat a load-network timeout as an error in order to
+	 * force SSR.
+	 */
+	if (ret == -ETIMEDOUT) {
+		NPU_ERR("Error handling after load network failure\n");
+		host_error_hdlr(npu_dev, true);
+	}
+
 	disable_fw(npu_dev);
 	return ret;
 }
@@ -2370,7 +2415,7 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 int32_t npu_host_unload_network(struct npu_client *client,
 			struct msm_npu_unload_network_ioctl *unload)
 {
-	int ret = 0;
+	int ret = 0, retry_cnt = 1;
 	struct npu_device *npu_dev = client->npu_dev;
 	struct ipc_cmd_unload_pkt unload_packet;
 	struct npu_network *network;
@@ -2443,6 +2488,7 @@ int32_t npu_host_unload_network(struct npu_client *client,
 
 	mutex_unlock(&host_ctx->lock);
 
+retry:
 	ret = wait_for_completion_timeout(
 		&unload_cmd->cmd_done,
 		(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
@@ -2459,7 +2505,14 @@ int32_t npu_host_unload_network(struct npu_client *client,
 	if (!ret) {
 		NPU_ERR("npu: NPU_IPC_CMD_UNLOAD time out %llx:%d\n",
 			network->id, unload_cmd->trans_id);
-		npu_dump_debug_info(npu_dev);
+		if (retry_cnt > 0) {
+			NPU_WARN("Retry IPC queue\n");
+			retry_cnt--;
+			mutex_unlock(&host_ctx->lock);
+			host_session_msg_hdlr(npu_dev);
+			goto retry;
+		}
+
 		ret = -ETIMEDOUT;
 		goto free_unload_cmd;
 	}
@@ -2484,6 +2537,15 @@ int32_t npu_host_unload_network(struct npu_client *client,
 
 	mutex_unlock(&host_ctx->lock);
 
+	/*
+	 * Treat an unload-network timeout as an error in order to
+	 * force SSR.
+	 */
+	if (ret == -ETIMEDOUT) {
+		NPU_ERR("Error handling after unload network failure\n");
+		host_error_hdlr(npu_dev, true);
+	}
+
 	disable_fw(npu_dev);
 
 	return ret;
@@ -2501,7 +2563,7 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 	uint32_t num_patch_params, pkt_size;
 	bool async_ioctl = !!exec_ioctl->async;
-	int i;
+	int i, retry_cnt = 1;
 
 	mutex_lock(&host_ctx->lock);
 	network = get_network_by_hdl(host_ctx, client,
@@ -2601,6 +2663,7 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 
 	mutex_unlock(&host_ctx->lock);
 
+retry:
 	ret = wait_for_completion_timeout(
 		&exec_cmd->cmd_done,
 		(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
@@ -2616,7 +2679,14 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 	if (!ret) {
 		NPU_ERR("npu: %llx:%d NPU_IPC_CMD_EXECUTE_V2 time out\n",
 			network->id, exec_cmd->trans_id);
-		npu_dump_debug_info(npu_dev);
+		if (retry_cnt > 0) {
+			NPU_WARN("Retry IPC queue\n");
+			retry_cnt--;
+			mutex_unlock(&host_ctx->lock);
+			host_session_msg_hdlr(npu_dev);
+			goto retry;
+		}
+
 		ret = -ETIMEDOUT;
 		goto free_exec_packet;
 	}
diff --git a/drivers/media/platform/msm/npu/npu_mgr.h b/drivers/media/platform/msm/npu/npu_mgr.h
index 6a24c64..049552a 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.h
+++ b/drivers/media/platform/msm/npu/npu_mgr.h
@@ -18,8 +18,12 @@
  * Defines
  * -------------------------------------------------------------------------
  */
-#define NW_CMD_TIMEOUT_MS (1000 * 5) /* set for 5 seconds */
+#define NW_RSC_TIMEOUT_MS (1000 * 5) /* set for 5 seconds */
+#define NW_RSC_TIMEOUT msecs_to_jiffies(NW_RSC_TIMEOUT_MS)
+#define NW_CMD_TIMEOUT_MS (1000 * 20) /* set for 20 seconds */
 #define NW_CMD_TIMEOUT msecs_to_jiffies(NW_CMD_TIMEOUT_MS)
+#define NW_PWR_UP_TIMEOUT_MS (1000 * 60) /* set for 60 seconds */
+#define NW_PWR_UP_TIMEOUT msecs_to_jiffies(NW_PWR_UP_TIMEOUT_MS)
 #define NW_DEBUG_TIMEOUT_MS (1000 * 60 * 30) /* set for 30 minutes */
 #define NW_DEBUG_TIMEOUT msecs_to_jiffies(NW_DEBUG_TIMEOUT_MS)
 #define NPU_MBOX_IDLE_TIMEOUT_MS 500 /* set for 500ms */
diff --git a/drivers/media/platform/msm/synx/synx.c b/drivers/media/platform/msm/synx/synx.c
index 7809130..eb467c0 100644
--- a/drivers/media/platform/msm/synx/synx.c
+++ b/drivers/media/platform/msm/synx/synx.c
@@ -29,9 +29,9 @@ void synx_external_callback(s32 sync_obj, int status, void *data)
 	}
 
 	if (row) {
-		spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_lock(&synx_dev->row_locks[row->index]);
 		row->signaling_id = sync_obj;
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 
 		pr_debug("signaling synx 0x%x from external callback %d\n",
 			synx_obj, sync_obj);
@@ -138,23 +138,23 @@ int synx_register_callback(s32 synx_obj,
 	if (!row || !cb_func)
 		return -EINVAL;
 
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_lock(&synx_dev->row_locks[row->index]);
 
-	state = synx_status_locked(row);
+	state = synx_status(row);
 	/* do not register if callback registered earlier */
 	list_for_each_entry(temp_cb_info, &row->callback_list, list) {
 		if (temp_cb_info->callback_func == cb_func &&
 			temp_cb_info->cb_data == userdata) {
 			pr_err("duplicate registration for synx 0x%x\n",
 				synx_obj);
-			spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+			mutex_unlock(&synx_dev->row_locks[row->index]);
 			return -EALREADY;
 		}
 	}
 
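+	/* A sleeping allocation is safe now that the row lock is a mutex. */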
-	synx_cb = kzalloc(sizeof(*synx_cb), GFP_ATOMIC);
+	synx_cb = kzalloc(sizeof(*synx_cb), GFP_KERNEL);
 	if (!synx_cb) {
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		return -ENOMEM;
 	}
 
@@ -171,12 +171,12 @@ int synx_register_callback(s32 synx_obj,
 			synx_cb->synx_obj);
 		queue_work(synx_dev->work_queue,
 			&synx_cb->cb_dispatch_work);
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		return 0;
 	}
 
 	list_add_tail(&synx_cb->list, &row->callback_list);
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 
 	return 0;
 }
@@ -196,9 +196,9 @@ int synx_deregister_callback(s32 synx_obj,
 		return -EINVAL;
 	}
 
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_lock(&synx_dev->row_locks[row->index]);
 
-	state = synx_status_locked(row);
+	state = synx_status(row);
 	pr_debug("de-registering callback for synx 0x%x\n",
 		synx_obj);
 	list_for_each_entry_safe(synx_cb, temp, &row->callback_list, list) {
@@ -216,7 +216,7 @@ int synx_deregister_callback(s32 synx_obj,
 		}
 	}
 
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 	return 0;
 }
 
@@ -250,17 +250,17 @@ int synx_signal_core(struct synx_table_row *row, u32 status)
 		return -EINVAL;
 	}
 
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_lock(&synx_dev->row_locks[row->index]);
 
 	if (!row->index) {
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		pr_err("object already cleaned up at %d\n",
 			row->index);
 		return -EINVAL;
 	}
 
-	if (synx_status_locked(row) != SYNX_STATE_ACTIVE) {
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	if (synx_status(row) != SYNX_STATE_ACTIVE) {
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		pr_err("object already signaled synx at %d\n",
 			row->index);
 		return -EALREADY;
@@ -270,7 +270,7 @@ int synx_signal_core(struct synx_table_row *row, u32 status)
 	if (status == SYNX_STATE_SIGNALED_ERROR)
 		dma_fence_set_error(row->fence, -EINVAL);
 
-	rc = dma_fence_signal_locked(row->fence);
+	rc = dma_fence_signal(row->fence);
 	if (rc < 0) {
 		pr_err("unable to signal synx at %d, err: %d\n",
 			row->index, rc);
@@ -308,7 +308,7 @@ int synx_signal_core(struct synx_table_row *row, u32 status)
 		}
 		row->num_bound_synxs = 0;
 	}
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 
 	for (i = 0; i < idx; i++) {
 		sync_id = bind_descs[i].external_desc.id[0];
@@ -450,11 +450,11 @@ static int synx_release_core(struct synx_table_row *row)
 	 * (definitely for merged synx on invoking deinit)
 	 * be careful while accessing the metadata
 	 */
+	mutex_lock(&synx_dev->row_locks[row->index]);
 	fence = row->fence;
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
 	idx = row->index;
 	if (!idx) {
-		spin_unlock_bh(&synx_dev->row_spinlocks[idx]);
+		mutex_unlock(&synx_dev->row_locks[idx]);
 		pr_err("object already cleaned up at %d\n", idx);
 		return -EINVAL;
 	}
@@ -468,7 +468,7 @@ static int synx_release_core(struct synx_table_row *row)
 
 	/* do not reference fence and row in the function after this */
 	dma_fence_put(fence);
-	spin_unlock_bh(&synx_dev->row_spinlocks[idx]);
+	mutex_unlock(&synx_dev->row_locks[idx]);
 	pr_debug("Exit %s\n", __func__);
 
 	return 0;
@@ -502,14 +502,14 @@ int synx_wait(s32 synx_obj, u64 timeout_ms)
 		return -EINVAL;
 	}
 
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_lock(&synx_dev->row_locks[row->index]);
 	if (!row->index) {
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		pr_err("object already cleaned up at %d\n",
 			row->index);
 		return -EINVAL;
 	}
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 
 	timeleft = dma_fence_wait_timeout(row->fence, (bool) 0,
 					msecs_to_jiffies(timeout_ms));
@@ -560,11 +560,11 @@ int synx_bind(s32 synx_obj, struct synx_external_desc external_sync)
 	if (!data)
 		return -ENOMEM;
 
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
-	if (synx_status_locked(row) != SYNX_STATE_ACTIVE) {
+	mutex_lock(&synx_dev->row_locks[row->index]);
+	if (synx_status(row) != SYNX_STATE_ACTIVE) {
 		pr_err("bind to non-active synx is prohibited 0x%x\n",
 			synx_obj);
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		kfree(data);
 		return -EINVAL;
 	}
@@ -572,7 +572,7 @@ int synx_bind(s32 synx_obj, struct synx_external_desc external_sync)
 	if (row->num_bound_synxs >= SYNX_MAX_NUM_BINDINGS) {
 		pr_err("max number of bindings reached for synx_objs 0x%x\n",
 			synx_obj);
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		kfree(data);
 		return -ENOMEM;
 	}
@@ -583,7 +583,7 @@ int synx_bind(s32 synx_obj, struct synx_external_desc external_sync)
 			row->bound_synxs[i].external_desc.id[0]) {
 			pr_err("duplicate binding for external sync %d\n",
 				external_sync.id[0]);
-			spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+			mutex_unlock(&synx_dev->row_locks[row->index]);
 			kfree(data);
 			return -EALREADY;
 		}
@@ -598,7 +598,7 @@ int synx_bind(s32 synx_obj, struct synx_external_desc external_sync)
 	if (rc < 0) {
 		pr_err("callback registration failed for %d\n",
 			external_sync.id[0]);
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		kfree(data);
 		return rc;
 	}
@@ -607,7 +607,7 @@ int synx_bind(s32 synx_obj, struct synx_external_desc external_sync)
 		   &external_sync, sizeof(struct synx_external_desc));
 	row->bound_synxs[row->num_bound_synxs].external_data = data;
 	row->num_bound_synxs = row->num_bound_synxs + 1;
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 
 	pr_debug("added external sync %d to bindings of 0x%x\n",
 		external_sync.id[0], synx_obj);
@@ -647,10 +647,10 @@ int synx_addrefcount(s32 synx_obj, s32 count)
 		return -EINVAL;
 	}
 
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_lock(&synx_dev->row_locks[row->index]);
 	while (count--)
 		dma_fence_get(row->fence);
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 
 	return 0;
 }
@@ -661,6 +661,7 @@ int synx_import(s32 synx_obj, u32 import_key, s32 *new_synx_obj)
 	struct dma_fence *fence;
 	struct synx_obj_node *obj_node;
 	struct synx_table_row *row = NULL;
+	u32 index;
 
 	pr_debug("Enter %s\n", __func__);
 
@@ -675,31 +676,35 @@ int synx_import(s32 synx_obj, u32 import_key, s32 *new_synx_obj)
 	if (!obj_node)
 		return -ENOMEM;
 
-	/* new global synx id */
-	id = synx_create_handle(row);
-	if (id < 0) {
-		fence = row->fence;
-		if (is_merged_synx(row)) {
-			clear_bit(row->index, synx_dev->bitmap);
-			memset(row, 0, sizeof(*row));
-		}
-		/* release the reference obtained during export */
-		dma_fence_put(fence);
-		kfree(obj_node);
-		return -EINVAL;
-	}
-
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_lock(&synx_dev->row_locks[row->index]);
 	if (!row->index) {
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		pr_err("object already cleaned up at %d\n",
 			row->index);
 		kfree(obj_node);
 		return -EINVAL;
 	}
+
+	/* new global synx id */
+	id = synx_create_handle(row);
+	if (id < 0) {
+		fence = row->fence;
+		index = row->index;
+		if (is_merged_synx(row)) {
+			memset(row, 0, sizeof(*row));
+			clear_bit(index, synx_dev->bitmap);
+		}
+		mutex_unlock(&synx_dev->row_locks[index]);
+		/* release the reference obtained during export */
+		dma_fence_put(fence);
+		kfree(obj_node);
+		pr_err("error creating handle for import\n");
+		return -EINVAL;
+	}
+
 	obj_node->synx_obj = id;
 	list_add(&obj_node->list, &row->synx_obj_list);
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 
 	*new_synx_obj = id;
 	pr_debug("Exit %s\n", __func__);
@@ -722,7 +727,7 @@ int synx_export(s32 synx_obj, u32 *import_key)
 	if (rc < 0)
 		return rc;
 
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_lock(&synx_dev->row_locks[row->index]);
 	/*
 	 * to make sure the synx is not lost if the process dies or
 	 * synx is released before any other process gets a chance to
@@ -731,7 +736,7 @@ int synx_export(s32 synx_obj, u32 *import_key)
 	 * be a dangling reference and needs to be garbage collected.
 	 */
 	dma_fence_get(row->fence);
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 	pr_debug("Exit %s\n", __func__);
 
 	return 0;
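
The comment above is the reason export takes an extra fence reference.
A minimal sketch of the intended pairing, assuming the row lookup
helpers used elsewhere in this patch:

	/* export: pin the object so it survives until an import */
	mutex_lock(&synx_dev->row_locks[row->index]);
	dma_fence_get(row->fence);
	mutex_unlock(&synx_dev->row_locks[row->index]);

	/* import: on success the reference backs the new handle;
	 * on failure it must be dropped to avoid a leak
	 */
	dma_fence_put(row->fence);
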
@@ -960,16 +965,16 @@ static int synx_handle_register_user_payload(
 		userpayload_info.payload,
 		SYNX_PAYLOAD_WORDS * sizeof(__u64));
 
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_lock(&synx_dev->row_locks[row->index]);
 
-	state = synx_status_locked(row);
+	state = synx_status(row);
 	if (state == SYNX_STATE_SIGNALED_SUCCESS ||
 		state == SYNX_STATE_SIGNALED_ERROR) {
 		user_payload_kernel->data.status = state;
-		spin_lock_bh(&client->eventq_lock);
+		mutex_lock(&client->eventq_lock);
 		list_add_tail(&user_payload_kernel->list, &client->eventq);
-		spin_unlock_bh(&client->eventq_lock);
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&client->eventq_lock);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		wake_up_all(&client->wq);
 		return 0;
 	}
@@ -982,14 +987,14 @@ static int synx_handle_register_user_payload(
 				user_payload_kernel->data.payload_data[1]) {
 			pr_err("callback already registered on 0x%x\n",
 				synx_obj);
-			spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+			mutex_unlock(&synx_dev->row_locks[row->index]);
 			kfree(user_payload_kernel);
 			return -EALREADY;
 		}
 	}
 
 	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 
 	pr_debug("Exit %s\n", __func__);
 	return 0;
@@ -1028,7 +1033,7 @@ static int synx_handle_deregister_user_payload(
 		return -EINVAL;
 	}
 
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_lock(&synx_dev->row_locks[row->index]);
 
-	state = synx_status_locked(row);
+	state = synx_status(row);
 	list_for_each_entry_safe(user_payload_kernel, temp,
@@ -1044,7 +1049,7 @@ static int synx_handle_deregister_user_payload(
 		}
 	}
 
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 
 	if (match_found)
 		kfree(user_payload_kernel);
@@ -1066,9 +1071,9 @@ static int synx_handle_deregister_user_payload(
 		data->synx_obj = synx_obj;
 		data->status = SYNX_CALLBACK_RESULT_CANCELED;
 
-		spin_lock_bh(&client->eventq_lock);
+		mutex_lock(&client->eventq_lock);
 		list_add_tail(&user_payload_kernel->list, &client->eventq);
-		spin_unlock_bh(&client->eventq_lock);
+		mutex_unlock(&client->eventq_lock);
 		pr_debug("registered cancellation callback\n");
 		wake_up_all(&client->wq);
 	}
@@ -1236,17 +1241,17 @@ static ssize_t synx_read(struct file *filep,
 		return -EINVAL;
 	}
 
-	spin_lock_bh(&client->eventq_lock);
+	mutex_lock(&client->eventq_lock);
 	user_payload_kernel = list_first_entry_or_null(
 							&client->eventq,
 							struct synx_cb_data,
 							list);
 	if (!user_payload_kernel) {
-		spin_unlock_bh(&client->eventq_lock);
+		mutex_unlock(&client->eventq_lock);
 		return 0;
 	}
 	list_del_init(&user_payload_kernel->list);
-	spin_unlock_bh(&client->eventq_lock);
+	mutex_unlock(&client->eventq_lock);
 
 	rc = size;
 	if (copy_to_user(buf,
@@ -1272,11 +1277,11 @@ static unsigned int synx_poll(struct file *filep,
 	client = filep->private_data;
 
 	poll_wait(filep, &client->wq, poll_table);
-	spin_lock_bh(&client->eventq_lock);
+	mutex_lock(&client->eventq_lock);
 	/* if list has pending cb events, notify */
 	if (!list_empty(&client->eventq))
 		rc = POLLPRI;
-	spin_unlock_bh(&client->eventq_lock);
+	mutex_unlock(&client->eventq_lock);
 
 	pr_debug("Exit %s\n", __func__);
 
@@ -1299,7 +1304,7 @@ static int synx_open(struct inode *inode, struct file *filep)
 	client->device = synx_dev;
 	init_waitqueue_head(&client->wq);
 	INIT_LIST_HEAD(&client->eventq);
-	spin_lock_init(&client->eventq_lock);
+	mutex_init(&client->eventq_lock);
 
 	mutex_lock(&synx_dev->table_lock);
 	list_add_tail(&client->list, &synx_dev->client_list);
@@ -1313,6 +1318,31 @@ static int synx_open(struct inode *inode, struct file *filep)
 	return 0;
 }
 
+static void synx_object_cleanup(struct synx_client *client)
+{
+	int i;
+	struct synx_cb_data *payload_info, *temp_payload_info;
+
+	for (i = 1; i < SYNX_MAX_OBJS; i++) {
+		struct synx_table_row *row =
+			synx_dev->synx_table + i;
+
+		mutex_lock(&synx_dev->row_locks[row->index]);
+		if (row->index) {
+			list_for_each_entry_safe(payload_info,
+				temp_payload_info,
+				&row->user_payload_list, list) {
+				if (payload_info->client == client) {
+					list_del_init(&payload_info->list);
+					kfree(payload_info);
+					pr_debug("cleaned up client payload\n");
+				}
+			}
+		}
+		mutex_unlock(&synx_dev->row_locks[row->index]);
+	}
+}
+
 static void synx_table_cleanup(void)
 {
 	int rc = 0;
@@ -1402,6 +1432,7 @@ static int synx_close(struct inode *inode, struct file *filep)
 	client = filep->private_data;
 
 	mutex_lock(&synx_dev->table_lock);
+	synx_object_cleanup(client);
 	synx_table_cleanup();
 	list_del_init(&client->list);
 	kfree(client);
@@ -1521,7 +1552,7 @@ static int __init synx_init(void)
 	mutex_init(&synx_dev->vtbl_lock);
 
 	for (idx = 0; idx < SYNX_MAX_OBJS; idx++)
-		spin_lock_init(&synx_dev->row_spinlocks[idx]);
+		mutex_init(&synx_dev->row_locks[idx]);
 
 	idr_init(&synx_dev->synx_ids);
 	spin_lock_init(&synx_dev->idr_lock);
diff --git a/drivers/media/platform/msm/synx/synx_debugfs.c b/drivers/media/platform/msm/synx/synx_debugfs.c
index d216e78..65a4763 100644
--- a/drivers/media/platform/msm/synx/synx_debugfs.c
+++ b/drivers/media/platform/msm/synx/synx_debugfs.c
@@ -74,7 +74,7 @@ static ssize_t synx_table_read(struct file *file,
 		if (!row->index)
 			continue;
 
-		spin_lock_bh(&dev->row_spinlocks[row->index]);
+		mutex_lock(&dev->row_locks[row->index]);
 		if (columns & NAME_COLUMN)
 			cur += scnprintf(cur, end - cur,
 				"|%10s|", row->name);
@@ -82,7 +82,7 @@ static ssize_t synx_table_read(struct file *file,
 			cur += scnprintf(cur, end - cur,
 				"|%11d|", row->num_bound_synxs);
 		if (columns & STATE_COLUMN) {
-			state = synx_status_locked(row);
+			state = synx_status(row);
 			cur += scnprintf(cur, end - cur,
 				"|%10d|", state);
 		}
@@ -101,7 +101,7 @@ static ssize_t synx_table_read(struct file *file,
 					"|0x%8x|", obj_node->synx_obj);
 				}
 		}
-		spin_unlock_bh(&dev->row_spinlocks[row->index]);
+		mutex_unlock(&dev->row_locks[row->index]);
 		cur += scnprintf(cur, end - cur, "\n");
 	}
 	if (columns & ERROR_CODES && !list_empty(
diff --git a/drivers/media/platform/msm/synx/synx_private.h b/drivers/media/platform/msm/synx/synx_private.h
index d1ece71..ac182af 100644
--- a/drivers/media/platform/msm/synx/synx_private.h
+++ b/drivers/media/platform/msm/synx/synx_private.h
@@ -129,6 +129,7 @@ struct synx_obj_node {
  *
  * @name              : Optional string representation of the synx object
  * @fence             : dma fence backing the synx object
+ * @spinlock          : Spinlock for the dma fence
  * @synx_obj_list     : List of synx integer handles mapped
  * @index             : Index of the spin lock table associated with synx obj
  * @num_bound_synxs   : Number of external bound synx objects
@@ -141,6 +142,7 @@ struct synx_obj_node {
 struct synx_table_row {
 	char name[SYNX_OBJ_NAME_LEN];
 	struct dma_fence *fence;
+	spinlock_t *spinlock;
 	struct list_head synx_obj_list;
 	s32 index;
 	u32 num_bound_synxs;
@@ -190,7 +192,7 @@ struct synx_import_data {
  * @dev           : Device type
  * @class         : Device class
  * @synx_table    : Table of all synx objects
- * @row_spinlocks : Spinlock array, one for each row in the table
+ * @row_locks     : Mutex lock array, one for each row in the table
  * @table_lock    : Mutex used to lock the table
  * @open_cnt      : Count of file open calls made on the synx driver
  * @work_queue    : Work queue used for dispatching kernel callbacks
@@ -211,7 +213,7 @@ struct synx_device {
 	dev_t dev;
 	struct class *class;
 	struct synx_table_row synx_table[SYNX_MAX_OBJS];
-	spinlock_t row_spinlocks[SYNX_MAX_OBJS];
+	struct mutex row_locks[SYNX_MAX_OBJS];
 	struct mutex table_lock;
 	int open_cnt;
 	struct workqueue_struct *work_queue;
@@ -233,14 +235,14 @@ struct synx_device {
  * specific details
  *
  * @device      : Pointer to synx device structure
- * @eventq_lock : Spinlock for the event queue
+ * @eventq_lock : Mutex for the event queue
  * @wq          : Queue for the polling process
  * @eventq      : All the user callback payloads
  * @list        : List member used to append this node to client_list
  */
 struct synx_client {
 	struct synx_device *device;
-	spinlock_t eventq_lock;
+	struct mutex eventq_lock;
 	wait_queue_head_t wq;
 	struct list_head eventq;
 	struct list_head list;
diff --git a/drivers/media/platform/msm/synx/synx_util.c b/drivers/media/platform/msm/synx/synx_util.c
index 22d95b4..21da434 100644
--- a/drivers/media/platform/msm/synx/synx_util.c
+++ b/drivers/media/platform/msm/synx/synx_util.c
@@ -30,6 +30,7 @@ int synx_init_object(struct synx_table_row *table,
 	struct dma_fence_ops *ops)
 {
 	struct dma_fence *fence = NULL;
+	spinlock_t *spinlock = NULL;
 	struct synx_table_row *row = table + idx;
 	struct synx_obj_node *obj_node;
 
@@ -40,16 +41,26 @@ int synx_init_object(struct synx_table_row *table,
 	if (!fence)
 		return -ENOMEM;
 
-	obj_node = kzalloc(sizeof(*obj_node), GFP_KERNEL);
-	if (!obj_node) {
+	spinlock = kzalloc(sizeof(*spinlock), GFP_KERNEL);
+	if (!spinlock) {
 		kfree(fence);
 		return -ENOMEM;
 	}
 
-	dma_fence_init(fence, ops, &synx_dev->row_spinlocks[idx],
-		synx_dev->dma_context, 1);
+	spin_lock_init(spinlock);
 
+	obj_node = kzalloc(sizeof(*obj_node), GFP_KERNEL);
+	if (!obj_node) {
+		kfree(spinlock);
+		kfree(fence);
+		return -ENOMEM;
+	}
+
+	dma_fence_init(fence, ops, spinlock, synx_dev->dma_context, 1);
+
+	mutex_lock(&synx_dev->row_locks[idx]);
 	row->fence = fence;
+	row->spinlock = spinlock;
 	obj_node->synx_obj = id;
 	row->index = idx;
 	INIT_LIST_HEAD(&row->synx_obj_list);
@@ -59,9 +70,10 @@ int synx_init_object(struct synx_table_row *table,
 	list_add(&obj_node->list, &row->synx_obj_list);
 	if (name)
 		strlcpy(row->name, name, sizeof(row->name));
+	mutex_unlock(&synx_dev->row_locks[idx]);
 
 	pr_debug("synx obj init: id:0x%x state:%u fence: 0x%pK\n",
-		synx_status_locked(row), fence);
+		id, synx_status(row), fence);
 
 	return 0;
 }
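
With the row locks converted to sleeping mutexes they can no longer
serve as the spinlock that dma_fence_init() requires, so each fence now
carries its own dynamically allocated spinlock. A minimal sketch of
that allocation order; ops and context below are placeholders:

	spinlock_t *lock;
	struct dma_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	lock = kzalloc(sizeof(*lock), GFP_KERNEL);
	if (!fence || !lock) {
		kfree(lock);
		kfree(fence);
		return -ENOMEM;
	}
	spin_lock_init(lock);
	dma_fence_init(fence, ops, lock, context, 1);
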
@@ -85,6 +97,7 @@ int synx_init_group_object(struct synx_table_row *table,
 	if (!obj_node)
 		return -ENOMEM;
 
+	mutex_lock(&synx_dev->row_locks[idx]);
 	row->fence = &array->base;
 	obj_node->synx_obj = id;
 	row->index = idx;
@@ -93,8 +106,10 @@ int synx_init_group_object(struct synx_table_row *table,
 	INIT_LIST_HEAD(&row->user_payload_list);
 
 	list_add(&obj_node->list, &row->synx_obj_list);
+	mutex_unlock(&synx_dev->row_locks[idx]);
+
 	pr_debug("synx group obj init: id:%d state:%u fence: 0x%pK\n",
-		id, synx_status_locked(row), row->fence);
+		id, synx_status(row), row->fence);
 
 	return 0;
 }
@@ -109,7 +124,7 @@ void synx_callback_dispatch(struct synx_table_row *row)
 	if (!row)
 		return;
 
-	state = synx_status_locked(row);
+	state = synx_status(row);
 
 	/* dispatch the kernel callbacks registered (if any) */
 	list_for_each_entry_safe(synx_cb,
@@ -130,9 +145,9 @@ void synx_callback_dispatch(struct synx_table_row *row)
 			pr_err("invalid client member in cb list\n");
 			continue;
 		}
-		spin_lock_bh(&client->eventq_lock);
+		mutex_lock(&client->eventq_lock);
 		list_move_tail(&payload_info->list, &client->eventq);
-		spin_unlock_bh(&client->eventq_lock);
+		mutex_unlock(&client->eventq_lock);
 		/*
 		 * since cb can be registered by multiple clients,
 		 * wake the process right away
@@ -160,19 +175,20 @@ int synx_deinit_object(struct synx_table_row *row)
 	struct synx_callback_info *synx_cb, *temp_cb;
 	struct synx_cb_data  *upayload_info, *temp_upayload;
 	struct synx_obj_node *obj_node, *temp_obj_node;
+	unsigned long flags;
 
 	if (!row || !synx_dev)
 		return -EINVAL;
 
 	index = row->index;
-	spin_lock_bh(&synx_dev->idr_lock);
+	spin_lock_irqsave(&synx_dev->idr_lock, flags);
 	list_for_each_entry_safe(obj_node,
 		temp_obj_node, &row->synx_obj_list, list) {
 		if ((struct synx_table_row *)idr_remove(&synx_dev->synx_ids,
 				obj_node->synx_obj) != row) {
 			pr_err("removing data in idr table failed 0x%x\n",
 				obj_node->synx_obj);
-			spin_unlock_bh(&synx_dev->idr_lock);
+			spin_unlock_irqrestore(&synx_dev->idr_lock, flags);
 			return -EINVAL;
 		}
 		pr_debug("removed synx obj at 0x%x successful\n",
@@ -180,7 +196,7 @@ int synx_deinit_object(struct synx_table_row *row)
 		list_del_init(&obj_node->list);
 		kfree(obj_node);
 	}
-	spin_unlock_bh(&synx_dev->idr_lock);
+	spin_unlock_irqrestore(&synx_dev->idr_lock, flags);
 
 	/*
 	 * release the fence memory only for individual obj.
@@ -188,6 +204,7 @@ int synx_deinit_object(struct synx_table_row *row)
 	 * in its registered release function.
 	 */
 	if (!is_merged_synx(row)) {
+		kfree(row->spinlock);
 		kfree(row->fence);
 
 		/*
@@ -207,9 +224,9 @@ int synx_deinit_object(struct synx_table_row *row)
 				pr_err("invalid client member in cb list\n");
 				continue;
 			}
-			spin_lock_bh(&client->eventq_lock);
+			mutex_lock(&client->eventq_lock);
 			list_move_tail(&upayload_info->list, &client->eventq);
-			spin_unlock_bh(&client->eventq_lock);
+			mutex_unlock(&client->eventq_lock);
 			/*
 			 * since cb can be registered by multiple clients,
 			 * wake the process right away
@@ -228,8 +245,8 @@ int synx_deinit_object(struct synx_table_row *row)
 		}
 	}
 
-	clear_bit(row->index, synx_dev->bitmap);
 	memset(row, 0, sizeof(*row));
+	clear_bit(index, synx_dev->bitmap);
 
 	pr_debug("destroying synx obj at %d successful\n", index);
 	return 0;
@@ -338,9 +355,9 @@ s32 synx_merge_error(s32 *synx_objs, u32 num_objs)
 			return -EINVAL;
 		}
 
-		spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_lock(&synx_dev->row_locks[row->index]);
 		synx_release_reference(row->fence);
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 	}
 
 	return 0;
@@ -369,9 +386,9 @@ int synx_util_validate_merge(s32 *synx_objs,
 			return -EINVAL;
 		}
 
-		spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_lock(&synx_dev->row_locks[row->index]);
 		count += synx_add_reference(row->fence);
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 	}
 
 	fences = kcalloc(count, sizeof(*fences), GFP_KERNEL);
@@ -390,9 +407,9 @@ int synx_util_validate_merge(s32 *synx_objs,
 			return -EINVAL;
 		}
 
-		spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_lock(&synx_dev->row_locks[row->index]);
 		count += synx_fence_add(row->fence, fences, count);
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 	}
 
 	/* eliminate duplicates */
@@ -540,14 +557,15 @@ void *synx_from_handle(s32 synx_obj)
 {
 	s32 base;
 	struct synx_table_row *row;
+	unsigned long flags;
 
 	if (!synx_dev)
 		return NULL;
 
-	spin_lock_bh(&synx_dev->idr_lock);
+	spin_lock_irqsave(&synx_dev->idr_lock, flags);
 	row = (struct synx_table_row *) idr_find(&synx_dev->synx_ids,
 		synx_obj);
-	spin_unlock_bh(&synx_dev->idr_lock);
+	spin_unlock_irqrestore(&synx_dev->idr_lock, flags);
 
 	if (!row) {
 		pr_err(
@@ -570,14 +588,15 @@ s32 synx_create_handle(void *pObj)
 {
 	s32 base = current->tgid << 16;
 	s32 id;
+	unsigned long flags;
 
 	if (!synx_dev)
 		return -EINVAL;
 
-	spin_lock_bh(&synx_dev->idr_lock);
+	spin_lock_irqsave(&synx_dev->idr_lock, flags);
 	id = idr_alloc(&synx_dev->synx_ids, pObj,
 			base, base + 0x10000, GFP_ATOMIC);
-	spin_unlock_bh(&synx_dev->idr_lock);
+	spin_unlock_irqrestore(&synx_dev->idr_lock, flags);
 
 	pr_debug("generated Id: 0x%x, base: 0x%x, client: 0x%x\n",
 		id, base, current->tgid);
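
The idr_lock conversions above move from the _bh variants to
irqsave/irqrestore so the lock is safe to take regardless of the
caller's context. The general shape, with an illustrative handle:

	unsigned long flags;

	spin_lock_irqsave(&synx_dev->idr_lock, flags);
	idr_remove(&synx_dev->synx_ids, handle);	/* 'handle' is illustrative */
	spin_unlock_irqrestore(&synx_dev->idr_lock, flags);
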
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 7c623c1..8fa9fa0 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -460,6 +460,9 @@ static int __qseecom_scm_call2_locked(uint32_t smc_id, struct scm_desc *desc)
 	int ret = 0;
 	int retry_count = 0;
 
+	if (qseecom.support_bus_scaling)
+		return scm_call2(smc_id, desc);
+
 	do {
 		ret = scm_call2_noretry(smc_id, desc);
 		if (ret == -EBUSY) {
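
With bus scaling enabled the call is routed to the stock scm_call2(),
which performs its own retry handling; otherwise the loop above retries
scm_call2_noretry() on -EBUSY. A condensed sketch of that retry loop,
assuming wait/retry constants like the driver's own:

	int ret, retry = 0;

	do {
		ret = scm_call2_noretry(smc_id, desc);
		if (ret != -EBUSY)
			break;
		msleep(QSEECOM_SCM_EBUSY_WAIT_MS);	/* assumed constant */
	} while (++retry < QSEECOM_SCM_EBUSY_MAX_RETRY);	/* assumed constant */
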
@@ -1804,17 +1807,20 @@ static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
 	else
 		qclk = &qseecom.ce_drv;
 
-	if (qclk->clk_access_cnt > 0) {
-		qclk->clk_access_cnt--;
-	} else {
+	if (qclk->clk_access_cnt > 2) {
 		pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
 		ret = -EINVAL;
+		goto err_dec_ref_cnt;
 	}
+	if (qclk->clk_access_cnt == 2)
+		qclk->clk_access_cnt--;
 
+err_dec_ref_cnt:
 	mutex_unlock(&clk_access_lock);
 	return ret;
 }
 
+
 static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
 {
 	int32_t ret = 0;
@@ -2086,6 +2092,14 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
 			if (ptr_svc->svc.listener_id == lstnr) {
 				ptr_svc->listener_in_use = true;
 				ptr_svc->rcv_req_flag = 1;
+				ret = qseecom_dmabuf_cache_operations(
+					ptr_svc->dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+				if (ret) {
+					rc = -EINVAL;
+					status = QSEOS_RESULT_FAILURE;
+					goto err_resp;
+				}
 				wake_up_interruptible(&ptr_svc->rcv_req_wq);
 				break;
 			}
@@ -2421,6 +2435,14 @@ static int __qseecom_reentrancy_process_incomplete_cmd(
 			if (ptr_svc->svc.listener_id == lstnr) {
 				ptr_svc->listener_in_use = true;
 				ptr_svc->rcv_req_flag = 1;
+				ret = qseecom_dmabuf_cache_operations(
+					ptr_svc->dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+				if (ret) {
+					rc = -EINVAL;
+					status = QSEOS_RESULT_FAILURE;
+					goto err_resp;
+				}
 				wake_up_interruptible(&ptr_svc->rcv_req_wq);
 				break;
 			}
@@ -3077,6 +3099,16 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data,
 
 	if (!ptr_app->ref_cnt) {
 		ret = __qseecom_unload_app(data, data->client.app_id);
+		if (ret == -EBUSY) {
+			/*
+			 * If unload failed due to EBUSY, don't free mem
+			 * just restore app ref_cnt and return -EBUSY
+			 */
+			pr_warn("unload ta %d(%s) EBUSY\n",
+				data->client.app_id, data->client.app_name);
+			ptr_app->ref_cnt++;
+			return ret;
+		}
 		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
 		list_del(&ptr_app->list);
 		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
@@ -3092,7 +3124,6 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data,
 	return ret;
 }
 
-
 static int qseecom_prepare_unload_app(struct qseecom_dev_handle *data)
 {
 	struct qseecom_unload_app_pending_list *entry = NULL;
diff --git a/drivers/misc/wigig_sensing.c b/drivers/misc/wigig_sensing.c
index bf8323e..61372a1 100644
--- a/drivers/misc/wigig_sensing.c
+++ b/drivers/misc/wigig_sensing.c
@@ -433,7 +433,7 @@ static int wigig_sensing_change_state(struct wigig_sensing_ctx *ctx,
 				      enum wigig_sensing_stm_e new_state)
 {
 	enum wigig_sensing_stm_e curr_state;
-	bool transition_allowed = false;
+	bool transition_allowed = true;
 
 	if (!state) {
 		pr_err("state is NULL\n");
@@ -441,75 +441,39 @@ static int wigig_sensing_change_state(struct wigig_sensing_ctx *ctx,
 	}
 	if (new_state <= WIGIG_SENSING_STATE_MIN ||
 	    new_state >= WIGIG_SENSING_STATE_MAX) {
-		pr_err("new_state is invalid\n");
+		pr_err("new_state (%d) is invalid\n", new_state);
 		return -EINVAL;
 	}
 
 	curr_state = state->state;
-	if (new_state == curr_state) {
-		pr_debug("Already in the requested state, bailing out\n");
-		return 0;
-	}
 
-	if ((new_state == WIGIG_SENSING_STATE_SYS_ASSERT &&
-	     !state->fw_is_ready) ||
-	    (new_state == WIGIG_SENSING_STATE_SPI_READY)) {
+	/* Moving to SYS_ASSERT state is always allowed */
+	if (new_state == WIGIG_SENSING_STATE_SYS_ASSERT)
 		transition_allowed = true;
-	} else {
-		switch (curr_state) {
-		case WIGIG_SENSING_STATE_INITIALIZED:
-			if (new_state == WIGIG_SENSING_STATE_SPI_READY &&
-			    state->fw_is_ready)
-				transition_allowed = true;
-			break;
-		case WIGIG_SENSING_STATE_SPI_READY:
-			if (new_state == WIGIG_SENSING_STATE_READY_STOPPED &&
-			    state->enabled)
-				transition_allowed = true;
-			break;
-		case WIGIG_SENSING_STATE_READY_STOPPED:
-			if (new_state == WIGIG_SENSING_STATE_SEARCH        ||
-			    new_state == WIGIG_SENSING_STATE_FACIAL        ||
-			    new_state == WIGIG_SENSING_STATE_GESTURE       ||
-			    new_state == WIGIG_SENSING_STATE_CUSTOM        ||
-			    new_state == WIGIG_SENSING_STATE_GET_PARAMS)
-				transition_allowed = true;
-			break;
-		case WIGIG_SENSING_STATE_SEARCH:
-			if (new_state == WIGIG_SENSING_STATE_READY_STOPPED ||
-			    new_state == WIGIG_SENSING_STATE_SEARCH ||
-			    new_state == WIGIG_SENSING_STATE_FACIAL ||
-			    new_state == WIGIG_SENSING_STATE_GESTURE)
-				transition_allowed = true;
-			break;
-		case WIGIG_SENSING_STATE_FACIAL:
-			if (new_state == WIGIG_SENSING_STATE_READY_STOPPED ||
-			    new_state == WIGIG_SENSING_STATE_SEARCH)
-				transition_allowed = true;
-			break;
-		case WIGIG_SENSING_STATE_GESTURE:
-			if (new_state == WIGIG_SENSING_STATE_READY_STOPPED ||
-			    new_state == WIGIG_SENSING_STATE_SEARCH)
-				transition_allowed = true;
-			break;
-		case WIGIG_SENSING_STATE_CUSTOM:
-			if (new_state == WIGIG_SENSING_STATE_READY_STOPPED)
-				transition_allowed = true;
-			break;
-		case WIGIG_SENSING_STATE_GET_PARAMS:
-			if (new_state == WIGIG_SENSING_STATE_READY_STOPPED)
-				transition_allowed = true;
-			break;
-		case WIGIG_SENSING_STATE_SYS_ASSERT:
-			if (new_state == WIGIG_SENSING_STATE_READY_STOPPED &&
-			    state->fw_is_ready)
-				transition_allowed = true;
-			break;
-		default:
-			pr_err("new_state is invalid\n");
-			return -EINVAL;
-		}
-	}
+	/*
+	 * Moving from INITIALIZED state is allowed only to READY_STOPPED state
+	 */
+	else if (curr_state == WIGIG_SENSING_STATE_INITIALIZED &&
+	    new_state != WIGIG_SENSING_STATE_READY_STOPPED)
+		transition_allowed = false;
+	/*
+	 * Moving to GET_PARAMS state is allowed only from READY_STOPPED state
+	 */
+	else if (curr_state != WIGIG_SENSING_STATE_READY_STOPPED &&
+		 new_state == WIGIG_SENSING_STATE_GET_PARAMS)
+		transition_allowed = false;
+	/*
+	 * Moving from GET_PARAMS state is allowed only to READY_STOPPED state
+	 */
+	else if (curr_state == WIGIG_SENSING_STATE_GET_PARAMS &&
+		 new_state != WIGIG_SENSING_STATE_READY_STOPPED)
+		transition_allowed = false;
+	/*
+	 * Moving from SYS_ASSERT state is allowed only to READY_STOPPED state
+	 */
+	else if (curr_state == WIGIG_SENSING_STATE_SYS_ASSERT &&
+		 new_state != WIGIG_SENSING_STATE_READY_STOPPED)
+		transition_allowed = false;
 
 	if (transition_allowed) {
 		pr_info("state transition (%d) --> (%d)\n", curr_state,
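
The rewritten checks collapse the old per-state switch into a
default-allow policy with a few explicit deny rules. The same policy
expressed as a standalone predicate, for illustration only:

	static bool transition_ok(enum wigig_sensing_stm_e cur,
				  enum wigig_sensing_stm_e next)
	{
		/* entering SYS_ASSERT is always allowed */
		if (next == WIGIG_SENSING_STATE_SYS_ASSERT)
			return true;
		/* these states may only exit to READY_STOPPED */
		if (cur == WIGIG_SENSING_STATE_INITIALIZED ||
		    cur == WIGIG_SENSING_STATE_GET_PARAMS ||
		    cur == WIGIG_SENSING_STATE_SYS_ASSERT)
			return next == WIGIG_SENSING_STATE_READY_STOPPED;
		/* GET_PARAMS may only be entered from READY_STOPPED */
		if (next == WIGIG_SENSING_STATE_GET_PARAMS)
			return cur == WIGIG_SENSING_STATE_READY_STOPPED;
		return true;
	}
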
@@ -542,19 +506,23 @@ static int wigig_sensing_ioc_change_mode(struct wigig_sensing_ctx *ctx,
 					 struct wigig_sensing_change_mode req)
 {
 	struct wigig_sensing_stm sim_state;
-	enum wigig_sensing_stm_e new_state;
 	int rc;
 	u32 ch;
 
-	pr_info("mode = %d, channel = %d\n", req.mode, req.channel);
+	pr_info("mode = %d, channel = %d, has_channel = %d\n",
+		req.mode, req.channel, req.has_channel);
 	if (!ctx)
 		return -EINVAL;
 
+	/* Save the request for later use */
+	ctx->stm.mode_request = req.mode;
+
 	/* Simulate a state change */
-	new_state = convert_mode_to_state(req.mode);
+	ctx->stm.state_request = convert_mode_to_state(req.mode);
 	sim_state = ctx->stm;
-	rc = wigig_sensing_change_state(ctx, &sim_state, new_state);
-	if (rc || sim_state.state != new_state) {
+	rc = wigig_sensing_change_state(ctx, &sim_state,
+					ctx->stm.state_request);
+	if (rc || sim_state.state != ctx->stm.state_request) {
 		pr_err("State change not allowed\n");
 		rc = -EFAULT;
 		goto End;
@@ -563,6 +531,7 @@ static int wigig_sensing_ioc_change_mode(struct wigig_sensing_ctx *ctx,
 	/* Send command to FW */
 	ctx->stm.change_mode_in_progress = true;
 	ch = req.has_channel ? req.channel : 0;
+	ctx->stm.channel_request = ch;
 	ctx->stm.burst_size_ready = false;
 	/* Change mode command must not be called during DRI processing */
 	mutex_lock(&ctx->dri_lock);
@@ -583,29 +552,25 @@ static int wigig_sensing_ioc_change_mode(struct wigig_sensing_ctx *ctx,
 		/* Interrupted by a signal */
 		pr_err("wait_event_interruptible_timeout() interrupted by a signal (%d)\n",
 		       rc);
-		return rc;
+		goto End;
 	}
 	if (rc == 0) {
 		/* Timeout, FW did not respond in time */
 		pr_err("wait_event_interruptible_timeout() timed out\n");
-		return -ETIME;
-	}
-
-	/* Change internal state */
-	rc = wigig_sensing_change_state(ctx, &ctx->stm, new_state);
-	if (rc || ctx->stm.state != new_state) {
-		pr_err("wigig_sensing_change_state() failed\n");
-		rc = -EFAULT;
+		rc = -ETIME;
 		goto End;
 	}
 
-	ctx->dropped_bursts = 0;
-	ctx->stm.channel_request = ch;
-	ctx->stm.mode = req.mode;
-	ctx->stm.change_mode_in_progress = false;
+	if (ctx->stm.state != ctx->stm.state_request) {
+		pr_err("wigig_sensing_change_state() failed\n");
+		rc = -EFAULT;
+	}
 
 End:
-	return ctx->stm.burst_size;
+	ctx->stm.state_request = WIGIG_SENSING_STATE_MIN;
+	ctx->stm.channel_request = 0;
+	ctx->stm.mode_request = WIGIG_SENSING_MODE_STOP;
+	return (rc < 0) ? rc : ctx->stm.burst_size;
 }
 
 static int wigig_sensing_ioc_clear_data(struct wigig_sensing_ctx *ctx)
@@ -682,7 +647,8 @@ static unsigned int wigig_sensing_poll(struct file *filp, poll_table *wait)
 
 	poll_wait(filp, &ctx->data_wait_q, wait);
 
-	if (circ_cnt(&ctx->cir_data.b, ctx->cir_data.size_bytes))
+	if (!ctx->stm.change_mode_in_progress &&
+	    circ_cnt(&ctx->cir_data.b, ctx->cir_data.size_bytes))
 		mask |= (POLLIN | POLLRDNORM);
 
 	if (ctx->event_pending)
@@ -706,6 +672,15 @@ static ssize_t wigig_sensing_read(struct file *filp, char __user *buf,
 	    (!d->b.buf))
 		return -ENODEV;
 
+	if (ctx->stm.change_mode_in_progress)
+		return -EINVAL;
+
+	/* Read buffer too small */
+	if (count < ctx->stm.burst_size) {
+		pr_err("Read buffer must be larger than burst size\n");
+		return -EINVAL;
+	}
+
 	/* No data in the buffer */
 	while (circ_cnt(&d->b, d->size_bytes) == 0) {
 		if (filp->f_flags & O_NONBLOCK)
@@ -715,11 +690,11 @@ static ssize_t wigig_sensing_read(struct file *filp, char __user *buf,
 			circ_cnt(&d->b, d->size_bytes) != 0))
 			return -ERESTARTSYS;
 	}
-
 	if (mutex_lock_interruptible(&d->lock))
 		return -ERESTARTSYS;
 
 	copy_size = min_t(u32, circ_cnt(&d->b, d->size_bytes), count);
+	copy_size -= copy_size % ctx->stm.burst_size;
 	size_to_end = circ_cnt_to_end(&d->b, d->size_bytes);
 	tail = d->b.tail;
 	pr_debug("copy_size=%u, size_to_end=%u, head=%u, tail=%u\n",
@@ -933,17 +908,11 @@ static int wigig_sensing_handle_fifo_ready_dri(struct wigig_sensing_ctx *ctx)
 		goto End;
 	}
 
-	if (!ctx->stm.enabled && burst_size != 0) {
-		pr_info("Invalid burst size while disabled %d\n", burst_size);
-		rc = -EFAULT;
-		goto End;
-	}
-
 	ctx->stm.burst_size = burst_size;
-	if (!ctx->stm.enabled ||
-	    ctx->stm.state >= WIGIG_SENSING_STATE_SYS_ASSERT ||
-	    ctx->stm.state < WIGIG_SENSING_STATE_SPI_READY) {
-		pr_err("Received burst_size in an unexpected state\n");
+	if (ctx->stm.state >= WIGIG_SENSING_STATE_SYS_ASSERT ||
+	    ctx->stm.state < WIGIG_SENSING_STATE_READY_STOPPED) {
+		pr_err("Received burst_size in an unexpected state (%d)\n",
+		       ctx->stm.state);
 		rc = -EFAULT;
 		goto End;
 	}
@@ -976,9 +945,25 @@ static int wigig_sensing_handle_fifo_ready_dri(struct wigig_sensing_ctx *ctx)
 		ctx->temp_buffer = 0;
 	}
 
-	wake_up_interruptible(&ctx->cmd_wait_q);
+	/* Change internal state */
+	rc = wigig_sensing_change_state(ctx, &ctx->stm, ctx->stm.state_request);
+	if (rc || ctx->stm.state != ctx->stm.state_request) {
+		pr_err("wigig_sensing_change_state() failed\n");
+		rc = -EFAULT;
+		goto End;
+	}
+
+	/* Initialize head and tail pointers to 0 */
+	wigig_sensing_ioc_clear_data(ctx);
+
+	ctx->dropped_bursts = 0;
+	ctx->stm.channel = ctx->stm.channel_request;
+	ctx->stm.mode = ctx->stm.mode_request;
+
 End:
+	ctx->stm.change_mode_in_progress = false;
 	mutex_unlock(&ctx->spi_lock);
+	wake_up_interruptible(&ctx->cmd_wait_q);
 	return rc;
 }
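
Taken together with the ioctl changes above, mode changes now follow a
request/commit split: the ioctl records the state, channel and mode
requests and waits, and this FIFO-ready handler commits them once the
FW has acked. A condensed sketch of the commit step, assuming it runs
with spi_lock held as above:

	wigig_sensing_change_state(ctx, &ctx->stm, ctx->stm.state_request);
	ctx->stm.channel = ctx->stm.channel_request;
	ctx->stm.mode = ctx->stm.mode_request;
	ctx->stm.change_mode_in_progress = false;
	wake_up_interruptible(&ctx->cmd_wait_q);
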
 
@@ -994,9 +979,9 @@ static int wigig_sensing_chip_data_ready(struct wigig_sensing_ctx *ctx,
 	u32 idx = 0;
 	u32 spi_transaction_size;
 	u32 available_space_to_end;
+	u32 orig_head;
 
 	if (stm_state == WIGIG_SENSING_STATE_INITIALIZED ||
-	    stm_state == WIGIG_SENSING_STATE_SPI_READY ||
 	    stm_state == WIGIG_SENSING_STATE_READY_STOPPED ||
 	    stm_state == WIGIG_SENSING_STATE_SYS_ASSERT) {
 		pr_err("Received data ready interrupt in an unexpected stm_state, disregarding\n");
@@ -1042,8 +1027,14 @@ static int wigig_sensing_chip_data_ready(struct wigig_sensing_ctx *ctx,
 	spi_transaction_size =
 		calc_spi_transaction_size(fill_level, SPI_MAX_TRANSACTION_SIZE);
 	local = d->b;
+	orig_head = local.head;
 	mutex_lock(&ctx->spi_lock);
 	while (fill_level > 0) {
+		if (ctx->stm.change_mode_in_progress) {
+			local.head = orig_head;
+			break;
+		}
+
 		bytes_to_read = (fill_level < spi_transaction_size) ?
 			fill_level : spi_transaction_size;
 		available_space_to_end =
@@ -1204,7 +1195,7 @@ static irqreturn_t wigig_sensing_dri_isr_thread(int irq, void *cookie)
 			ctx->stm.spi_malfunction = false;
 			if (ctx->stm.state == WIGIG_SENSING_STATE_INITIALIZED)
 				wigig_sensing_change_state(ctx, &ctx->stm,
-					WIGIG_SENSING_STATE_SPI_READY);
+					WIGIG_SENSING_STATE_READY_STOPPED);
 		}
 
 		pr_debug("Reading SANITY register\n");
@@ -1241,14 +1232,30 @@ static irqreturn_t wigig_sensing_dri_isr_thread(int irq, void *cookie)
 		goto bail_out;
 	}
 
+	if (spi_status.b.int_sysassert) {
+		pr_info_ratelimited("SYSASSERT INTERRUPT\n");
+		ctx->stm.fw_is_ready = false;
+
+		rc = wigig_sensing_change_state(ctx, &ctx->stm,
+				WIGIG_SENSING_STATE_SYS_ASSERT);
+		if (rc != 0 ||
+		    ctx->stm.state != WIGIG_SENSING_STATE_SYS_ASSERT)
+			pr_err("State change to WIGIG_SENSING_SYS_ASSERT failed\n");
+
+		/* Send asynchronous RESET event to application */
+		wigig_sensing_send_event(ctx, WIGIG_SENSING_EVENT_RESET);
+
+		ctx->stm.spi_malfunction = true;
+		memset(&ctx->inb_cmd, 0, sizeof(ctx->inb_cmd));
+		spi_status.v &= ~INT_SYSASSERT;
+		goto deassert_and_bail_out;
+	}
 	if (spi_status.b.int_fw_ready) {
-		pr_debug("FW READY INTERRUPT\n");
+		pr_info_ratelimited("FW READY INTERRUPT\n");
 		ctx->stm.fw_is_ready = true;
 		ctx->stm.channel_request = 0;
 		ctx->stm.burst_size = 0;
 		ctx->stm.mode = WIGIG_SENSING_MODE_STOP;
-		ctx->stm.enabled = true;
-
 		wigig_sensing_change_state(ctx, &ctx->stm,
 					   WIGIG_SENSING_STATE_READY_STOPPED);
 
@@ -1267,27 +1274,11 @@ static irqreturn_t wigig_sensing_dri_isr_thread(int irq, void *cookie)
 			pr_debug("Change mode in progress, aborting data processing\n");
 		spi_status.v &= ~INT_DATA_READY;
 	}
-	if (spi_status.b.int_sysassert) {
-		pr_debug("SYSASSERT INTERRUPT\n");
-		ctx->stm.fw_is_ready = false;
-
-		rc = wigig_sensing_change_state(ctx, &ctx->stm,
-				WIGIG_SENSING_STATE_SYS_ASSERT);
-		if (rc != 0 ||
-		    ctx->stm.state != WIGIG_SENSING_STATE_SYS_ASSERT)
-			pr_err("State change to WIGIG_SENSING_SYS_ASSERT failed\n");
-
-		/* Send asynchronous RESET event to application */
-		wigig_sensing_send_event(ctx, WIGIG_SENSING_EVENT_RESET);
-
-		ctx->stm.spi_malfunction = true;
-		spi_status.v &= ~INT_SYSASSERT;
-	}
 	if (spi_status.b.int_deep_sleep_exit ||
 	    (ctx->stm.waiting_for_deep_sleep_exit &&
 	     ctx->stm.waiting_for_deep_sleep_exit_first_pass)) {
 		if (spi_status.b.int_deep_sleep_exit)
-			pr_debug("DEEP SLEEP EXIT INTERRUPT\n");
+			pr_info_ratelimited("DEEP SLEEP EXIT INTERRUPT\n");
 
 		if (ctx->stm.waiting_for_deep_sleep_exit) {
 			additional_inb_command = ctx->inb_cmd;
@@ -1300,7 +1291,7 @@ static irqreturn_t wigig_sensing_dri_isr_thread(int irq, void *cookie)
 		spi_status.v &= ~INT_DEEP_SLEEP_EXIT;
 	}
 	if (spi_status.b.int_fifo_ready) {
-		pr_debug("FIFO READY INTERRUPT\n");
+		pr_info_ratelimited("FIFO READY INTERRUPT\n");
 		wigig_sensing_handle_fifo_ready_dri(ctx);
 
 		spi_status.v &= ~INT_FIFO_READY;
@@ -1314,6 +1305,7 @@ static irqreturn_t wigig_sensing_dri_isr_thread(int irq, void *cookie)
 		pr_err("Unexpected interrupt received, spi_status=0x%X\n",
 		       spi_status.v & CLEAR_LOW_23_BITS);
 
+deassert_and_bail_out:
 	/* Notify FW we are done with interrupt handling */
 	rc = wigig_sensing_deassert_dri(ctx, additional_inb_command);
 	if (rc)
diff --git a/drivers/misc/wigig_sensing.h b/drivers/misc/wigig_sensing.h
index eaf2023..1389c3f 100644
--- a/drivers/misc/wigig_sensing.h
+++ b/drivers/misc/wigig_sensing.h
@@ -131,7 +131,6 @@ struct spi_fifo {
 enum wigig_sensing_stm_e {
 	WIGIG_SENSING_STATE_MIN = 0,
 	WIGIG_SENSING_STATE_INITIALIZED,
-	WIGIG_SENSING_STATE_SPI_READY,
 	WIGIG_SENSING_STATE_READY_STOPPED,
 	WIGIG_SENSING_STATE_SEARCH,
 	WIGIG_SENSING_STATE_FACIAL,
@@ -144,10 +143,8 @@ enum wigig_sensing_stm_e {
 
 struct wigig_sensing_stm {
 	bool auto_recovery;
-	bool enabled;
 	bool fw_is_ready;
 	bool spi_malfunction;
-	bool sys_assert;
 	bool waiting_for_deep_sleep_exit;
 	bool waiting_for_deep_sleep_exit_first_pass;
 	bool burst_size_ready;
@@ -155,7 +152,10 @@ struct wigig_sensing_stm {
 	enum wigig_sensing_stm_e state;
 	enum wigig_sensing_mode mode;
 	u32 burst_size;
+	u32 channel;
 	u32 channel_request;
+	enum wigig_sensing_stm_e state_request;
+	enum wigig_sensing_mode mode_request;
 };
 
 struct wigig_sensing_ctx {
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 904386c..f1953c9 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1889,6 +1889,8 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
 	    err && mmc_blk_reset(md, card->host, type)) {
 		pr_err("%s: recovery failed!\n", req->rq_disk->disk_name);
 		mqrq->retries = MMC_NO_RETRIES;
+		if (mmc_card_sd(card))
+			mmc_card_set_removed(card);
 		return;
 	}
 
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index b449908..72d286c 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -172,6 +172,10 @@ void mmc_gpiod_request_cd_irq(struct mmc_host *host)
 
 	if (irq < 0)
 		host->caps |= MMC_CAP_NEEDS_POLL;
+	ret = mmc_gpio_set_cd_wake(host, true);
+	if (ret)
+		dev_err(mmc_dev(host), "%s: enabling cd irq wake failed ret=%d\n",
+				      __func__, ret);
 }
 EXPORT_SYMBOL(mmc_gpiod_request_cd_irq);
 
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 28ed3a9..d458ad2 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -865,19 +865,23 @@ static int msm_init_cm_dll(struct sdhci_host *host,
 			| CORE_CK_OUT_EN), host->ioaddr +
 			msm_host_offset->CORE_DLL_CONFIG);
 
-	wait_cnt = 50;
-	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
-	while (!(readl_relaxed(host->ioaddr +
-		msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
-		/* max. wait for 50us sec for LOCK bit to be set */
-		if (--wait_cnt == 0) {
-			pr_err("%s: %s: DLL failed to LOCK\n",
-				mmc_hostname(mmc), __func__);
-			rc = -ETIMEDOUT;
-			goto out;
+	/* For hs400es mode, no need to wait for core dll lock */
+	if (!(msm_host->enhanced_strobe &&
+				mmc_card_strobe(msm_host->mmc->card))) {
+		wait_cnt = 50;
+		/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
+		while (!(readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
+			/* max. wait of 50 us for LOCK bit to be set */
+			if (--wait_cnt == 0) {
+				pr_err("%s: %s: DLL failed to LOCK\n",
+					mmc_hostname(mmc), __func__);
+				rc = -ETIMEDOUT;
+				goto out;
+			}
+			/* wait for 1us before polling again */
+			udelay(1);
 		}
-		/* wait for 1us before polling again */
-		udelay(1);
 	}
 
 out:
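
The open-coded 50-iteration wait above predates the iopoll helpers; an
equivalent wait using readl_poll_timeout_atomic() (suitable here since
the caller holds a spinlock) would look roughly like this, with the
same register and flag names assumed:

	u32 status;
	int rc;

	/* poll DLL_STATUS every 1 us, give up after 50 us */
	rc = readl_poll_timeout_atomic(host->ioaddr +
			msm_host_offset->CORE_DLL_STATUS,
			status, (status & CORE_DLL_LOCK), 1, 50);
	if (rc == -ETIMEDOUT)
		pr_err("%s: DLL failed to LOCK\n", mmc_hostname(mmc));
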
@@ -2225,33 +2229,6 @@ static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
 	return rc;
 }
 
-/*
- * Internal work. Work to set 0 bandwidth for msm bus.
- */
-static void sdhci_msm_bus_work(struct work_struct *work)
-{
-	struct sdhci_msm_host *msm_host;
-	struct sdhci_host *host;
-	unsigned long flags;
-
-	msm_host = container_of(work, struct sdhci_msm_host,
-				msm_bus_vote.vote_work.work);
-	host =  platform_get_drvdata(msm_host->pdev);
-
-	if (!msm_host->msm_bus_vote.client_handle)
-		return;
-
-	spin_lock_irqsave(&host->lock, flags);
-	/* don't vote for 0 bandwidth if any request is in progress */
-	if (!host->mrq) {
-		sdhci_msm_bus_set_vote(msm_host,
-			msm_host->msm_bus_vote.min_bw_vote, &flags);
-	} else
-		pr_warn("%s: %s: Transfer in progress. skipping bus voting to 0 bandwidth\n",
-			   mmc_hostname(host->mmc), __func__);
-	spin_unlock_irqrestore(&host->lock, flags);
-}
-
 /*****************************************************************************\
  *                                                                           *
  * MSM Command Queue Engine (CQE)                                            *
@@ -2437,7 +2414,7 @@ static void sdhci_msm_cqe_add_host(struct sdhci_host *host,
- * This function cancels any scheduled delayed work and sets the bus
- * vote based on bw (bandwidth) argument.
+ * This function sets the bus vote based on the bw (bandwidth)
+ * argument.
  */
-static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
+static void sdhci_msm_bus_get_and_set_vote(struct sdhci_host *host,
 						unsigned int bw)
 {
 	int vote;
@@ -2445,31 +2422,12 @@ static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct sdhci_msm_host *msm_host = pltfm_host->priv;
 
-	cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
 	spin_lock_irqsave(&host->lock, flags);
 	vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
 	sdhci_msm_bus_set_vote(msm_host, vote, &flags);
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
-#define MSM_MMC_BUS_VOTING_DELAY	200 /* msecs */
-
-/* This function queues a work which will set the bandwidth requiement to 0 */
-static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
-{
-	unsigned long flags;
-	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = pltfm_host->priv;
-
-	spin_lock_irqsave(&host->lock, flags);
-	if (msm_host->msm_bus_vote.min_bw_vote !=
-		msm_host->msm_bus_vote.curr_vote)
-		queue_delayed_work(system_wq,
-				   &msm_host->msm_bus_vote.vote_work,
-				   msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
-	spin_unlock_irqrestore(&host->lock, flags);
-}
-
 static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
 				struct platform_device *pdev)
 {
@@ -2540,22 +2498,11 @@ static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
 	if (!msm_host->msm_bus_vote.client_handle)
 		return;
 
-	bw = sdhci_get_bw_required(host, ios);
 	if (enable) {
-		sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
+		bw = sdhci_get_bw_required(host, ios);
+		sdhci_msm_bus_get_and_set_vote(host, bw);
 	} else {
-		/*
-		 * If clock gating is enabled, then remove the vote
-		 * immediately because clocks will be disabled only
-		 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
-		 * additional delay is required to remove the bus vote.
-		 */
-#ifdef CONFIG_MMC_CLKGATE
-		if (host->mmc->clkgate_delay)
-			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
-		else
-#endif
-			sdhci_msm_bus_queue_work(host);
+		sdhci_msm_bus_get_and_set_vote(host, 0);
 	}
 }
 
@@ -3475,18 +3422,17 @@ static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
 	sdhci_msm_registers_restore(host);
 	goto out;
 
-disable_bus_aggr_clk:
-	if (!IS_ERR(msm_host->bus_aggr_clk))
-		clk_disable_unprepare(msm_host->bus_aggr_clk);
 disable_host_clk:
 	if (!IS_ERR(msm_host->clk))
 		clk_disable_unprepare(msm_host->clk);
+disable_bus_aggr_clk:
+	if (!IS_ERR(msm_host->bus_aggr_clk))
+		clk_disable_unprepare(msm_host->bus_aggr_clk);
 disable_pclk:
 	if (!IS_ERR(msm_host->pclk))
 		clk_disable_unprepare(msm_host->pclk);
 remove_vote:
-	if (msm_host->msm_bus_vote.client_handle)
-		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+	sdhci_msm_bus_voting(host, 0);
 out:
 	return rc;
 }
@@ -3504,6 +3450,8 @@ static void sdhci_msm_disable_controller_clock(struct sdhci_host *host)
 			clk_disable_unprepare(msm_host->bus_aggr_clk);
 		if (!IS_ERR(msm_host->pclk))
 			clk_disable_unprepare(msm_host->pclk);
+		if (!IS_ERR(msm_host->ice_clk))
+			clk_disable_unprepare(msm_host->ice_clk);
 		sdhci_msm_bus_voting(host, 0);
 		atomic_set(&msm_host->controller_clock, 0);
 		pr_debug("%s: %s: disabled controller clock\n",
@@ -3590,8 +3538,6 @@ static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
 			clk_disable_unprepare(msm_host->sleep_clk);
 		if (!IS_ERR_OR_NULL(msm_host->ff_clk))
 			clk_disable_unprepare(msm_host->ff_clk);
-		if (!IS_ERR(msm_host->ice_clk))
-			clk_disable_unprepare(msm_host->ice_clk);
 		if (!IS_ERR_OR_NULL(msm_host->bus_clk))
 			clk_disable_unprepare(msm_host->bus_clk);
 		sdhci_msm_disable_controller_clock(host);
@@ -3615,12 +3561,48 @@ static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
 		clk_disable_unprepare(msm_host->pclk);
 	atomic_set(&msm_host->controller_clock, 0);
 remove_vote:
-	if (msm_host->msm_bus_vote.client_handle)
-		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+	sdhci_msm_bus_voting(host, 0);
 out:
 	return rc;
 }
 
+/*
+ * After MCLK ungating, toggle the FIFO write clock to get
+ * the FIFO pointers and flags to valid state.
+ */
+static void sdhci_msm_toggle_fifo_write_clk(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	struct mmc_card *card = host->mmc->card;
+
+	if (msm_host->tuning_done ||
+			(card && mmc_card_strobe(card) &&
+			msm_host->enhanced_strobe)) {
+		/*
+		 * set HC_REG_DLL_CONFIG_3[1] to select MCLK as
+		 * DLL input clock
+		 */
+		writel_relaxed(((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG_3))
+			| RCLK_TOGGLE), host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG_3);
+		/* ensure the above write completes before toggling the bit back */
+		wmb();
+		udelay(2);
+		/*
+		 * clear HC_REG_DLL_CONFIG_3[1] to select RCLK as
+		 * DLL input clock
+		 */
+		writel_relaxed(((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG_3))
+			& ~RCLK_TOGGLE), host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG_3);
+	}
+}
+
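
The helper factored out above is a plain read-modify-write toggle with
a write barrier between the two MMIO accesses. Reduced to its
essentials, with addr standing in for the DLL_CONFIG_3 register
address:

	u32 val = readl_relaxed(addr);

	writel_relaxed(val | RCLK_TOGGLE, addr);	/* select MCLK */
	wmb();		/* order the writes; the same bit is toggled back-to-back */
	udelay(2);
	writel_relaxed(val & ~RCLK_TOGGLE, addr);	/* back to RCLK */
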
 static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
 {
 	int rc;
@@ -3727,43 +3709,28 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
 					| CORE_HC_SELECT_IN_EN), host->ioaddr +
 					msm_host_offset->CORE_VENDOR_SPEC);
 		}
-		/*
-		 * After MCLK ugating, toggle the FIFO write clock to get
-		 * the FIFO pointers and flags to valid state.
-		 */
-		if (msm_host->tuning_done ||
-				(card && mmc_card_strobe(card) &&
-				msm_host->enhanced_strobe)) {
-			/*
-			 * set HC_REG_DLL_CONFIG_3[1] to select MCLK as
-			 * DLL input clock
-			 */
-			writel_relaxed(((readl_relaxed(host->ioaddr +
-				msm_host_offset->CORE_DLL_CONFIG_3))
-				| RCLK_TOGGLE), host->ioaddr +
-				msm_host_offset->CORE_DLL_CONFIG_3);
-			/* ensure above write as toggling same bit quickly */
-			wmb();
-			udelay(2);
-			/*
-			 * clear HC_REG_DLL_CONFIG_3[1] to select RCLK as
-			 * DLL input clock
-			 */
-			writel_relaxed(((readl_relaxed(host->ioaddr +
-				msm_host_offset->CORE_DLL_CONFIG_3))
-				& ~RCLK_TOGGLE), host->ioaddr +
-				msm_host_offset->CORE_DLL_CONFIG_3);
-		}
+
+		sdhci_msm_toggle_fifo_write_clk(host);
+
 		if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
 			/*
 			 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
 			 * CORE_DLL_STATUS to be set.  This should get set
-			 * with in 15 us at 200 MHz.
+			 * within 15 us at 200 MHz.
+			 * No need to check for DLL lock for HS400es mode
 			 */
-			rc = readl_poll_timeout(host->ioaddr +
+			if (card && mmc_card_strobe(card) &&
+						msm_host->enhanced_strobe) {
+				rc = readl_poll_timeout(host->ioaddr +
+					msm_host_offset->CORE_DLL_STATUS,
+					dll_lock, (dll_lock &
+					CORE_DDR_DLL_LOCK), 10, 1000);
+			} else {
+				rc = readl_poll_timeout(host->ioaddr +
 					msm_host_offset->CORE_DLL_STATUS,
 					dll_lock, (dll_lock & (CORE_DLL_LOCK |
 					CORE_DDR_DLL_LOCK)), 10, 1000);
+			}
 			if (rc == -ETIMEDOUT)
 				pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
 						mmc_hostname(host->mmc),
@@ -5157,9 +5124,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
 	if (ret)
 		goto sleep_clk_disable;
 
-	if (msm_host->msm_bus_vote.client_handle)
-		INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
-				  sdhci_msm_bus_work);
 	sdhci_msm_bus_voting(host, 1);
 
 	/* Setup regulators */
@@ -5428,6 +5392,13 @@ static int sdhci_msm_probe(struct platform_device *pdev)
 		goto vreg_deinit;
 	}
 
+	/*
+	 * Set max_busy_timeout to 0 so that an R1b command (sanitize,
+	 * erase, etc.) whose requested busy timeout exceeds the host's
+	 * maximum is not converted to R1, which would require polling.
+	 */
+	host->mmc->max_busy_timeout = 0;
+
 	msm_host->pltfm_init_done = true;
 
 	pm_runtime_set_active(&pdev->dev);
@@ -5482,8 +5453,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
 vreg_deinit:
 	sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
 bus_unregister:
-	if (msm_host->msm_bus_vote.client_handle)
-		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+	sdhci_msm_bus_voting(host, 0);
 	sdhci_msm_bus_unregister(msm_host);
 sleep_clk_disable:
 	if (!IS_ERR(msm_host->sleep_clk))
@@ -5568,10 +5538,9 @@ static int sdhci_msm_remove(struct platform_device *pdev)
 	sdhci_msm_setup_pins(pdata, true);
 	sdhci_msm_setup_pins(pdata, false);
 
-	if (msm_host->msm_bus_vote.client_handle) {
-		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+	sdhci_msm_bus_voting(host, 0);
+	if (msm_host->msm_bus_vote.client_handle)
 		sdhci_msm_bus_unregister(msm_host);
-	}
 
 	sdhci_pltfm_free(pdev);
 
@@ -5643,22 +5612,13 @@ static int sdhci_msm_runtime_suspend(struct device *dev)
 defer_disable_host_irq:
 	disable_irq(msm_host->pwr_irq);
 
-	/*
-	 * Remove the vote immediately only if clocks are off in which
-	 * case we might have queued work to remove vote but it may not
-	 * be completed before runtime suspend or system suspend.
-	 */
-	if (!atomic_read(&msm_host->clks_on)) {
-		if (msm_host->msm_bus_vote.client_handle)
-			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
-	}
-
 	if (host->is_crypto_en) {
 		ret = sdhci_msm_ice_suspend(host);
 		if (ret < 0)
 			pr_err("%s: failed to suspend crypto engine %d\n",
 					mmc_hostname(host->mmc), ret);
 	}
+	sdhci_msm_disable_controller_clock(host);
 	trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
 			ktime_to_us(ktime_sub(ktime_get(), start)));
 	return 0;
@@ -5672,13 +5632,18 @@ static int sdhci_msm_runtime_resume(struct device *dev)
 	int ret;
 	ktime_t start = ktime_get();
 
+	ret = sdhci_msm_enable_controller_clock(host);
+	if (ret) {
+		pr_err("%s: Failed to enable reqd clocks\n",
+				mmc_hostname(host->mmc));
+		goto skip_ice_resume;
+	}
+
+	if (host->mmc &&
+			(host->mmc->ios.timing == MMC_TIMING_MMC_HS400))
+		sdhci_msm_toggle_fifo_write_clk(host);
+
 	if (host->is_crypto_en) {
-		ret = sdhci_msm_enable_controller_clock(host);
-		if (ret) {
-			pr_err("%s: Failed to enable reqd clocks\n",
-					mmc_hostname(host->mmc));
-			goto skip_ice_resume;
-		}
 		ret = sdhci_msm_ice_resume(host);
 		if (ret)
 			pr_err("%s: failed to resume crypto engine %d\n",
@@ -5702,16 +5667,10 @@ static int sdhci_msm_runtime_resume(struct device *dev)
 static int sdhci_msm_suspend(struct device *dev)
 {
 	struct sdhci_host *host = dev_get_drvdata(dev);
-	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = pltfm_host->priv;
 	int ret = 0;
 	int sdio_cfg = 0;
 	ktime_t start = ktime_get();
 
-	if (gpio_is_valid(msm_host->pdata->status_gpio) &&
-			 (msm_host->mmc->slot.cd_irq >= 0))
-		disable_irq(msm_host->mmc->slot.cd_irq);
-
 	if (pm_runtime_suspended(dev)) {
 		pr_debug("%s: %s: already runtime suspended\n",
 		mmc_hostname(host->mmc), __func__);
@@ -5719,7 +5678,6 @@ static int sdhci_msm_suspend(struct device *dev)
 	}
 	ret = sdhci_msm_runtime_suspend(dev);
 out:
-	sdhci_msm_disable_controller_clock(host);
 	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
 		sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
 		if (sdio_cfg)
@@ -5734,16 +5692,10 @@ static int sdhci_msm_suspend(struct device *dev)
 static int sdhci_msm_resume(struct device *dev)
 {
 	struct sdhci_host *host = dev_get_drvdata(dev);
-	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = pltfm_host->priv;
 	int ret = 0;
 	int sdio_cfg = 0;
 	ktime_t start = ktime_get();
 
-	if (gpio_is_valid(msm_host->pdata->status_gpio) &&
-			 (msm_host->mmc->slot.cd_irq >= 0))
-		enable_irq(msm_host->mmc->slot.cd_irq);
-
 	if (pm_runtime_suspended(dev)) {
 		pr_debug("%s: %s: runtime suspended, defer system resume\n",
 		mmc_hostname(host->mmc), __func__);
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index 051dfa8..87f651c 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -158,7 +158,6 @@ struct sdhci_msm_bus_vote {
 	int min_bw_vote;
 	int max_bw_vote;
 	bool is_max_bw_needed;
-	struct delayed_work vote_work;
 	struct device_attribute max_bus_bw;
 };
 
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index b215129..016e209 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -257,6 +257,8 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
 
 	unregister_netdevice(dev);
 
+	qmi_rmnet_qos_exit_post();
+
 	rmnet_unregister_real_device(real_dev, port);
 }
 
@@ -268,6 +270,7 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
 	struct rmnet_port *port;
 	unsigned long bkt_ep;
 	LIST_HEAD(list);
+	HLIST_HEAD(cleanup_list);
 
 	if (!rmnet_is_real_dev_registered(real_dev))
 		return;
@@ -284,14 +287,23 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
 		rmnet_vnd_dellink(ep->mux_id, port, ep);
 
 		hlist_del_init_rcu(&ep->hlnode);
-		synchronize_rcu();
+		hlist_add_head(&ep->hlnode, &cleanup_list);
+	}
+
+	synchronize_rcu();
+
+	hlist_for_each_entry_safe(ep, tmp_ep, &cleanup_list, hlnode) {
+		hlist_del(&ep->hlnode);
 		kfree(ep);
 	}
+
 	/* Unregistering devices in context before freeing port.
 	 * If this API becomes non-context their order should switch.
 	 */
 	unregister_netdevice_many(&list);
 
+	qmi_rmnet_qos_exit_post();
+
 	rmnet_unregister_real_device(real_dev, port);
 }
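
The rework above replaces one synchronize_rcu() per endpoint with a
single grace period for the whole batch: entries are unpublished with
hlist_del_init_rcu(), parked on a private list, and freed only after
synchronize_rcu() returns. The pattern in isolation, with illustrative
names:

	HLIST_HEAD(cleanup_list);
	struct rmnet_endpoint *ep;
	struct hlist_node *tmp;
	unsigned long bkt;

	hash_for_each_safe(port->muxed_ep, bkt, tmp, ep, hlnode) {
		hlist_del_init_rcu(&ep->hlnode);	/* unpublish */
		hlist_add_head(&ep->hlnode, &cleanup_list);
	}
	synchronize_rcu();				/* one grace period for all */
	hlist_for_each_entry_safe(ep, tmp, &cleanup_list, hlnode) {
		hlist_del(&ep->hlnode);
		kfree(ep);
	}
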
 
@@ -711,6 +723,17 @@ struct net_device *rmnet_get_real_dev(void *port)
 }
 EXPORT_SYMBOL(rmnet_get_real_dev);
 
+int rmnet_get_dlmarker_info(void *port)
+{
+	if (!port)
+		return 0;
+
+	return ((struct rmnet_port *)port)->data_format &
+		(RMNET_INGRESS_FORMAT_DL_MARKER_V1 |
+		RMNET_INGRESS_FORMAT_DL_MARKER_V2);
+}
+EXPORT_SYMBOL(rmnet_get_dlmarker_info);
+
 #endif
 
 /* Startup/Shutdown */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index 2359401..1b55d9c 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -145,6 +145,7 @@ struct rmnet_priv_stats {
 	u64 csum_sw;
 	u64 csum_hw;
 	struct rmnet_coal_stats coal;
+	u64 ul_prio;
 };
 
 struct rmnet_priv {
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_genl.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_genl.c
index 1eb72ca..df92f4b 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_genl.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_genl.c
@@ -48,7 +48,9 @@ struct genl_family rmnet_core_genl_family = {
 #define RMNET_PID_STATS_HT_SIZE (8)
 #define RMNET_PID_STATS_HT rmnet_pid_ht
 DEFINE_HASHTABLE(rmnet_pid_ht, RMNET_PID_STATS_HT_SIZE);
-spinlock_t rmnet_pid_ht_splock; /* Spinlock definition for pid hash table */
+
+/* Spinlock definition for pid hash table */
+static DEFINE_SPINLOCK(rmnet_pid_ht_splock);
 
 #define RMNET_GENL_SEC_TO_MSEC(x)   ((x) * 1000)
 #define RMNET_GENL_SEC_TO_NSEC(x)   ((x) * 1000000000)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index ad9453b..f239139 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -373,7 +373,8 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
 		additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
 		csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
-	} else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) {
+	} else if ((port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) ||
+		   (port->data_format & RMNET_EGRESS_FORMAT_PRIORITY)) {
 		additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
 		csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
 	}
@@ -391,7 +392,8 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 #endif
 
 	if (csum_type)
-		rmnet_map_checksum_uplink_packet(skb, orig_dev, csum_type);
+		rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
+						 csum_type);
 
 	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0,
 					      port);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 04048f6..9d05381 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -71,7 +71,9 @@ struct rmnet_map_header {
 struct rmnet_map_v5_csum_header {
 	u8  next_hdr:1;
 	u8  header_type:7;
-	u8  hw_reserved:7;
+	u8  hw_reserved:5;
+	u8  priority:1;
+	u8  hw_reserved_bit:1;
 	u8  csum_valid_required:1;
 	__be16 reserved;
 } __aligned(1);
@@ -251,6 +253,7 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
 void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
 int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
 void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+				      struct rmnet_port *port,
 				      struct net_device *orig_dev,
 				      int csum_type);
 bool rmnet_map_v5_csum_buggy(struct rmnet_map_v5_coal_header *coal_hdr);
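
The header change above carves the uplink-priority flag out of the old 7-bit hw_reserved field, so a dedicated bit in the header's second byte now carries it. A small userspace sketch of that layout, assuming the common little-endian ABI where bitfields are allocated LSB-first (the struct mirrors byte 1 only; the name is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	/* Byte 1 of the MAP v5 csum header, LSB-first bitfield allocation:
	 *   bits 0..4  hw_reserved
	 *   bit  5     priority            <- set by rmnet_map_v5_check_priority()
	 *   bit  6     hw_reserved_bit
	 *   bit  7     csum_valid_required
	 */
	struct map_v5_byte1 {
		uint8_t hw_reserved:5;
		uint8_t priority:1;
		uint8_t hw_reserved_bit:1;
		uint8_t csum_valid_required:1;
	};

	int main(void)
	{
		struct map_v5_byte1 b = { 0 };

		b.priority = 1;
		printf("raw byte: 0x%02x\n", *(const uint8_t *)&b); /* 0x20 on LE */
		return 0;
	}
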
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index da5e47c..3488cf4 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -481,7 +481,20 @@ void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
 	priv->stats.csum_sw++;
 }
 
+static void rmnet_map_v5_check_priority(struct sk_buff *skb,
+					struct net_device *orig_dev,
+					struct rmnet_map_v5_csum_header *hdr)
+{
+	struct rmnet_priv *priv = netdev_priv(orig_dev);
+
+	if (skb->priority) {
+		priv->stats.ul_prio++;
+		hdr->priority = 1;
+	}
+}
+
 void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
+					 struct rmnet_port *port,
 					 struct net_device *orig_dev)
 {
 	struct rmnet_priv *priv = netdev_priv(orig_dev);
@@ -492,6 +505,13 @@ void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
 	memset(ul_header, 0, sizeof(*ul_header));
 	ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;
 
+	if (port->data_format & RMNET_EGRESS_FORMAT_PRIORITY)
+		rmnet_map_v5_check_priority(skb, orig_dev, ul_header);
+
+	/* Allow priority w/o csum offload */
+	if (!(port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5))
+		return;
+
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		void *iph = (char *)ul_header + sizeof(*ul_header);
 		void *trans;
@@ -532,6 +552,7 @@ void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
  * packets that are supported for UL checksum offload.
  */
 void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+				      struct rmnet_port *port,
 				      struct net_device *orig_dev,
 				      int csum_type)
 {
@@ -540,7 +561,7 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
 		rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
 		break;
 	case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
-		rmnet_map_v5_checksum_uplink_packet(skb, orig_dev);
+		rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
 		break;
 	default:
 		break;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
index 2ce29bf..9b4590c 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
@@ -18,6 +18,9 @@
 #define RMNET_INGRESS_FORMAT_DL_MARKER  (RMNET_INGRESS_FORMAT_DL_MARKER_V1 |\
 RMNET_INGRESS_FORMAT_DL_MARKER_V2)
 
+/* UL Packet prioritization */
+#define RMNET_EGRESS_FORMAT_PRIORITY            BIT(28)
+
 /* Power save feature*/
 #define RMNET_INGRESS_FORMAT_PS                 BIT(27)
 #define RMNET_FORMAT_PS_NOTIF                   BIT(26)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 59aa361..6186e65 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -119,8 +119,7 @@ static void rmnet_vnd_uninit(struct net_device *dev)
 
 	qos = priv->qos_info;
 	RCU_INIT_POINTER(priv->qos_info, NULL);
-	synchronize_rcu();
-	qmi_rmnet_qos_exit(dev, qos);
+	qmi_rmnet_qos_exit_pre(qos);
 }
 
 static void rmnet_get_stats64(struct net_device *dev,
@@ -224,6 +223,7 @@ static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
 	"Coalescing packets over VEID1",
 	"Coalescing packets over VEID2",
 	"Coalescing packets over VEID3",
+	"Uplink priority packets",
 };
 
 static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = {
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 05d8c8e..8a0bfa4 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -34,6 +34,8 @@
  */
 #define WIL_EDMG_CHANNELS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
 
+#define WIL_DISABLE_EDMG 255
+
 bool disable_ap_sme;
 module_param(disable_ap_sme, bool, 0444);
 MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME");
@@ -150,13 +152,28 @@ enum wil_nl_60g_evt_type {
 	NL_60G_EVT_FW_WMI,
 	NL_60G_EVT_DRIVER_SHUTOWN,
 	NL_60G_EVT_DRIVER_DEBUG_EVENT,
+	NL_60G_EVT_DRIVER_GENERIC,
 };
 
+enum wil_nl_60g_generic_evt {
+	NL_60G_GEN_EVT_FW_STATE,
+};
+
+struct wil_nl_60g_generic_event { /* NL_60G_EVT_DRIVER_GENERIC */
+	u32 evt_id; /* wil_nl_60g_generic_evt */
+} __packed;
+
+struct wil_nl_60g_fw_state_event {
+	struct wil_nl_60g_generic_event hdr;
+	u32 fw_state; /* wil_fw_state */
+} __packed;
+
 enum wil_nl_60g_debug_cmd {
 	NL_60G_DBG_FORCE_WMI_SEND,
 	NL_60G_GEN_RADAR_ALLOC_BUFFER,
 	NL_60G_GEN_FW_RESET,
 	NL_60G_GEN_GET_DRIVER_CAPA,
+	NL_60G_GEN_GET_FW_STATE,
 };
 
 struct wil_nl_60g_send_receive_wmi {
@@ -530,6 +547,8 @@ static const char * const key_usage_str[] = {
 	[WMI_KEY_USE_PAIRWISE]	= "PTK",
 	[WMI_KEY_USE_RX_GROUP]	= "RX_GTK",
 	[WMI_KEY_USE_TX_GROUP]	= "TX_GTK",
+	[WMI_KEY_USE_STORE_PTK]	= "STORE_PTK",
+	[WMI_KEY_USE_APPLY_PTK]	= "APPLY_PTK",
 };
 
 int wil_iftype_nl2wmi(enum nl80211_iftype type)
@@ -588,6 +607,9 @@ int wil_spec2wmi_ch(u8 spec_ch, u8 *wmi_ch)
 	case 12:
 		*wmi_ch = WMI_CHANNEL_12;
 		break;
+	case WIL_DISABLE_EDMG:
+		*wmi_ch = 0;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -741,7 +763,7 @@ static int wil_cfg80211_get_station(struct wiphy *wiphy,
 /*
  * Find @idx-th active STA for specific MID for station dump.
  */
-static int wil_find_cid_by_idx(struct wil6210_priv *wil, u8 mid, int idx)
+int wil_find_cid_by_idx(struct wil6210_priv *wil, u8 mid, int idx)
 {
 	int i;
 
@@ -1781,6 +1803,7 @@ void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage,
 		return;
 
 	switch (key_usage) {
+	case WMI_KEY_USE_STORE_PTK:
 	case WMI_KEY_USE_PAIRWISE:
 		for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
 			cc = &cs->tid_crypto_rx[tid].key_id[key_index];
@@ -1878,6 +1901,16 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
 		return -EINVAL;
 	}
 
+	spin_lock_bh(&wil->eap_lock);
+	if (pairwise && wdev->iftype == NL80211_IFTYPE_STATION &&
+	    (vif->ptk_rekey_state == WIL_REKEY_M3_RECEIVED ||
+	     vif->ptk_rekey_state == WIL_REKEY_WAIT_M4_SENT)) {
+		key_usage = WMI_KEY_USE_STORE_PTK;
+		vif->ptk_rekey_state = WIL_REKEY_WAIT_M4_SENT;
+		wil_dbg_misc(wil, "Store EAPOL key\n");
+	}
+	spin_unlock_bh(&wil->eap_lock);
+
 	rc = wmi_add_cipher_key(vif, key_index, mac_addr, params->key_len,
 				params->key, key_usage);
 	if (!rc && !IS_ERR(cs)) {
@@ -2331,6 +2364,11 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
 	struct wil6210_vif *vif = ndev_to_vif(ndev);
 	int rc;
 	u32 privacy = 0;
+	u16 len = 0, proberesp_len = 0;
+	u8 *ies = NULL, *proberesp;
+	bool ssid_changed = false;
+	const u8 *ie;
 
 	wil_dbg_misc(wil, "change_beacon, mid=%d\n", vif->mid);
 	wil_print_bcon_data(bcon);
@@ -2343,6 +2381,27 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
 	memcpy(vif->ssid, wdev->ssid, wdev->ssid_len);
 	vif->ssid_len = wdev->ssid_len;
 
+	/* extract updated SSID from the probe response IE */
+	proberesp = _wil_cfg80211_get_proberesp_ies(bcon->probe_resp,
+						    bcon->probe_resp_len,
+						    &proberesp_len);
+	rc = _wil_cfg80211_merge_extra_ies(proberesp,
+					   proberesp_len,
+					   bcon->proberesp_ies,
+					   bcon->proberesp_ies_len,
+					   &ies, &len);
+
+	if (!rc) {
+		ie = cfg80211_find_ie(WLAN_EID_SSID, ies, len);
+		if (ie && ie[1] <= IEEE80211_MAX_SSID_LEN &&
+		    (ie[1] != vif->ssid_len ||
+		     memcmp(&ie[2], vif->ssid, ie[1]))) {
+			memcpy(vif->ssid, &ie[2], ie[1]);
+			vif->ssid_len = ie[1];
+			ssid_changed = true;
+		}
+	}
+
+	/* the merged IE buffer is allocated by the helper; free it here */
+	kfree(ies);
+
 	/* in case privacy has changed, need to restart the AP */
 	if (vif->privacy != privacy) {
 		wil_dbg_misc(wil, "privacy changed %d=>%d. Restarting AP\n",
@@ -2356,9 +2415,20 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
 					    vif->hidden_ssid,
 					    vif->pbss);
 	} else {
+		if (ssid_changed) {
+			rc = wmi_set_ssid(vif, vif->ssid_len, vif->ssid);
+			if (rc)
+				goto out;
+		}
 		rc = _wil_cfg80211_set_ies(vif, bcon);
 	}
 
+	if (ssid_changed) {
+		wdev->ssid_len = vif->ssid_len;
+		memcpy(wdev->ssid, vif->ssid, vif->ssid_len);
+	}
+
+out:
 	return rc;
 }
 
@@ -2931,6 +3001,23 @@ wil_cfg80211_update_ft_ies(struct wiphy *wiphy, struct net_device *dev,
 	return rc;
 }
 
+static int wil_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy,
+					    struct net_device *dev,
+					    s32 rssi_thold, u32 rssi_hyst)
+{
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+	int rc;
+
+	wil->cqm_rssi_thold = rssi_thold;
+
+	rc = wmi_set_cqm_rssi_config(wil, rssi_thold, rssi_hyst);
+	if (rc)
+		/* reset stored value upon failure */
+		wil->cqm_rssi_thold = 0;
+
+	return rc;
+}
+
 static const struct cfg80211_ops wil_cfg80211_ops = {
 	.add_virtual_intf = wil_cfg80211_add_iface,
 	.del_virtual_intf = wil_cfg80211_del_iface,
@@ -2962,6 +3049,7 @@ static const struct cfg80211_ops wil_cfg80211_ops = {
 	.start_p2p_device = wil_cfg80211_start_p2p_device,
 	.stop_p2p_device = wil_cfg80211_stop_p2p_device,
 	.set_power_mgmt = wil_cfg80211_set_power_mgmt,
+	.set_cqm_rssi_config = wil_cfg80211_set_cqm_rssi_config,
 	.suspend = wil_cfg80211_suspend,
 	.resume = wil_cfg80211_resume,
 	.sched_scan_start = wil_cfg80211_sched_scan_start,
@@ -3703,6 +3791,62 @@ static int wil_brp_set_ant_limit(struct wiphy *wiphy, struct wireless_dev *wdev,
 					 antenna_num_limit);
 }
 
+static void wil_nl_60g_fw_state_evt(struct wil6210_priv *wil)
+{
+	struct sk_buff *vendor_event = NULL;
+	struct wil_nl_60g_event *evt;
+	struct wil_nl_60g_fw_state_event *fw_state_event;
+
+	if (!wil->publish_nl_evt)
+		return;
+
+	wil_dbg_misc(wil, "report fw_state event to user-space (%d)\n",
+		     wil->fw_state);
+
+	evt = kzalloc(sizeof(*evt) + sizeof(*fw_state_event), GFP_KERNEL);
+	if (!evt)
+		return;
+
+	evt->evt_type = NL_60G_EVT_DRIVER_GENERIC;
+	evt->buf_len = sizeof(*fw_state_event);
+
+	fw_state_event = (struct wil_nl_60g_fw_state_event *)evt->buf;
+	fw_state_event->hdr.evt_id = NL_60G_GEN_EVT_FW_STATE;
+	fw_state_event->fw_state = wil->fw_state;
+
+	vendor_event = cfg80211_vendor_event_alloc(wil_to_wiphy(wil),
+						   NULL,
+						   4 + NLMSG_HDRLEN +
+						   sizeof(*evt) +
+						   sizeof(*fw_state_event),
+						   QCA_EVENT_UNSPEC_INDEX,
+						   GFP_KERNEL);
+	if (!vendor_event) {
+		wil_err(wil, "failed to allocate vendor_event\n");
+		goto out;
+	}
+
+	if (nla_put(vendor_event, WIL_ATTR_60G_BUF,
+		    sizeof(*evt) + sizeof(*fw_state_event), evt)) {
+		wil_err(wil, "failed to fill WIL_ATTR_60G_BUF\n");
+		kfree_skb(vendor_event);
+		goto out;
+	}
+
+	cfg80211_vendor_event(vendor_event, GFP_KERNEL);
+
+out:
+	kfree(evt);
+}
+
+void wil_nl_60g_fw_state_change(struct wil6210_priv *wil,
+				enum wil_fw_state fw_state)
+{
+	wil_dbg_misc(wil, "fw_state change:%d => %d", wil->fw_state, fw_state);
+	wil->fw_state = fw_state;
+	wil_nl_60g_fw_state_evt(wil);
+}
+
 static int wil_nl_60g_handle_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
 				 const void *data, int data_len)
 {
@@ -3744,6 +3888,7 @@ static int wil_nl_60g_handle_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
 
 		wil_dbg_wmi(wil, "Publish wmi event %s\n",
 			    publish ? "enabled" : "disabled");
+		wil_nl_60g_fw_state_evt(wil);
 		break;
 	case NL_60G_CMD_DEBUG:
 		if (!tb[WIL_ATTR_60G_BUF]) {
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index cbd2ad5..78920dd 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -378,6 +378,7 @@ static void _wil6210_disconnect_complete(struct wil6210_vif *vif,
 		}
 		clear_bit(wil_vif_fwconnecting, vif->status);
 		clear_bit(wil_vif_ft_roam, vif->status);
+		vif->ptk_rekey_state = WIL_REKEY_IDLE;
 
 		break;
 	case NL80211_IFTYPE_AP:
@@ -620,6 +621,12 @@ static void wil_fw_error_worker(struct work_struct *work)
 	struct net_device *ndev = wil->main_ndev;
 
 	wil_dbg_misc(wil, "fw error worker\n");
+	if (wil->fw_state == WIL_FW_STATE_READY)
+		wil_nl_60g_fw_state_change(wil,
+					   WIL_FW_STATE_ERROR);
+	else
+		wil_nl_60g_fw_state_change(wil,
+					   WIL_FW_STATE_ERROR_BEFORE_READY);
 
 	if (!ndev || !(ndev->flags & IFF_UP)) {
 		wil_info(wil, "No recovery - interface is down\n");
@@ -749,6 +756,8 @@ int wil_priv_init(struct wil6210_priv *wil)
 	INIT_LIST_HEAD(&wil->pending_wmi_ev);
 	spin_lock_init(&wil->wmi_ev_lock);
 	spin_lock_init(&wil->net_queue_lock);
+	spin_lock_init(&wil->eap_lock);
+
 	init_waitqueue_head(&wil->wq);
 	init_rwsem(&wil->mem_lock);
 
@@ -795,6 +804,7 @@ int wil_priv_init(struct wil6210_priv *wil)
 	wil->rx_buff_id_count = WIL_RX_BUFF_ARR_SIZE_DEFAULT;
 
 	wil->amsdu_en = 1;
+	wil->fw_state = WIL_FW_STATE_DOWN;
 
 	return 0;
 
@@ -1505,6 +1515,7 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
 	} else {
 		wil_info(wil, "FW ready after %d ms. HW version 0x%08x\n",
 			 jiffies_to_msecs(to-left), wil->hw_version);
+		wil_nl_60g_fw_state_change(wil, WIL_FW_STATE_READY);
 	}
 	return 0;
 }
@@ -1700,6 +1711,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 
 		ether_addr_copy(ndev->perm_addr, mac);
 		ether_addr_copy(ndev->dev_addr, ndev->perm_addr);
+		wil->fw_state = WIL_FW_STATE_UNKNOWN;
 		return 0;
 	}
 
@@ -1734,6 +1746,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 				rc);
 	}
 
+	wil_nl_60g_fw_state_change(wil, WIL_FW_STATE_DOWN);
 	set_bit(wil_status_resetting, wil->status);
 	mutex_lock(&wil->vif_mutex);
 	wil_abort_scan_all_vifs(wil, false);
@@ -1745,6 +1758,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 			cancel_work_sync(&vif->disconnect_worker);
 			wil6210_disconnect(vif, NULL,
 					   WLAN_REASON_DEAUTH_LEAVING);
+			vif->ptk_rekey_state = WIL_REKEY_IDLE;
 		}
 	}
 	wil_bcast_fini_all(wil);
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index d94c651..e3eaab8 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -127,6 +127,9 @@ static u16 wil_select_queue(struct net_device *ndev,
 	else if (skb->priority == 0 || skb->priority > 7)
 		skb->priority = cfg80211_classify8021d(skb, NULL);
 
+	if (unlikely(skb->priority >= ARRAY_SIZE(wil_1d_to_queue)))
+		skb->priority = 0;
+
 	qid = wil_1d_to_queue[skb->priority];
 
 	wil_dbg_txrx(wil, "select queue for priority %d -> queue %d\n",
@@ -276,6 +279,7 @@ static void wil_vif_deinit(struct wil6210_vif *vif)
 	cancel_work_sync(&vif->p2p.delayed_listen_work);
 	wil_probe_client_flush(vif);
 	cancel_work_sync(&vif->probe_client_worker);
+	cancel_work_sync(&vif->enable_tx_key_worker);
 }
 
 void wil_vif_free(struct wil6210_vif *vif)
@@ -343,6 +347,7 @@ static void wil_vif_init(struct wil6210_vif *vif)
 	INIT_WORK(&vif->disconnect_worker, wil_disconnect_worker);
 	INIT_WORK(&vif->p2p.discovery_expired_work, wil_p2p_listen_expired);
 	INIT_WORK(&vif->p2p.delayed_listen_work, wil_p2p_delayed_listen_work);
+	INIT_WORK(&vif->enable_tx_key_worker, wil_enable_tx_key_worker);
 
 	INIT_LIST_HEAD(&vif->probe_client_pending);
 
@@ -613,6 +618,7 @@ void wil_vif_remove(struct wil6210_priv *wil, u8 mid)
 	cancel_work_sync(&vif->disconnect_worker);
 	wil_probe_client_flush(vif);
 	cancel_work_sync(&vif->probe_client_worker);
+	cancel_work_sync(&vif->enable_tx_key_worker);
 	/* for VIFs, ndev will be freed by destructor after RTNL is unlocked.
 	 * the main interface will be freed in wil_if_free, we need to keep it
 	 * a bit longer so logging macros will work.
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index f2b1564..d326f99 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -387,6 +387,13 @@ static int wil_platform_rop_notify(void *wil_handle,
 		set_bit(wil_status_resetting, wil->status);
 		set_bit(wil_status_pci_linkdown, wil->status);
 
+		if (wil->fw_state == WIL_FW_STATE_READY)
+			wil_nl_60g_fw_state_change(wil,
+						   WIL_FW_STATE_ERROR);
+		else
+			wil_nl_60g_fw_state_change(wil,
+					WIL_FW_STATE_ERROR_BEFORE_READY);
+
 		schedule_work(&wil->pci_linkdown_recovery_worker);
 		break;
 	default:
@@ -536,7 +543,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		mutex_unlock(&wil->mutex);
 		if (rc) {
 			wil_err(wil, "failed to load WMI only FW\n");
-			goto if_remove;
+			/* ignore the error to allow debugging */
 		}
 	}
 
@@ -557,8 +564,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	return 0;
 
-if_remove:
-	wil_if_remove(wil);
 bus_disable:
 	wil_if_pcie_disable(wil);
 err_iounmap:
diff --git a/drivers/net/wireless/ath/wil6210/sysfs.c b/drivers/net/wireless/ath/wil6210/sysfs.c
index ed1081c..a231090 100644
--- a/drivers/net/wireless/ath/wil6210/sysfs.c
+++ b/drivers/net/wireless/ath/wil6210/sysfs.c
@@ -318,6 +318,43 @@ fst_link_loss_store(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR_RW(fst_link_loss);
 
 static ssize_t
+fst_config_store(struct device *dev, struct device_attribute *attr,
+		 const char *buf, size_t count)
+{
+	struct wil6210_priv *wil = dev_get_drvdata(dev);
+	u8 addr[ETH_ALEN];
+	int rc;
+	u8 enabled, entry_mcs, exit_mcs, slevel;
+
+	/* <ap_bssid> <enabled> <entry_mcs> <exit_mcs> <sensitivity_level> */
+	if (sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu %hhu %hhu",
+		   &addr[0], &addr[1], &addr[2],
+		   &addr[3], &addr[4], &addr[5],
+		   &enabled, &entry_mcs, &exit_mcs, &slevel) != 10)
+		return -EINVAL;
+
+	if (entry_mcs > WIL_MCS_MAX || exit_mcs > WIL_MCS_MAX ||
+	    entry_mcs < exit_mcs || slevel > WMI_FST_SWITCH_SENSITIVITY_HIGH)
+		return -EINVAL;
+
+	wil_dbg_misc(wil,
+		     "fst_config %sabled for [%pM] with entry/exit MCS %d/%d, sensitivity %s\n",
+		     enabled ? "en" : "dis", addr, entry_mcs, exit_mcs,
+		     (slevel == WMI_FST_SWITCH_SENSITIVITY_LOW) ?
+			"LOW" : (slevel == WMI_FST_SWITCH_SENSITIVITY_HIGH) ?
+					"HIGH" : "MED");
+
+	rc = wmi_set_fst_config(wil, addr, enabled, entry_mcs, exit_mcs,
+				slevel);
+	if (!rc)
+		rc = count;
+
+	return rc;
+}
+
+static DEVICE_ATTR_WO(fst_config);
+
+static ssize_t
 vr_profile_show(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {
@@ -427,6 +464,7 @@ static struct attribute *wil6210_sysfs_entries[] = {
 	&dev_attr_fst_link_loss.attr,
 	&dev_attr_snr_thresh.attr,
 	&dev_attr_vr_profile.attr,
+	&dev_attr_fst_config.attr,
 	NULL
 };
 
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 659e30c3..bac1ec3 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -739,6 +739,182 @@ static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid,
 }
 
 /*
+ * Check whether the skb is a PTK EAPOL-Key message.
+ *
+ * Returns a pointer to the start of the EAPOL-Key structure, or NULL
+ * if the frame is not a PTK EAPOL-Key.
+ */
+static struct wil_eapol_key *wil_is_ptk_eapol_key(struct wil6210_priv *wil,
+						  struct sk_buff *skb)
+{
+	u8 *buf;
+	const struct wil_1x_hdr *hdr;
+	struct wil_eapol_key *key;
+	u16 key_info;
+	int len = skb->len;
+
+	if (!skb_mac_header_was_set(skb)) {
+		wil_err(wil, "mac header was not set\n");
+		return NULL;
+	}
+
+	len -= skb_mac_offset(skb);
+
+	if (len < sizeof(struct ethhdr) + sizeof(struct wil_1x_hdr) +
+	    sizeof(struct wil_eapol_key))
+		return NULL;
+
+	buf = skb_mac_header(skb) + sizeof(struct ethhdr);
+
+	hdr = (const struct wil_1x_hdr *)buf;
+	if (hdr->type != WIL_1X_TYPE_EAPOL_KEY)
+		return NULL;
+
+	key = (struct wil_eapol_key *)(buf + sizeof(struct wil_1x_hdr));
+	if (key->type != WIL_EAPOL_KEY_TYPE_WPA &&
+	    key->type != WIL_EAPOL_KEY_TYPE_RSN)
+		return NULL;
+
+	key_info = be16_to_cpu(key->key_info);
+	if (!(key_info & WIL_KEY_INFO_KEY_TYPE)) /* check if pairwise */
+		return NULL;
+
+	return key;
+}
+
+static bool wil_skb_is_eap_3(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+	struct wil_eapol_key *key;
+	u16 key_info;
+
+	key = wil_is_ptk_eapol_key(wil, skb);
+	if (!key)
+		return false;
+
+	key_info = be16_to_cpu(key->key_info);
+	if (key_info & (WIL_KEY_INFO_MIC |
+			WIL_KEY_INFO_ENCR_KEY_DATA)) {
+		/* 3/4 of 4-Way Handshake */
+		wil_dbg_misc(wil, "EAPOL key message 3\n");
+		return true;
+	}
+	/* 1/4 of 4-Way Handshake */
+	wil_dbg_misc(wil, "EAPOL key message 1\n");
+
+	return false;
+}
+
+static bool wil_skb_is_eap_4(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+	struct wil_eapol_key *key;
+	u32 *nonce, i;
+
+	key = wil_is_ptk_eapol_key(wil, skb);
+	if (!key)
+		return false;
+
+	nonce = (u32 *)key->key_nonce;
+	for (i = 0; i < WIL_EAP_NONCE_LEN / sizeof(u32); i++, nonce++) {
+		if (*nonce != 0) {
+			/* message 2/4 */
+			wil_dbg_misc(wil, "EAPOL key message 2\n");
+			return false;
+		}
+	}
+	wil_dbg_misc(wil, "EAPOL key message 4\n");
+
+	return true;
+}
+
+void wil_enable_tx_key_worker(struct work_struct *work)
+{
+	struct wil6210_vif *vif = container_of(work,
+			struct wil6210_vif, enable_tx_key_worker);
+	struct wil6210_priv *wil = vif_to_wil(vif);
+	int rc, cid;
+
+	rtnl_lock();
+	if (vif->ptk_rekey_state != WIL_REKEY_WAIT_M4_SENT) {
+		wil_dbg_misc(wil, "Invalid rekey state = %d\n",
+			     vif->ptk_rekey_state);
+		rtnl_unlock();
+		return;
+	}
+
+	cid = wil_find_cid_by_idx(wil, vif->mid, 0);
+	if (!wil_cid_valid(cid)) {
+		wil_err(wil, "Invalid cid = %d\n", cid);
+		rtnl_unlock();
+		return;
+	}
+
+	wil_dbg_misc(wil, "Apply PTK key after eapol was sent out\n");
+	rc = wmi_add_cipher_key(vif, 0, wil->sta[cid].addr, 0, NULL,
+				WMI_KEY_USE_APPLY_PTK);
+
+	vif->ptk_rekey_state = WIL_REKEY_IDLE;
+	rtnl_unlock();
+
+	if (rc)
+		wil_err(wil, "Apply PTK key failed %d\n", rc);
+}
+
+void wil_tx_complete_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
+{
+	struct wil6210_priv *wil = vif_to_wil(vif);
+	struct wireless_dev *wdev = vif_to_wdev(vif);
+	bool q = false;
+
+	if (wdev->iftype != NL80211_IFTYPE_STATION ||
+	    !test_bit(WMI_FW_CAPABILITY_SPLIT_REKEY, wil->fw_capabilities))
+		return;
+
+	/* check if skb is an EAP message 4/4 */
+	if (!wil_skb_is_eap_4(wil, skb))
+		return;
+
+	spin_lock_bh(&wil->eap_lock);
+	switch (vif->ptk_rekey_state) {
+	case WIL_REKEY_IDLE:
+		/* ignore idle state, can happen due to M4 retransmission */
+		break;
+	case WIL_REKEY_M3_RECEIVED:
+		vif->ptk_rekey_state = WIL_REKEY_IDLE;
+		break;
+	case WIL_REKEY_WAIT_M4_SENT:
+		q = true;
+		break;
+	default:
+		wil_err(wil, "Unknown rekey state = %d",
+			vif->ptk_rekey_state);
+	}
+	spin_unlock_bh(&wil->eap_lock);
+
+	if (q) {
+		q = queue_work(wil->wmi_wq, &vif->enable_tx_key_worker);
+		wil_dbg_misc(wil, "queue_work of enable_tx_key_worker -> %d\n",
+			     q);
+	}
+}
+
+static void wil_rx_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
+{
+	struct wil6210_priv *wil = vif_to_wil(vif);
+	struct wireless_dev *wdev = vif_to_wdev(vif);
+
+	if (wdev->iftype != NL80211_IFTYPE_STATION ||
+	    !test_bit(WMI_FW_CAPABILITY_SPLIT_REKEY, wil->fw_capabilities))
+		return;
+
+	/* check if skb is an EAP message 3/4 */
+	if (!wil_skb_is_eap_3(wil, skb))
+		return;
+
+	if (vif->ptk_rekey_state == WIL_REKEY_IDLE)
+		vif->ptk_rekey_state = WIL_REKEY_M3_RECEIVED;
+}
+
+/*
  * Pass Rx packet to the netif. Update statistics.
  * Called in softirq context (NAPI poll).
  */
@@ -810,6 +986,10 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
 	if (skb) { /* deliver to local stack */
 		skb->protocol = eth_type_trans(skb, ndev);
 		skb->dev = ndev;
+
+		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
+			wil_rx_handle_eapol(vif, skb);
+
 		if (gro)
 			rc = napi_gro_receive(&wil->napi_rx, skb);
 		else
@@ -2408,6 +2588,10 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
 					if (stats)
 						stats->tx_errors++;
 				}
+
+				if (skb->protocol == cpu_to_be16(ETH_P_PAE))
+					wil_tx_complete_handle_eapol(vif, skb);
+
 				wil_consume_skb(skb, d->dma.error == 0);
 			}
 			memset(ctx, 0, sizeof(*ctx));
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 4e9e6f6..d9e116b 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -412,6 +412,46 @@ struct vring_rx_mac {
 #define RX_DMA_STATUS_PHY_INFO	BIT(6)
 #define RX_DMA_STATUS_FFM	BIT(7) /* EtherType Flex Filter Match */
 
+/* IEEE 802.11, 8.5.2 EAPOL-Key frames */
+#define WIL_KEY_INFO_KEY_TYPE BIT(3) /* val of 1 = Pairwise, 0 = Group key */
+
+#define WIL_KEY_INFO_MIC BIT(8)
+#define WIL_KEY_INFO_ENCR_KEY_DATA BIT(12) /* for rsn only */
+
+#define WIL_EAP_NONCE_LEN 32
+#define WIL_EAP_KEY_RSC_LEN 8
+#define WIL_EAP_REPLAY_COUNTER_LEN 8
+#define WIL_EAP_KEY_IV_LEN 16
+#define WIL_EAP_KEY_ID_LEN 8
+
+enum {
+	WIL_1X_TYPE_EAP_PACKET = 0,
+	WIL_1X_TYPE_EAPOL_START = 1,
+	WIL_1X_TYPE_EAPOL_LOGOFF = 2,
+	WIL_1X_TYPE_EAPOL_KEY = 3,
+};
+
+#define WIL_EAPOL_KEY_TYPE_RSN 2
+#define WIL_EAPOL_KEY_TYPE_WPA 254
+
+struct wil_1x_hdr {
+	u8 version;
+	u8 type;
+	__be16 length;
+	/* followed by data */
+} __packed;
+
+struct wil_eapol_key {
+	u8 type;
+	__be16 key_info;
+	__be16 key_length;
+	u8 replay_counter[WIL_EAP_REPLAY_COUNTER_LEN];
+	u8 key_nonce[WIL_EAP_NONCE_LEN];
+	u8 key_iv[WIL_EAP_KEY_IV_LEN];
+	u8 key_rsc[WIL_EAP_KEY_RSC_LEN];
+	u8 key_id[WIL_EAP_KEY_ID_LEN];
+} __packed;
+
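+
The classification above never tracks handshake state on the wire; it relies on two observable properties instead. On the station's RX path only M1 and M3 arrive, and M3 is the one carrying the MIC/Encrypted-Key-Data bits; on the TX-complete path only M2 and M4 leave, and M4 is the one whose key nonce is all zero. A standalone sketch of the same tests (bit positions mirror the WIL_KEY_INFO_* defines above; key_info is assumed already converted to host order):

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	#define KEY_INFO_KEY_TYPE      (1u << 3)   /* 1 = pairwise (PTK) */
	#define KEY_INFO_MIC           (1u << 8)
	#define KEY_INFO_ENCR_KEY_DATA (1u << 12)
	#define NONCE_LEN              32

	/* RX (AP -> STA): only M1/M3 seen; M3 carries MIC/encrypted key data */
	static bool rx_is_m3(uint16_t key_info)
	{
		return (key_info & KEY_INFO_KEY_TYPE) &&
		       (key_info & (KEY_INFO_MIC | KEY_INFO_ENCR_KEY_DATA));
	}

	/* TX (STA -> AP): only M2/M4 seen; M4's nonce field is all zero */
	static bool tx_is_m4(uint16_t key_info, const uint8_t nonce[NONCE_LEN])
	{
		static const uint8_t zero[NONCE_LEN];

		return (key_info & KEY_INFO_KEY_TYPE) &&
		       memcmp(nonce, zero, NONCE_LEN) == 0;
	}

	int main(void)
	{
		uint8_t nonce[NONCE_LEN] = { 0 };

		return !(rx_is_m3(KEY_INFO_KEY_TYPE | KEY_INFO_MIC) &&
			 tx_is_m4(KEY_INFO_KEY_TYPE, nonce));
	}
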
 struct vring_rx_dma {
 	u32 d0;
 	struct wil_ring_dma_addr addr;
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 64e7f3a1..b72c248 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -1011,8 +1011,8 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
 	}
 	stats = &wil->sta[cid].stats;
 
-	if (unlikely(skb->len < ETH_HLEN)) {
-		wil_dbg_txrx(wil, "Short frame, len = %d\n", skb->len);
+	if (unlikely(dmalen < ETH_HLEN)) {
+		wil_dbg_txrx(wil, "Short frame, len = %d\n", dmalen);
 		stats->rx_short_frame++;
 		rxdata->skipping = true;
 		goto skipping;
@@ -1316,6 +1316,10 @@ int wil_tx_sring_handler(struct wil6210_priv *wil,
 					if (stats)
 						stats->tx_errors++;
 				}
+
+				if (skb->protocol == cpu_to_be16(ETH_P_PAE))
+					wil_tx_complete_handle_eapol(vif, skb);
+
 				wil_consume_skb(skb, msg.status == 0);
 			}
 			memset(ctx, 0, sizeof(*ctx));
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 743b2d4..aa06969 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -755,6 +755,12 @@ enum wil_sta_status {
 	wil_sta_connected = 2,
 };
 
+enum wil_rekey_state {
+	WIL_REKEY_IDLE = 0,
+	WIL_REKEY_M3_RECEIVED = 1,
+	WIL_REKEY_WAIT_M4_SENT = 2,
+};
+
 /**
  * struct wil_sta_info - data for peer
  *
@@ -906,6 +912,10 @@ struct wil6210_vif {
 	int net_queue_stopped; /* netif_tx_stop_all_queues invoked */
 	bool fw_stats_ready; /* per-cid statistics are ready inside sta_info */
 	u64 fw_stats_tsf; /* measurement timestamp */
+
+	/* PTK rekey race prevention; relevant in station mode only */
+	enum wil_rekey_state ptk_rekey_state;
+	struct work_struct enable_tx_key_worker;
 };
 
 /**
@@ -950,6 +960,17 @@ struct wil_ftm_offsets {
 	unsigned int rx_offset;
 };
 
+enum wil_fw_state {
+	/* When the driver is loaded with debug_fw, the FW state is unknown */
+	WIL_FW_STATE_UNKNOWN,
+	WIL_FW_STATE_DOWN, /* FW not loaded or not ready yet */
+	WIL_FW_STATE_READY, /* FW is ready */
+	/* Detected FW error before FW sent ready indication */
+	WIL_FW_STATE_ERROR_BEFORE_READY,
+	/* Detected FW error after FW sent ready indication */
+	WIL_FW_STATE_ERROR,
+};
+
 struct wil6210_priv {
 	struct pci_dev *pdev;
 	u32 bar_size;
@@ -1011,6 +1032,7 @@ struct wil6210_priv {
 	 */
 	spinlock_t wmi_ev_lock;
 	spinlock_t net_queue_lock; /* guarding stop/wake netif queue */
+	spinlock_t eap_lock; /* guarding access to eap rekey fields */
 	struct napi_struct napi_rx;
 	struct napi_struct napi_tx;
 	struct net_device napi_ndev; /* dummy net_device serving all VIFs */
@@ -1045,6 +1067,7 @@ struct wil6210_priv {
 	u8 wakeup_trigger;
 	struct wil_suspend_stats suspend_stats;
 	struct wil_debugfs_data dbg_data;
+	/* set to WIL_DISABLE_EDMG to force-disable EDMG */
 	u8 force_edmg_channel;
 	bool tx_latency; /* collect TX latency measurements */
 	size_t tx_latency_res; /* bin resolution in usec */
@@ -1113,10 +1136,12 @@ struct wil6210_priv {
 	u32 max_agg_wsize;
 	u32 max_ampdu_size;
 
+	enum wil_fw_state fw_state;
 	struct work_struct pci_linkdown_recovery_worker;
 	void *ipa_handle;
 
 	u32 tx_reserved_entries; /* Used only in Talyn code-path */
+	s32 cqm_rssi_thold;
 };
 
 #define wil_to_wiphy(i) (i->wiphy)
@@ -1277,6 +1302,7 @@ int __wil_down(struct wil6210_priv *wil);
 void wil_refresh_fw_capabilities(struct wil6210_priv *wil);
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
 int wil_find_cid(struct wil6210_priv *wil, u8 mid, const u8 *mac);
+int wil_find_cid_by_idx(struct wil6210_priv *wil, u8 mid, int idx);
 void wil_set_ethtoolops(struct net_device *ndev);
 int wil_vr_update_profile(struct wil6210_priv *wil, u8 profile);
 
@@ -1375,6 +1401,9 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 			 struct cfg80211_mgmt_tx_params *params,
 			 u64 *cookie);
 void wil_cfg80211_ap_recovery(struct wil6210_priv *wil);
+
+void wil_nl_60g_fw_state_change(struct wil6210_priv *wil,
+				enum wil_fw_state fw_state);
 int wil_cfg80211_iface_combinations_from_fw(
 	struct wil6210_priv *wil,
 	const struct wil_fw_record_concurrency *conc);
@@ -1426,6 +1455,7 @@ void wil6210_disconnect_complete(struct wil6210_vif *vif, const u8 *bssid,
 void wil_probe_client_flush(struct wil6210_vif *vif);
 void wil_probe_client_worker(struct work_struct *work);
 void wil_disconnect_worker(struct work_struct *work);
+void wil_enable_tx_key_worker(struct work_struct *work);
 
 void wil_init_txrx_ops(struct wil6210_priv *wil);
 
@@ -1445,6 +1475,8 @@ void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
 			      struct wil_ring *ring, bool check_stop);
 netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
 int wil_tx_complete(struct wil6210_vif *vif, int ringid);
+void wil_tx_complete_handle_eapol(struct wil6210_vif *vif,
+				  struct sk_buff *skb);
 void wil6210_unmask_irq_tx(struct wil6210_priv *wil);
 void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil);
 
@@ -1542,4 +1574,8 @@ void update_supported_bands(struct wil6210_priv *wil);
 int wmi_reset_spi_slave(struct wil6210_priv *wil);
 
 void wil_clear_fw_log_addr(struct wil6210_priv *wil);
+int wmi_set_cqm_rssi_config(struct wil6210_priv *wil,
+			    s32 rssi_thold, u32 rssi_hyst);
+int wmi_set_fst_config(struct wil6210_priv *wil, const u8 *bssid, u8 enabled,
+		       u8 entry_mcs, u8 exit_mcs, u8 slevel);
 #endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 45d7934..818e38d 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -483,6 +483,10 @@ static const char *cmdid2name(u16 cmdid)
 		return "WMI_RBUFCAP_CFG_CMD";
 	case WMI_TEMP_SENSE_ALL_CMDID:
 		return "WMI_TEMP_SENSE_ALL_CMDID";
+	case WMI_FST_CONFIG_CMDID:
+		return "WMI_FST_CONFIG_CMD";
+	case WMI_SET_LINK_MONITOR_CMDID:
+		return "WMI_SET_LINK_MONITOR_CMD";
 	default:
 		return "Untracked CMD";
 	}
@@ -635,6 +639,12 @@ static const char *eventid2name(u16 eventid)
 		return "WMI_RBUFCAP_CFG_EVENT";
 	case WMI_TEMP_SENSE_ALL_DONE_EVENTID:
 		return "WMI_TEMP_SENSE_ALL_DONE_EVENTID";
+	case WMI_FST_CONFIG_EVENTID:
+		return "WMI_FST_CONFIG_EVENT";
+	case WMI_SET_LINK_MONITOR_EVENTID:
+		return "WMI_SET_LINK_MONITOR_EVENT";
+	case WMI_LINK_MONITOR_EVENTID:
+		return "WMI_LINK_MONITOR_EVENT";
 	default:
 		return "Untracked EVENT";
 	}
@@ -1069,6 +1079,24 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len)
 			mutex_unlock(&wil->mutex);
 			return;
 		}
+
+		sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
+		if (!sinfo) {
+			wmi_disconnect_sta(vif, wil->sta[evt->cid].addr,
+					   WLAN_REASON_UNSPECIFIED, false);
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		sinfo->generation = wil->sinfo_gen++;
+
+		if (assoc_req_ie) {
+			sinfo->assoc_req_ies = assoc_req_ie;
+			sinfo->assoc_req_ies_len = assoc_req_ielen;
+		}
+
+		cfg80211_new_sta(ndev, evt->bssid, sinfo, GFP_KERNEL);
+		kfree(sinfo);
 	}
 
 	ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid);
@@ -1111,28 +1139,10 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len)
 		   (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
 
 		if (rc) {
-			if (disable_ap_sme)
-				/* notify new_sta has failed */
-				cfg80211_del_sta(ndev, evt->bssid, GFP_KERNEL);
+			/* notify new_sta has failed */
+			cfg80211_del_sta(ndev, evt->bssid, GFP_KERNEL);
 			goto out;
 		}
-
-		sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
-		if (!sinfo) {
-			rc = -ENOMEM;
-			goto out;
-		}
-
-		sinfo->generation = wil->sinfo_gen++;
-
-		if (assoc_req_ie) {
-			sinfo->assoc_req_ies = assoc_req_ie;
-			sinfo->assoc_req_ies_len = assoc_req_ielen;
-		}
-
-		cfg80211_new_sta(ndev, evt->bssid, sinfo, GFP_KERNEL);
-
-		kfree(sinfo);
 	} else {
 		wil_err(wil, "unhandled iftype %d for CID %d\n", wdev->iftype,
 			evt->cid);
@@ -1908,6 +1918,32 @@ wmi_evt_reassoc_status(struct wil6210_vif *vif, int id, void *d, int len)
 	wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID);
 }
 
+static void
+wmi_evt_link_monitor(struct wil6210_vif *vif, int id, void *d, int len)
+{
+	struct wil6210_priv *wil = vif_to_wil(vif);
+	struct net_device *ndev = vif_to_ndev(vif);
+	struct wmi_link_monitor_event *evt = d;
+	enum nl80211_cqm_rssi_threshold_event event_type;
+
+	if (len < sizeof(*evt)) {
+		wil_err(wil, "link monitor event too short %d\n", len);
+		return;
+	}
+
+	wil_dbg_wmi(wil, "link monitor event, type %d rssi %d (stored %d)\n",
+		    evt->type, evt->rssi_level, wil->cqm_rssi_thold);
+
+	if (evt->type != WMI_LINK_MONITOR_NOTIF_RSSI_THRESHOLD_EVT)
+		/* ignore */
+		return;
+
+	event_type = (evt->rssi_level > wil->cqm_rssi_thold ?
+		      NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH :
+		      NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW);
+	cfg80211_cqm_rssi_notify(ndev, event_type, evt->rssi_level, GFP_KERNEL);
+}
+
 /**
  * Some events are ignored for purpose; and need not be interpreted as
  * "unhandled events"
@@ -1948,6 +1984,7 @@ static const struct {
 	{WMI_LINK_STATS_EVENTID,		wmi_evt_link_stats},
 	{WMI_FT_AUTH_STATUS_EVENTID,		wmi_evt_auth_status},
 	{WMI_FT_REASSOC_STATUS_EVENTID,		wmi_evt_reassoc_status},
+	{WMI_LINK_MONITOR_EVENTID,		wmi_evt_link_monitor},
 };
 
 /*
@@ -2547,10 +2584,17 @@ int wmi_add_cipher_key(struct wil6210_vif *vif, u8 key_index,
 		.key_len = key_len,
 	};
 
-	if (!key || (key_len > sizeof(cmd.key)))
+	if (key_len > sizeof(cmd.key))
 		return -EINVAL;
 
-	memcpy(cmd.key, key, key_len);
+	/* key_len == 0 is allowed only for WMI_KEY_USE_APPLY_PTK usage */
+	if ((key_len == 0 || !key) &&
+	    key_usage != WMI_KEY_USE_APPLY_PTK)
+		return -EINVAL;
+
+	if (key)
+		memcpy(cmd.key, key, key_len);
+
 	if (mac_addr)
 		memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
 
@@ -4344,3 +4388,84 @@ int wmi_reset_spi_slave(struct wil6210_priv *wil)
 
 	return 0;
 }
+
+int wmi_set_cqm_rssi_config(struct wil6210_priv *wil,
+			    s32 rssi_thold, u32 rssi_hyst)
+{
+	struct net_device *ndev = wil->main_ndev;
+	struct wil6210_vif *vif = ndev_to_vif(ndev);
+	int rc;
+	struct {
+		struct wmi_set_link_monitor_cmd cmd;
+		s8 rssi_thold;
+	} __packed cmd = {
+		.cmd = {
+			.rssi_hyst = rssi_hyst,
+			.rssi_thresholds_list_size = 1,
+		},
+		.rssi_thold = rssi_thold,
+	};
+	struct {
+		struct wmi_cmd_hdr hdr;
+		struct wmi_set_link_monitor_event evt;
+	} __packed reply = {
+		.evt = {.status = WMI_FW_STATUS_FAILURE},
+	};
+
+	if (rssi_thold > S8_MAX || rssi_thold < S8_MIN || rssi_hyst > U8_MAX)
+		return -EINVAL;
+
+	rc = wmi_call(wil, WMI_SET_LINK_MONITOR_CMDID, vif->mid, &cmd,
+		      sizeof(cmd), WMI_SET_LINK_MONITOR_EVENTID,
+		      &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+	if (rc) {
+		wil_err(wil, "WMI_SET_LINK_MONITOR_CMDID failed, rc %d\n", rc);
+		return rc;
+	}
+
+	if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+		wil_err(wil, "WMI_SET_LINK_MONITOR_CMDID failed, status %d\n",
+			reply.evt.status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
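+
WMI_SET_LINK_MONITOR_CMDID takes a variable-length trailing threshold list, and wmi_set_cqm_rssi_config() above sizes it by wrapping the fixed command header plus a single s8 in an anonymous __packed struct, keeping the list contiguous with rssi_thresholds_list_size. A userspace sketch of the same wrapper trick (names are illustrative; the [0] trailing array mirrors the driver's GNU idiom):

	#include <assert.h>
	#include <stdint.h>

	struct link_monitor_cmd {
		uint8_t rssi_hyst;
		uint8_t reserved[12];
		uint8_t rssi_thresholds_list_size;
		int8_t  rssi_thresholds_list[0];	/* trailing list */
	} __attribute__((packed));

	int main(void)
	{
		/* one threshold: header plus one trailing byte, no padding */
		struct {
			struct link_monitor_cmd cmd;
			int8_t thold;
		} __attribute__((packed)) msg = {
			.cmd = { .rssi_hyst = 2, .rssi_thresholds_list_size = 1 },
			.thold = -70,
		};

		static_assert(sizeof(msg) == sizeof(struct link_monitor_cmd) + 1,
			      "header and list must be contiguous");
		return msg.cmd.rssi_thresholds_list_size == 1 ? 0 : 1;
	}
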
+int wmi_set_fst_config(struct wil6210_priv *wil, const u8 *bssid, u8 enabled,
+		       u8 entry_mcs, u8 exit_mcs, u8 slevel)
+{
+	struct net_device *ndev = wil->main_ndev;
+	struct wil6210_vif *vif = ndev_to_vif(ndev);
+	int rc;
+	struct wmi_fst_config_cmd cmd = {
+		.fst_en = enabled,
+		.fst_entry_mcs = entry_mcs,
+		.fst_exit_mcs = exit_mcs,
+		.sensitivity_level = slevel,
+	};
+	struct {
+		struct wmi_cmd_hdr hdr;
+		struct wmi_fst_config_event evt;
+	} __packed reply = {
+		.evt = {.status = WMI_FW_STATUS_FAILURE},
+	};
+
+	ether_addr_copy(cmd.fst_ap_bssid, bssid);
+
+	rc = wmi_call(wil, WMI_FST_CONFIG_CMDID, vif->mid, &cmd,
+		      sizeof(cmd), WMI_FST_CONFIG_EVENTID,
+		      &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+	if (rc) {
+		wil_err(wil, "WMI_FST_CONFIG_CMDID failed, rc %d\n", rc);
+		return rc;
+	}
+
+	if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+		wil_err(wil, "WMI_FST_CONFIG_CMDID failed, status %d\n",
+			reply.evt.status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index fc28f4b..3cb6444 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -99,6 +99,7 @@ enum wmi_fw_capability {
 	WMI_FW_CAPABILITY_CHANNEL_4			= 26,
 	WMI_FW_CAPABILITY_IPA				= 27,
 	WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF		= 30,
+	WMI_FW_CAPABILITY_SPLIT_REKEY			= 31,
 	WMI_FW_CAPABILITY_AP_POWER_MANAGEMENT		= 32,
 	WMI_FW_CAPABILITY_MAX,
 };
@@ -194,6 +195,8 @@ enum wmi_command_id {
 	WMI_RCP_ADDBA_RESP_EDMA_CMDID			= 0x83B,
 	WMI_LINK_MAINTAIN_CFG_WRITE_CMDID		= 0x842,
 	WMI_LINK_MAINTAIN_CFG_READ_CMDID		= 0x843,
+	WMI_FST_CONFIG_CMDID				= 0x844,
+	WMI_SET_LINK_MONITOR_CMDID			= 0x845,
 	WMI_SET_SECTORS_CMDID				= 0x849,
 	WMI_MAINTAIN_PAUSE_CMDID			= 0x850,
 	WMI_MAINTAIN_RESUME_CMDID			= 0x851,
@@ -414,6 +417,8 @@ enum wmi_key_usage {
 	WMI_KEY_USE_PAIRWISE	= 0x00,
 	WMI_KEY_USE_RX_GROUP	= 0x01,
 	WMI_KEY_USE_TX_GROUP	= 0x02,
+	WMI_KEY_USE_STORE_PTK	= 0x03,
+	WMI_KEY_USE_APPLY_PTK	= 0x04,
 };
 
 struct wmi_add_cipher_key_cmd {
@@ -1986,6 +1991,7 @@ enum wmi_event_id {
 	WMI_REPORT_STATISTICS_EVENTID			= 0x100B,
 	WMI_FT_AUTH_STATUS_EVENTID			= 0x100C,
 	WMI_FT_REASSOC_STATUS_EVENTID			= 0x100D,
+	WMI_LINK_MONITOR_EVENTID			= 0x100E,
 	WMI_RADAR_GENERAL_CONFIG_EVENTID		= 0x1100,
 	WMI_RADAR_CONFIG_SELECT_EVENTID			= 0x1101,
 	WMI_RADAR_PARAMS_CONFIG_EVENTID			= 0x1102,
@@ -2038,6 +2044,8 @@ enum wmi_event_id {
 	WMI_TX_MGMT_PACKET_EVENTID			= 0x1841,
 	WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID	= 0x1842,
 	WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENTID		= 0x1843,
+	WMI_FST_CONFIG_EVENTID				= 0x1844,
+	WMI_SET_LINK_MONITOR_EVENTID			= 0x1845,
 	WMI_RF_XPM_READ_RESULT_EVENTID			= 0x1856,
 	WMI_RF_XPM_WRITE_RESULT_EVENTID			= 0x1857,
 	WMI_LED_CFG_DONE_EVENTID			= 0x1858,
@@ -3327,6 +3335,61 @@ struct wmi_link_maintain_cfg_read_cmd {
 	__le32 cid;
 } __packed;
 
+/* switch sensitivity levels for WMI_FST_CONFIG_CMDID command */
+enum wmi_fst_switch_sensitivity_level {
+	WMI_FST_SWITCH_SENSITIVITY_LOW	= 0x00,
+	WMI_FST_SWITCH_SENSITIVITY_MED	= 0x01,
+	WMI_FST_SWITCH_SENSITIVITY_HIGH	= 0x02,
+};
+
+/* WMI_FST_CONFIG_CMDID */
+struct wmi_fst_config_cmd {
+	u8 fst_en;
+	u8 fst_ap_bssid[WMI_MAC_LEN];
+	u8 fst_entry_mcs;
+	u8 fst_exit_mcs;
+	/* wmi_fst_switch_sensitivity_level */
+	u8 sensitivity_level;
+	u8 reserved[2];
+} __packed;
+
+/* WMI_SET_LINK_MONITOR_CMDID */
+struct wmi_set_link_monitor_cmd {
+	u8 rssi_hyst;
+	u8 reserved[12];
+	u8 rssi_thresholds_list_size;
+	s8 rssi_thresholds_list[0];
+} __packed;
+
+/* WMI_FST_CONFIG_EVENTID */
+struct wmi_fst_config_event {
+	/* wmi_fw_status */
+	u8 status;
+	u8 reserved[3];
+} __packed;
+
+/* wmi_link_monitor_event_type */
+enum wmi_link_monitor_event_type {
+	WMI_LINK_MONITOR_NOTIF_RSSI_THRESHOLD_EVT	= 0x00,
+	WMI_LINK_MONITOR_NOTIF_TX_ERR_EVT		= 0x01,
+	WMI_LINK_MONITOR_NOTIF_THERMAL_EVT		= 0x02,
+};
+
+/* WMI_SET_LINK_MONITOR_EVENTID */
+struct wmi_set_link_monitor_event {
+	/* wmi_fw_status */
+	u8 status;
+	u8 reserved[3];
+} __packed;
+
+/* WMI_LINK_MONITOR_EVENTID */
+struct wmi_link_monitor_event {
+	/* link_monitor_event_type */
+	u8 type;
+	s8 rssi_level;
+	u8 reserved[2];
+} __packed;
+
 /* WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID */
 struct wmi_link_maintain_cfg_write_done_event {
 	/* requested connection ID */
diff --git a/drivers/net/wireless/cnss2/genl.c b/drivers/net/wireless/cnss2/genl.c
index 5a7fb1f5..8ba1a9e 100644
--- a/drivers/net/wireless/cnss2/genl.c
+++ b/drivers/net/wireless/cnss2/genl.c
@@ -5,6 +5,7 @@
 
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/delay.h>
 #include <net/netlink.h>
 #include <net/genetlink.h>
 
@@ -161,6 +162,7 @@ int cnss_genl_send_msg(void *buff, u8 type, char *file_name, u32 total_size)
 	u32 seg_id = 0;
 	u32 data_len = 0;
 	u8 end = 0;
+	u8 retry;
 
 	cnss_pr_dbg("type: %u, total_size: %x\n", type, total_size);
 
@@ -171,8 +173,16 @@ int cnss_genl_send_msg(void *buff, u8 type, char *file_name, u32 total_size)
 			data_len = remaining;
 			end = 1;
 		}
-		ret = cnss_genl_send_data(type, file_name, total_size,
-					  seg_id, end, data_len, msg_buff);
+
+		for (retry = 0; retry < 2; retry++) {
+			ret = cnss_genl_send_data(type, file_name, total_size,
+						  seg_id, end, data_len,
+						  msg_buff);
+			if (ret >= 0)
+				break;
+			msleep(100);
+		}
+
 		if (ret < 0) {
 			cnss_pr_err("fail to send genl data, ret %d\n", ret);
 			return ret;
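
The retry added above gives each segment two attempts 100 ms apart; the surrounding loop walks the payload in fixed-size segments and flags the final one via end. A minimal userspace sketch of that segmentation, with send_segment() as a hypothetical stand-in for cnss_genl_send_data():

	#include <stdint.h>
	#include <stdio.h>

	/* hypothetical stand-in for cnss_genl_send_data() */
	static int send_segment(uint32_t seg_id, uint8_t end,
				uint32_t len, const uint8_t *buf)
	{
		printf("seg %u len %u end %u\n", seg_id, len, end);
		return 0;
	}

	static int send_all(const uint8_t *buf, uint32_t total, uint32_t seg_size)
	{
		uint32_t seg_id = 0, off = 0;

		while (off < total) {
			uint32_t remaining = total - off;
			uint32_t len = remaining > seg_size ? seg_size : remaining;
			uint8_t end = (off + len == total);	/* last segment */
			int ret = send_segment(seg_id, end, len, buf + off);

			if (ret < 0)
				return ret;	/* caller may retry with backoff */
			off += len;
			seg_id++;
		}
		return 0;
	}
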
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index f5d3b38..1c3fb3a 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -2094,6 +2094,10 @@ static void cnss_misc_deinit(struct cnss_plat_data *plat_priv)
 static void cnss_init_control_params(struct cnss_plat_data *plat_priv)
 {
 	plat_priv->ctrl_params.quirks = CNSS_QUIRKS_DEFAULT;
+	if (of_property_read_bool(plat_priv->plat_dev->dev.of_node,
+				  "cnss-daemon-support"))
+		plat_priv->ctrl_params.quirks |= BIT(ENABLE_DAEMON_SUPPORT);
+
 	plat_priv->ctrl_params.mhi_timeout = CNSS_MHI_TIMEOUT_DEFAULT;
 	plat_priv->ctrl_params.mhi_m2_timeout = CNSS_MHI_M2_TIMEOUT_DEFAULT;
 	plat_priv->ctrl_params.qmi_timeout = CNSS_QMI_TIMEOUT_DEFAULT;
@@ -2126,6 +2130,13 @@ static const struct of_device_id cnss_of_match_table[] = {
 };
 MODULE_DEVICE_TABLE(of, cnss_of_match_table);
 
+static inline bool
+cnss_use_nv_mac(struct cnss_plat_data *plat_priv)
+{
+	return of_property_read_bool(plat_priv->plat_dev->dev.of_node,
+				     "use-nv-mac");
+}
+
 static int cnss_probe(struct platform_device *plat_dev)
 {
 	int ret = 0;
@@ -2158,6 +2169,7 @@ static int cnss_probe(struct platform_device *plat_dev)
 	plat_priv->plat_dev = plat_dev;
 	plat_priv->device_id = device_id->driver_data;
 	plat_priv->bus_type = cnss_get_bus_type(plat_priv->device_id);
+	plat_priv->use_nv_mac = cnss_use_nv_mac(plat_priv);
 	cnss_set_plat_priv(plat_dev, plat_priv);
 	platform_set_drvdata(plat_dev, plat_priv);
 	INIT_LIST_HEAD(&plat_priv->vreg_list);
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index c3d764f..b21fcbb 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -361,6 +361,7 @@ struct cnss_plat_data {
 	u64 dynamic_feature;
 	void *get_info_cb_ctx;
 	int (*get_info_cb)(void *ctx, void *event, int event_len);
+	u8 use_nv_mac;
 };
 
 #ifdef CONFIG_ARCH_QCOM
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 8cd6963..077e660 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -58,7 +58,6 @@
 static DEFINE_SPINLOCK(pci_link_down_lock);
 static DEFINE_SPINLOCK(pci_reg_window_lock);
 static DEFINE_SPINLOCK(time_sync_lock);
-static DEFINE_SPINLOCK(pm_qos_lock);
 
 #define MHI_TIMEOUT_OVERWRITE_MS	(plat_priv->ctrl_params.mhi_timeout)
 #define MHI_M2_TIMEOUT_MS		(plat_priv->ctrl_params.mhi_m2_timeout)
@@ -1224,73 +1223,6 @@ static void cnss_pci_stop_time_sync_update(struct cnss_pci_data *pci_priv)
 	cancel_delayed_work_sync(&pci_priv->time_sync_work);
 }
 
-static int cnss_pci_pm_qos_notify(struct notifier_block *nb,
-				  unsigned long curr_val, void *cpus)
-{
-	struct cnss_pci_data *pci_priv =
-		container_of(nb, struct cnss_pci_data, pm_qos_nb);
-	unsigned long flags;
-
-	spin_lock_irqsave(&pm_qos_lock, flags);
-
-	if (!pci_priv->runtime_pm_prevented &&
-	    curr_val != PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE) {
-		cnss_pci_pm_runtime_get_noresume(pci_priv);
-		pci_priv->runtime_pm_prevented = true;
-	} else if (pci_priv->runtime_pm_prevented &&
-		   curr_val == PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE) {
-		cnss_pci_pm_runtime_put_noidle(pci_priv);
-		pci_priv->runtime_pm_prevented = false;
-	}
-
-	spin_unlock_irqrestore(&pm_qos_lock, flags);
-
-	return NOTIFY_DONE;
-}
-
-static int cnss_pci_pm_qos_add_notifier(struct cnss_pci_data *pci_priv)
-{
-	int ret;
-
-	if (pci_priv->device_id == QCA6174_DEVICE_ID)
-		return 0;
-
-	pci_priv->pm_qos_nb.notifier_call = cnss_pci_pm_qos_notify;
-	ret = pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
-				  &pci_priv->pm_qos_nb);
-	if (ret)
-		cnss_pr_err("Failed to add qos notifier, err = %d\n",
-			    ret);
-
-	return ret;
-}
-
-static int cnss_pci_pm_qos_remove_notifier(struct cnss_pci_data *pci_priv)
-{
-	int ret;
-	unsigned long flags;
-
-	if (pci_priv->device_id == QCA6174_DEVICE_ID)
-		return 0;
-
-	ret = pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
-				     &pci_priv->pm_qos_nb);
-	if (ret)
-		cnss_pr_dbg("Failed to remove qos notifier, err = %d\n",
-			    ret);
-
-	spin_lock_irqsave(&pm_qos_lock, flags);
-
-	if (pci_priv->runtime_pm_prevented) {
-		cnss_pci_pm_runtime_put_noidle(pci_priv);
-		pci_priv->runtime_pm_prevented = false;
-	}
-
-	spin_unlock_irqrestore(&pm_qos_lock, flags);
-
-	return ret;
-}
-
 int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv)
 {
 	int ret = 0;
@@ -1352,7 +1284,6 @@ int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv)
 	}
 
 	cnss_pci_start_time_sync_update(pci_priv);
-	cnss_pci_pm_qos_add_notifier(pci_priv);
 
 	return 0;
 
@@ -1370,6 +1301,8 @@ int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv)
 
 	plat_priv = pci_priv->plat_priv;
 
+	cnss_pci_stop_time_sync_update(pci_priv);
+
 	if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state) ||
 	    test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
 	    test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
@@ -1382,9 +1315,6 @@ int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv)
 		return -EINVAL;
 	}
 
-	cnss_pci_pm_qos_remove_notifier(pci_priv);
-	cnss_pci_stop_time_sync_update(pci_priv);
-
 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
 	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
 		pci_priv->driver_ops->shutdown(pci_priv->pci_dev);
@@ -1979,10 +1909,6 @@ int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
 	if (!test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state))
 		goto register_driver;
 
-	cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
-	if (!cal_info)
-		return -ENOMEM;
-
 	cnss_pr_dbg("Start to wait for calibration to complete\n");
 
 	timeout = cnss_get_boot_timeout(&pci_priv->pci_dev->dev);
@@ -1990,6 +1916,11 @@ int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
 					  msecs_to_jiffies(timeout) << 2);
 	if (!ret) {
 		cnss_pr_err("Timeout waiting for calibration to complete\n");
+
+		cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
+		if (!cal_info)
+			return -ENOMEM;
+
 		cal_info->cal_status = CNSS_CAL_TIMEOUT;
 		cnss_driver_event_post(plat_priv,
 				       CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index 31a91b4..d6b72def 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -72,8 +72,6 @@ struct cnss_pci_data {
 	struct pci_saved_state *saved_state;
 	struct pci_saved_state *default_state;
 	struct msm_pcie_register_event msm_pci_event;
-	struct notifier_block pm_qos_nb;
-	u8 runtime_pm_prevented;
 	atomic_t auto_suspended;
 	atomic_t drv_connected;
 	u8 drv_connected_last;
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index d8b6232..5e9179d 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -28,6 +28,9 @@
 
 #define QMI_WLFW_MAX_RECV_BUF_SIZE	SZ_8K
 
+#define QMI_WLFW_MAC_READY_TIMEOUT_MS	50
+#define QMI_WLFW_MAC_READY_MAX_RETRY	200
+
 static char *cnss_qmi_mode_to_str(enum cnss_driver_mode mode)
 {
 	switch (mode) {
@@ -595,6 +598,9 @@ int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
 		req->seg_id++;
 	}
 
+	if (bdf_type != CNSS_BDF_DUMMY)
+		release_firmware(fw_entry);
+
 	kfree(req);
 	kfree(resp);
 	return 0;
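
The added release_firmware() closes a leak: every successful request_firmware() must be paired with a release once the blob has been consumed. A minimal kernel-context sketch of the canonical pairing (the consumer is illustrative):

	#include <linux/firmware.h>
	#include <linux/device.h>

	static int load_blob(struct device *dev, const char *name)
	{
		const struct firmware *fw;
		int ret;

		ret = request_firmware(&fw, name, dev);
		if (ret)
			return ret;

		/* consume fw->data / fw->size here (copy out, DMA, ...) */

		release_firmware(fw);	/* always pair with request_firmware() */
		return 0;
	}
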
@@ -687,6 +693,131 @@ int cnss_wlfw_m3_dnld_send_sync(struct cnss_plat_data *plat_priv)
 	return ret;
 }
 
+static int cnss_wlfw_wlan_mac_req_send_sync(struct cnss_plat_data *plat_priv,
+					    u8 *mac, u32 mac_len)
+{
+	struct wlfw_mac_addr_req_msg_v01 *req;
+	struct wlfw_mac_addr_resp_msg_v01 *resp;
+	struct qmi_txn txn;
+	int ret;
+	u8 is_query;
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	/* NULL mac && zero mac_len means querying the status of MAC in FW */
+	if ((mac && mac_len != QMI_WLFW_MAC_ADDR_SIZE_V01) ||
+	    (!mac && mac_len != 0))
+		return -EINVAL;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+			   wlfw_mac_addr_resp_msg_v01_ei, resp);
+	if (ret < 0) {
+		cnss_pr_err("Failed to initialize txn for mac req, err: %d\n",
+			    ret);
+		ret = -EIO;
+		goto out;
+	}
+
+	is_query = !mac;
+	if (!is_query) {
+		/* log only when setting the MAC; queries repeat too often */
+		cnss_pr_dbg("Sending WLAN mac req [%pM], state: 0x%lx\n",
+			    mac, plat_priv->driver_state);
+		memcpy(req->mac_addr, mac, mac_len);
+
+		/* 0 - query status of wlfw MAC; 1 - set wlfw MAC */
+		req->mac_addr_valid = 1;
+	}
+
+	ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+			       QMI_WLFW_MAC_ADDR_REQ_V01,
+			       WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN,
+			       wlfw_mac_addr_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		cnss_pr_err("Failed to send mac req, err: %d\n", ret);
+
+		ret = -EIO;
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+	if (ret < 0) {
+		cnss_pr_err("Failed to wait for resp of mac req, err: %d\n",
+			    ret);
+
+		ret = -EIO;
+		goto out;
+	}
+
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("WLAN mac req failed, result: %d, err: %d\n",
+			    resp->resp.result);
+
+		ret = -EIO;
+		goto out;
+	}
+
+	if (resp->resp.error != QMI_ERR_NONE_V01) {
+		ret = ((resp->resp.error == QMI_ERR_NETWORK_NOT_READY_V01 &&
+			is_query) ? -EAGAIN : -EIO);
+		if (ret != -EAGAIN)
+			cnss_pr_err("Got error resp for mac req, err: %d\n",
+				    resp->resp.error);
+		goto out;
+	}
+
+	cnss_pr_dbg("WLAN mac req completed\n");
+
+out:
+	kfree(req);
+	kfree(resp);
+	return ret;
+}
+
+static void cnss_wait_for_wlfw_mac_ready(struct cnss_plat_data *plat_priv)
+{
+	int ret, retry = 0;
+
+	if (!plat_priv)
+		return;
+
+	cnss_pr_dbg("Checking wlfw mac, state: 0x%lx\n",
+		    plat_priv->driver_state);
+	do {
+		/* query the current status of WLAN MAC */
+		ret = cnss_wlfw_wlan_mac_req_send_sync(plat_priv, NULL, 0);
+		if (!ret) {
+			cnss_pr_dbg("wlfw mac is ready\n");
+			break;
+		}
+
+		if (ret != -EAGAIN) {
+			cnss_pr_err("failed to query wlfw mac, error: %d\n",
+				    ret);
+			break;
+		}
+
+		if (++retry >= QMI_WLFW_MAC_READY_MAX_RETRY) {
+			cnss_pr_err("Timeout to wait for wlfw mac ready\n");
+			break;
+		}
+
+		msleep(QMI_WLFW_MAC_READY_TIMEOUT_MS);
+	} while (true);
+}
+
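+
cnss_wait_for_wlfw_mac_ready() polls every 50 ms for up to 200 tries (roughly 10 s), treating -EAGAIN as "not ready yet" and anything else as terminal. A userspace sketch of the same poll contract (query_ready() is a contrived stand-in for the QMI query):

	#include <errno.h>
	#include <stdio.h>
	#include <unistd.h>

	/* contrived stand-in: reports -EAGAIN four times, then ready */
	static int query_ready(void)
	{
		static int calls;

		return (++calls < 5) ? -EAGAIN : 0;
	}

	static int wait_ready(unsigned int max_retry, unsigned int interval_ms)
	{
		unsigned int retry = 0;

		for (;;) {
			int ret = query_ready();

			if (ret != -EAGAIN)
				return ret;	/* 0 = ready, else hard error */
			if (++retry >= max_retry)
				return -ETIMEDOUT;
			usleep(interval_ms * 1000);
		}
	}

	int main(void)
	{
		printf("wait_ready -> %d\n", wait_ready(200, 50));
		return 0;
	}
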
 int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv,
 				  enum cnss_driver_mode mode)
 {
@@ -698,6 +829,9 @@ int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv,
 	if (!plat_priv)
 		return -ENODEV;
 
+	if (mode == CNSS_MISSION && plat_priv->use_nv_mac)
+		cnss_wait_for_wlfw_mac_ready(plat_priv);
+
 	cnss_pr_dbg("Sending mode message, mode: %s(%d), state: 0x%lx\n",
 		    cnss_qmi_mode_to_str(mode), mode, plat_priv->driver_state);
 
diff --git a/drivers/pci/controller/pci-msm.c b/drivers/pci/controller/pci-msm.c
index e88b292..c7e5f9d8 100644
--- a/drivers/pci/controller/pci-msm.c
+++ b/drivers/pci/controller/pci-msm.c
@@ -19,7 +19,6 @@
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/msi.h>
 #include <linux/msm-bus.h>
 #include <linux/msm-bus-board.h>
 #include <linux/msm_pcie.h>
@@ -175,7 +174,6 @@
 #define MAX_RC_NUM (3)
 #define MAX_DEVICE_NUM (20)
 #define PCIE_TLP_RD_SIZE (0x5)
-#define PCIE_MSI_NR_IRQS (256)
 #define PCIE_LOG_PAGES (50)
 #define PCIE_CONF_SPACE_DW (1024)
 #define PCIE_CLEAR (0xdeadbeef)
@@ -184,14 +182,6 @@
 #define MSM_PCIE_MAX_RESET (5)
 #define MSM_PCIE_MAX_PIPE_RESET (1)
 
-#define MSM_PCIE_MSI_PHY (0xa0000000)
-#define PCIE20_MSI_CTRL_ADDR (0x820)
-#define PCIE20_MSI_CTRL_UPPER_ADDR (0x824)
-#define PCIE20_MSI_CTRL_INTR_EN (0x828)
-#define PCIE20_MSI_CTRL_INTR_MASK (0x82c)
-#define PCIE20_MSI_CTRL_INTR_STATUS (0x830)
-#define PCIE20_MSI_CTRL_MAX (8)
-
 /* Each tick is 19.2 MHz */
 #define L1SS_TIMEOUT_US_TO_TICKS(x) (x * 192 / 10)
 #define L1SS_TIMEOUT_US (100000)
@@ -286,7 +276,6 @@ enum msm_pcie_res {
 };
 
 enum msm_pcie_irq {
-	MSM_PCIE_INT_MSI,
 	MSM_PCIE_INT_A,
 	MSM_PCIE_INT_B,
 	MSM_PCIE_INT_C,
@@ -685,8 +674,6 @@ struct msm_pcie_dev_t {
 	struct mutex clk_lock;
 
 	struct irq_domain *irq_domain;
-	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_NR_IRQS);
-	bool use_msi;
 
 	enum msm_pcie_link_status link_status;
 	bool user_suspend;
@@ -738,6 +725,8 @@ struct msm_pcie_dev_t {
 	struct mutex recovery_lock;
 	spinlock_t wakeup_lock;
 	spinlock_t irq_lock;
+	struct mutex aspm_lock;
+	int prevent_l1;
 	ulong linkdown_counter;
 	ulong link_turned_on_counter;
 	ulong link_turned_off_counter;
@@ -996,7 +985,6 @@ static const struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = {
 
 /* irqs */
 static const struct msm_pcie_irq_info_t msm_pcie_irq_info[MSM_PCIE_MAX_IRQ] = {
-	{"int_msi", 0},
 	{"int_a", 0},
 	{"int_b", 0},
 	{"int_c", 0},
@@ -1300,8 +1288,6 @@ static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
 		? "enabled" : "disabled");
 	PCIE_DBG_FS(dev, "cfg_access is %s allowed\n",
 		dev->cfg_access ? "" : "not");
-	PCIE_DBG_FS(dev, "use_msi is %d\n",
-		dev->use_msi);
 	PCIE_DBG_FS(dev, "use_pinctrl is %d\n",
 		dev->use_pinctrl);
 	PCIE_DBG_FS(dev, "use_19p2mhz_aux_clk is %d\n",
@@ -1408,6 +1394,8 @@ static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
 		dev->wake_counter);
 	PCIE_DBG_FS(dev, "link_check_max_count: %u\n",
 		dev->link_check_max_count);
+	PCIE_DBG_FS(dev, "prevent_l1: %d\n",
+		dev->prevent_l1);
 	PCIE_DBG_FS(dev, "target_link_speed: 0x%x\n",
 		dev->target_link_speed);
 	PCIE_DBG_FS(dev, "link_turned_on_counter: %lu\n",
@@ -3558,24 +3546,6 @@ static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev)
 	}
 }
 
-static void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev)
-{
-	int i;
-
-	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
-
-	/* program MSI controller and enable all interrupts */
-	writel_relaxed(MSM_PCIE_MSI_PHY, dev->dm_core + PCIE20_MSI_CTRL_ADDR);
-	writel_relaxed(0, dev->dm_core + PCIE20_MSI_CTRL_UPPER_ADDR);
-
-	for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++)
-		writel_relaxed(~0, dev->dm_core +
-			       PCIE20_MSI_CTRL_INTR_EN + (i * 12));
-
-	/* ensure that hardware is configured before proceeding */
-	wmb();
-}
-
 static int msm_pcie_get_clk(struct msm_pcie_dev_t *pcie_dev)
 {
 	int i, cnt, ret;
@@ -4267,22 +4237,10 @@ static int msm_pcie_enable(struct msm_pcie_dev_t *dev)
 	writel_relaxed(dev->slv_addr_space_size, dev->parf +
 		PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
 
-	if (dev->use_msi) {
-		PCIE_DBG(dev, "RC%d: enable WR halt.\n", dev->rc_idx);
-		val = dev->wr_halt_size ? dev->wr_halt_size :
-			readl_relaxed(dev->parf +
-				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
-
-		msm_pcie_write_reg(dev->parf,
-			PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT,
-			BIT(31) | val);
-
-		PCIE_DBG(dev,
-			"RC%d: PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT: 0x%x.\n",
-			dev->rc_idx,
-			readl_relaxed(dev->parf +
-				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT));
-	}
+	val = dev->wr_halt_size ? dev->wr_halt_size :
+		readl_relaxed(dev->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+	msm_pcie_write_reg(dev->parf, PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT,
+				BIT(31) | val);
 
 	/* init PCIe PHY */
 	ret = pcie_phy_init(dev);
@@ -4349,9 +4307,6 @@ static int msm_pcie_enable(struct msm_pcie_dev_t *dev)
 		goto link_fail;
 	}
 
-	if (!IS_ENABLED(CONFIG_PCI_MSM_MSI))
-		msm_pcie_config_msi_controller(dev);
-
 	if (dev->enumerated)
 		msm_pcie_config_link_pm(dev, true);
 
@@ -5111,39 +5066,6 @@ static irqreturn_t handle_linkdown_irq(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t handle_msi_irq(int irq, void *data)
-{
-	int i, j;
-	unsigned long val;
-	struct msm_pcie_dev_t *dev = data;
-	void __iomem *ctrl_status;
-
-	PCIE_DUMP(dev, "irq: %d\n", irq);
-
-	/*
-	 * check for set bits, clear it by setting that bit
-	 * and trigger corresponding irq
-	 */
-	for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++) {
-		ctrl_status = dev->dm_core +
-				PCIE20_MSI_CTRL_INTR_STATUS + (i * 12);
-
-		val = readl_relaxed(ctrl_status);
-		while (val) {
-			j = find_first_bit(&val, 32);
-			writel_relaxed(BIT(j), ctrl_status);
-			/* ensure that interrupt is cleared (acked) */
-			wmb();
-			generic_handle_irq(
-			   irq_find_mapping(dev->irq_domain, (j + (32*i)))
-			   );
-			val = readl_relaxed(ctrl_status);
-		}
-	}
-
-	return IRQ_HANDLED;
-}
-
 static irqreturn_t handle_global_irq(int irq, void *data)
 {
 	int i;
@@ -5207,192 +5129,9 @@ static irqreturn_t handle_global_irq(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static void msm_pcie_destroy_irq(struct msi_desc *entry, unsigned int irq)
-{
-	int pos;
-	struct msm_pcie_dev_t *dev;
-	struct pci_dev *pdev = msi_desc_to_pci_dev(entry);
-
-	if (!pdev) {
-		pr_err("PCIe: pci device is null. IRQ:%d\n", irq);
-		return;
-	}
-
-	dev = PCIE_BUS_PRIV_DATA(pdev->bus);
-	if (!dev) {
-		pr_err("PCIe: could not find RC. IRQ:%d\n", irq);
-		return;
-	}
-
-	PCIE_DBG(dev, "destroy default MSI irq %d\n", irq);
-	pos = irq - irq_find_mapping(dev->irq_domain, 0);
-
-	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
-
-	PCIE_DBG(dev, "Before clear_bit pos:%d msi_irq_in_use:%ld\n",
-		pos, *dev->msi_irq_in_use);
-	clear_bit(pos, dev->msi_irq_in_use);
-	PCIE_DBG(dev, "After clear_bit pos:%d msi_irq_in_use:%ld\n",
-		pos, *dev->msi_irq_in_use);
-}
-
-/* hookup to linux pci msi framework */
-void arch_teardown_msi_irq(unsigned int irq)
-{
-	struct msi_desc *entry = irq_get_msi_desc(irq);
-
-	PCIE_GEN_DBG("irq %d deallocated\n", irq);
-
-	if (entry)
-		msm_pcie_destroy_irq(entry, irq);
-}
-
-void arch_teardown_msi_irqs(struct pci_dev *dev)
-{
-	struct msi_desc *entry;
-	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
-
-	PCIE_DBG(pcie_dev, "RC:%d EP: vendor_id:0x%x device_id:0x%x\n",
-		pcie_dev->rc_idx, dev->vendor, dev->device);
-
-	pcie_dev->use_msi = false;
-
-	list_for_each_entry(entry, &dev->dev.msi_list, list) {
-		int i, nvec;
-
-		if (entry->irq == 0)
-			continue;
-		nvec = 1 << entry->msi_attrib.multiple;
-		for (i = 0; i < nvec; i++)
-			msm_pcie_destroy_irq(entry, entry->irq + i);
-	}
-}
-
-static void msm_pcie_msi_nop(struct irq_data *d)
-{
-}
-
-static struct irq_chip pcie_msi_chip = {
-	.name = "msm-pcie-msi",
-	.irq_ack = msm_pcie_msi_nop,
-	.irq_enable = unmask_msi_irq,
-	.irq_disable = mask_msi_irq,
-	.irq_mask = mask_msi_irq,
-	.irq_unmask = unmask_msi_irq,
-};
-
-static int msm_pcie_create_irq(struct msm_pcie_dev_t *dev)
-{
-	int irq, pos;
-
-	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
-
-again:
-	pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
-
-	if (pos >= PCIE_MSI_NR_IRQS)
-		return -ENOSPC;
-
-	PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
-
-	if (test_and_set_bit(pos, dev->msi_irq_in_use))
-		goto again;
-	else
-		PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
-
-	irq = irq_create_mapping(dev->irq_domain, pos);
-	if (!irq)
-		return -EINVAL;
-
-	return irq;
-}
-
-static int arch_setup_msi_irq_default(struct pci_dev *pdev,
-		struct msi_desc *desc, int nvec)
-{
-	int irq;
-	struct msi_msg msg;
-	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
-
-	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
-
-	irq = msm_pcie_create_irq(dev);
-
-	PCIE_DBG(dev, "IRQ %d is allocated.\n", irq);
-
-	if (irq < 0)
-		return irq;
-
-	PCIE_DBG(dev, "irq %d allocated\n", irq);
-
-	irq_set_chip_data(irq, pdev);
-	irq_set_msi_desc(irq, desc);
-
-	/* write msi vector and data */
-	msg.address_hi = 0;
-	msg.address_lo = MSM_PCIE_MSI_PHY;
-	msg.data = irq - irq_find_mapping(dev->irq_domain, 0);
-	write_msi_msg(irq, &msg);
-
-	return 0;
-}
-
-int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
-{
-	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
-
-	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
-
-	return arch_setup_msi_irq_default(pdev, desc, 1);
-}
-
-int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
-	struct msi_desc *entry;
-	int ret;
-	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
-
-	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
-
-	if (type != PCI_CAP_ID_MSI || nvec > 32)
-		return -ENOSPC;
-
-	PCIE_DBG(pcie_dev, "nvec = %d\n", nvec);
-
-	list_for_each_entry(entry, &dev->dev.msi_list, list) {
-		entry->msi_attrib.multiple =
-			__ilog2_u32(__roundup_pow_of_two(nvec));
-
-		ret = arch_setup_msi_irq_default(dev, entry, nvec);
-
-		PCIE_DBG(pcie_dev, "ret from msi_irq: %d\n", ret);
-
-		if (ret < 0)
-			return ret;
-		if (ret > 0)
-			return -ENOSPC;
-	}
-
-	pcie_dev->use_msi = true;
-
-	return 0;
-}
-
-static int msm_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
-	   irq_hw_number_t hwirq)
-{
-	irq_set_chip_and_handler (irq, &pcie_msi_chip, handle_simple_irq);
-	return 0;
-}
-
-static const struct irq_domain_ops msm_pcie_msi_ops = {
-	.map = msm_pcie_msi_map,
-};
-
 static int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
 {
 	int rc;
-	int msi_start =  0;
 	struct device *pdev = &dev->pdev->dev;
 
 	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
@@ -5402,22 +5141,6 @@ static int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
 	else
 		wakeup_source_init(&dev->ws, "RC0 pcie_wakeup_source");
 
-	/* register handler for physical MSI interrupt line */
-	if (dev->irq[MSM_PCIE_INT_MSI].num) {
-		rc = devm_request_irq(pdev,
-			dev->irq[MSM_PCIE_INT_MSI].num,
-			handle_msi_irq,
-			IRQF_TRIGGER_RISING,
-			dev->irq[MSM_PCIE_INT_MSI].name,
-			dev);
-		if (rc) {
-			PCIE_ERR(dev,
-				"PCIe: RC%d: Unable to request MSI interrupt\n",
-				dev->rc_idx);
-			return rc;
-		}
-	}
-
 	if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
 		rc = devm_request_irq(pdev,
 				dev->irq[MSM_PCIE_INT_GLOBAL_INT].num,
@@ -5457,25 +5180,6 @@ static int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
 		}
 	}
 
-	/* Create a virtual domain of interrupts */
-	if (!IS_ENABLED(CONFIG_PCI_MSM_MSI)) {
-		dev->irq_domain = irq_domain_add_linear(dev->pdev->dev.of_node,
-			PCIE_MSI_NR_IRQS, &msm_pcie_msi_ops, dev);
-
-		if (!dev->irq_domain) {
-			PCIE_ERR(dev,
-				"PCIe: RC%d: Unable to initialize irq domain\n",
-				dev->rc_idx);
-
-			if (dev->wake_n)
-				disable_irq(dev->wake_n);
-
-			return PTR_ERR(dev->irq_domain);
-		}
-
-		msi_start = irq_create_mapping(dev->irq_domain, 0);
-	}
-
 	return 0;
 }
 
@@ -6351,20 +6055,6 @@ static int msm_pcie_link_retrain(struct msm_pcie_dev_t *pcie_dev,
 	u32 cnt_max = 1000; /* 100ms timeout */
 	u32 link_status_lbms_mask = PCI_EXP_LNKSTA_LBMS << PCI_EXP_LNKCTL;
 
-	cnt = 0;
-	/* confirm link is in L0 */
-	while (((readl_relaxed(pcie_dev->parf + PCIE20_PARF_LTSSM) &
-		MSM_PCIE_LTSSM_MASK)) != MSM_PCIE_LTSSM_L0) {
-		if (unlikely(cnt++ >= cnt_max)) {
-			PCIE_ERR(pcie_dev,
-				"PCIe: RC%d: failed to transition to L0\n",
-				pcie_dev->rc_idx);
-			return -EIO;
-		}
-
-		usleep_range(100, 105);
-	}
-
 	/* link retrain */
 	msm_pcie_config_clear_set_dword(pci_dev,
 					pci_dev->pcie_cap + PCI_EXP_LNKCTL,
@@ -6413,6 +6103,99 @@ static int msm_pcie_set_link_width(struct msm_pcie_dev_t *pcie_dev,
 	return 0;
 }
 
+void msm_pcie_allow_l1(struct pci_dev *pci_dev)
+{
+	struct pci_dev *root_pci_dev;
+	struct msm_pcie_dev_t *pcie_dev;
+
+	root_pci_dev = pci_find_pcie_root_port(pci_dev);
+	if (!root_pci_dev)
+		return;
+
+	pcie_dev = PCIE_BUS_PRIV_DATA(root_pci_dev->bus);
+
+	mutex_lock(&pcie_dev->aspm_lock);
+	if (unlikely(--pcie_dev->prevent_l1 < 0))
+		PCIE_ERR(pcie_dev,
+			"PCIe: RC%d: %02x:%02x.%01x: unbalanced prevent_l1: %d < 0\n",
+			pcie_dev->rc_idx, pci_dev->bus->number,
+			PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn),
+			pcie_dev->prevent_l1);
+
+	if (pcie_dev->prevent_l1) {
+		mutex_unlock(&pcie_dev->aspm_lock);
+		return;
+	}
+
+	msm_pcie_write_mask(pcie_dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0);
+	/* enable L1 */
+	msm_pcie_write_mask(pcie_dev->dm_core +
+				(root_pci_dev->pcie_cap + PCI_EXP_LNKCTL),
+				0, PCI_EXP_LNKCTL_ASPM_L1);
+
+	PCIE_DBG2(pcie_dev, "PCIe: RC%d: %02x:%02x.%01x: exit\n",
+		pcie_dev->rc_idx, pci_dev->bus->number,
+		PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
+	mutex_unlock(&pcie_dev->aspm_lock);
+}
+EXPORT_SYMBOL(msm_pcie_allow_l1);
+
+int msm_pcie_prevent_l1(struct pci_dev *pci_dev)
+{
+	struct pci_dev *root_pci_dev;
+	struct msm_pcie_dev_t *pcie_dev;
+	u32 cnt = 0;
+	u32 cnt_max = 1000; /* 100ms timeout */
+	int ret = 0;
+
+	root_pci_dev = pci_find_pcie_root_port(pci_dev);
+	if (!root_pci_dev)
+		return -ENODEV;
+
+	pcie_dev = PCIE_BUS_PRIV_DATA(root_pci_dev->bus);
+
+	/* disable L1 */
+	mutex_lock(&pcie_dev->aspm_lock);
+	if (pcie_dev->prevent_l1++) {
+		mutex_unlock(&pcie_dev->aspm_lock);
+		return 0;
+	}
+
+	msm_pcie_write_mask(pcie_dev->dm_core +
+				(root_pci_dev->pcie_cap + PCI_EXP_LNKCTL),
+				PCI_EXP_LNKCTL_ASPM_L1, 0);
+	msm_pcie_write_mask(pcie_dev->parf + PCIE20_PARF_PM_CTRL, 0, BIT(5));
+
+	/* confirm link is in L0 */
+	while (((readl_relaxed(pcie_dev->parf + PCIE20_PARF_LTSSM) &
+		MSM_PCIE_LTSSM_MASK)) != MSM_PCIE_LTSSM_L0) {
+		if (unlikely(cnt++ >= cnt_max)) {
+			PCIE_ERR(pcie_dev,
+				"PCIe: RC%d: %02x:%02x.%01x: failed to transition to L0\n",
+				pcie_dev->rc_idx, pci_dev->bus->number,
+				PCI_SLOT(pci_dev->devfn),
+				PCI_FUNC(pci_dev->devfn));
+			ret = -EIO;
+			goto err;
+		}
+
+		usleep_range(100, 105);
+	}
+
+	PCIE_DBG2(pcie_dev, "PCIe: RC%d: %02x:%02x.%01x: exit\n",
+		pcie_dev->rc_idx, pci_dev->bus->number,
+		PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
+	mutex_unlock(&pcie_dev->aspm_lock);
+
+	return 0;
+err:
+	mutex_unlock(&pcie_dev->aspm_lock);
+	msm_pcie_allow_l1(pci_dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_pcie_prevent_l1);
+
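+/*
+ * Usage sketch (illustrative): prevent/allow calls must be balanced,
+ * since prevent_l1 is reference counted per root complex. A hypothetical
+ * client keeping the link in L0 around a latency-sensitive access:
+ *
+ *	ret = msm_pcie_prevent_l1(pci_dev);
+ *	if (ret)
+ *		return ret;
+ *	... latency-sensitive access ...
+ *	msm_pcie_allow_l1(pci_dev);
+ */
+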
 int msm_pcie_set_link_bandwidth(struct pci_dev *pci_dev, u16 target_link_speed,
 				u16 target_link_width)
 {
@@ -6462,9 +6245,10 @@ int msm_pcie_set_link_bandwidth(struct pci_dev *pci_dev, u16 target_link_speed,
 						PCI_EXP_LNKSTA_CLS,
 						target_link_speed);
 
-	/* disable link L1. Need to be in L0 for gen switch */
-	msm_pcie_config_l1(pcie_dev, root_pci_dev, false);
-	msm_pcie_write_mask(pcie_dev->parf + PCIE20_PARF_PM_CTRL,  0, BIT(5));
+	/* need to be in L0 for gen switch */
+	ret = msm_pcie_prevent_l1(root_pci_dev);
+	if (ret)
+		return ret;
 
 	if (target_link_speed > current_link_speed)
 		msm_pcie_scale_link_bandwidth(pcie_dev, target_link_speed);
@@ -6487,9 +6271,7 @@ int msm_pcie_set_link_bandwidth(struct pci_dev *pci_dev, u16 target_link_speed,
 	if (target_link_speed < current_link_speed)
 		msm_pcie_scale_link_bandwidth(pcie_dev, target_link_speed);
 out:
-	/* re-enable link L1 */
-	msm_pcie_write_mask(pcie_dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0);
-	msm_pcie_config_l1(pcie_dev, root_pci_dev, true);
+	msm_pcie_allow_l1(root_pci_dev);
 
 	return ret;
 }
@@ -6800,6 +6582,7 @@ static int __init pcie_init(void)
 		mutex_init(&msm_pcie_dev[i].setup_lock);
 		mutex_init(&msm_pcie_dev[i].clk_lock);
 		mutex_init(&msm_pcie_dev[i].recovery_lock);
+		mutex_init(&msm_pcie_dev[i].aspm_lock);
 		spin_lock_init(&msm_pcie_dev[i].wakeup_lock);
 		spin_lock_init(&msm_pcie_dev[i].irq_lock);
 		msm_pcie_dev[i].drv_ready = false;
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index defae6c..5fe35d7 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -77,6 +77,15 @@
 	  Adds the L2 cache PMU into the perf events subsystem for
 	  monitoring L2 cache events.
 
+config QCOM_L2_COUNTERS
+	bool "Qualcomm Technologies L2-cache counters (PMU)"
+	depends on ARCH_QCOM && ARM64
+	help
+	  Provides support for the L2 cache counters
+	  in Qualcomm Technologies processors.
+	  Adds L2 cache counter support to the perf events subsystem for
+	  monitoring L2 cache events.
+
 config QCOM_L3_PMU
 	bool "Qualcomm Technologies L3-cache PMU"
 	depends on ARCH_QCOM && ARM64 && ACPI
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 8b5e88f..ca14381 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -6,6 +6,7 @@
 obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
 obj-$(CONFIG_HISI_PMU) += hisilicon/
 obj-$(CONFIG_QCOM_L2_PMU)	+= qcom_l2_pmu.o
+obj-$(CONFIG_QCOM_L2_COUNTERS)	+= qcom_l2_counters.o
 obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
 obj-$(CONFIG_QCOM_LLCC_PMU) += qcom_llcc_pmu.o
 obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
diff --git a/drivers/perf/qcom_l2_counters.c b/drivers/perf/qcom_l2_counters.c
new file mode 100644
index 0000000..2430870
--- /dev/null
+++ b/drivers/perf/qcom_l2_counters.c
@@ -0,0 +1,1210 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/percpu.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include <asm/local64.h>
+
+#define L2_COUNTERS_BUG		 "[l2 counters error]: "
+/*
+ * Events id
+ * 0xXXX here,
+ *
+ * 1 bit(lsb) for group (group is either txn/tenure counter).
+ * 4 bits for serial number for counter starting from 0 to 8.
+ * 5 bits for bit position of counter enable bit in a register.
+ */
+#define L2_EVENT_CYCLE_CNTR			0x000
+#define L2_EVENT_DDR_WR_CNTR			0x022
+#define L2_EVENT_DDR_RD_CNTR			0x044
+#define L2_EVENT_SNP_RD_CNTR			0x066
+#define L2_EVENT_ACP_WR_CNTR			0x088
+
+#define L2_EVENT_TENURE_CNTR			0x26b
+#define L2_EVENT_LOW_RANGE_OCCUR_CNTR		0x04d
+#define L2_EVENT_MID_RANGE_OCCUR_CNTR		0x0cf
+#define L2_EVENT_HIGH_RANGE_OCCUR_CNTR		0x151
+
+#define EVENT_GROUP_MASK			0x1
+#define REGBIT_MASK				0x3e0
+#define ID_MASK					0x1e
+
+#define TRANSACTION_CNTRS_GROUP_ID		0x0
+#define TENURE_CNTRS_GROUP_ID			0x1
+#define ID_SHIFT				0x1
+#define REGBIT_SHIFT				0x5
+
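+/*
+ * Worked example (illustrative): L2_EVENT_DDR_WR_CNTR = 0x022 decodes as
+ *   group  = 0x022 & EVENT_GROUP_MASK              = 0 (transaction group)
+ *   id     = (0x022 & ID_MASK) >> ID_SHIFT         = 1 (DDR_WR_CNTR)
+ *   regbit = (0x022 & REGBIT_MASK) >> REGBIT_SHIFT = 1 (counter enable bit
+ *            in the txn config register)
+ */
+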
+#define TXN_CONFIG_REG_OFFSET			0x54c
+#define OVERFLOW_REG_OFFSET			0x560
+#define CNTR_SET_VAL_REG_OFFSET			0x55c
+#define TXN_CYCLE_CNTR_DATA			0x634
+#define TXN_DDR_WR_CNTR_DATA			0x638
+#define TXN_DDR_RD_CNTR_DATA			0x63c
+#define TXN_SNP_RD_CNTR_DATA			0x640
+#define TXN_ACP_WR_CNTR_DATA			0x644
+
+#define TENURE_CONFIG_REG_OFFSET		0x52c
+#define LOW_RANGE_OCCURRENCE_CNTR_DATA		0x53c
+#define MID_RANGE_OCCURRENCE_CNTR_DATA		0x540
+#define HIGH_RANGE_OCCURRENCE_CNTR_DATA		0x544
+#define LPM_TENURE_CNTR_DATA			0x548
+#define LOW_RANGE_TENURE_VAL			0x534
+#define MID_RANGE_TENURE_VAL			0x538
+
+#define TENURE_ENABLE_ALL			0x880444
+#define TENURE_CNTR_ENABLE			19
+#define LOW_RANGE_OCCURRENCE_CNTR_ENABLE	2
+#define MID_RANGE_OCCURRENCE_CNTR_ENABLE	6
+#define HIGH_RANGE_OCCURRENCE_CNTR_ENABLE	10
+#define OCCURRENCE_CNTR_ENABLE_MASK		(BIT(2) | BIT(6) | BIT(10))
+
+#define LPM_MODE_TENURE_CNTR_RESET		12
+#define LOW_RANGE_OCCURRENCE_CNTR_RESET		 0
+#define MID_RANGE_OCCURRENCE_CNTR_RESET		 4
+#define HIGH_RANGE_OCCURRENCE_CNTR_RESET	 8
+
+/* Txn reset/set/overflow bit offsets */
+#define TXN_RESET_BIT				 5
+#define TXN_RESET_ALL_CNTR			0x000003e0
+#define TXN_RESET_ALL_CNTR_OVSR_BIT		0x007c0000
+#define TENURE_RESET_ALL_CNTR			0x00001111
+#define TENURE_RESET_OVERFLOW_ALL_CNTR		0x00002888
+
+#define TXN_SET_BIT				13
+#define TXN_OVERFLOW_RESET_BIT			18
+
+#define LOW_RANGE_OCCURRENCE_CNTR_OVERFLOW_RESET	 3
+#define MID_RANGE_OCCURRENCE_CNTR_OVERFLOW_RESET	 7
+#define HIGH_RANGE_OCCURRENCE_CNTR_OVERFLOW_RESET	11
+#define LPM_MODE_TENURE_CNTR_OVERFLOW_RESET		13
+
+enum counter_index {
+	CLUSTER_CYCLE_COUNTER,
+	DDR_WR_CNTR,
+	DDR_RD_CNTR,
+	SNP_RD_CNTR,
+	ACP_WR_CNTR,
+	LPM_TENURE_CNTR,
+	LOW_OCCURRENCE_CNTR,
+	MID_OCCURRENCE_CNTR,
+	HIGH_OCCURRENCE_CNTR,
+	MAX_L2_CNTRS
+};
+
+/*
+ * Each cluster has its own PMU(counters) and associated with one or more CPUs.
+ * This structure represents one of the hardware PMUs.
+ */
+struct cluster_pmu {
+	struct device dev;
+	struct list_head next;
+	struct perf_event *events[MAX_L2_CNTRS];
+	void __iomem *reg_addr;
+	struct l2cache_pmu *l2cache_pmu;
+	DECLARE_BITMAP(used_counters, MAX_L2_CNTRS);
+	int irq;
+	int cluster_id;
+	/* The CPU that is used for collecting events on this cluster */
+	int on_cpu;
+	/* All the CPUs associated with this cluster */
+	cpumask_t cluster_cpus;
+	spinlock_t pmu_lock;
+};
+
+/*
+ * Aggregate PMU. Implements the core pmu functions and manages
+ * the hardware PMUs.
+ */
+struct l2cache_pmu {
+	struct hlist_node node;
+	u32 num_pmus;
+	struct pmu pmu;
+	int num_counters;
+	cpumask_t cpumask;
+	struct platform_device *pdev;
+	struct cluster_pmu * __percpu *pmu_cluster;
+	struct list_head clusters;
+};
+
+static unsigned int which_cluster_tenure = 1;
+static u32 l2_counter_present_mask;
+
+#define to_l2cache_pmu(p)	(container_of(p, struct l2cache_pmu, pmu))
+#define to_cluster_device(d)	container_of(d, struct cluster_pmu, dev)
+
+static inline struct cluster_pmu *get_cluster_pmu(
+	struct l2cache_pmu *l2cache_pmu, int cpu)
+{
+	return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu);
+}
+
+static inline u32 cluster_tenure_counter_read(struct cluster_pmu *cluster,
+		u32 idx)
+{
+	u32 val = 0;
+
+	switch (idx) {
+	case LOW_RANGE_OCCURRENCE_CNTR_ENABLE:
+		val = readl_relaxed(cluster->reg_addr +
+					LOW_RANGE_OCCURRENCE_CNTR_DATA);
+		break;
+
+	case MID_RANGE_OCCURRENCE_CNTR_ENABLE:
+		val = readl_relaxed(cluster->reg_addr +
+					MID_RANGE_OCCURRENCE_CNTR_DATA);
+		break;
+
+	case HIGH_RANGE_OCCURRENCE_CNTR_ENABLE:
+		val = readl_relaxed(cluster->reg_addr +
+					HIGH_RANGE_OCCURRENCE_CNTR_DATA);
+		break;
+
+	default:
+		pr_crit(L2_COUNTERS_BUG
+			"Invalid index %u in %s\n", idx, __func__);
+	}
+
+	return val;
+}
+
+static inline u32 cluster_pmu_counter_get_value(struct cluster_pmu *cluster,
+		u32 idx, u32 event_grp)
+{
+	if (event_grp == TENURE_CNTRS_GROUP_ID)
+		return cluster_tenure_counter_read(cluster, idx);
+
+	return readl_relaxed(cluster->reg_addr +
+					TXN_CYCLE_CNTR_DATA + (4 * idx));
+}
+
+static inline u32 cluster_txn_config_read(struct cluster_pmu *cluster)
+{
+	return readl_relaxed(cluster->reg_addr + TXN_CONFIG_REG_OFFSET);
+}
+
+static inline void cluster_txn_config_write(struct cluster_pmu *cluster,
+		u32 val)
+{
+	writel_relaxed(val, cluster->reg_addr + TXN_CONFIG_REG_OFFSET);
+}
+
+static inline u32 cluster_tenure_config_read(struct cluster_pmu *cluster)
+{
+	return readl_relaxed(cluster->reg_addr + TENURE_CONFIG_REG_OFFSET);
+}
+
+static inline void cluster_tenure_config_write(struct cluster_pmu *cluster,
+		u32 val)
+{
+	writel_relaxed(val, cluster->reg_addr + TENURE_CONFIG_REG_OFFSET);
+}
+
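+/*
+ * Counter and overflow reset bits are pulsed: set the bit, then clear
+ * it again, as done below and in cluster_pmu_reset().
+ */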
+static void cluster_txn_cntr_reset(struct cluster_pmu *cluster, u32 idx)
+{
+	cluster_txn_config_write(cluster, cluster_txn_config_read(cluster)
+				| BIT(idx));
+	cluster_txn_config_write(cluster, cluster_txn_config_read(cluster)
+				& ~BIT(idx));
+}
+
+static void cluster_pmu_reset(struct cluster_pmu *cluster)
+{
+	cluster_txn_config_write(cluster, cluster_txn_config_read(cluster)
+				| TXN_RESET_ALL_CNTR);
+	cluster_txn_config_write(cluster, cluster_txn_config_read(cluster)
+				& ~TXN_RESET_ALL_CNTR);
+	cluster_txn_config_write(cluster, cluster_txn_config_read(cluster)
+				| TXN_RESET_ALL_CNTR_OVSR_BIT);
+	cluster_txn_config_write(cluster, cluster_txn_config_read(cluster)
+				& ~TXN_RESET_ALL_CNTR_OVSR_BIT);
+	cluster_tenure_config_write(cluster, cluster_tenure_config_read(cluster)
+				| TENURE_RESET_ALL_CNTR);
+	cluster_tenure_config_write(cluster, cluster_tenure_config_read(cluster)
+				& ~TENURE_RESET_ALL_CNTR);
+	cluster_tenure_config_write(cluster, cluster_tenure_config_read(cluster)
+				| TENURE_RESET_OVERFLOW_ALL_CNTR);
+	cluster_tenure_config_write(cluster, cluster_tenure_config_read(cluster)
+				& ~TENURE_RESET_OVERFLOW_ALL_CNTR);
+}
+
+static void cluster_tenure_counter_reset(struct cluster_pmu *cluster, u32 idx)
+{
+	cluster_tenure_config_write(cluster, cluster_tenure_config_read(cluster)
+				| BIT(idx));
+	cluster_tenure_config_write(cluster, cluster_tenure_config_read(cluster)
+				& ~BIT(idx));
+}
+
+static inline void cluster_tenure_counter_enable(struct cluster_pmu *cluster,
+						u32 idx)
+{
+	u32 val;
+
+	val = cluster_tenure_config_read(cluster);
+	/* Already enabled */
+	if (val & BIT(idx))
+		return;
+
+	switch (idx) {
+	case LOW_RANGE_OCCURRENCE_CNTR_ENABLE:
+		cluster_tenure_counter_reset(cluster,
+					LOW_RANGE_OCCURRENCE_CNTR_RESET);
+		break;
+
+	case MID_RANGE_OCCURRENCE_CNTR_ENABLE:
+		cluster_tenure_counter_reset(cluster,
+					MID_RANGE_OCCURRENCE_CNTR_RESET);
+		break;
+
+	case HIGH_RANGE_OCCURRENCE_CNTR_ENABLE:
+		cluster_tenure_counter_reset(cluster,
+					HIGH_RANGE_OCCURRENCE_CNTR_RESET);
+		break;
+
+	default:
+		pr_crit(L2_COUNTERS_BUG
+			"Invalid index %u in %s\n", idx, __func__);
+		return;
+	}
+
+	if (!(val & BIT(TENURE_CNTR_ENABLE))) {
+		cluster_tenure_counter_reset(cluster,
+					LPM_MODE_TENURE_CNTR_RESET);
+		/*
+		 * Enable the tenure counter as part of enabling any
+		 * occurrence counter, since occurrence counters do not
+		 * increment unless the tenure counter is enabled.
+		 */
+		cluster_tenure_config_write(cluster,
+			cluster_tenure_config_read(cluster)
+			| BIT(TENURE_CNTR_ENABLE));
+	}
+
+	cluster_tenure_config_write(cluster,
+			cluster_tenure_config_read(cluster) | BIT(idx));
+}
+
+static inline void cluster_txn_counter_enable(struct cluster_pmu *cluster,
+						u32 idx)
+{
+	u32 val;
+
+	val = cluster_txn_config_read(cluster);
+	if (val & BIT(idx))
+		return;
+
+	cluster_txn_cntr_reset(cluster, TXN_RESET_BIT + idx);
+	cluster_txn_config_write(cluster, cluster_txn_config_read(cluster)
+				| BIT(idx));
+}
+
+static inline void cluster_tenure_counter_disable(struct cluster_pmu *cluster,
+						u32 idx)
+{
+	u32 val;
+
+	cluster_tenure_config_write(cluster, cluster_tenure_config_read(cluster)
+					& ~BIT(idx));
+	val = cluster_tenure_config_read(cluster);
+	if (!(val & OCCURRENCE_CNTR_ENABLE_MASK))
+		cluster_tenure_config_write(cluster, val &
+					~BIT(TENURE_CNTR_ENABLE));
+}
+
+static inline void cluster_txn_counter_disable(struct cluster_pmu *cluster,
+						u32 idx)
+{
+	cluster_txn_config_write(cluster,
+		cluster_txn_config_read(cluster) & ~BIT(idx));
+}
+
+static inline u32 cluster_reg_read(struct cluster_pmu *cluster, u32 offset)
+{
+	return readl_relaxed(cluster->reg_addr + offset);
+}
+
+static inline void cluster_tenure_cntr_reset_ovsr(struct cluster_pmu *cluster,
+		u32 event_idx)
+{
+	switch (event_idx) {
+	case LPM_TENURE_CNTR:
+		cluster_tenure_counter_reset(cluster,
+				LPM_MODE_TENURE_CNTR_OVERFLOW_RESET);
+		break;
+
+	case LOW_RANGE_OCCURRENCE_CNTR_ENABLE:
+		cluster_tenure_counter_reset(cluster,
+				LOW_RANGE_OCCURRENCE_CNTR_OVERFLOW_RESET);
+		break;
+
+	case MID_RANGE_OCCURRENCE_CNTR_ENABLE:
+		cluster_tenure_counter_reset(cluster,
+				MID_RANGE_OCCURRENCE_CNTR_OVERFLOW_RESET);
+		break;
+
+	case HIGH_RANGE_OCCURRENCE_CNTR_ENABLE:
+		cluster_tenure_counter_reset(cluster,
+				HIGH_RANGE_OCCURRENCE_CNTR_OVERFLOW_RESET);
+		break;
+
+	default:
+		pr_crit(L2_COUNTERS_BUG
+			"Invalid index %u in %s\n", event_idx, __func__);
+	}
+}
+
+static inline void cluster_pmu_reset_ovsr(struct cluster_pmu *cluster,
+		u32 config_base)
+{
+	u32 event_idx;
+	u32 event_grp;
+
+	event_idx = (config_base & REGBIT_MASK) >> REGBIT_SHIFT;
+	event_grp = config_base & EVENT_GROUP_MASK;
+
+	if (event_grp == TENURE_CNTRS_GROUP_ID)
+		cluster_tenure_cntr_reset_ovsr(cluster, event_idx);
+	else
+		cluster_txn_cntr_reset(cluster,
+			TXN_OVERFLOW_RESET_BIT + event_idx);
+}
+
+static inline bool cluster_pmu_has_overflowed(u32 ovsr)
+{
+	return !!(ovsr & l2_counter_present_mask);
+}
+
+static inline bool cluster_pmu_counter_has_overflowed(u32 ovsr, u32 idx)
+{
+	return !!(ovsr & BIT(idx));
+}
+
+static void l2_cache_event_update(struct perf_event *event, u32 ovsr)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	u64 delta, prev, now;
+	u32 event_idx = hwc->config_base;
+	u32 event_grp;
+	struct cluster_pmu *cluster;
+
+	prev = local64_read(&hwc->prev_count);
+	if (ovsr) {
+		now = 0xffffffff;
+		goto out;
+	}
+
+	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
+	event_idx = (hwc->config_base & REGBIT_MASK) >> REGBIT_SHIFT;
+	event_grp = hwc->config_base & EVENT_GROUP_MASK;
+	do {
+		prev = local64_read(&hwc->prev_count);
+		now = cluster_pmu_counter_get_value(cluster, event_idx,
+				event_grp);
+	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
+
+	/* All counters are 32 bits wide, so truncate the delta */
+out:
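+	/*
+	 * Worked example of the unsigned wraparound math (illustrative):
+	 * prev = 0xfffffff0, now = 0x10 gives
+	 * delta = (0x10 - 0xfffffff0) & 0xffffffff = 0x20.
+	 */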
+	delta = now - prev;
+	delta &= 0xffffffff;
+
+	local64_add(delta, &event->count);
+	if (ovsr)
+		local64_set(&hwc->prev_count, 0);
+}
+
+static int l2_cache_get_event_idx(struct cluster_pmu *cluster,
+				   struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	int idx;
+	int num_ctrs = cluster->l2cache_pmu->num_counters;
+
+	idx = (hwc->config_base & ID_MASK) >> ID_SHIFT;
+	if (idx >= num_ctrs)
+		return -EINVAL;
+
+	if (test_bit(idx, cluster->used_counters))
+		return -EAGAIN;
+
+	set_bit(idx, cluster->used_counters);
+	return idx;
+}
+
+static void l2_cache_clear_event_idx(struct cluster_pmu *cluster,
+				      struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	clear_bit(idx, cluster->used_counters);
+}
+
+static irqreturn_t l2_cache_handle_irq(int irq_num, void *data)
+{
+	struct cluster_pmu *cluster = data;
+	int num_counters = cluster->l2cache_pmu->num_counters;
+	u32 ovsr;
+	int idx;
+	u32 config_base;
+
+	ovsr = cluster_reg_read(cluster, OVERFLOW_REG_OFFSET);
+	if (!cluster_pmu_has_overflowed(ovsr))
+		return IRQ_NONE;
+
+	/*
+	 * An LPM tenure counter overflow is a special case. It should
+	 * never happen, but to be safe we reset its overflow bit anyway.
+	 * The hardware is expected to handle tenure counter overflow and
+	 * its classifying category; even if it does not, at worst one
+	 * extra count is added erroneously to one of the low/mid/high
+	 * occurrence counters, which is rare enough to ignore.
+	 */
+	if (ovsr & BIT(LPM_TENURE_CNTR))
+		cluster_tenure_cntr_reset_ovsr(cluster, LPM_TENURE_CNTR);
+
+	spin_lock(&cluster->pmu_lock);
+	for_each_set_bit(idx, cluster->used_counters, num_counters) {
+		struct perf_event *event = cluster->events[idx];
+		struct hw_perf_event *hwc;
+
+		if (WARN_ON_ONCE(!event))
+			continue;
+
+		if (!cluster_pmu_counter_has_overflowed(ovsr, idx))
+			continue;
+
+		l2_cache_event_update(event, 1);
+		hwc = &event->hw;
+		config_base = hwc->config_base;
+		cluster_pmu_reset_ovsr(cluster, config_base);
+	}
+	spin_unlock(&cluster->pmu_lock);
+	return IRQ_HANDLED;
+}
+
+static int l2_cache_event_init(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct cluster_pmu *cluster;
+	struct l2cache_pmu *l2cache_pmu;
+
+	if (event->attr.type != event->pmu->type)
+		return -ENOENT;
+
+	l2cache_pmu = to_l2cache_pmu(event->pmu);
+
+	if (hwc->sample_period) {
+		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
+				    "Sampling not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (event->cpu < 0) {
+		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
+				    "Per-task mode not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* We cannot filter accurately, so we just don't allow it. */
+	if (event->attr.exclude_user || event->attr.exclude_kernel ||
+	    event->attr.exclude_hv || event->attr.exclude_idle) {
+		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
+				    "Can't exclude execution levels\n");
+		return -EOPNOTSUPP;
+	}
+
+	cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
+	if (!cluster) {
+		/* CPU has not been initialised */
+		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
+			"CPU%d not associated with L2 cluster\n", event->cpu);
+		return -EINVAL;
+	}
+
+	/* Ensure all events in a group are on the same cpu */
+	if ((event->group_leader != event) &&
+	    (cluster->on_cpu != event->group_leader->cpu)) {
+		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
+			 "Can't create group on CPUs %d and %d",
+			 event->cpu, event->group_leader->cpu);
+		return -EINVAL;
+	}
+
+	hwc->idx = -1;
+	hwc->config_base = event->attr.config;
+	event->readable_on_cpus = CPU_MASK_ALL;
+
+	/*
+	 * We override event->cpu here, as it is possible to enable
+	 * events even if the requested event->cpu is offline.
+	 */
+	event->cpu = cluster->on_cpu;
+	return 0;
+}
+
+static void l2_cache_event_start(struct perf_event *event, int flags)
+{
+	struct cluster_pmu *cluster;
+	struct hw_perf_event *hwc = &event->hw;
+	int event_idx;
+
+	hwc->state = 0;
+	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
+	event_idx = (hwc->config_base & REGBIT_MASK) >> REGBIT_SHIFT;
+	if ((hwc->config_base & EVENT_GROUP_MASK) == TENURE_CNTRS_GROUP_ID) {
+		cluster_tenure_counter_enable(cluster, event_idx);
+		return;
+	}
+
+	cluster_txn_counter_enable(cluster, event_idx);
+}
+
+static void l2_cache_event_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct cluster_pmu *cluster;
+	int event_idx;
+	u32 ovsr;
+
+	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
+	if (hwc->state & PERF_HES_STOPPED)
+		return;
+
+	event_idx = (hwc->config_base & REGBIT_MASK) >> REGBIT_SHIFT;
+	if ((hwc->config_base & EVENT_GROUP_MASK) == TENURE_CNTRS_GROUP_ID)
+		cluster_tenure_counter_disable(cluster, event_idx);
+	else
+		cluster_txn_counter_disable(cluster, event_idx);
+
+	ovsr = cluster_reg_read(cluster, OVERFLOW_REG_OFFSET);
+	if (cluster_pmu_counter_has_overflowed(ovsr, event_idx)) {
+		l2_cache_event_update(event, 1);
+		cluster_pmu_reset_ovsr(cluster, hwc->config_base);
+	}
+
+	if (flags & PERF_EF_UPDATE)
+		l2_cache_event_update(event, 0);
+
+	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int l2_cache_event_add(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	int idx;
+	struct cluster_pmu *cluster;
+
+	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
+	idx = l2_cache_get_event_idx(cluster, event);
+	if (idx < 0)
+		return idx;
+
+	hwc->idx = idx;
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	cluster->events[idx] = event;
+	local64_set(&hwc->prev_count, 0);
+
+	if (flags & PERF_EF_START)
+		l2_cache_event_start(event, flags);
+
+	/* Propagate changes to the userspace mapping. */
+	perf_event_update_userpage(event);
+
+	return 0;
+}
+
+static void l2_cache_event_del(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct cluster_pmu *cluster;
+	int idx = hwc->idx;
+	unsigned long intr_flag;
+
+	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
+
+	/*
+	 * We could race here with overflow interrupt of this event.
+	 * So, let's be safe here.
+	 */
+	spin_lock_irqsave(&cluster->pmu_lock, intr_flag);
+	l2_cache_event_stop(event, flags | PERF_EF_UPDATE);
+	l2_cache_clear_event_idx(cluster, event);
+	cluster->events[idx] = NULL;
+	hwc->idx = -1;
+	spin_unlock_irqrestore(&cluster->pmu_lock, intr_flag);
+
+	perf_event_update_userpage(event);
+}
+
+static void l2_cache_event_read(struct perf_event *event)
+{
+	l2_cache_event_update(event, 0);
+}
+
+static ssize_t low_tenure_threshold_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev));
+	struct cluster_pmu *cluster = NULL;
+	u32 val;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	if (val == 0 || val > INT_MAX)
+		return -EINVAL;
+
+	list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
+		if (cluster->cluster_id == which_cluster_tenure)
+			writel_relaxed(val,
+				cluster->reg_addr + LOW_RANGE_TENURE_VAL);
+	}
+
+	return count;
+}
+
+static ssize_t low_tenure_threshold_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev));
+	struct cluster_pmu *cluster = NULL;
+	u32 val = 0;
+
+	list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
+		if (cluster->cluster_id == which_cluster_tenure)
+			val = cluster_reg_read(cluster, LOW_RANGE_TENURE_VAL);
+	}
+
+	return snprintf(buf, PAGE_SIZE, "0x%x\n", val);
+}
+
+static ssize_t mid_tenure_threshold_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev));
+	struct cluster_pmu *cluster = NULL;
+	u32 val;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	if (val == 0 || val > INT_MAX)
+		return -EINVAL;
+
+	list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
+		if (cluster->cluster_id == which_cluster_tenure)
+			writel_relaxed(val,
+				cluster->reg_addr + MID_RANGE_TENURE_VAL);
+	}
+
+	return count;
+}
+
+static ssize_t mid_tenure_threshold_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev));
+	struct cluster_pmu *cluster = NULL;
+	u32 val = 0;
+
+	list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
+		if (cluster->cluster_id == which_cluster_tenure)
+			val = cluster_reg_read(cluster, MID_RANGE_TENURE_VAL);
+	}
+
+	return snprintf(buf, PAGE_SIZE, "0x%x\n", val);
+}
+
+static ssize_t which_cluster_tenure_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n", which_cluster_tenure);
+}
+
+static ssize_t which_cluster_tenure_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int val;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	/* parse into a local first so a bad write can't corrupt the global */
+	if (val > 1)
+		return -EINVAL;
+
+	which_cluster_tenure = val;
+	return count;
+}
+
+static struct device_attribute mid_tenure_threshold_attr =
+	__ATTR(mid_tenure_threshold, 0644,
+			mid_tenure_threshold_show,
+			mid_tenure_threshold_store);
+
+static struct attribute *mid_tenure_threshold_attrs[] = {
+	&mid_tenure_threshold_attr.attr,
+	NULL,
+};
+
+static struct attribute_group mid_tenure_threshold_group = {
+	.attrs = mid_tenure_threshold_attrs,
+};
+
+static struct device_attribute low_tenure_threshold_attr =
+	__ATTR(low_tenure_threshold, 0644,
+			low_tenure_threshold_show,
+			low_tenure_threshold_store);
+
+static struct attribute *low_tenure_threshold_attrs[] = {
+	&low_tenure_threshold_attr.attr,
+	NULL,
+};
+
+static struct attribute_group low_tenure_threshold_group = {
+	.attrs = low_tenure_threshold_attrs,
+};
+
+static struct device_attribute which_cluster_tenure_attr =
+	__ATTR(which_cluster_tenure, 0644,
+			which_cluster_tenure_show,
+			which_cluster_tenure_store);
+
+static struct attribute *which_cluster_tenure_attrs[] = {
+	&which_cluster_tenure_attr.attr,
+	NULL,
+};
+
+static struct attribute_group which_cluster_tenure_group = {
+	.attrs = which_cluster_tenure_attrs,
+};
+
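+/*
+ * Example sysfs usage (illustrative; path assumes the PMU registers as
+ * "l2cache_counters"):
+ *
+ *	echo 1 > /sys/devices/l2cache_counters/which_cluster_tenure
+ *	echo 0x100 > /sys/devices/l2cache_counters/low_tenure_threshold
+ *	cat /sys/devices/l2cache_counters/mid_tenure_threshold
+ */
+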
+static ssize_t l2_cache_pmu_cpumask_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev));
+
+	return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask);
+}
+
+static struct device_attribute l2_cache_pmu_cpumask_attr =
+	__ATTR(cpumask, 0444, l2_cache_pmu_cpumask_show, NULL);
+
+static struct attribute *l2_cache_pmu_cpumask_attrs[] = {
+	&l2_cache_pmu_cpumask_attr.attr,
+	NULL,
+};
+
+static struct attribute_group l2_cache_pmu_cpumask_group = {
+	.attrs = l2_cache_pmu_cpumask_attrs,
+};
+
+PMU_FORMAT_ATTR(event,     "config:0-9");
+static struct attribute *l2_cache_pmu_formats[] = {
+	&format_attr_event.attr,
+	NULL,
+};
+
+static struct attribute_group l2_cache_pmu_format_group = {
+	.name = "format",
+	.attrs = l2_cache_pmu_formats,
+};
+
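+/*
+ * The 10-bit config field above mirrors the event encoding described at
+ * the top of this file: bit 0 selects the group, bits 1-4 the counter
+ * id, and bits 5-9 the enable bit position.
+ */
+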
+static ssize_t l2cache_pmu_event_show(struct device *dev,
+				      struct device_attribute *attr, char *page)
+{
+	struct perf_pmu_events_attr *pmu_attr;
+
+	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+	return snprintf(page, PAGE_SIZE, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define L2CACHE_EVENT_ATTR(_name, _id)					     \
+	(&((struct perf_pmu_events_attr[]) {				     \
+		{ .attr = __ATTR(_name, 0444, l2cache_pmu_event_show, NULL), \
+		  .id = _id, }						     \
+	})[0].attr.attr)
+
+static struct attribute *l2_cache_pmu_events[] = {
+	L2CACHE_EVENT_ATTR(cycles, L2_EVENT_CYCLE_CNTR),
+	L2CACHE_EVENT_ATTR(ddr_write, L2_EVENT_DDR_WR_CNTR),
+	L2CACHE_EVENT_ATTR(ddr_read, L2_EVENT_DDR_RD_CNTR),
+	L2CACHE_EVENT_ATTR(snoop_read, L2_EVENT_SNP_RD_CNTR),
+	L2CACHE_EVENT_ATTR(acp_write, L2_EVENT_ACP_WR_CNTR),
+	L2CACHE_EVENT_ATTR(low_range_occur, L2_EVENT_LOW_RANGE_OCCUR_CNTR),
+	L2CACHE_EVENT_ATTR(mid_range_occur, L2_EVENT_MID_RANGE_OCCUR_CNTR),
+	L2CACHE_EVENT_ATTR(high_range_occur, L2_EVENT_HIGH_RANGE_OCCUR_CNTR),
+	NULL
+};
+
+static struct attribute_group l2_cache_pmu_events_group = {
+	.name = "events",
+	.attrs = l2_cache_pmu_events,
+};
+
+static const struct attribute_group *l2_cache_pmu_attr_grps[] = {
+	&l2_cache_pmu_format_group,
+	&l2_cache_pmu_cpumask_group,
+	&l2_cache_pmu_events_group,
+	&mid_tenure_threshold_group,
+	&low_tenure_threshold_group,
+	&which_cluster_tenure_group,
+	NULL,
+};
+
+static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
+	struct l2cache_pmu *l2cache_pmu, int cpu)
+{
+	u64 mpidr;
+	int cpu_cluster_id;
+	struct cluster_pmu *cluster = NULL;
+
+	/*
+	 * This assumes that the cluster_id is in MPIDR[aff1] for
+	 * single-threaded cores, and MPIDR[aff2] for multi-threaded
+	 * cores. This logic will have to be updated if this changes.
+	 */
+	mpidr = read_cpuid_mpidr();
+	if (mpidr & MPIDR_MT_BITMASK)
+		cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+	else
+		cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+	list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
+		if (cluster->cluster_id != cpu_cluster_id)
+			continue;
+
+		dev_info(&l2cache_pmu->pdev->dev,
+			 "CPU%d associated with cluster %d\n", cpu,
+			 cluster->cluster_id);
+		cpumask_set_cpu(cpu, &cluster->cluster_cpus);
+		*per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
+		break;
+	}
+
+	return cluster;
+}
+
+static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct cluster_pmu *cluster;
+	struct l2cache_pmu *l2cache_pmu;
+
+	if (!node)
+		return 0;
+
+	l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
+	cluster = get_cluster_pmu(l2cache_pmu, cpu);
+	if (!cluster) {
+		/* First time this CPU has come online */
+		cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu);
+		if (!cluster) {
+			/* Only if broken firmware doesn't list every cluster */
+			WARN_ONCE(1, "No L2 cache cluster for CPU%d\n", cpu);
+			return 0;
+		}
+	}
+
+	/* If another CPU is managing this cluster, we're done */
+	if (cluster->on_cpu != -1)
+		return 0;
+
+	/*
+	 * All CPUs on this cluster were down, use this one.
+	 * Reset to put it into sane state.
+	 */
+	cluster->on_cpu = cpu;
+	cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);
+	cluster_pmu_reset(cluster);
+
+	enable_irq(cluster->irq);
+
+	return 0;
+}
+
+static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct cluster_pmu *cluster;
+	struct l2cache_pmu *l2cache_pmu;
+	cpumask_t cluster_online_cpus;
+	unsigned int target;
+
+	if (!node)
+		return 0;
+
+	l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
+	cluster = get_cluster_pmu(l2cache_pmu, cpu);
+	if (!cluster)
+		return 0;
+
+	/* If this CPU is not managing the cluster, we're done */
+	if (cluster->on_cpu != cpu)
+		return 0;
+
+	/* Give up ownership of cluster */
+	cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);
+	cluster->on_cpu = -1;
+
+	/* Any other CPU for this cluster which is still online */
+	cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus,
+		    cpu_online_mask);
+	target = cpumask_any_but(&cluster_online_cpus, cpu);
+	if (target >= nr_cpu_ids) {
+		disable_irq(cluster->irq);
+		return 0;
+	}
+
+	perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
+	cluster->on_cpu = target;
+	cpumask_set_cpu(target, &l2cache_pmu->cpumask);
+
+	return 0;
+}
+
+static void l2_cache_pmu_dev_release(struct device *dev)
+{
+	struct cluster_pmu *cluster = to_cluster_device(dev);
+
+	kfree(cluster);
+}
+
+static int l2_cache_pmu_probe_cluster(struct device *parent,
+					struct device_node *cn, void *data)
+{
+	struct l2cache_pmu *l2cache_pmu = data;
+	struct cluster_pmu *cluster;
+	u32 fw_cluster_id;
+	struct resource res;
+	int ret;
+	int irq;
+
+	cluster = kzalloc(sizeof(*cluster), GFP_KERNEL);
+	if (!cluster)
+		return -ENOMEM;
+
+	cluster->dev.parent = parent;
+	cluster->dev.of_node = cn;
+	cluster->dev.release = l2_cache_pmu_dev_release;
+	dev_set_name(&cluster->dev, "%s:%s", dev_name(parent), cn->name);
+
+	ret = device_register(&cluster->dev);
+	if (ret) {
+		pr_err(L2_COUNTERS_BUG
+			"failed to register l2 cache pmu device\n");
+		goto err_put_dev;
+	}
+
+	ret = of_property_read_u32(cn, "cluster-id", &fw_cluster_id);
+	if (ret) {
+		pr_err(L2_COUNTERS_BUG "Missing cluster-id.\n");
+		goto err_put_dev;
+	}
+
+	ret = of_address_to_resource(cn, 0, &res);
+	if (ret) {
+		pr_err(L2_COUNTERS_BUG "not able to find the resource\n");
+		goto err_put_dev;
+	}
+
+	cluster->reg_addr = devm_ioremap_resource(&cluster->dev, &res);
+	if (IS_ERR(cluster->reg_addr)) {
+		ret = PTR_ERR(cluster->reg_addr);
+		pr_err(L2_COUNTERS_BUG "not able to remap the resource\n");
+		goto err_put_dev;
+	}
+
+	INIT_LIST_HEAD(&cluster->next);
+	cluster->cluster_id = fw_cluster_id;
+	cluster->l2cache_pmu = l2cache_pmu;
+
+	irq = of_irq_get(cn, 0);
+	if (irq < 0) {
+		ret = irq;
+		pr_err(L2_COUNTERS_BUG
+			"Failed to get valid irq for cluster %u\n",
+			fw_cluster_id);
+		goto err_put_dev;
+	}
+
+	irq_set_status_flags(irq, IRQ_NOAUTOEN);
+	cluster->irq = irq;
+	cluster->on_cpu = -1;
+
+	ret = devm_request_irq(&cluster->dev, irq, l2_cache_handle_irq,
+			       IRQF_NOBALANCING | IRQF_NO_THREAD,
+			       "l2-cache-pmu", cluster);
+	if (ret) {
+		pr_err(L2_COUNTERS_BUG
+			"Unable to request IRQ%d for L2 PMU counters\n", irq);
+		goto err_put_dev;
+	}
+
+	pr_info(L2_COUNTERS_BUG
+		"Registered L2 cache PMU cluster %u\n", fw_cluster_id);
+
+	spin_lock_init(&cluster->pmu_lock);
+	list_add(&cluster->next, &l2cache_pmu->clusters);
+	l2cache_pmu->num_pmus++;
+
+	return 0;
+
+err_put_dev:
+	put_device(&cluster->dev);
+	return ret;
+}
+
+static int l2_cache_pmu_probe(struct platform_device *pdev)
+{
+	int err;
+	struct l2cache_pmu *l2cache_pmu;
+	struct device_node *pn = pdev->dev.of_node;
+	struct device_node *cn;
+
+	l2cache_pmu =
+		devm_kzalloc(&pdev->dev, sizeof(*l2cache_pmu), GFP_KERNEL);
+	if (!l2cache_pmu)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&l2cache_pmu->clusters);
+	platform_set_drvdata(pdev, l2cache_pmu);
+	l2cache_pmu->pmu = (struct pmu) {
+		.name		= "l2cache_counters",
+		.task_ctx_nr    = perf_invalid_context,
+		.event_init	= l2_cache_event_init,
+		.add		= l2_cache_event_add,
+		.del		= l2_cache_event_del,
+		.start		= l2_cache_event_start,
+		.stop		= l2_cache_event_stop,
+		.read		= l2_cache_event_read,
+		.attr_groups	= l2_cache_pmu_attr_grps,
+	};
+
+	l2cache_pmu->num_counters = MAX_L2_CNTRS;
+	l2cache_pmu->pdev = pdev;
+	l2cache_pmu->pmu_cluster = devm_alloc_percpu(&pdev->dev,
+						     struct cluster_pmu *);
+	if (!l2cache_pmu->pmu_cluster)
+		return -ENOMEM;
+
+	l2_counter_present_mask = GENMASK(l2cache_pmu->num_counters - 1, 0);
+	cpumask_clear(&l2cache_pmu->cpumask);
+
+	for_each_available_child_of_node(pn, cn) {
+		err = l2_cache_pmu_probe_cluster(&pdev->dev, cn, l2cache_pmu);
+		if (err < 0) {
+			of_node_put(cn);
+			dev_err(&pdev->dev,
+				"No hardware L2 cache PMUs found\n");
+			return err;
+		}
+	}
+
+	if (l2cache_pmu->num_pmus == 0) {
+		dev_err(&pdev->dev, "No hardware L2 cache PMUs found\n");
+		return -ENODEV;
+	}
+
+	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
+				       &l2cache_pmu->node);
+	if (err) {
+		dev_err(&pdev->dev, "Error %d registering hotplug\n", err);
+		return err;
+	}
+
+	err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1);
+	if (err) {
+		dev_err(&pdev->dev, "Error %d registering L2 cache PMU\n", err);
+		goto out_unregister;
+	}
+
+	dev_info(&pdev->dev, "Registered L2 cache PMU using %d HW PMUs\n",
+		 l2cache_pmu->num_pmus);
+
+	return 0;
+
+out_unregister:
+	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
+				    &l2cache_pmu->node);
+	return err;
+}
+
+static int l2cache_pmu_unregister_device(struct device *dev, void *data)
+{
+	device_unregister(dev);
+	return 0;
+}
+
+static int l2_cache_pmu_remove(struct platform_device *pdev)
+{
+	struct l2cache_pmu *l2cache_pmu = platform_get_drvdata(pdev);
+	int ret;
+
+	perf_pmu_unregister(&l2cache_pmu->pmu);
+	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
+			&l2cache_pmu->node);
+
+	ret = device_for_each_child(&pdev->dev, NULL,
+			l2cache_pmu_unregister_device);
+	if (ret)
+		dev_warn(&pdev->dev,
+			"can't remove cluster pmu device: %d\n", ret);
+	return ret;
+}
+
+static const struct of_device_id l2_cache_pmu_of_match[] = {
+	{ .compatible = "qcom,l2cache-pmu" },
+	{},
+};
+
+static struct platform_driver l2_cache_pmu_driver = {
+	.driver = {
+		.name = "l2cache-pmu",
+		.of_match_table = l2_cache_pmu_of_match,
+	},
+	.probe = l2_cache_pmu_probe,
+	.remove = l2_cache_pmu_remove,
+};
+
+static int __init register_l2_cache_pmu_driver(void)
+{
+	int err;
+
+	err = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
+				      "AP_PERF_ARM_QCOM_L2_ONLINE",
+				      l2cache_pmu_online_cpu,
+				      l2cache_pmu_offline_cpu);
+	if (err)
+		return err;
+
+	return platform_driver_register(&l2_cache_pmu_driver);
+}
+device_initcall(register_l2_cache_pmu_driver);
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.c
index b3c4271..a74af35 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.c
@@ -31,7 +31,7 @@ int ufs_qcom_phy_qmp_v3_660_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
 	tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
 	tbl_B = phy_cal_table_rate_B;
 
-	if ((major == 0x3) && (minor == 0x001) && (step >= 0x001)) {
+	if ((major == 0x4) && (minor == 0x002) && (step >= 0x000)) {
 		tbl_A = phy_cal_table_rate_A_3_1_1;
 		tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_3_1_1);
 	} else {
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 22eb7fe..dc6efb1 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -192,4 +192,13 @@
 	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
 	  Technologies Inc BENGAL platform.
 
+config PINCTRL_SCUBA
+	tristate "Qualcomm Technologies Inc SCUBA pin controller driver"
+	depends on GPIOLIB && OF
+	select PINCTRL_MSM
+	help
+	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
+	  Technologies Inc SCUBA platform.
+
 endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index e6e4de9..996270d 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -24,3 +24,4 @@
 obj-$(CONFIG_PINCTRL_LITO) += pinctrl-lito.o
 obj-$(CONFIG_PINCTRL_BENGAL) += pinctrl-bengal.o
 obj-$(CONFIG_PINCTRL_LAGOON) += pinctrl-lagoon.o
+obj-$(CONFIG_PINCTRL_SCUBA) += pinctrl-scuba.o
diff --git a/drivers/pinctrl/qcom/pinctrl-bengal.c b/drivers/pinctrl/qcom/pinctrl-bengal.c
index a93ddca..68641fc 100644
--- a/drivers/pinctrl/qcom/pinctrl-bengal.c
+++ b/drivers/pinctrl/qcom/pinctrl-bengal.c
@@ -520,6 +520,8 @@ enum bengal_functions {
 	msm_mux_uim1_present,
 	msm_mux_dac_calib19,
 	msm_mux_mdp_vsync,
+	msm_mux_mdp_vsync_out_0,
+	msm_mux_mdp_vsync_out_1,
 	msm_mux_dac_calib20,
 	msm_mux_dac_calib21,
 	msm_mux_atest_bbrx1,
@@ -1030,6 +1032,12 @@ static const char * const dac_calib19_groups[] = {
 static const char * const mdp_vsync_groups[] = {
 	"gpio81", "gpio96", "gpio97",
 };
+static const char * const mdp_vsync_out_0_groups[] = {
+	"gpio81",
+};
+static const char * const mdp_vsync_out_1_groups[] = {
+	"gpio81",
+};
 static const char * const dac_calib20_groups[] = {
 	"gpio81",
 };
@@ -1262,6 +1270,8 @@ static const struct msm_function bengal_functions[] = {
 	FUNCTION(uim1_present),
 	FUNCTION(dac_calib19),
 	FUNCTION(mdp_vsync),
+	FUNCTION(mdp_vsync_out_0),
+	FUNCTION(mdp_vsync_out_1),
 	FUNCTION(dac_calib20),
 	FUNCTION(dac_calib21),
 	FUNCTION(atest_bbrx1),
@@ -1444,8 +1454,8 @@ static const struct msm_pingroup bengal_groups[] = {
 			NA, 0x71000, 5),
 	[80] = PINGROUP(80, WEST, qup2, dac_calib19, NA, NA, NA, NA, NA, NA,
 			NA, 0x71000, 13),
-	[81] = PINGROUP(81, WEST, mdp_vsync, mdp_vsync, mdp_vsync, dac_calib20,
-			NA, NA, NA, NA, NA, 0x71000, 14),
+	[81] = PINGROUP(81, WEST, mdp_vsync_out_0, mdp_vsync_out_1, mdp_vsync,
+			dac_calib20, NA, NA, NA, NA, NA, 0x71000, 14),
 	[82] = PINGROUP(82, WEST, qup0, dac_calib21, NA, NA, NA, NA, NA, NA,
 			NA, 0, -1),
 	[83] = PINGROUP(83, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
diff --git a/drivers/pinctrl/qcom/pinctrl-scuba.c b/drivers/pinctrl/qcom/pinctrl-scuba.c
new file mode 100644
index 0000000..a804763
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-scuba.c
@@ -0,0 +1,1620 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname)			                \
+	[msm_mux_##fname] = {		                \
+		.name = #fname,				\
+		.groups = fname##_groups,               \
+		.ngroups = ARRAY_SIZE(fname##_groups),	\
+	}
+
+#define REG_BASE 0x0
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9, wake_off, bit)	\
+	{					        \
+		.name = "gpio" #id,			\
+		.pins = gpio##id##_pins,		\
+		.npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins),	\
+		.funcs = (int[]){			\
+			msm_mux_gpio, /* gpio mode */	\
+			msm_mux_##f1,			\
+			msm_mux_##f2,			\
+			msm_mux_##f3,			\
+			msm_mux_##f4,			\
+			msm_mux_##f5,			\
+			msm_mux_##f6,			\
+			msm_mux_##f7,			\
+			msm_mux_##f8,			\
+			msm_mux_##f9			\
+		},				        \
+		.nfuncs = 10,				\
+		.ctl_reg = REG_BASE + REG_SIZE * id,			\
+		.io_reg = REG_BASE + 0x4 + REG_SIZE * id,		\
+		.intr_cfg_reg = REG_BASE + 0x8 + REG_SIZE * id,		\
+		.intr_status_reg = REG_BASE + 0xc + REG_SIZE * id,	\
+		.intr_target_reg = REG_BASE + 0x8 + REG_SIZE * id,	\
+		.mux_bit = 2,			\
+		.pull_bit = 0,			\
+		.drv_bit = 6,			\
+		.egpio_enable = 12,		\
+		.egpio_present = 11,		\
+		.oe_bit = 9,			\
+		.in_bit = 0,			\
+		.out_bit = 1,			\
+		.intr_enable_bit = 0,		\
+		.intr_status_bit = 0,		\
+		.intr_target_bit = 5,		\
+		.intr_target_kpss_val = 3,	\
+		.intr_raw_status_bit = 4,	\
+		.intr_polarity_bit = 1,		\
+		.intr_detection_bit = 2,	\
+		.intr_detection_width = 2,	\
+		.wake_reg = REG_BASE + wake_off,	\
+		.wake_bit = bit,		\
+	}
+
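+/*
+ * Worked example (illustrative): with REG_BASE 0x0 and REG_SIZE 0x1000,
+ * PINGROUP(5, ...) yields ctl_reg 0x5000, io_reg 0x5004, intr_cfg_reg
+ * 0x5008 and intr_status_reg 0x500c for gpio5.
+ */
+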
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv)	\
+	{					        \
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned int)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = ctl,				\
+		.io_reg = 0,				\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = pull,			\
+		.drv_bit = drv,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = -1,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
+
+#define UFS_RESET(pg_name, offset)				\
+	{					        \
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned int)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = offset,			\
+		.io_reg = offset + 0x4,			\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = 3,				\
+		.drv_bit = 0,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = 0,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
+static const struct pinctrl_pin_desc scuba_pins[] = {
+	PINCTRL_PIN(0, "GPIO_0"),
+	PINCTRL_PIN(1, "GPIO_1"),
+	PINCTRL_PIN(2, "GPIO_2"),
+	PINCTRL_PIN(3, "GPIO_3"),
+	PINCTRL_PIN(4, "GPIO_4"),
+	PINCTRL_PIN(5, "GPIO_5"),
+	PINCTRL_PIN(6, "GPIO_6"),
+	PINCTRL_PIN(7, "GPIO_7"),
+	PINCTRL_PIN(8, "GPIO_8"),
+	PINCTRL_PIN(9, "GPIO_9"),
+	PINCTRL_PIN(10, "GPIO_10"),
+	PINCTRL_PIN(11, "GPIO_11"),
+	PINCTRL_PIN(12, "GPIO_12"),
+	PINCTRL_PIN(13, "GPIO_13"),
+	PINCTRL_PIN(14, "GPIO_14"),
+	PINCTRL_PIN(15, "GPIO_15"),
+	PINCTRL_PIN(16, "GPIO_16"),
+	PINCTRL_PIN(17, "GPIO_17"),
+	PINCTRL_PIN(18, "GPIO_18"),
+	PINCTRL_PIN(19, "GPIO_19"),
+	PINCTRL_PIN(20, "GPIO_20"),
+	PINCTRL_PIN(21, "GPIO_21"),
+	PINCTRL_PIN(22, "GPIO_22"),
+	PINCTRL_PIN(23, "GPIO_23"),
+	PINCTRL_PIN(24, "GPIO_24"),
+	PINCTRL_PIN(25, "GPIO_25"),
+	PINCTRL_PIN(26, "GPIO_26"),
+	PINCTRL_PIN(27, "GPIO_27"),
+	PINCTRL_PIN(28, "GPIO_28"),
+	PINCTRL_PIN(29, "GPIO_29"),
+	PINCTRL_PIN(30, "GPIO_30"),
+	PINCTRL_PIN(31, "GPIO_31"),
+	PINCTRL_PIN(32, "GPIO_32"),
+	PINCTRL_PIN(33, "GPIO_33"),
+	PINCTRL_PIN(34, "GPIO_34"),
+	PINCTRL_PIN(35, "GPIO_35"),
+	PINCTRL_PIN(36, "GPIO_36"),
+	PINCTRL_PIN(37, "GPIO_37"),
+	PINCTRL_PIN(38, "GPIO_38"),
+	PINCTRL_PIN(39, "GPIO_39"),
+	PINCTRL_PIN(40, "GPIO_40"),
+	PINCTRL_PIN(41, "GPIO_41"),
+	PINCTRL_PIN(42, "GPIO_42"),
+	PINCTRL_PIN(43, "GPIO_43"),
+	PINCTRL_PIN(44, "GPIO_44"),
+	PINCTRL_PIN(45, "GPIO_45"),
+	PINCTRL_PIN(46, "GPIO_46"),
+	PINCTRL_PIN(47, "GPIO_47"),
+	PINCTRL_PIN(48, "GPIO_48"),
+	PINCTRL_PIN(49, "GPIO_49"),
+	PINCTRL_PIN(50, "GPIO_50"),
+	PINCTRL_PIN(51, "GPIO_51"),
+	PINCTRL_PIN(52, "GPIO_52"),
+	PINCTRL_PIN(53, "GPIO_53"),
+	PINCTRL_PIN(54, "GPIO_54"),
+	PINCTRL_PIN(55, "GPIO_55"),
+	PINCTRL_PIN(56, "GPIO_56"),
+	PINCTRL_PIN(57, "GPIO_57"),
+	PINCTRL_PIN(58, "GPIO_58"),
+	PINCTRL_PIN(59, "GPIO_59"),
+	PINCTRL_PIN(60, "GPIO_60"),
+	PINCTRL_PIN(61, "GPIO_61"),
+	PINCTRL_PIN(62, "GPIO_62"),
+	PINCTRL_PIN(63, "GPIO_63"),
+	PINCTRL_PIN(64, "GPIO_64"),
+	PINCTRL_PIN(69, "GPIO_69"),
+	PINCTRL_PIN(70, "GPIO_70"),
+	PINCTRL_PIN(71, "GPIO_71"),
+	PINCTRL_PIN(72, "GPIO_72"),
+	PINCTRL_PIN(73, "GPIO_73"),
+	PINCTRL_PIN(74, "GPIO_74"),
+	PINCTRL_PIN(75, "GPIO_75"),
+	PINCTRL_PIN(76, "GPIO_76"),
+	PINCTRL_PIN(77, "GPIO_77"),
+	PINCTRL_PIN(78, "GPIO_78"),
+	PINCTRL_PIN(79, "GPIO_79"),
+	PINCTRL_PIN(80, "GPIO_80"),
+	PINCTRL_PIN(81, "GPIO_81"),
+	PINCTRL_PIN(82, "GPIO_82"),
+	PINCTRL_PIN(86, "GPIO_86"),
+	PINCTRL_PIN(87, "GPIO_87"),
+	PINCTRL_PIN(88, "GPIO_88"),
+	PINCTRL_PIN(89, "GPIO_89"),
+	PINCTRL_PIN(90, "GPIO_90"),
+	PINCTRL_PIN(91, "GPIO_91"),
+	PINCTRL_PIN(94, "GPIO_94"),
+	PINCTRL_PIN(95, "GPIO_95"),
+	PINCTRL_PIN(96, "GPIO_96"),
+	PINCTRL_PIN(97, "GPIO_97"),
+	PINCTRL_PIN(98, "GPIO_98"),
+	PINCTRL_PIN(99, "GPIO_99"),
+	PINCTRL_PIN(100, "GPIO_100"),
+	PINCTRL_PIN(101, "GPIO_101"),
+	PINCTRL_PIN(102, "GPIO_102"),
+	PINCTRL_PIN(103, "GPIO_103"),
+	PINCTRL_PIN(104, "GPIO_104"),
+	PINCTRL_PIN(105, "GPIO_105"),
+	PINCTRL_PIN(106, "GPIO_106"),
+	PINCTRL_PIN(107, "GPIO_107"),
+	PINCTRL_PIN(108, "GPIO_108"),
+	PINCTRL_PIN(109, "GPIO_109"),
+	PINCTRL_PIN(110, "GPIO_110"),
+	PINCTRL_PIN(111, "GPIO_111"),
+	PINCTRL_PIN(112, "GPIO_112"),
+	PINCTRL_PIN(113, "GPIO_113"),
+	PINCTRL_PIN(114, "GPIO_114"),
+	PINCTRL_PIN(115, "GPIO_115"),
+	PINCTRL_PIN(116, "GPIO_116"),
+	PINCTRL_PIN(117, "GPIO_117"),
+	PINCTRL_PIN(118, "GPIO_118"),
+	PINCTRL_PIN(119, "GPIO_119"),
+	PINCTRL_PIN(120, "GPIO_120"),
+	PINCTRL_PIN(121, "GPIO_121"),
+	PINCTRL_PIN(122, "GPIO_122"),
+	PINCTRL_PIN(123, "GPIO_123"),
+	PINCTRL_PIN(124, "GPIO_124"),
+	PINCTRL_PIN(125, "GPIO_125"),
+	PINCTRL_PIN(126, "GPIO_126"),
+	PINCTRL_PIN(127, "SDC1_RCLK"),
+	PINCTRL_PIN(128, "SDC1_CLK"),
+	PINCTRL_PIN(129, "SDC1_CMD"),
+	PINCTRL_PIN(130, "SDC1_DATA"),
+	PINCTRL_PIN(131, "SDC2_CLK"),
+	PINCTRL_PIN(132, "SDC2_CMD"),
+	PINCTRL_PIN(133, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+	static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+
+static const unsigned int sdc1_rclk_pins[] = { 127 };
+static const unsigned int sdc1_clk_pins[] = { 128 };
+static const unsigned int sdc1_cmd_pins[] = { 129 };
+static const unsigned int sdc1_data_pins[] = { 130 };
+static const unsigned int sdc2_clk_pins[] = { 131 };
+static const unsigned int sdc2_cmd_pins[] = { 132 };
+static const unsigned int sdc2_data_pins[] = { 133 };
+
+enum scuba_functions {
+	msm_mux_qup0,
+	msm_mux_gpio,
+	msm_mux_ddr_bist,
+	msm_mux_phase_flag0,
+	msm_mux_qdss_gpio8,
+	msm_mux_atest_tsens,
+	msm_mux_mpm_pwr,
+	msm_mux_m_voc,
+	msm_mux_phase_flag1,
+	msm_mux_qdss_gpio9,
+	msm_mux_atest_tsens2,
+	msm_mux_phase_flag2,
+	msm_mux_qdss_gpio10,
+	msm_mux_dac_calib0,
+	msm_mux_atest_usb10,
+	msm_mux_phase_flag3,
+	msm_mux_qdss_gpio11,
+	msm_mux_dac_calib1,
+	msm_mux_atest_usb11,
+	msm_mux_qup1,
+	msm_mux_CRI_TRNG0,
+	msm_mux_phase_flag4,
+	msm_mux_dac_calib2,
+	msm_mux_atest_usb12,
+	msm_mux_CRI_TRNG1,
+	msm_mux_phase_flag5,
+	msm_mux_dac_calib3,
+	msm_mux_atest_usb13,
+	msm_mux_qup2,
+	msm_mux_phase_flag6,
+	msm_mux_dac_calib4,
+	msm_mux_atest_usb1,
+	msm_mux_qup3,
+	msm_mux_pbs_out,
+	msm_mux_PLL_BIST,
+	msm_mux_qdss_gpio,
+	msm_mux_tsense_pwm,
+	msm_mux_AGERA_PLL,
+	msm_mux_pbs0,
+	msm_mux_qdss_gpio0,
+	msm_mux_pbs1,
+	msm_mux_qdss_gpio1,
+	msm_mux_qup4,
+	msm_mux_tgu_ch0,
+	msm_mux_tgu_ch1,
+	msm_mux_qup5,
+	msm_mux_tgu_ch2,
+	msm_mux_phase_flag7,
+	msm_mux_qdss_gpio4,
+	msm_mux_dac_calib5,
+	msm_mux_tgu_ch3,
+	msm_mux_phase_flag8,
+	msm_mux_qdss_gpio5,
+	msm_mux_dac_calib6,
+	msm_mux_phase_flag9,
+	msm_mux_qdss_gpio6,
+	msm_mux_dac_calib7,
+	msm_mux_phase_flag10,
+	msm_mux_qdss_gpio7,
+	msm_mux_dac_calib8,
+	msm_mux_SDC2_TB,
+	msm_mux_CRI_TRNG,
+	msm_mux_pbs2,
+	msm_mux_qdss_gpio2,
+	msm_mux_pwm_0,
+	msm_mux_SDC1_TB,
+	msm_mux_pbs3,
+	msm_mux_qdss_gpio3,
+	msm_mux_cam_mclk,
+	msm_mux_pbs4,
+	msm_mux_adsp_ext,
+	msm_mux_pbs5,
+	msm_mux_cci_i2c,
+	msm_mux_prng_rosc,
+	msm_mux_pbs6,
+	msm_mux_phase_flag11,
+	msm_mux_dac_calib9,
+	msm_mux_atest_usb20,
+	msm_mux_pbs7,
+	msm_mux_phase_flag12,
+	msm_mux_dac_calib10,
+	msm_mux_atest_usb21,
+	msm_mux_CCI_TIMER1,
+	msm_mux_GCC_GP1,
+	msm_mux_pbs8,
+	msm_mux_phase_flag13,
+	msm_mux_dac_calib11,
+	msm_mux_atest_usb22,
+	msm_mux_cci_async,
+	msm_mux_CCI_TIMER0,
+	msm_mux_pbs9,
+	msm_mux_phase_flag14,
+	msm_mux_dac_calib12,
+	msm_mux_atest_usb23,
+	msm_mux_pbs10,
+	msm_mux_phase_flag15,
+	msm_mux_dac_calib13,
+	msm_mux_atest_usb2,
+	msm_mux_vsense_trigger,
+	msm_mux_qdss_cti,
+	msm_mux_CCI_TIMER2,
+	msm_mux_pwm_1,
+	msm_mux_phase_flag16,
+	msm_mux_dac_calib14,
+	msm_mux_atest_char,
+	msm_mux_phase_flag17,
+	msm_mux_dac_calib15,
+	msm_mux_atest_char0,
+	msm_mux_GP_PDM0,
+	msm_mux_phase_flag18,
+	msm_mux_dac_calib16,
+	msm_mux_atest_char1,
+	msm_mux_CCI_TIMER3,
+	msm_mux_GP_PDM1,
+	msm_mux_phase_flag19,
+	msm_mux_dac_calib17,
+	msm_mux_atest_char2,
+	msm_mux_GP_PDM2,
+	msm_mux_phase_flag20,
+	msm_mux_dac_calib18,
+	msm_mux_atest_char3,
+	msm_mux_phase_flag21,
+	msm_mux_phase_flag22,
+	msm_mux_char_exec,
+	msm_mux_NAV_GPIO,
+	msm_mux_phase_flag23,
+	msm_mux_phase_flag24,
+	msm_mux_phase_flag25,
+	msm_mux_pbs14,
+	msm_mux_qdss_gpio14,
+	msm_mux_vfr_1,
+	msm_mux_pbs15,
+	msm_mux_qdss_gpio15,
+	msm_mux_PA_INDICATOR,
+	msm_mux_pwm_2,
+	msm_mux_gsm1_tx,
+	msm_mux_SSBI_WTR1,
+	msm_mux_pll_bypassnl,
+	msm_mux_pll_reset,
+	msm_mux_phase_flag26,
+	msm_mux_ddr_pxi0,
+	msm_mux_gsm0_tx,
+	msm_mux_phase_flag27,
+	msm_mux_GCC_GP2,
+	msm_mux_qdss_gpio12,
+	msm_mux_ddr_pxi1,
+	msm_mux_GCC_GP3,
+	msm_mux_qdss_gpio13,
+	msm_mux_dbg_out,
+	msm_mux_uim2_data,
+	msm_mux_pwm_3,
+	msm_mux_uim2_clk,
+	msm_mux_uim2_reset,
+	msm_mux_pwm_4,
+	msm_mux_uim2_present,
+	msm_mux_pwm_5,
+	msm_mux_uim1_data,
+	msm_mux_uim1_clk,
+	msm_mux_uim1_reset,
+	msm_mux_uim1_present,
+	msm_mux_dac_calib19,
+	msm_mux_mdp_vsync,
+	msm_mux_dac_calib20,
+	msm_mux_dac_calib21,
+	msm_mux_pwm_6,
+	msm_mux_atest_bbrx1,
+	msm_mux_pbs11,
+	msm_mux_usb_phy,
+	msm_mux_atest_bbrx0,
+	msm_mux_pwm_7,
+	msm_mux_mss_lte,
+	msm_mux_pbs12,
+	msm_mux_pbs13,
+	msm_mux_wlan1_adc0,
+	msm_mux_wlan1_adc1,
+	msm_mux_sd_write,
+	msm_mux_JITTER_BIST,
+	msm_mux_atest_gpsadc_dtest0_native,
+	msm_mux_atest_gpsadc_dtest1_native,
+	msm_mux_phase_flag28,
+	msm_mux_dac_calib22,
+	msm_mux_ddr_pxi2,
+	msm_mux_phase_flag29,
+	msm_mux_dac_calib23,
+	msm_mux_phase_flag30,
+	msm_mux_dac_calib24,
+	msm_mux_ddr_pxi3,
+	msm_mux_pwm_8,
+	msm_mux_phase_flag31,
+	msm_mux_dac_calib25,
+	msm_mux_pwm_9,
+	msm_mux_NA,
+};
+
+static const char * const qup0_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3", "gpio82", "gpio86",
+};
+static const char * const gpio_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+	"gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+	"gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+	"gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+	"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+	"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+	"gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+	"gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+	"gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+	"gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+	"gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+	"gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+	"gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+	"gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+	"gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+	"gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+	"gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+	"gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+	"gpio123", "gpio124", "gpio125", "gpio126",
+};
+static const char * const ddr_bist_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const phase_flag0_groups[] = {
+	"gpio0",
+};
+static const char * const qdss_gpio8_groups[] = {
+	"gpio0", "gpio24",
+};
+static const char * const atest_tsens_groups[] = {
+	"gpio0",
+};
+static const char * const mpm_pwr_groups[] = {
+	"gpio1",
+};
+static const char * const m_voc_groups[] = {
+	"gpio0",
+};
+static const char * const phase_flag1_groups[] = {
+	"gpio1",
+};
+static const char * const qdss_gpio9_groups[] = {
+	"gpio1", "gpio25",
+};
+static const char * const atest_tsens2_groups[] = {
+	"gpio1",
+};
+static const char * const phase_flag2_groups[] = {
+	"gpio2",
+};
+static const char * const qdss_gpio10_groups[] = {
+	"gpio2", "gpio26",
+};
+static const char * const dac_calib0_groups[] = {
+	"gpio2",
+};
+static const char * const atest_usb10_groups[] = {
+	"gpio2",
+};
+static const char * const phase_flag3_groups[] = {
+	"gpio3",
+};
+static const char * const qdss_gpio11_groups[] = {
+	"gpio3", "gpio87",
+};
+static const char * const dac_calib1_groups[] = {
+	"gpio3",
+};
+static const char * const atest_usb11_groups[] = {
+	"gpio3",
+};
+static const char * const qup1_groups[] = {
+	"gpio4", "gpio5", "gpio69", "gpio70",
+};
+static const char * const CRI_TRNG0_groups[] = {
+	"gpio4",
+};
+static const char * const phase_flag4_groups[] = {
+	"gpio4",
+};
+static const char * const dac_calib2_groups[] = {
+	"gpio4",
+};
+static const char * const atest_usb12_groups[] = {
+	"gpio4",
+};
+static const char * const CRI_TRNG1_groups[] = {
+	"gpio5",
+};
+static const char * const phase_flag5_groups[] = {
+	"gpio5",
+};
+static const char * const dac_calib3_groups[] = {
+	"gpio5",
+};
+static const char * const atest_usb13_groups[] = {
+	"gpio5",
+};
+static const char * const qup2_groups[] = {
+	"gpio6", "gpio7", "gpio71", "gpio80",
+};
+static const char * const phase_flag6_groups[] = {
+	"gpio6",
+};
+static const char * const dac_calib4_groups[] = {
+	"gpio6",
+};
+static const char * const atest_usb1_groups[] = {
+	"gpio6",
+};
+static const char * const qup3_groups[] = {
+	"gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const pbs_out_groups[] = {
+	"gpio8", "gpio9", "gpio52",
+};
+static const char * const PLL_BIST_groups[] = {
+	"gpio8", "gpio9",
+};
+static const char * const qdss_gpio_groups[] = {
+	"gpio8", "gpio9", "gpio105", "gpio106",
+};
+static const char * const tsense_pwm_groups[] = {
+	"gpio8",
+};
+static const char * const AGERA_PLL_groups[] = {
+	"gpio10", "gpio11",
+};
+static const char * const pbs0_groups[] = {
+	"gpio10",
+};
+static const char * const qdss_gpio0_groups[] = {
+	"gpio10", "gpio107",
+};
+static const char * const pbs1_groups[] = {
+	"gpio11",
+};
+static const char * const qdss_gpio1_groups[] = {
+	"gpio11", "gpio104",
+};
+static const char * const qup4_groups[] = {
+	"gpio12", "gpio13", "gpio96", "gpio97",
+};
+static const char * const tgu_ch0_groups[] = {
+	"gpio12",
+};
+static const char * const tgu_ch1_groups[] = {
+	"gpio13",
+};
+static const char * const qup5_groups[] = {
+	"gpio14", "gpio15", "gpio16", "gpio17",
+};
+static const char * const tgu_ch2_groups[] = {
+	"gpio14",
+};
+static const char * const phase_flag7_groups[] = {
+	"gpio14",
+};
+static const char * const qdss_gpio4_groups[] = {
+	"gpio14", "gpio20",
+};
+static const char * const dac_calib5_groups[] = {
+	"gpio14",
+};
+static const char * const tgu_ch3_groups[] = {
+	"gpio15",
+};
+static const char * const phase_flag8_groups[] = {
+	"gpio15",
+};
+static const char * const qdss_gpio5_groups[] = {
+	"gpio15", "gpio21",
+};
+static const char * const dac_calib6_groups[] = {
+	"gpio15",
+};
+static const char * const phase_flag9_groups[] = {
+	"gpio16",
+};
+static const char * const qdss_gpio6_groups[] = {
+	"gpio16", "gpio22",
+};
+static const char * const dac_calib7_groups[] = {
+	"gpio16",
+};
+static const char * const phase_flag10_groups[] = {
+	"gpio17",
+};
+static const char * const qdss_gpio7_groups[] = {
+	"gpio17", "gpio23",
+};
+static const char * const dac_calib8_groups[] = {
+	"gpio17",
+};
+static const char * const SDC2_TB_groups[] = {
+	"gpio18",
+};
+static const char * const CRI_TRNG_groups[] = {
+	"gpio18",
+};
+static const char * const pbs2_groups[] = {
+	"gpio18",
+};
+static const char * const qdss_gpio2_groups[] = {
+	"gpio18", "gpio109",
+};
+static const char * const pwm_0_groups[] = {
+	"gpio18",
+};
+static const char * const SDC1_TB_groups[] = {
+	"gpio19",
+};
+static const char * const pbs3_groups[] = {
+	"gpio19",
+};
+static const char * const qdss_gpio3_groups[] = {
+	"gpio19", "gpio110",
+};
+static const char * const cam_mclk_groups[] = {
+	"gpio20", "gpio21", "gpio27", "gpio28",
+};
+static const char * const pbs4_groups[] = {
+	"gpio20",
+};
+static const char * const adsp_ext_groups[] = {
+	"gpio21",
+};
+static const char * const pbs5_groups[] = {
+	"gpio21",
+};
+static const char * const cci_i2c_groups[] = {
+	"gpio22", "gpio23", "gpio29", "gpio30",
+};
+static const char * const prng_rosc_groups[] = {
+	"gpio22", "gpio23",
+};
+static const char * const pbs6_groups[] = {
+	"gpio22",
+};
+static const char * const phase_flag11_groups[] = {
+	"gpio22",
+};
+static const char * const dac_calib9_groups[] = {
+	"gpio22",
+};
+static const char * const atest_usb20_groups[] = {
+	"gpio22",
+};
+static const char * const pbs7_groups[] = {
+	"gpio23",
+};
+static const char * const phase_flag12_groups[] = {
+	"gpio23",
+};
+static const char * const dac_calib10_groups[] = {
+	"gpio23",
+};
+static const char * const atest_usb21_groups[] = {
+	"gpio23",
+};
+static const char * const CCI_TIMER1_groups[] = {
+	"gpio24",
+};
+static const char * const GCC_GP1_groups[] = {
+	"gpio24", "gpio86",
+};
+static const char * const pbs8_groups[] = {
+	"gpio24",
+};
+static const char * const phase_flag13_groups[] = {
+	"gpio24",
+};
+static const char * const dac_calib11_groups[] = {
+	"gpio24",
+};
+static const char * const atest_usb22_groups[] = {
+	"gpio24",
+};
+static const char * const cci_async_groups[] = {
+	"gpio25",
+};
+static const char * const CCI_TIMER0_groups[] = {
+	"gpio25",
+};
+static const char * const pbs9_groups[] = {
+	"gpio25",
+};
+static const char * const phase_flag14_groups[] = {
+	"gpio25",
+};
+static const char * const dac_calib12_groups[] = {
+	"gpio25",
+};
+static const char * const atest_usb23_groups[] = {
+	"gpio25",
+};
+static const char * const pbs10_groups[] = {
+	"gpio26",
+};
+static const char * const phase_flag15_groups[] = {
+	"gpio26",
+};
+static const char * const dac_calib13_groups[] = {
+	"gpio26",
+};
+static const char * const atest_usb2_groups[] = {
+	"gpio26",
+};
+static const char * const vsense_trigger_groups[] = {
+	"gpio26",
+};
+static const char * const qdss_cti_groups[] = {
+	"gpio27", "gpio28", "gpio72", "gpio73", "gpio96", "gpio97",
+};
+static const char * const CCI_TIMER2_groups[] = {
+	"gpio28",
+};
+static const char * const pwm_1_groups[] = {
+	"gpio28",
+};
+static const char * const phase_flag16_groups[] = {
+	"gpio29",
+};
+static const char * const dac_calib14_groups[] = {
+	"gpio29",
+};
+static const char * const atest_char_groups[] = {
+	"gpio29",
+};
+static const char * const phase_flag17_groups[] = {
+	"gpio30",
+};
+static const char * const dac_calib15_groups[] = {
+	"gpio30",
+};
+static const char * const atest_char0_groups[] = {
+	"gpio30",
+};
+static const char * const GP_PDM0_groups[] = {
+	"gpio31", "gpio95",
+};
+static const char * const phase_flag18_groups[] = {
+	"gpio31",
+};
+static const char * const dac_calib16_groups[] = {
+	"gpio31",
+};
+static const char * const atest_char1_groups[] = {
+	"gpio31",
+};
+static const char * const CCI_TIMER3_groups[] = {
+	"gpio32",
+};
+static const char * const GP_PDM1_groups[] = {
+	"gpio32", "gpio96",
+};
+static const char * const phase_flag19_groups[] = {
+	"gpio32",
+};
+static const char * const dac_calib17_groups[] = {
+	"gpio32",
+};
+static const char * const atest_char2_groups[] = {
+	"gpio32",
+};
+static const char * const GP_PDM2_groups[] = {
+	"gpio33", "gpio97",
+};
+static const char * const phase_flag20_groups[] = {
+	"gpio33",
+};
+static const char * const dac_calib18_groups[] = {
+	"gpio33",
+};
+static const char * const atest_char3_groups[] = {
+	"gpio33",
+};
+static const char * const phase_flag21_groups[] = {
+	"gpio35",
+};
+static const char * const phase_flag22_groups[] = {
+	"gpio36",
+};
+static const char * const char_exec_groups[] = {
+	"gpio37", "gpio38",
+};
+static const char * const NAV_GPIO_groups[] = {
+	"gpio42", "gpio47", "gpio52", "gpio95", "gpio96", "gpio97", "gpio106",
+	"gpio107", "gpio108",
+};
+static const char * const phase_flag23_groups[] = {
+	"gpio43",
+};
+static const char * const phase_flag24_groups[] = {
+	"gpio44",
+};
+static const char * const phase_flag25_groups[] = {
+	"gpio45",
+};
+static const char * const pbs14_groups[] = {
+	"gpio47",
+};
+static const char * const qdss_gpio14_groups[] = {
+	"gpio47", "gpio94",
+};
+static const char * const vfr_1_groups[] = {
+	"gpio48",
+};
+static const char * const pbs15_groups[] = {
+	"gpio48",
+};
+static const char * const qdss_gpio15_groups[] = {
+	"gpio48", "gpio95",
+};
+static const char * const PA_INDICATOR_groups[] = {
+	"gpio49",
+};
+static const char * const pwm_2_groups[] = {
+	"gpio51",
+};
+static const char * const gsm1_tx_groups[] = {
+	"gpio53",
+};
+static const char * const SSBI_WTR1_groups[] = {
+	"gpio59", "gpio60",
+};
+static const char * const pll_bypassnl_groups[] = {
+	"gpio62",
+};
+static const char * const pll_reset_groups[] = {
+	"gpio63",
+};
+static const char * const phase_flag26_groups[] = {
+	"gpio63",
+};
+static const char * const ddr_pxi0_groups[] = {
+	"gpio63", "gpio64",
+};
+static const char * const gsm0_tx_groups[] = {
+	"gpio64",
+};
+static const char * const phase_flag27_groups[] = {
+	"gpio64",
+};
+static const char * const GCC_GP2_groups[] = {
+	"gpio69", "gpio107",
+};
+static const char * const qdss_gpio12_groups[] = {
+	"gpio69", "gpio90",
+};
+static const char * const ddr_pxi1_groups[] = {
+	"gpio69", "gpio70",
+};
+static const char * const GCC_GP3_groups[] = {
+	"gpio70", "gpio106",
+};
+static const char * const qdss_gpio13_groups[] = {
+	"gpio70", "gpio91",
+};
+static const char * const dbg_out_groups[] = {
+	"gpio71",
+};
+static const char * const uim2_data_groups[] = {
+	"gpio72",
+};
+static const char * const pwm_3_groups[] = {
+	"gpio72",
+};
+static const char * const uim2_clk_groups[] = {
+	"gpio73",
+};
+static const char * const uim2_reset_groups[] = {
+	"gpio74",
+};
+static const char * const pwm_4_groups[] = {
+	"gpio74",
+};
+static const char * const uim2_present_groups[] = {
+	"gpio75",
+};
+static const char * const pwm_5_groups[] = {
+	"gpio75",
+};
+static const char * const uim1_data_groups[] = {
+	"gpio76",
+};
+static const char * const uim1_clk_groups[] = {
+	"gpio77",
+};
+static const char * const uim1_reset_groups[] = {
+	"gpio78",
+};
+static const char * const uim1_present_groups[] = {
+	"gpio79",
+};
+static const char * const dac_calib19_groups[] = {
+	"gpio80",
+};
+static const char * const mdp_vsync_groups[] = {
+	"gpio81", "gpio96", "gpio97",
+};
+static const char * const dac_calib20_groups[] = {
+	"gpio81",
+};
+static const char * const dac_calib21_groups[] = {
+	"gpio82",
+};
+static const char * const pwm_6_groups[] = {
+	"gpio82",
+};
+static const char * const atest_bbrx1_groups[] = {
+	"gpio86",
+};
+static const char * const pbs11_groups[] = {
+	"gpio87",
+};
+static const char * const usb_phy_groups[] = {
+	"gpio89",
+};
+static const char * const atest_bbrx0_groups[] = {
+	"gpio89",
+};
+static const char * const pwm_7_groups[] = {
+	"gpio89",
+};
+static const char * const mss_lte_groups[] = {
+	"gpio90", "gpio91",
+};
+static const char * const pbs12_groups[] = {
+	"gpio90",
+};
+static const char * const pbs13_groups[] = {
+	"gpio91",
+};
+static const char * const wlan1_adc0_groups[] = {
+	"gpio94",
+};
+static const char * const wlan1_adc1_groups[] = {
+	"gpio95",
+};
+static const char * const sd_write_groups[] = {
+	"gpio96",
+};
+static const char * const JITTER_BIST_groups[] = {
+	"gpio96", "gpio97",
+};
+static const char * const atest_gpsadc_dtest0_native_groups[] = {
+	"gpio100",
+};
+static const char * const atest_gpsadc_dtest1_native_groups[] = {
+	"gpio101",
+};
+static const char * const phase_flag28_groups[] = {
+	"gpio102",
+};
+static const char * const dac_calib22_groups[] = {
+	"gpio102",
+};
+static const char * const ddr_pxi2_groups[] = {
+	"gpio102", "gpio103",
+};
+static const char * const phase_flag29_groups[] = {
+	"gpio103",
+};
+static const char * const dac_calib23_groups[] = {
+	"gpio103",
+};
+static const char * const phase_flag30_groups[] = {
+	"gpio104",
+};
+static const char * const dac_calib24_groups[] = {
+	"gpio104",
+};
+static const char * const ddr_pxi3_groups[] = {
+	"gpio104", "gpio105",
+};
+static const char * const pwm_8_groups[] = {
+	"gpio104",
+};
+static const char * const phase_flag31_groups[] = {
+	"gpio105",
+};
+static const char * const dac_calib25_groups[] = {
+	"gpio105",
+};
+static const char * const pwm_9_groups[] = {
+	"gpio115",
+};
+
+static const struct msm_function scuba_functions[] = {
+	FUNCTION(qup0),
+	FUNCTION(gpio),
+	FUNCTION(ddr_bist),
+	FUNCTION(phase_flag0),
+	FUNCTION(qdss_gpio8),
+	FUNCTION(atest_tsens),
+	FUNCTION(mpm_pwr),
+	FUNCTION(m_voc),
+	FUNCTION(phase_flag1),
+	FUNCTION(qdss_gpio9),
+	FUNCTION(atest_tsens2),
+	FUNCTION(phase_flag2),
+	FUNCTION(qdss_gpio10),
+	FUNCTION(dac_calib0),
+	FUNCTION(atest_usb10),
+	FUNCTION(phase_flag3),
+	FUNCTION(qdss_gpio11),
+	FUNCTION(dac_calib1),
+	FUNCTION(atest_usb11),
+	FUNCTION(qup1),
+	FUNCTION(CRI_TRNG0),
+	FUNCTION(phase_flag4),
+	FUNCTION(dac_calib2),
+	FUNCTION(atest_usb12),
+	FUNCTION(CRI_TRNG1),
+	FUNCTION(phase_flag5),
+	FUNCTION(dac_calib3),
+	FUNCTION(atest_usb13),
+	FUNCTION(qup2),
+	FUNCTION(phase_flag6),
+	FUNCTION(dac_calib4),
+	FUNCTION(atest_usb1),
+	FUNCTION(qup3),
+	FUNCTION(pbs_out),
+	FUNCTION(PLL_BIST),
+	FUNCTION(qdss_gpio),
+	FUNCTION(tsense_pwm),
+	FUNCTION(AGERA_PLL),
+	FUNCTION(pbs0),
+	FUNCTION(qdss_gpio0),
+	FUNCTION(pbs1),
+	FUNCTION(qdss_gpio1),
+	FUNCTION(qup4),
+	FUNCTION(tgu_ch0),
+	FUNCTION(tgu_ch1),
+	FUNCTION(qup5),
+	FUNCTION(tgu_ch2),
+	FUNCTION(phase_flag7),
+	FUNCTION(qdss_gpio4),
+	FUNCTION(dac_calib5),
+	FUNCTION(tgu_ch3),
+	FUNCTION(phase_flag8),
+	FUNCTION(qdss_gpio5),
+	FUNCTION(dac_calib6),
+	FUNCTION(phase_flag9),
+	FUNCTION(qdss_gpio6),
+	FUNCTION(dac_calib7),
+	FUNCTION(phase_flag10),
+	FUNCTION(qdss_gpio7),
+	FUNCTION(dac_calib8),
+	FUNCTION(SDC2_TB),
+	FUNCTION(CRI_TRNG),
+	FUNCTION(pbs2),
+	FUNCTION(qdss_gpio2),
+	FUNCTION(pwm_0),
+	FUNCTION(SDC1_TB),
+	FUNCTION(pbs3),
+	FUNCTION(qdss_gpio3),
+	FUNCTION(cam_mclk),
+	FUNCTION(pbs4),
+	FUNCTION(adsp_ext),
+	FUNCTION(pbs5),
+	FUNCTION(cci_i2c),
+	FUNCTION(prng_rosc),
+	FUNCTION(pbs6),
+	FUNCTION(phase_flag11),
+	FUNCTION(dac_calib9),
+	FUNCTION(atest_usb20),
+	FUNCTION(pbs7),
+	FUNCTION(phase_flag12),
+	FUNCTION(dac_calib10),
+	FUNCTION(atest_usb21),
+	FUNCTION(CCI_TIMER1),
+	FUNCTION(GCC_GP1),
+	FUNCTION(pbs8),
+	FUNCTION(phase_flag13),
+	FUNCTION(dac_calib11),
+	FUNCTION(atest_usb22),
+	FUNCTION(cci_async),
+	FUNCTION(CCI_TIMER0),
+	FUNCTION(pbs9),
+	FUNCTION(phase_flag14),
+	FUNCTION(dac_calib12),
+	FUNCTION(atest_usb23),
+	FUNCTION(pbs10),
+	FUNCTION(phase_flag15),
+	FUNCTION(dac_calib13),
+	FUNCTION(atest_usb2),
+	FUNCTION(vsense_trigger),
+	FUNCTION(qdss_cti),
+	FUNCTION(CCI_TIMER2),
+	FUNCTION(pwm_1),
+	FUNCTION(phase_flag16),
+	FUNCTION(dac_calib14),
+	FUNCTION(atest_char),
+	FUNCTION(phase_flag17),
+	FUNCTION(dac_calib15),
+	FUNCTION(atest_char0),
+	FUNCTION(GP_PDM0),
+	FUNCTION(phase_flag18),
+	FUNCTION(dac_calib16),
+	FUNCTION(atest_char1),
+	FUNCTION(CCI_TIMER3),
+	FUNCTION(GP_PDM1),
+	FUNCTION(phase_flag19),
+	FUNCTION(dac_calib17),
+	FUNCTION(atest_char2),
+	FUNCTION(GP_PDM2),
+	FUNCTION(phase_flag20),
+	FUNCTION(dac_calib18),
+	FUNCTION(atest_char3),
+	FUNCTION(phase_flag21),
+	FUNCTION(phase_flag22),
+	FUNCTION(char_exec),
+	FUNCTION(NAV_GPIO),
+	FUNCTION(phase_flag23),
+	FUNCTION(phase_flag24),
+	FUNCTION(phase_flag25),
+	FUNCTION(pbs14),
+	FUNCTION(qdss_gpio14),
+	FUNCTION(vfr_1),
+	FUNCTION(pbs15),
+	FUNCTION(qdss_gpio15),
+	FUNCTION(PA_INDICATOR),
+	FUNCTION(pwm_2),
+	FUNCTION(gsm1_tx),
+	FUNCTION(SSBI_WTR1),
+	FUNCTION(pll_bypassnl),
+	FUNCTION(pll_reset),
+	FUNCTION(phase_flag26),
+	FUNCTION(ddr_pxi0),
+	FUNCTION(gsm0_tx),
+	FUNCTION(phase_flag27),
+	FUNCTION(GCC_GP2),
+	FUNCTION(qdss_gpio12),
+	FUNCTION(ddr_pxi1),
+	FUNCTION(GCC_GP3),
+	FUNCTION(qdss_gpio13),
+	FUNCTION(dbg_out),
+	FUNCTION(uim2_data),
+	FUNCTION(pwm_3),
+	FUNCTION(uim2_clk),
+	FUNCTION(uim2_reset),
+	FUNCTION(pwm_4),
+	FUNCTION(uim2_present),
+	FUNCTION(pwm_5),
+	FUNCTION(uim1_data),
+	FUNCTION(uim1_clk),
+	FUNCTION(uim1_reset),
+	FUNCTION(uim1_present),
+	FUNCTION(dac_calib19),
+	FUNCTION(mdp_vsync),
+	FUNCTION(dac_calib20),
+	FUNCTION(dac_calib21),
+	FUNCTION(pwm_6),
+	FUNCTION(atest_bbrx1),
+	FUNCTION(pbs11),
+	FUNCTION(usb_phy),
+	FUNCTION(atest_bbrx0),
+	FUNCTION(pwm_7),
+	FUNCTION(mss_lte),
+	FUNCTION(pbs12),
+	FUNCTION(pbs13),
+	FUNCTION(wlan1_adc0),
+	FUNCTION(wlan1_adc1),
+	FUNCTION(sd_write),
+	FUNCTION(JITTER_BIST),
+	FUNCTION(atest_gpsadc_dtest0_native),
+	FUNCTION(atest_gpsadc_dtest1_native),
+	FUNCTION(phase_flag28),
+	FUNCTION(dac_calib22),
+	FUNCTION(ddr_pxi2),
+	FUNCTION(phase_flag29),
+	FUNCTION(dac_calib23),
+	FUNCTION(phase_flag30),
+	FUNCTION(dac_calib24),
+	FUNCTION(ddr_pxi3),
+	FUNCTION(pwm_8),
+	FUNCTION(phase_flag31),
+	FUNCTION(dac_calib25),
+	FUNCTION(pwm_9),
+};
+
+/* Every pin is maintained as a single group, and a missing or non-existent
+ * pin is maintained as a dummy group to keep the pin group index in sync
+ * with the pin descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
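+/*
+ * The last two PINGROUP() arguments give the wakeup register offset and
+ * bit (wake_reg = REG_BASE + wake_off, wake_bit = bit); entries passing
+ * (0, -1) are assumed to have no wakeup-capable interrupt.
+ */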
+static const struct msm_pingroup scuba_groups[] = {
+	[0] = PINGROUP(0, qup0, m_voc, ddr_bist, NA, phase_flag0, qdss_gpio8,
+		       atest_tsens, NA, NA, 0x7F000, 0),
+	[1] = PINGROUP(1, qup0, mpm_pwr, ddr_bist, NA, phase_flag1, qdss_gpio9,
+		       atest_tsens2, NA, NA, 0, -1),
+	[2] = PINGROUP(2, qup0, ddr_bist, NA, phase_flag2, qdss_gpio10,
+		       dac_calib0, atest_usb10, NA, NA, 0, -1),
+	[3] = PINGROUP(3, qup0, ddr_bist, NA, phase_flag3, qdss_gpio11,
+		       dac_calib1, atest_usb11, NA, NA, 0x7F000, 1),
+	[4] = PINGROUP(4, qup1, CRI_TRNG0, NA, phase_flag4, dac_calib2,
+		       atest_usb12, NA, NA, NA, 0x7F000, 2),
+	[5] = PINGROUP(5, qup1, CRI_TRNG1, NA, phase_flag5, dac_calib3,
+		       atest_usb13, NA, NA, NA, 0, -1),
+	[6] = PINGROUP(6, qup2, NA, phase_flag6, dac_calib4, atest_usb1, NA,
+		       NA, NA, NA, 0x7F000, 3),
+	[7] = PINGROUP(7, qup2, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[8] = PINGROUP(8, qup3, pbs_out, PLL_BIST, NA, qdss_gpio, NA,
+		       tsense_pwm, NA, NA, 0x7F008, 0),
+	[9] = PINGROUP(9, qup3, pbs_out, PLL_BIST, NA, qdss_gpio, NA, NA, NA,
+		       NA, 0, -1),
+	[10] = PINGROUP(10, qup3, AGERA_PLL, NA, pbs0, qdss_gpio0, NA, NA, NA,
+			NA, 0, -1),
+	[11] = PINGROUP(11, qup3, AGERA_PLL, NA, pbs1, qdss_gpio1, NA, NA, NA,
+			NA, 0x7F008, 1),
+	[12] = PINGROUP(12, qup4, tgu_ch0, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[13] = PINGROUP(13, qup4, tgu_ch1, NA, NA, NA, NA, NA, NA, NA,
+			0x7F000, 4),
+	[14] = PINGROUP(14, qup5, tgu_ch2, NA, phase_flag7, qdss_gpio4,
+			dac_calib5, NA, NA, NA, 0x7F000, 5),
+	[15] = PINGROUP(15, qup5, tgu_ch3, NA, phase_flag8, qdss_gpio5,
+			dac_calib6, NA, NA, NA, 0, -1),
+	[16] = PINGROUP(16, qup5, NA, phase_flag9, qdss_gpio6, dac_calib7, NA,
+			NA, NA, NA, 0, -1),
+	[17] = PINGROUP(17, qup5, NA, phase_flag10, qdss_gpio7, dac_calib8, NA,
+			NA, NA, NA, 0x7F000, 6),
+	[18] = PINGROUP(18, SDC2_TB, CRI_TRNG, pbs2, qdss_gpio2, NA, pwm_0, NA,
+			NA, NA, 0x7F008, 2),
+	[19] = PINGROUP(19, SDC1_TB, pbs3, qdss_gpio3, NA, NA, NA, NA, NA, NA,
+			0x7F008, 3),
+	[20] = PINGROUP(20, cam_mclk, pbs4, qdss_gpio4, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[21] = PINGROUP(21, cam_mclk, adsp_ext, pbs5, qdss_gpio5, NA, NA, NA,
+			NA, NA, 0, -1),
+	[22] = PINGROUP(22, cci_i2c, prng_rosc, NA, pbs6, phase_flag11,
+			qdss_gpio6, dac_calib9, atest_usb20, NA, 0, -1),
+	[23] = PINGROUP(23, cci_i2c, prng_rosc, NA, pbs7, phase_flag12,
+			qdss_gpio7, dac_calib10, atest_usb21, NA, 0, -1),
+	[24] = PINGROUP(24, CCI_TIMER1, GCC_GP1, NA, pbs8, phase_flag13,
+			qdss_gpio8, dac_calib11, atest_usb22, NA, 0x7F008, 4),
+	[25] = PINGROUP(25, cci_async, CCI_TIMER0, NA, pbs9, phase_flag14,
+			qdss_gpio9, dac_calib12, atest_usb23, NA, 0x7F008, 5),
+	[26] = PINGROUP(26, NA, pbs10, phase_flag15, qdss_gpio10, dac_calib13,
+			atest_usb2, vsense_trigger, NA, NA, 0, -1),
+	[27] = PINGROUP(27, cam_mclk, qdss_cti, NA, NA, NA, NA, NA, NA, NA,
+			0x7F008, 6),
+	[28] = PINGROUP(28, cam_mclk, CCI_TIMER2, qdss_cti, NA, pwm_1, NA, NA,
+			NA, NA, 0x7F008, 7),
+	[29] = PINGROUP(29, cci_i2c, NA, phase_flag16, dac_calib14, atest_char,
+			NA, NA, NA, NA, 0, -1),
+	[30] = PINGROUP(30, cci_i2c, NA, phase_flag17, dac_calib15,
+			atest_char0, NA, NA, NA, NA, 0, -1),
+	[31] = PINGROUP(31, GP_PDM0, NA, phase_flag18, dac_calib16,
+			atest_char1, NA, NA, NA, NA, 0x7F008, 8),
+	[32] = PINGROUP(32, CCI_TIMER3, GP_PDM1, NA, phase_flag19, dac_calib17,
+			atest_char2, NA, NA, NA, 0x7F008, 9),
+	[33] = PINGROUP(33, GP_PDM2, NA, phase_flag20, dac_calib18,
+			atest_char3, NA, NA, NA, NA, 0x7F008, 10),
+	[34] = PINGROUP(34, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x7F008, 11),
+	[35] = PINGROUP(35, NA, phase_flag21, NA, NA, NA, NA, NA, NA, NA,
+			0x7F008, 12),
+	[36] = PINGROUP(36, NA, phase_flag22, NA, NA, NA, NA, NA, NA, NA,
+			0x7F008, 13),
+	[37] = PINGROUP(37, NA, NA, char_exec, NA, NA, NA, NA, NA, NA, 0, -1),
+	[38] = PINGROUP(38, NA, NA, NA, char_exec, NA, NA, NA, NA, NA, 0, -1),
+	[39] = PINGROUP(39, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x7F008, 14),
+	[40] = PINGROUP(40, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[41] = PINGROUP(41, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[42] = PINGROUP(42, NA, NAV_GPIO, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[43] = PINGROUP(43, NA, NA, phase_flag23, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[44] = PINGROUP(44, NA, NA, phase_flag24, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[45] = PINGROUP(45, NA, NA, phase_flag25, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[46] = PINGROUP(46, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x7F008, 15),
+	[47] = PINGROUP(47, NA, NAV_GPIO, pbs14, qdss_gpio14, NA, NA, NA, NA,
+			NA, 0, -1),
+	[48] = PINGROUP(48, NA, vfr_1, NA, pbs15, qdss_gpio15, NA, NA, NA, NA,
+			0, -1),
+	[49] = PINGROUP(49, NA, PA_INDICATOR, NA, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[50] = PINGROUP(50, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[51] = PINGROUP(51, NA, NA, NA, pwm_2, NA, NA, NA, NA, NA, 0, -1),
+	[52] = PINGROUP(52, NA, NAV_GPIO, pbs_out, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[53] = PINGROUP(53, NA, gsm1_tx, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[54] = PINGROUP(54, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[55] = PINGROUP(55, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[56] = PINGROUP(56, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[57] = PINGROUP(57, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[58] = PINGROUP(58, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[59] = PINGROUP(59, NA, SSBI_WTR1, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[60] = PINGROUP(60, NA, SSBI_WTR1, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[61] = PINGROUP(61, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[62] = PINGROUP(62, NA, pll_bypassnl, NA, NA, NA, NA, NA, NA, NA,
+			0x7F00C, 0),
+	[63] = PINGROUP(63, pll_reset, NA, phase_flag26, ddr_pxi0, NA, NA, NA,
+			NA, NA, 0x7F00C, 1),
+	[64] = PINGROUP(64, gsm0_tx, NA, phase_flag27, ddr_pxi0, NA, NA, NA,
+			NA, NA, 0x7F00C, 2),
+	[65] = PINGROUP(65, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x7F000, 7),
+	[66] = PINGROUP(66, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x7F000, 8),
+	[67] = PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x7F000, 9),
+	[68] = PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[69] = PINGROUP(69, qup1, GCC_GP2, qdss_gpio12, ddr_pxi1, NA, NA, NA,
+			NA, NA, 0x7F000, 10),
+	[70] = PINGROUP(70, qup1, GCC_GP3, qdss_gpio13, ddr_pxi1, NA, NA, NA,
+			NA, NA, 0x7F000, 11),
+	[71] = PINGROUP(71, qup2, dbg_out, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[72] = PINGROUP(72, uim2_data, qdss_cti, NA, pwm_3, NA, NA, NA, NA, NA,
+			0x7F010, 3),
+	[73] = PINGROUP(73, uim2_clk, NA, qdss_cti, NA, NA, NA, NA, NA, NA,
+			0, -1),
+	[74] = PINGROUP(74, uim2_reset, NA, NA, pwm_4, NA, NA, NA, NA, NA,
+			0, -1),
+	[75] = PINGROUP(75, uim2_present, NA, NA, pwm_5, NA, NA, NA, NA, NA,
+			0x7F010, 4),
+	[76] = PINGROUP(76, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[77] = PINGROUP(77, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[78] = PINGROUP(78, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[79] = PINGROUP(79, uim1_present, NA, NA, NA, NA, NA, NA, NA, NA,
+			0x7F010, 5),
+	[80] = PINGROUP(80, qup2, dac_calib19, NA, NA, NA, NA, NA, NA, NA,
+			0x7F000, 12),
+	[81] = PINGROUP(81, mdp_vsync, mdp_vsync, mdp_vsync, dac_calib20, NA,
+			NA, NA, NA, NA, 0x7F000, 13),
+	[82] = PINGROUP(82, qup0, dac_calib21, NA, pwm_6, NA, NA, NA, NA, NA,
+			0, -1),
+	[83] = PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x7F000, 14),
+	[84] = PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x7F000, 15),
+	[85] = PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x7F004, 0),
+	[86] = PINGROUP(86, qup0, GCC_GP1, atest_bbrx1, NA, NA, NA, NA, NA, NA,
+			0x7F004, 1),
+	[87] = PINGROUP(87, pbs11, qdss_gpio11, NA, NA, NA, NA, NA, NA, NA,
+			0x7F00C, 3),
+	[88] = PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x7F00C, 4),
+	[89] = PINGROUP(89, usb_phy, atest_bbrx0, NA, pwm_7, NA, NA, NA, NA,
+			NA, 0x7F004, 2),
+	[90] = PINGROUP(90, mss_lte, pbs12, qdss_gpio12, NA, NA, NA, NA, NA,
+			NA, 0, -1),
+	[91] = PINGROUP(91, mss_lte, pbs13, qdss_gpio13, NA, NA, NA, NA, NA,
+			NA, 0x7F00C, 5),
+	[92] = PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[93] = PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x7F004, 3),
+	[94] = PINGROUP(94, NA, qdss_gpio14, wlan1_adc0, NA, NA, NA, NA, NA,
+			NA, 0x7F004, 4),
+	[95] = PINGROUP(95, NAV_GPIO, GP_PDM0, qdss_gpio15, wlan1_adc1, NA, NA,
+			NA, NA, NA, 0x7F004, 5),
+	[96] = PINGROUP(96, qup4, NAV_GPIO, mdp_vsync, GP_PDM1, sd_write,
+			JITTER_BIST, qdss_cti, qdss_cti, NA, 0x7F004, 6),
+	[97] = PINGROUP(97, qup4, NAV_GPIO, mdp_vsync, GP_PDM2, JITTER_BIST,
+			qdss_cti, qdss_cti, NA, NA, 0x7F004, 7),
+	[98] = PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[99] = PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x7F010, 6),
+	[100] = PINGROUP(100, atest_gpsadc_dtest0_native, NA, NA, NA, NA, NA,
+			 NA, NA, NA, 0, -1),
+	[101] = PINGROUP(101, atest_gpsadc_dtest1_native, NA, NA, NA, NA, NA,
+			 NA, NA, NA, 0x7F010, 7),
+	[102] = PINGROUP(102, NA, phase_flag28, dac_calib22, ddr_pxi2, NA, NA,
+			 NA, NA, NA, 0x7F010, 8),
+	[103] = PINGROUP(103, NA, phase_flag29, dac_calib23, ddr_pxi2, NA, NA,
+			 NA, NA, NA, 0x7F010, 9),
+	[104] = PINGROUP(104, NA, phase_flag30, qdss_gpio1, dac_calib24,
+			 ddr_pxi3, NA, pwm_8, NA, NA, 0x7F010, 10),
+	[105] = PINGROUP(105, NA, phase_flag31, qdss_gpio, dac_calib25,
+			 ddr_pxi3, NA, NA, NA, NA, 0x7F010, 11),
+	[106] = PINGROUP(106, NAV_GPIO, GCC_GP3, qdss_gpio, NA, NA, NA, NA, NA,
+			 NA, 0x7F010, 12),
+	[107] = PINGROUP(107, NAV_GPIO, GCC_GP2, qdss_gpio0, NA, NA, NA, NA,
+			 NA, NA, 0x7F010, 13),
+	[108] = PINGROUP(108, NAV_GPIO, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[109] = PINGROUP(109, NA, qdss_gpio2, NA, NA, NA, NA, NA, NA, NA,
+			 0x7F010, 14),
+	[110] = PINGROUP(110, NA, qdss_gpio3, NA, NA, NA, NA, NA, NA, NA,
+			 0, -1),
+	[111] = PINGROUP(111, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[112] = PINGROUP(112, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x7F010, 15),
+	[113] = PINGROUP(113, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[114] = PINGROUP(114, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[115] = PINGROUP(115, NA, pwm_9, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[116] = PINGROUP(116, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[117] = PINGROUP(117, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[118] = PINGROUP(118, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[119] = PINGROUP(119, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[120] = PINGROUP(120, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x7F004, 8),
+	[121] = PINGROUP(121, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[122] = PINGROUP(122, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x7F004, 9),
+	[123] = PINGROUP(123, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[124] = PINGROUP(124, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[125] = PINGROUP(125, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[126] = PINGROUP(126, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+	[127] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x84004, 0, 0),
+	[128] = SDC_QDSD_PINGROUP(sdc1_clk, 0x84000, 13, 6),
+	[129] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x84000, 11, 3),
+	[130] = SDC_QDSD_PINGROUP(sdc1_data, 0x84000, 9, 0),
+	[131] = SDC_QDSD_PINGROUP(sdc2_clk, 0x86000, 14, 6),
+	[132] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x86000, 11, 3),
+	[133] = SDC_QDSD_PINGROUP(sdc2_data, 0x86000, 9, 0),
+};
+
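+/*
+ * ngpios counts only the muxable pins gpio0..gpio126; the SDC pad
+ * groups above sit beyond the GPIO range in the pin descriptor table.
+ */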
+static const struct msm_pinctrl_soc_data scuba_pinctrl = {
+	.pins = scuba_pins,
+	.npins = ARRAY_SIZE(scuba_pins),
+	.functions = scuba_functions,
+	.nfunctions = ARRAY_SIZE(scuba_functions),
+	.groups = scuba_groups,
+	.ngroups = ARRAY_SIZE(scuba_groups),
+	.ngpios = 127,
+};
+
+static int scuba_pinctrl_probe(struct platform_device *pdev)
+{
+	return msm_pinctrl_probe(pdev, &scuba_pinctrl);
+}
+
+static const struct of_device_id scuba_pinctrl_of_match[] = {
+	{ .compatible = "qcom,scuba-pinctrl", },
+	{ },
+};
+
+static struct platform_driver scuba_pinctrl_driver = {
+	.driver = {
+		.name = "scuba-pinctrl",
+		.of_match_table = scuba_pinctrl_of_match,
+	},
+	.probe = scuba_pinctrl_probe,
+	.remove = msm_pinctrl_remove,
+};
+
+static int __init scuba_pinctrl_init(void)
+{
+	return platform_driver_register(&scuba_pinctrl_driver);
+}
+arch_initcall(scuba_pinctrl_init);
+
+static void __exit scuba_pinctrl_exit(void)
+{
+	platform_driver_unregister(&scuba_pinctrl_driver);
+}
+module_exit(scuba_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI scuba pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, scuba_pinctrl_of_match);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 0d9a2c7..e7e82e0 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -3554,7 +3554,7 @@ static int ipa3_q6_set_ex_path_to_apps(void)
 	/* Set the exception path to AP */
 	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
 		ep_idx = ipa3_get_ep_mapping(client_idx);
-		if (ep_idx == -1)
+		if (ep_idx == -1 || (ep_idx >= IPA3_MAX_NUM_PIPES))
 			continue;
 
 		/* disable statuses for all modem controlled prod pipes */
@@ -5785,6 +5785,12 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 		goto fail_dma_task;
 	}
 
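+	/*
+	 * Pre-allocate the coalescing close-frame command payload; it is
+	 * released via ipa3_free_coal_close_frame() on the error path
+	 * below.
+	 */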
+	result = ipa3_allocate_coal_close_frame();
+	if (result) {
+		IPAERR("failed to allocate coal frame cmd\n");
+		goto fail_coal_frame;
+	}
+
 	if (ipa3_nat_ipv6ct_init_devices()) {
 		IPAERR("unable to init NAT and IPv6CT devices\n");
 		result = -ENODEV;
@@ -5989,6 +5995,8 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 fail_allok_pkt_init:
 	ipa3_nat_ipv6ct_destroy_devices();
 fail_nat_ipv6ct_init_dev:
+	ipa3_free_coal_close_frame();
+fail_coal_frame:
 	ipa3_free_dma_task_for_gsi();
 fail_dma_task:
 fail_init_hw:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index e342024..82a0e54 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -2019,7 +2019,6 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
 			rx_pkt = sys->repl->cache[curr_wq];
 			curr_wq = (++curr_wq == sys->repl->capacity) ?
 								 0 : curr_wq;
-			atomic_set(&sys->repl->head_idx, curr_wq);
 		}
 
 		dma_sync_single_for_device(ipa3_ctx->pdev,
@@ -2057,6 +2056,7 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
 	if (likely(ret == GSI_STATUS_SUCCESS)) {
 		/* ensure write is done before setting head index */
 		mb();
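+		/*
+		 * Publish the workqueue-cache head index only once the ring
+		 * write has succeeded; it was previously advanced inside the
+		 * replenish loop above.
+		 */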
+		atomic_set(&sys->repl->head_idx, curr_wq);
 		atomic_set(&sys->page_recycle_repl->head_idx, curr);
 		sys->len = rx_len_cached;
 	} else {
@@ -2809,8 +2809,14 @@ static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
 		case IPAHAL_PKT_STATUS_OPCODE_PACKET:
 		case IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET:
 		case IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS:
-		case IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE:
 			break;
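+		/*
+		 * Fragment-rule status elements are no longer treated as
+		 * normal packets on the LAN consumer: log them and skip the
+		 * status element instead.
+		 */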
+		case IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE:
+			IPAERR_RL("Frag packets received on lan consumer\n");
+			IPAERR_RL("STATUS opcode=%d src=%d dst=%d src ip=%x\n",
+				status.status_opcode, status.endp_src_idx,
+				status.endp_dest_idx, status.src_ip_addr);
+			skb_pull(skb, pkt_status_sz);
+			continue;
 		default:
 			IPAERR_RL("unsupported opcode(%d)\n",
 				status.status_opcode);
@@ -3262,7 +3268,13 @@ static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
 
 static void ipa3_recycle_rx_page_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
 {
-	/* no-op */
+	struct ipa_rx_page_data rx_page;
+
+	rx_page = rx_pkt->page_data;
+
+	/* Free the rx_pkt wrapper only for temporarily allocated pages */
+	if (rx_page.is_tmp_alloc)
+		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
 }
 
 /**
@@ -4974,15 +4986,22 @@ int ipa3_rx_poll(u32 clnt_hdl, int weight)
 	}
 	cnt += weight - remain_aggr_weight * IPA_WAN_AGGR_PKT_CNT;
 	/* call repl_hdlr before napi_reschedule / napi_complete */
-	if (cnt)
-		ep->sys->repl_hdlr(ep->sys);
-	if (cnt < weight) {
+	ep->sys->repl_hdlr(ep->sys);
+
+	/* When not enough descriptors could be replenished, keep the pipe
+	 * in polling mode until the minimum number of descriptors has
+	 * been replenished.
+	 */
+	if (cnt < weight && ep->sys->len > IPA_DEFAULT_SYS_YELLOW_WM) {
 		napi_complete(ep->sys->napi_obj);
 		ret = ipa3_rx_switch_to_intr_mode(ep->sys);
 		if (ret == -GSI_STATUS_PENDING_IRQ &&
 				napi_reschedule(ep->sys->napi_obj))
 			goto start_poll;
 		ipa_pm_deferred_deactivate(ep->sys->pm_hdl);
+	} else {
+		cnt = weight;
+		IPADBG_LOW("Client = %d not replenished free descripotrs\n",
+				ep->client);
 	}
 	return cnt;
 }
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 5f319f2..a6bdbcb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1758,6 +1758,7 @@ struct ipa3_pc_mbox_data {
  * @lan_rx_napi_enable: flag if NAPI is enabled on the LAN dp
  * @lan_ndev: dummy netdev for LAN rx NAPI
  * @napi_lan_rx: NAPI object for LAN rx
+ * @coal_cmd_pyld: holds the coalescing close frame command payload
  */
 struct ipa3_context {
 	struct ipa3_char_device_context cdev;
@@ -1929,6 +1930,7 @@ struct ipa3_context {
 	bool lan_rx_napi_enable;
 	struct net_device lan_ndev;
 	struct napi_struct napi_lan_rx;
+	struct ipahal_imm_cmd_pyld *coal_cmd_pyld;
 };
 
 struct ipa3_plat_drv_res {
@@ -3013,6 +3015,8 @@ void ipa3_disable_prefetch(enum ipa_client_type client);
 int ipa3_alloc_common_event_ring(void);
 int ipa3_allocate_dma_task_for_gsi(void);
 void ipa3_free_dma_task_for_gsi(void);
+int ipa3_allocate_coal_close_frame(void);
+void ipa3_free_coal_close_frame(void);
 int ipa3_set_clock_plan_from_pm(int idx);
 void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys);
 int ipa3_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs);
@@ -3111,4 +3115,10 @@ static inline void *alloc_and_init(u32 size, u32 init_val)
 bool ipa3_is_apq(void);
 /* check if odl is connected */
 bool ipa3_is_odl_connected(void);
+
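+/*
+ * uC flow-control interface: enable monitoring on a GSI channel with a
+ * RED-marker threshold, disable it, or update the monitored channel
+ * bitmask (add_delete selects add vs. delete); see the callers in
+ * ipa_mpm.c.
+ */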
+int ipa3_uc_send_enable_flow_control(uint16_t gsi_chid,
+	uint16_t redMarkerThreshold);
+int ipa3_uc_send_disable_flow_control(void);
+int ipa3_uc_send_update_flow_control(uint32_t bitmask,
+	uint8_t  add_delete);
 #endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
index 02895e7fc..7be84f2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
@@ -55,8 +55,8 @@
 
 #define IPA_MPM_MAX_MHIP_CHAN 3
 
-#define IPA_MPM_NUM_RING_DESC 74
-#define IPA_MPM_RING_LEN (IPA_MPM_NUM_RING_DESC - 10)
+#define IPA_MPM_NUM_RING_DESC 6
+#define IPA_MPM_RING_LEN IPA_MPM_NUM_RING_DESC
 
 #define IPA_MPM_MHI_HOST_UL_CHANNEL 4
 #define IPA_MPM_MHI_HOST_DL_CHANNEL  5
@@ -72,6 +72,8 @@
 #define IPA_CHANNEL_STOP_IN_PROC_TO_MSEC 5
 #define IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC 200
 #define IPA_MHIP_HOLB_TMO 31 /* value to match granularity on ipa HW 4.5 */
+#define IPA_MPM_FLOW_CTRL_ADD 1
+#define IPA_MPM_FLOW_CTRL_DELETE 0
 
 enum mhip_re_type {
 	MHIP_RE_XFER = 0x2,
@@ -394,7 +396,9 @@ struct ipa_mpm_context {
 	atomic_t probe_cnt;
 	atomic_t pcie_clk_total_cnt;
 	atomic_t ipa_clk_total_cnt;
+	atomic_t flow_ctrl_mask;
 	atomic_t adpl_over_usb_available;
+	atomic_t adpl_over_odl_available;
 	struct device *parent_pdev;
 	struct ipa_smmu_cb_ctx carved_smmu_cb;
 	struct device *mhi_parent_dev;
@@ -686,7 +690,7 @@ static void ipa_mpm_smmu_unmap(dma_addr_t carved_iova, int sz, int dir,
 
 		cb->next_addr -= size_p;
 		dma_unmap_single(ipa3_ctx->pdev, ap_cb_iova,
-			IPA_MPM_RING_TOTAL_SIZE, dir);
+			size_p, dir);
 	} else {
 		dma_unmap_single(ipa3_ctx->pdev, ap_cb_iova,
 			IPA_MPM_RING_TOTAL_SIZE, dir);
@@ -1265,12 +1269,12 @@ static void ipa_mpm_clean_mhip_chan(int mhi_idx,
 	if (IPA_CLIENT_IS_PROD(mhip_client)) {
 		ipa_mpm_smmu_unmap(
 			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa,
-			IPA_MPM_PAGE_SIZE, dir,
+			PAGE_SIZE, dir,
 			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_er);
 
 		ipa_mpm_smmu_unmap(
 			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa,
-			IPA_MPM_PAGE_SIZE, dir,
+			PAGE_SIZE, dir,
 			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr);
 
 		if (ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va) {
@@ -1290,11 +1294,11 @@ static void ipa_mpm_clean_mhip_chan(int mhi_idx,
 	} else {
 		ipa_mpm_smmu_unmap(
 			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa,
-			IPA_MPM_PAGE_SIZE, dir,
+			PAGE_SIZE, dir,
 			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr);
 		ipa_mpm_smmu_unmap(
 			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa,
-			IPA_MPM_PAGE_SIZE, dir,
+			PAGE_SIZE, dir,
 			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_er);
 
 		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa = 0;
@@ -1454,7 +1458,8 @@ static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
 
 	if (vote == CLK_ON) {
 		result = mhi_device_get_sync(
-			ipa_mpm_ctx->md[probe_id].mhi_dev, MHI_VOTE_BUS);
+			ipa_mpm_ctx->md[probe_id].mhi_dev,
+				MHI_VOTE_BUS | MHI_VOTE_DEVICE);
 		if (result) {
 			IPA_MPM_ERR("mhi_sync_get failed for probe_id %d\n",
 				result, probe_id);
@@ -1475,7 +1480,8 @@ static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
 			*is_acted = true;
 			return 0;
 		}
-		mhi_device_put(ipa_mpm_ctx->md[probe_id].mhi_dev, MHI_VOTE_BUS);
+		mhi_device_put(ipa_mpm_ctx->md[probe_id].mhi_dev,
+				MHI_VOTE_BUS | MHI_VOTE_DEVICE);
 		IPA_MPM_DBG("probe_id %d PCIE clock off\n", probe_id);
 		atomic_dec(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt);
 		atomic_dec(&ipa_mpm_ctx->pcie_clk_total_cnt);
@@ -1759,6 +1765,8 @@ int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
 	int ret = 0;
 	enum ipa_mpm_mhip_client_type mhip_client = IPA_MPM_MHIP_TETH;
 	bool is_acted = true;
+	const struct ipa_gsi_ep_config *ep_cfg;
+	uint32_t flow_ctrl_mask = 0;
 
 	if (!state)
 		return -EPERM;
@@ -1816,6 +1824,35 @@ int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
 		}
 		IPA_MPM_DBG("MHIP remote channels are started\n");
 
+		/*
+		 * Update flow control monitoring endpoint info.
+		 * This info will be used to set delay on the endpoints upon
+		 * hitting the RED watermark.
+		 */
+		ep_cfg = ipa3_get_gsi_ep_info(IPA_CLIENT_WLAN2_PROD);
+
+		if (!ep_cfg)
+			IPA_MPM_ERR("ep = %d not allocated yet\n",
+					IPA_CLIENT_WLAN2_PROD);
+		else
+			flow_ctrl_mask |= 1 << (ep_cfg->ipa_gsi_chan_num);
+
+		ep_cfg = ipa3_get_gsi_ep_info(IPA_CLIENT_USB_PROD);
+
+		if (!ep_cfg)
+			IPA_MPM_ERR("ep = %d not allocated yet\n",
+					IPA_CLIENT_USB_PROD);
+		else
+			flow_ctrl_mask |= 1 << (ep_cfg->ipa_gsi_chan_num);
+
+		atomic_set(&ipa_mpm_ctx->flow_ctrl_mask, flow_ctrl_mask);
+
+		ret = ipa3_uc_send_update_flow_control(flow_ctrl_mask,
+						IPA_MPM_FLOW_CTRL_ADD);
+
+		if (ret)
+			IPA_MPM_ERR("Err = %d setting uc flow control\n", ret);
+
 		status = ipa_mpm_start_stop_mhip_chan(
 				IPA_MPM_MHIP_CHAN_UL, probe_id, MPM_MHIP_START);
 		switch (status) {
@@ -1854,6 +1891,23 @@ int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
 		ipa_mpm_ctx->md[probe_id].mhip_client = mhip_client;
 	} else {
 		/*
+		 * Update flow control monitoring endpoint info.
+		 * This info will be used to reset delay on the endpoints.
+		 */
+		flow_ctrl_mask =
+			atomic_read(&ipa_mpm_ctx->flow_ctrl_mask);
+
+		ret = ipa3_uc_send_update_flow_control(flow_ctrl_mask,
+						IPA_MPM_FLOW_CTRL_DELETE);
+		flow_ctrl_mask = 0;
+		atomic_set(&ipa_mpm_ctx->flow_ctrl_mask, 0);
+
+		if (ret) {
+			IPA_MPM_ERR("Err = %d resetting uc flow control\n",
+					ret);
+			ipa_assert();
+		}
+		/*
 		 * Make sure to stop Device side channels before
 		 * stopping Host side UL channels. This is to make
 		 * sure device side doesn't access host IPA after
@@ -2010,6 +2064,8 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 	u32 wp_addr;
 	int pipe_idx;
 	bool is_acted = true;
+	uint64_t flow_ctrl_mask = 0;
+	bool add_delete = false;
 
 	IPA_MPM_FUNC_ENTRY();
 
@@ -2388,16 +2444,44 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 	}
 
 	atomic_inc(&ipa_mpm_ctx->probe_cnt);
-	/* Check if ODL pipe is connected to MHIP DPL pipe before probe */
-	if (probe_id == IPA_MPM_MHIP_CH_ID_2 &&
-		ipa3_is_odl_connected()) {
-		IPA_MPM_DBG("setting DPL DMA to ODL\n");
-		ret = ipa_mpm_set_dma_mode(IPA_CLIENT_MHI_PRIME_DPL_PROD,
-			IPA_CLIENT_USB_DPL_CONS, false);
+	/* Check if ODL/USB DPL pipe is connected before probe */
+	if (probe_id == IPA_MPM_MHIP_CH_ID_2) {
+		if (ipa3_is_odl_connected())
+			ret = ipa_mpm_set_dma_mode(
+				IPA_CLIENT_MHI_PRIME_DPL_PROD,
+				IPA_CLIENT_ODL_DPL_CONS, false);
+		else if (atomic_read(&ipa_mpm_ctx->adpl_over_usb_available))
+			ret = ipa_mpm_set_dma_mode(
+				IPA_CLIENT_MHI_PRIME_DPL_PROD,
+				IPA_CLIENT_USB_DPL_CONS, false);
+		if (ret)
+			IPA_MPM_ERR("DPL DMA to ODL/USB failed, ret = %d\n",
+				ret);
 	}
 	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 	ipa_mpm_ctx->md[probe_id].init_complete = true;
 	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+	/* Update Flow control Monitoring, only for the teth UL Prod pipes */
+	if (probe_id == IPA_MPM_MHIP_CH_ID_0) {
+		ipa_ep_idx = ipa3_get_ep_mapping(ul_prod);
+		ep = &ipa3_ctx->ep[ipa_ep_idx];
+		ret = ipa3_uc_send_enable_flow_control(ep->gsi_chan_hdl,
+			IPA_MPM_RING_LEN / 4);
+		if (ret) {
+			IPA_MPM_ERR("Err %d flow control enable\n", ret);
+			goto fail_flow_control;
+		}
+		IPA_MPM_DBG("Flow Control enabled for %d", probe_id);
+		flow_ctrl_mask = atomic_read(&ipa_mpm_ctx->flow_ctrl_mask);
+		add_delete = flow_ctrl_mask > 0;
+		ret = ipa3_uc_send_update_flow_control(flow_ctrl_mask,
+							add_delete);
+		if (ret) {
+			IPA_MPM_ERR("Err %d flow control update\n", ret);
+			goto fail_flow_control;
+		}
+		IPA_MPM_DBG("Flow Control updated for %d\n", probe_id);
+	}
 	IPA_MPM_FUNC_EXIT();
 	return 0;
 
@@ -2405,6 +2489,7 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 fail_start_channel:
 fail_stop_channel:
 fail_smmu:
+fail_flow_control:
 	if (ipa_mpm_ctx->dev_info.ipa_smmu_enabled)
 		IPA_MPM_DBG("SMMU failed\n");
 	if (is_acted)
@@ -2474,6 +2559,9 @@ static void ipa_mpm_mhi_remove_cb(struct mhi_device *mhi_dev)
 	ipa_mpm_ctx->md[mhip_idx].init_complete = false;
 	mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
 
+	if (mhip_idx == IPA_MPM_MHIP_CH_ID_0)
+		ipa3_uc_send_disable_flow_control();
+
 	ipa_mpm_mhip_shutdown(mhip_idx);
 
 	atomic_dec(&ipa_mpm_ctx->probe_cnt);
@@ -2768,19 +2856,25 @@ int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
 	 * sure device side doesn't access host side IPA if
 	 * Host IPA gets unvoted.
 	 */
-	ret = ipa_mpm_start_stop_remote_mhip_chan(probe_id,
-						MPM_MHIP_STOP, false);
-	if (ret) {
-		/*
-		 * This can fail only when modem is in SSR state.
-		 * Eventually there would be a remove callback,
-		 * so return a failure.
-		 */
-		IPA_MPM_ERR("MHIP remote chan stop fail = %d\n", ret);
-		return ret;
+
+	/* Stop the remote MHIP DPL channel if ODL is not enabled */
+	if ((!atomic_read(&ipa_mpm_ctx->adpl_over_odl_available))
+			|| (probe_id != IPA_MPM_MHIP_CH_ID_2)) {
+		ret = ipa_mpm_start_stop_remote_mhip_chan(probe_id,
+							MPM_MHIP_STOP, false);
+		if (ret) {
+			/*
+			 * This can fail only when modem is in SSR state.
+			 * Eventually there would be a remove callback,
+			 * so return a failure.
+			 */
+			IPA_MPM_ERR("MHIP remote chan stop fail = %d\n", ret);
+			return ret;
+		}
+		IPA_MPM_DBG("MHIP remote channels are stopped(id=%d)\n",
+			probe_id);
 	}
 
-	IPA_MPM_DBG("MHIP remote channels are stopped\n");
 
 	switch (mhip_client) {
 	case IPA_MPM_MHIP_USB_RMNET:
@@ -2975,6 +3069,7 @@ static int ipa_mpm_probe(struct platform_device *pdev)
 
 	atomic_set(&ipa_mpm_ctx->ipa_clk_total_cnt, 0);
 	atomic_set(&ipa_mpm_ctx->pcie_clk_total_cnt, 0);
+	atomic_set(&ipa_mpm_ctx->flow_ctrl_mask, 0);
 
 	for (idx = 0; idx < IPA_MPM_MHIP_CH_ID_MAX; idx++) {
 		ipa_mpm_ctx->md[idx].ul_prod.gsi_state = GSI_INIT;
@@ -3161,9 +3256,45 @@ int ipa3_mpm_enable_adpl_over_odl(bool enable)
 			return ret;
 		}
 
+		/* start remote mhip-dpl ch */
+		ret = ipa_mpm_start_stop_remote_mhip_chan(IPA_MPM_MHIP_CH_ID_2,
+					MPM_MHIP_START, false);
+		if (ret) {
+			/*
+			 * This can fail only when modem is in SSR state.
+			 * Eventually there would be a remove callback,
+			 * so return a failure. Don't have to unvote PCIe here.
+			 */
+			IPA_MPM_ERR("MHIP remote chan start fail = %d\n",
+					ret);
+			return ret;
+		}
+		IPA_MPM_DBG("MHIP remote channels are started(id=%d)\n",
+			IPA_MPM_MHIP_CH_ID_2);
+		atomic_set(&ipa_mpm_ctx->adpl_over_odl_available, 1);
+
 		ipa_mpm_change_teth_state(IPA_MPM_MHIP_CH_ID_2,
 			IPA_MPM_TETH_CONNECTED);
 	} else {
+		/* Stop remote MHIP DPL channel if ADPL over USB is disabled */
+		if (!atomic_read(&ipa_mpm_ctx->adpl_over_usb_available)) {
+			ret = ipa_mpm_start_stop_remote_mhip_chan(
+				IPA_MPM_MHIP_CH_ID_2, MPM_MHIP_STOP, false);
+			if (ret) {
+				/*
+				 * This can fail only when modem is in SSR state.
+				 * Eventually there would be a remove callback,
+				 * so return a failure.
+				 */
+				IPA_MPM_ERR("MHIP remote chan stop fail = %d\n",
+					ret);
+				return ret;
+			}
+			IPA_MPM_DBG("MHIP remote channels are stopped(id=%d)\n",
+				IPA_MPM_MHIP_CH_ID_2);
+		}
+		atomic_set(&ipa_mpm_ctx->adpl_over_odl_available, 0);
+
 		/* dec clk count and set DMA to USB */
 		IPA_MPM_DBG("mpm disabling ADPL over ODL\n");
 		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 4b1ca88..6f2bfba 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -1645,63 +1645,61 @@ static struct qmi_msg_handler server_handlers[] = {
 		.type = QMI_REQUEST,
 		.msg_id = QMI_IPA_INDICATION_REGISTER_REQ_V01,
 		.ei = ipa3_indication_reg_req_msg_data_v01_ei,
-		.decoded_size =
-			QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01,
+		.decoded_size = sizeof(struct ipa_indication_reg_req_msg_v01),
 		.fn = ipa3_handle_indication_req,
 	},
 	{
 		.type = QMI_REQUEST,
 		.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01,
 		.ei = ipa3_install_fltr_rule_req_msg_data_v01_ei,
-		.decoded_size =
-			QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01,
+		.decoded_size = sizeof(
+			struct ipa_install_fltr_rule_req_msg_v01),
 		.fn = ipa3_handle_install_filter_rule_req,
 	},
 	{
 		.type = QMI_REQUEST,
 		.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01,
 		.ei = ipa3_fltr_installed_notif_req_msg_data_v01_ei,
-		.decoded_size =
-			QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01,
+		.decoded_size = sizeof(
+			struct ipa_fltr_installed_notif_req_msg_v01),
 		.fn = ipa3_handle_filter_installed_notify_req,
 	},
 	{
 		.type = QMI_REQUEST,
 		.msg_id = QMI_IPA_CONFIG_REQ_V01,
 		.ei = ipa3_config_req_msg_data_v01_ei,
-		.decoded_size = QMI_IPA_CONFIG_REQ_MAX_MSG_LEN_V01,
+		.decoded_size = sizeof(struct ipa_config_req_msg_v01),
 		.fn = handle_ipa_config_req,
 	},
 	{
 		.type = QMI_REQUEST,
 		.msg_id = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01,
 		.ei = ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei,
-		.decoded_size =
-			QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_MAX_MSG_LEN_V01,
+		.decoded_size = sizeof(
+			struct ipa_init_modem_driver_cmplt_req_msg_v01),
 		.fn = ipa3_handle_modem_init_cmplt_req,
 	},
 	{
 		.type = QMI_REQUEST,
 		.msg_id = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01,
 		.ei = ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei,
-		.decoded_size =
-			QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_MAX_MSG_LEN_V01,
+		.decoded_size = sizeof(
+			struct ipa_init_modem_driver_cmplt_req_msg_v01),
 		.fn = ipa3_handle_modem_init_cmplt_req,
 	},
 	{
 		.type = QMI_REQUEST,
 		.msg_id = QMI_IPA_MHI_ALLOC_CHANNEL_REQ_V01,
 		.ei = ipa_mhi_alloc_channel_req_msg_v01_ei,
-		.decoded_size =
-			IPA_MHI_ALLOC_CHANNEL_REQ_MSG_V01_MAX_MSG_LEN,
+		.decoded_size = sizeof(
+			struct ipa_mhi_alloc_channel_req_msg_v01),
 		.fn = ipa3_handle_mhi_alloc_channel_req,
 	},
 	{
 		.type = QMI_REQUEST,
 		.msg_id = QMI_IPA_MHI_CLK_VOTE_REQ_V01,
 		.ei = ipa_mhi_clk_vote_req_msg_v01_ei,
-		.decoded_size =
-			IPA_MHI_CLK_VOTE_REQ_MSG_V01_MAX_MSG_LEN,
+		.decoded_size = sizeof(struct ipa_mhi_clk_vote_req_msg_v01),
 		.fn = ipa3_handle_mhi_vote_req,
 	},
 
@@ -1718,24 +1716,23 @@ static struct qmi_msg_handler client_handlers[] = {
 		.type = QMI_INDICATION,
 		.msg_id = QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01,
 		.ei = ipa3_data_usage_quota_reached_ind_msg_data_v01_ei,
-		.decoded_size =
-			QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01,
+		.decoded_size = sizeof(
+			struct ipa_data_usage_quota_reached_ind_msg_v01),
 		.fn = ipa3_q6_clnt_quota_reached_ind_cb,
 	},
 	{
 		.type = QMI_INDICATION,
 		.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01,
 		.ei = ipa3_install_fltr_rule_req_msg_data_v01_ei,
-		.decoded_size =
-			QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_MAX_MSG_LEN_V01,
+		.decoded_size = sizeof(
+			struct ipa_configure_ul_firewall_rules_ind_msg_v01),
 		.fn = ipa3_q6_clnt_install_firewall_rules_ind_cb,
 	},
 	{
 		.type = QMI_INDICATION,
 		.msg_id = QMI_IPA_BW_CHANGE_INDICATION_V01,
 		.ei = ipa_bw_change_ind_msg_v01_ei,
-		.decoded_size =
-			IPA_BW_CHANGE_IND_MSG_V01_MAX_MSG_LEN,
+		.decoded_size = sizeof(struct ipa_bw_change_ind_msg_v01),
 		.fn = ipa3_q6_clnt_bw_vhang_ind_cb,
 	},
 };
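
The handler tables above now size the decode buffer by the decoded C structure instead of the maximum encoded message length; the two generally differ, since QMI TLV encoding is usually more compact than the unpacked struct. A small sketch of the distinction (the struct and wire length are illustrative, not a real QMI message):

#include <stdio.h>

/* Illustrative decoded message; not a real QMI struct. */
struct example_req {
	unsigned char opt_valid;	/* optional-field presence flag */
	unsigned int opt;		/* decoded optional field */
	char name[64];
};

int main(void)
{
	int max_wire_len = 11;	/* hypothetical encoded TLV maximum */

	/* decoded_size must cover the unpacked struct, not the wire max */
	printf("decoded=%zu wire_max=%d\n",
	       sizeof(struct example_req), max_wire_len);
	return 0;
}
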
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
index d110961..1edc56b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -45,6 +45,10 @@
  * IPA_CPU_2_HW_CMD_GSI_CH_EMPTY : Command to check for GSI channel emptiness.
  * IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO: Command to store remote IPA Info
  * IPA_CPU_2_HW_CMD_SETUP_EVENT_RING:  Command to setup the event ring
+ * IPA_CPU_2_HW_CMD_ENABLE_FLOW_CTL_MONITOR: Command to enable pipe monitoring.
+ * IPA_CPU_2_HW_CMD_UPDATE_FLOW_CTL_MONITOR: Command to update pipes to monitor.
+ * IPA_CPU_2_HW_CMD_DISABLE_FLOW_CTL_MONITOR: Command to disable pipe
+ *	monitoring, no parameter required.
  */
 enum ipa3_cpu_2_hw_commands {
 	IPA_CPU_2_HW_CMD_NO_OP                     =
@@ -73,6 +77,13 @@ enum ipa3_cpu_2_hw_commands {
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 11),
 	IPA_CPU_2_HW_CMD_SETUP_EVENT_RING          =
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 12),
+	IPA_CPU_2_HW_CMD_ENABLE_FLOW_CTL_MONITOR   =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 13),
+	IPA_CPU_2_HW_CMD_UPDATE_FLOW_CTL_MONITOR   =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 14),
+	IPA_CPU_2_HW_CMD_DISABLE_FLOW_CTL_MONITOR  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 15),
 };
 
 /**
@@ -198,6 +209,36 @@ struct IpaHwDbAddrInfo_t {
 	uint32_t mboxN;
 } __packed;
 
+
+/**
+ * Structure holding the parameters for the
+ * IPA_CPU_2_HW_CMD_ENABLE_FLOW_CTL_MONITOR command.
+ * @ipaProdGsiChid:      IPA producer GSI channel id to monitor
+ * @redMarkerThreshold:  RED marker threshold in elements for the GSI channel
+ */
+union IpaEnablePipeMonitorCmdData_t {
+	struct IpaEnablePipeMonitorCmdParams_t {
+		u32 ipaProdGsiChid:16;
+		u32 redMarkerThreshold:16;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * Structure holding the parameters for the
+ * IPA_CPU_2_HW_CMD_UPDATE_FLOW_CTL_MONITOR command.
+ *
+ * @bitmask:     Bitmask of channels/pipes to add to or delete from the
+ *               global monitoring pipe mask
+ *               (IPA pipe# bitmask or GSI chid bitmask)
+ * @add_delete:  1: add pipes to monitoring
+ *               0: delete pipes from monitoring
+ */
+struct IpaUpdateFlowCtlMonitorData_t {
+	u32 bitmask;
+	u8 add_delete;
+};
+
 static DEFINE_MUTEX(uc_loaded_nb_lock);
 static BLOCKING_NOTIFIER_HEAD(uc_loaded_notifier);
 
@@ -321,6 +362,7 @@ static void ipa3_uc_save_dbg_stats(u32 size)
 		break;
 	default:
 		IPAERR("unknown protocols %d\n", prot_id);
+		goto unmap;
 	}
 	return;
 unmap:
@@ -1513,3 +1555,65 @@ int ipa3_set_wlan_tx_info(struct ipa_wdi_tx_info *info)
 
 	return 0;
 }
+
+int ipa3_uc_send_enable_flow_control(uint16_t gsi_chid,
+		uint16_t redMarkerThreshold)
+{
+	int res;
+	union IpaEnablePipeMonitorCmdData_t cmd;
+
+	cmd.params.ipaProdGsiChid = gsi_chid;
+	cmd.params.redMarkerThreshold = redMarkerThreshold;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	res = ipa3_uc_send_cmd(cmd.raw32b,
+		IPA_CPU_2_HW_CMD_ENABLE_FLOW_CTL_MONITOR, 0,
+		false, 10 * HZ);
+
+	if (res)
+		IPAERR("fail to enable flow ctrl for 0x%x\n",
+			cmd.params.ipaProdGsiChid);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_send_disable_flow_control(void)
+{
+	int res;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	res = ipa3_uc_send_cmd(0,
+		IPA_CPU_2_HW_CMD_DISABLE_FLOW_CTL_MONITOR, 0,
+		false, 10 * HZ);
+
+	if (res)
+		IPAERR("fail to disable flow control\n");
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_send_update_flow_control(uint32_t bitmask,
+		uint8_t add_delete)
+{
+	int res;
+
+	if (bitmask == 0) {
+		IPAERR("Err update flow control, mask = 0\n");
+		return 0;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	res = ipa3_uc_send_cmd_64b_param(bitmask, add_delete,
+		IPA_CPU_2_HW_CMD_UPDATE_FLOW_CTL_MONITOR, 0,
+		false, 10 * HZ);
+
+	if (res)
+		IPAERR("fail flowCtrl update mask = 0x%x add_del = 0x%x\n",
+			bitmask, add_delete);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
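
The add_delete flag selects whether the bitmask is ORed into or cleared from the uC's global monitoring mask. A userspace model of the assumed uC-side semantics (the uC behavior is inferred from the flag names used by the callers, not documented here):

#include <stdbool.h>
#include <stdio.h>

#define FLOW_CTRL_ADD		1	/* mirrors IPA_MPM_FLOW_CTRL_ADD */
#define FLOW_CTRL_DELETE	0	/* mirrors IPA_MPM_FLOW_CTRL_DELETE */

static unsigned int monitored;	/* stands in for the uC's global pipe mask */

static void update_flow_control(unsigned int bitmask, bool add_delete)
{
	if (add_delete == FLOW_CTRL_ADD)
		monitored |= bitmask;	/* start monitoring these pipes */
	else
		monitored &= ~bitmask;	/* stop monitoring these pipes */
}

int main(void)
{
	update_flow_control(0x28, FLOW_CTRL_ADD);	/* WAN up */
	printf("monitored = 0x%x\n", monitored);	/* 0x28 */
	update_flow_control(0x28, FLOW_CTRL_DELETE);	/* WAN down */
	printf("monitored = 0x%x\n", monitored);	/* 0x0 */
	return 0;
}
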
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index c9f84cf..d5918c4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -3202,21 +3202,21 @@ static struct ipa3_mem_partition ipa_4_7_mem_part = {
 	.apps_hdr_size			= 0x200,
 	.apps_hdr_size_ddr		= 0x800,
 	.modem_hdr_proc_ctx_ofst	= 0xad0,
-	.modem_hdr_proc_ctx_size	= 0x200,
-	.apps_hdr_proc_ctx_ofst		= 0xcd0,
+	.modem_hdr_proc_ctx_size	= 0xAC0,
+	.apps_hdr_proc_ctx_ofst		= 0x1590,
 	.apps_hdr_proc_ctx_size		= 0x200,
 	.apps_hdr_proc_ctx_size_ddr	= 0x0,
-	.nat_tbl_ofst			= 0xee0,
+	.nat_tbl_ofst			= 0x17A0,
 	.nat_tbl_size			= 0x800,
-	.nat_index_tbl_ofst		= 0x16e0,
+	.nat_index_tbl_ofst		= 0x1FA0,
 	.nat_index_tbl_size		= 0x100,
-	.nat_exp_tbl_ofst		= 0x17e0,
+	.nat_exp_tbl_ofst		= 0x20A0,
 	.nat_exp_tbl_size		= 0x400,
-	.pdn_config_ofst		= 0x1be8,
+	.pdn_config_ofst		= 0x24A8,
 	.pdn_config_size		= 0x50,
-	.stats_quota_ofst		= 0x1c40,
+	.stats_quota_ofst		= 0x2500,
 	.stats_quota_size		= 0x78,
-	.stats_tethering_ofst		= 0x1cb8,
+	.stats_tethering_ofst		= 0x2578,
 	.stats_tethering_size		= 0x238,
 	.stats_flt_v4_ofst		= 0,
 	.stats_flt_v4_size		= 0,
@@ -3226,29 +3226,29 @@ static struct ipa3_mem_partition ipa_4_7_mem_part = {
 	.stats_rt_v4_size		= 0,
 	.stats_rt_v6_ofst		= 0,
 	.stats_rt_v6_size		= 0,
-	.stats_fnr_ofst			= 0x1ef0,
+	.stats_fnr_ofst			= 0x27B0,
 	.stats_fnr_size			= 0x0,
-	.stats_drop_ofst		= 0x1ef0,
+	.stats_drop_ofst		= 0x27B0,
 	.stats_drop_size		= 0x20,
 	.modem_comp_decomp_ofst		= 0x0,
 	.modem_comp_decomp_size		= 0x0,
-	.modem_ofst			= 0x1f18,
-	.modem_size			= 0x100c,
-	.apps_v4_flt_hash_ofst	= 0x1f18,
+	.modem_ofst			= 0x27D8,
+	.modem_size			= 0x800,
+	.apps_v4_flt_hash_ofst	= 0x27B0,
 	.apps_v4_flt_hash_size	= 0x0,
-	.apps_v4_flt_nhash_ofst	= 0x1f18,
+	.apps_v4_flt_nhash_ofst	= 0x27B0,
 	.apps_v4_flt_nhash_size	= 0x0,
-	.apps_v6_flt_hash_ofst	= 0x1f18,
+	.apps_v6_flt_hash_ofst	= 0x27B0,
 	.apps_v6_flt_hash_size	= 0x0,
-	.apps_v6_flt_nhash_ofst	= 0x1f18,
+	.apps_v6_flt_nhash_ofst	= 0x27B0,
 	.apps_v6_flt_nhash_size	= 0x0,
-	.apps_v4_rt_hash_ofst	= 0x1f18,
+	.apps_v4_rt_hash_ofst	= 0x27B0,
 	.apps_v4_rt_hash_size	= 0x0,
-	.apps_v4_rt_nhash_ofst	= 0x1f18,
+	.apps_v4_rt_nhash_ofst	= 0x27B0,
 	.apps_v4_rt_nhash_size	= 0x0,
-	.apps_v6_rt_hash_ofst	= 0x1f18,
+	.apps_v6_rt_hash_ofst	= 0x27B0,
 	.apps_v6_rt_hash_size	= 0x0,
-	.apps_v6_rt_nhash_ofst	= 0x1f18,
+	.apps_v6_rt_nhash_ofst	= 0x27B0,
 	.apps_v6_rt_nhash_size	= 0x0,
 	.uc_descriptor_ram_ofst	= 0x3000,
 	.uc_descriptor_ram_size	= 0x0000,
@@ -7782,9 +7782,6 @@ static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
 
 void ipa3_force_close_coal(void)
 {
-	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
-	struct ipahal_imm_cmd_register_write reg_write_cmd = { 0 };
-	struct ipahal_reg_valmask valmask;
 	struct ipa3_desc desc;
 	int ep_idx;
 
@@ -7792,28 +7789,11 @@ void ipa3_force_close_coal(void)
 	if (ep_idx == IPA_EP_NOT_ALLOCATED || (!ipa3_ctx->ep[ep_idx].valid))
 		return;
 
-	reg_write_cmd.skip_pipeline_clear = false;
-	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
-	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE);
-	ipahal_get_aggr_force_close_valmask(ep_idx, &valmask);
-	reg_write_cmd.value = valmask.val;
-	reg_write_cmd.value_mask = valmask.mask;
-	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
-		&reg_write_cmd, false);
-	if (!cmd_pyld) {
-		IPAERR("fail construct register_write imm cmd\n");
-		ipa_assert();
-		return;
-	}
-	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
+	ipa3_init_imm_cmd_desc(&desc, ipa3_ctx->coal_cmd_pyld);
 
 	IPADBG("Sending 1 descriptor for coal force close\n");
-	if (ipa3_send_cmd_timeout(1, &desc,
-		IPA_COAL_CLOSE_FRAME_CMD_TIMEOUT_MSEC)) {
-		IPAERR("ipa3_send_cmd failed\n");
-		ipa_assert();
-	}
-	ipahal_destroy_imm_cmd(cmd_pyld);
+	if (ipa3_send_cmd(1, &desc))
+		IPADBG("ipa3_send_cmd failed\n");
 }
 
 int ipa3_suspend_apps_pipes(bool suspend)
@@ -7942,6 +7922,39 @@ void ipa3_free_dma_task_for_gsi(void)
 	memset(&ipa3_ctx->dma_task_info, 0, sizeof(ipa3_ctx->dma_task_info));
 }
 
+int ipa3_allocate_coal_close_frame(void)
+{
+	struct ipahal_imm_cmd_register_write reg_write_cmd = { 0 };
+	struct ipahal_reg_valmask valmask;
+	int ep_idx;
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	if (ep_idx == IPA_EP_NOT_ALLOCATED)
+		return 0;
+	IPADBG("Allocate coal close frame cmd\n");
+	reg_write_cmd.skip_pipeline_clear = false;
+	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE);
+	ipahal_get_aggr_force_close_valmask(ep_idx, &valmask);
+	reg_write_cmd.value = valmask.val;
+	reg_write_cmd.value_mask = valmask.mask;
+	ipa3_ctx->coal_cmd_pyld =
+		ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_cmd, false);
+	if (!ipa3_ctx->coal_cmd_pyld) {
+		IPAERR("fail construct register_write imm cmd\n");
+		ipa_assert();
+		return 0;
+	}
+
+	return 0;
+}
+
+void ipa3_free_coal_close_frame(void)
+{
+	if (ipa3_ctx->coal_cmd_pyld)
+		ipahal_destroy_imm_cmd(ipa3_ctx->coal_cmd_pyld);
+}
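
The pair above moves the immediate-command construction out of ipa3_force_close_coal(): the register-write payload is built once at init and reused on every force close, so the hot path no longer allocates or frees. A generic sketch of the pattern (types and names illustrative, not the driver's):

#include <stdio.h>
#include <stdlib.h>

struct cmd_pyld {
	int len;	/* opaque payload stand-in */
};

static struct cmd_pyld *cached_cmd;

static int alloc_close_frame(void)
{
	cached_cmd = malloc(sizeof(*cached_cmd));	/* once, at init */
	if (!cached_cmd)
		return -1;
	cached_cmd->len = 32;	/* placeholder payload length */
	return 0;
}

static void force_close(void)
{
	if (cached_cmd)	/* hot path: no allocation, just reuse */
		printf("send cached cmd, len=%d\n", cached_cmd->len);
}

static void free_close_frame(void)
{
	free(cached_cmd);	/* once, at teardown */
	cached_cmd = NULL;
}

int main(void)
{
	if (alloc_close_frame())
		return 1;
	force_close();
	force_close();	/* second call reuses the same payload */
	free_close_frame();
	return 0;
}
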
 /**
  * ipa3_inject_dma_task_for_gsi()- Send DMA_TASK to IPA for GSI stop channel
  *
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index 985eae8..0bef801 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -861,7 +861,8 @@ struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
 
 #define IPA_PKT_STATUS_SET_MSK(__hw_bit_msk, __shft) \
 	(status->status_mask |= \
-		((hw_status->status_mask & (__hw_bit_msk) ? 1 : 0) << (__shft)))
+		((hw_status->ipa_pkt.status_mask & (__hw_bit_msk) ? 1 : 0) \
+					<< (__shft)))
 
 static enum ipahal_pkt_status_exception pkt_status_parse_exception(
 	bool is_ipv6, u64 exception)
@@ -905,47 +906,81 @@ static enum ipahal_pkt_status_exception pkt_status_parse_exception(
 	return exception_type;
 }
 
+static void __ipa_parse_gen_pkt(struct ipahal_pkt_status *status,
+				const void *unparsed_status)
+{
+	bool is_ipv6;
+	union ipa_pkt_status_hw *hw_status =
+		(union ipa_pkt_status_hw *)unparsed_status;
+
+	is_ipv6 = (hw_status->ipa_pkt.status_mask & 0x80) ? false : true;
+	status->pkt_len = hw_status->ipa_pkt.pkt_len;
+	status->endp_src_idx = hw_status->ipa_pkt.endp_src_idx;
+	status->endp_dest_idx = hw_status->ipa_pkt.endp_dest_idx;
+	status->metadata = hw_status->ipa_pkt.metadata;
+	status->flt_local = hw_status->ipa_pkt.flt_local;
+	status->flt_hash = hw_status->ipa_pkt.flt_hash;
+	status->flt_global = hw_status->ipa_pkt.flt_hash;
+	status->flt_ret_hdr = hw_status->ipa_pkt.flt_ret_hdr;
+	status->flt_miss = (hw_status->ipa_pkt.rt_rule_id ==
+			IPAHAL_PKT_STATUS_FLTRT_RULE_MISS_ID);
+	status->flt_rule_id = hw_status->ipa_pkt.flt_rule_id;
+	status->rt_local = hw_status->ipa_pkt.rt_local;
+	status->rt_hash = hw_status->ipa_pkt.rt_hash;
+	status->ucp = hw_status->ipa_pkt.ucp;
+	status->rt_tbl_idx = hw_status->ipa_pkt.rt_tbl_idx;
+	status->rt_miss = (hw_status->ipa_pkt.rt_rule_id ==
+			IPAHAL_PKT_STATUS_FLTRT_RULE_MISS_ID);
+	status->rt_rule_id = hw_status->ipa_pkt.rt_rule_id;
+	status->nat_hit = hw_status->ipa_pkt.nat_hit;
+	status->nat_entry_idx = hw_status->ipa_pkt.nat_entry_idx;
+	status->tag_info = hw_status->ipa_pkt.tag_info;
+	status->seq_num = hw_status->ipa_pkt.seq_num;
+	status->time_of_day_ctr = hw_status->ipa_pkt.time_of_day_ctr;
+	status->hdr_local = hw_status->ipa_pkt.hdr_local;
+	status->hdr_offset = hw_status->ipa_pkt.hdr_offset;
+	status->frag_hit = hw_status->ipa_pkt.frag_hit;
+	status->frag_rule = hw_status->ipa_pkt.frag_rule;
+	status->nat_type = hw_status->ipa_pkt.nat_type;
+
+	status->exception = pkt_status_parse_exception(is_ipv6,
+			hw_status->ipa_pkt.exception);
+}
+
+static void __ipa_parse_frag_pkt(struct ipahal_pkt_status *status,
+				const void *unparsed_status)
+{
+	union ipa_pkt_status_hw *hw_status =
+		(union ipa_pkt_status_hw *)unparsed_status;
+
+	status->frag_rule_idx = hw_status->frag_pkt.frag_rule_idx;
+	status->tbl_idx = hw_status->frag_pkt.tbl_idx;
+	status->src_ip_addr = hw_status->frag_pkt.src_ip_addr;
+	status->dest_ip_addr = hw_status->frag_pkt.dest_ip_addr;
+	status->protocol = hw_status->frag_pkt.protocol;
+	status->ip_id = hw_status->frag_pkt.ip_id;
+	status->tlated_ip_addr = hw_status->frag_pkt.tlated_ip_addr;
+	status->ip_cksum_diff = hw_status->frag_pkt.ip_cksum_diff;
+	status->endp_src_idx = hw_status->frag_pkt.endp_src_idx;
+	status->endp_dest_idx = hw_status->frag_pkt.endp_dest_idx;
+	status->metadata = hw_status->frag_pkt.metadata;
+	status->seq_num = hw_status->frag_pkt.seq_num;
+	status->hdr_local = hw_status->frag_pkt.hdr_local;
+	status->hdr_offset = hw_status->frag_pkt.hdr_offset;
+	status->exception = hw_status->frag_pkt.exception;
+	status->nat_type = hw_status->frag_pkt.nat_type;
+}
 
 static void ipa_pkt_status_parse(
 	const void *unparsed_status, struct ipahal_pkt_status *status)
 {
 	enum ipahal_pkt_status_opcode opcode = 0;
-	bool is_ipv6;
 
-	struct ipa_pkt_status_hw *hw_status =
-		(struct ipa_pkt_status_hw *)unparsed_status;
+	union ipa_pkt_status_hw *hw_status =
+		(union ipa_pkt_status_hw *)unparsed_status;
 
-	is_ipv6 = (hw_status->status_mask & 0x80) ? false : true;
 
-	status->pkt_len = hw_status->pkt_len;
-	status->endp_src_idx = hw_status->endp_src_idx;
-	status->endp_dest_idx = hw_status->endp_dest_idx;
-	status->metadata = hw_status->metadata;
-	status->flt_local = hw_status->flt_local;
-	status->flt_hash = hw_status->flt_hash;
-	status->flt_global = hw_status->flt_hash;
-	status->flt_ret_hdr = hw_status->flt_ret_hdr;
-	status->flt_miss = (hw_status->rt_rule_id ==
-		IPAHAL_PKT_STATUS_FLTRT_RULE_MISS_ID);
-	status->flt_rule_id = hw_status->flt_rule_id;
-	status->rt_local = hw_status->rt_local;
-	status->rt_hash = hw_status->rt_hash;
-	status->ucp = hw_status->ucp;
-	status->rt_tbl_idx = hw_status->rt_tbl_idx;
-	status->rt_miss = (hw_status->rt_rule_id ==
-		IPAHAL_PKT_STATUS_FLTRT_RULE_MISS_ID);
-	status->rt_rule_id = hw_status->rt_rule_id;
-	status->nat_hit = hw_status->nat_hit;
-	status->nat_entry_idx = hw_status->nat_entry_idx;
-	status->tag_info = hw_status->tag_info;
-	status->seq_num = hw_status->seq_num;
-	status->time_of_day_ctr = hw_status->time_of_day_ctr;
-	status->hdr_local = hw_status->hdr_local;
-	status->hdr_offset = hw_status->hdr_offset;
-	status->frag_hit = hw_status->frag_hit;
-	status->frag_rule = hw_status->frag_rule;
-
-	switch (hw_status->status_opcode) {
+	switch (hw_status->ipa_pkt.status_opcode) {
 	case 0x1:
 		opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET;
 		break;
@@ -969,11 +1004,17 @@ static void ipa_pkt_status_parse(
 		break;
 	default:
 		IPAHAL_ERR_RL("unsupported Status Opcode 0x%x\n",
-			hw_status->status_opcode);
+			hw_status->ipa_pkt.status_opcode);
 	}
+
 	status->status_opcode = opcode;
 
-	switch (hw_status->nat_type) {
+	if (status->status_opcode == IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE)
+		__ipa_parse_frag_pkt(status, unparsed_status);
+	else
+		__ipa_parse_gen_pkt(status, unparsed_status);
+
+	switch (status->nat_type) {
 	case 0:
 		status->nat_type = IPAHAL_PKT_STATUS_NAT_NONE;
 		break;
@@ -985,10 +1026,8 @@ static void ipa_pkt_status_parse(
 		break;
 	default:
 		IPAHAL_ERR_RL("unsupported Status NAT type 0x%x\n",
-			hw_status->nat_type);
+			status->nat_type);
 	}
-	status->exception = pkt_status_parse_exception(is_ipv6,
-						hw_status->exception);
 
 	IPA_PKT_STATUS_SET_MSK(0x1, IPAHAL_PKT_STATUS_MASK_FRAG_PROCESS_SHFT);
 	IPA_PKT_STATUS_SET_MSK(0x2, IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT);
@@ -1023,11 +1062,11 @@ static void ipa_pkt_status_parse(
 static void ipa_pkt_status_parse_thin(const void *unparsed_status,
 	struct ipahal_pkt_status_thin *status)
 {
-	struct ipa_pkt_status_hw *hw_status =
-		(struct ipa_pkt_status_hw *)unparsed_status;
+	union ipa_pkt_status_hw *hw_status =
+		(union ipa_pkt_status_hw *)unparsed_status;
 	bool is_ipv6;
 
-	is_ipv6 = (hw_status->status_mask & 0x80) ? false : true;
+	is_ipv6 = (hw_status->ipa_pkt.status_mask & 0x80) ? false : true;
 	if (!unparsed_status || !status) {
 		IPAHAL_ERR("Input Error: unparsed_status=%pK status=%pK\n",
 			unparsed_status, status);
@@ -1035,11 +1074,11 @@ static void ipa_pkt_status_parse_thin(const void *unparsed_status,
 	}
 
 	IPAHAL_DBG_LOW("Parse Thin Status Packet\n");
-	status->metadata = hw_status->metadata;
-	status->endp_src_idx = hw_status->endp_src_idx;
-	status->ucp = hw_status->ucp;
+	status->metadata = hw_status->ipa_pkt.metadata;
+	status->endp_src_idx = hw_status->ipa_pkt.endp_src_idx;
+	status->ucp = hw_status->ipa_pkt.ucp;
 	status->exception = pkt_status_parse_exception(is_ipv6,
-						hw_status->exception);
+						hw_status->ipa_pkt.exception);
 }
 
 /*
@@ -1102,7 +1141,7 @@ static int ipahal_pkt_status_init(enum ipa_hw_type ipa_hw_type)
 	 * add a compile time validty check for it like below (as well as
 	 * the new defines and/or the new strucutre in the internal header).
 	 */
-	BUILD_BUG_ON(sizeof(struct ipa_pkt_status_hw) !=
+	BUILD_BUG_ON(sizeof(union ipa_pkt_status_hw) !=
 		IPA3_0_PKT_STATUS_SIZE);
 
 	memset(&zero_obj, 0, sizeof(zero_obj));
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
index d43af78..88db03f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -555,6 +555,14 @@ enum ipahal_pkt_status_nat_type {
  * @rt_tbl_idx: Index of rt tbl that contains the rule on which was a match
  * @seq_num: Per source endp unique packet sequence number
  * @frag_rule: Frag rule index in H/W frag table in case of frag hit
+ * @frag_rule_idx: Frag rule index value.
+ * @tbl_idx: Table index valid or not.
+ * @src_ip_addr: Source packet IP address.
+ * @dest_ip_addr: Destination packet IP address.
+ * @protocol: Protocol number.
+ * @ip_id: IP packet IP ID number.
+ * @tlated_ip_addr: Translated IP address.
+ * @ip_cksum_diff: IP packet checksum difference.
  */
 struct ipahal_pkt_status {
 	u64 tag_info;
@@ -586,6 +594,15 @@ struct ipahal_pkt_status {
 	u8 rt_tbl_idx;
 	u8 seq_num;
 	u8 frag_rule;
+	u8 frag_rule_idx;
+	bool tbl_idx;
+	u32 src_ip_addr;
+	u32 dest_ip_addr;
+	u8 protocol;
+	u16 ip_id;
+	u32 tlated_ip_addr;
+	u16 ip_cksum_diff;
 };
 
 /*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
index 3625e7e..dd88ce8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -566,7 +566,7 @@ struct ipa_imm_cmd_hw_dma_task_32b_addr {
  * @frag_rule: Frag rule index in H/W frag table in case of frag hit
  * @hw_specific: H/W specific reserved value
  */
-struct ipa_pkt_status_hw {
+struct ipa_gen_pkt_status_hw {
 	u64 status_opcode:8;
 	u64 exception:8;
 	u64 status_mask:16;
@@ -597,7 +597,68 @@ struct ipa_pkt_status_hw {
 	u64 frag_hit:1;
 	u64 frag_rule:4;
 	u64 hw_specific:16;
-};
+} __packed;
+
+/*
+ * struct ipa_frag_pkt_status_hw - IPA status packet payload in H/W format.
+ *  This structure describes the frag status packet H/W structure for the
+ *   following statuses: IPA_NEW_FRAG_RULE.
+ * @status_opcode: The Type of the status (Opcode).
+ * @frag_rule_idx: Frag rule index value.
+ * @reserved_1: reserved
+ * @tbl_idx: Table index valid or not.
+ * @endp_src_idx: Source end point index.
+ * @exception: (not bitmask) - the first exception that took place.
+ *  In case of exception, src endp and pkt len are always valid.
+ * @rsvd2: reserved
+ * @seq_num: Packet sequence number.
+ * @src_ip_addr: Source packet IP address.
+ * @dest_ip_addr: Destination packet IP address.
+ * @reserved_3: reserved
+ * @nat_type: Defines the type of the NAT operation:
+ *	00: No NAT
+ *	01: Source NAT
+ *	10: Destination NAT
+ *	11: Reserved
+ * @protocol: Protocol number.
+ * @ip_id: IP packet IP ID number.
+ * @tlated_ip_addr: Translated IP address.
+ * @hdr_local: Header table location flag: In header insertion, was the header
+ *  taken from the table resides in local memory? (If no, then system mem)
+ * @hdr_offset: Offset of used header in the header table
+ * @endp_dest_idx: Destination end point index.
+ * @ip_cksum_diff: IP packet checksum difference.
+ * @metadata: meta data value used by packet
+ * @reserved_4: reserved
+ */
+struct ipa_frag_pkt_status_hw {
+	u64 status_opcode:8;
+	u64 frag_rule_idx:4;
+	u64 reserved_1:3;
+	u64 tbl_idx:1;
+	u64 endp_src_idx:5;
+	u64 exception:1;
+	u64 reserved_2:2;
+	u64 seq_num:8;
+	u64 src_ip_addr:32;
+	u64 dest_ip_addr:32;
+	u64 reserved_3:6;
+	u64 nat_type:2;
+	u64 protocol:8;
+	u64 ip_id:16;
+	u64 tlated_ip_addr:32;
+	u64 hdr_local:1;
+	u64 hdr_offset:10;
+	u64 endp_dest_idx:5;
+	u64 ip_cksum_diff:16;
+	u64 metadata:32;
+	u64 reserved_4:32;
+} __packed;
+
+union ipa_pkt_status_hw {
+	struct ipa_gen_pkt_status_hw ipa_pkt;
+	struct ipa_frag_pkt_status_hw frag_pkt;
+} __packed;
 
 /* Size of H/W Packet Status */
 #define IPA3_0_PKT_STATUS_SIZE 32
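
The union above lets the parser view the same 32-byte status payload through either layout, dispatching on the opcode. A userspace model of that dispatch (the NEW_FRAG_RULE opcode value here is hypothetical; 0x1 is the PACKET opcode the parser maps explicitly):

#include <stdio.h>

struct gen_pkt { unsigned char opcode; /* + general fields */ };
struct frag_pkt { unsigned char opcode; /* + frag fields */ };

union pkt_status {
	struct gen_pkt ipa_pkt;
	struct frag_pkt frag_pkt;
};

#define OPCODE_NEW_FRAG_RULE 0x20	/* hypothetical value */

static void parse(const union pkt_status *hw)
{
	if (hw->ipa_pkt.opcode == OPCODE_NEW_FRAG_RULE)
		printf("parse via frag layout\n");
	else
		printf("parse via general layout\n");
}

int main(void)
{
	union pkt_status s = { .ipa_pkt = { .opcode = 0x1 } };

	parse(&s);	/* -> general layout */
	return 0;
}
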
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index 2750425..e78a982 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -808,8 +808,7 @@ int se_geni_clks_off(struct se_geni_rsc *rsc)
 		return -EINVAL;
 
 	geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
-	if (unlikely(!geni_se_dev || !(geni_se_dev->bus_bw ||
-					geni_se_dev->bus_bw_noc)))
+	if (unlikely(!geni_se_dev))
 		return -ENODEV;
 
 	clk_disable_unprepare(rsc->se_clk);
@@ -841,9 +840,7 @@ int se_geni_resources_off(struct se_geni_rsc *rsc)
 		return -EINVAL;
 
 	geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
-	if (unlikely(!geni_se_dev ||
-			!(geni_se_dev->bus_bw ||
-					geni_se_dev->bus_bw_noc)))
+	if (unlikely(!geni_se_dev))
 		return -ENODEV;
 
 	ret = se_geni_clks_off(rsc);
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 6f58167..53fdd16 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -189,6 +189,34 @@ static int cp_get_parallel_mode(struct pl_data *chip, int mode)
 	return pval.intval;
 }
 
+static int get_hvdcp3_icl_limit(struct pl_data *chip)
+{
+	int rc, main_icl, target_icl = -EINVAL;
+	union power_supply_propval pval = {0, };
+
+	rc = power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_REAL_TYPE, &pval);
+	if ((rc < 0) || (pval.intval != POWER_SUPPLY_TYPE_USB_HVDCP_3))
+		return target_icl;
+
+	/*
+	 * For HVDCP3 adapters, limit max. ILIM as follows:
+	 * HVDCP3_ICL: Maximum ICL of HVDCP3 adapter (from DT configuration)
+	 * For Parallel input configurations:
+	 * VBUS: target_icl = HVDCP3_ICL - main_ICL
+	 * VMID: target_icl = HVDCP3_ICL
+	 */
+	target_icl = chip->chg_param->hvdcp3_max_icl_ua;
+	if (cp_get_parallel_mode(chip, PARALLEL_INPUT_MODE)
+					== POWER_SUPPLY_PL_USBIN_USBIN) {
+		main_icl = get_effective_result_locked(chip->usb_icl_votable);
+		if ((main_icl >= 0) && (main_icl < target_icl))
+			target_icl -= main_icl;
+	}
+
+	return target_icl;
+}
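
Worked example of get_hvdcp3_icl_limit() for the USBIN-USBIN (VBUS) case, with illustrative values:

#include <stdio.h>

int main(void)
{
	int hvdcp3_max_icl_ua = 3000000;	/* from DT; hypothetical */
	int main_icl_ua = 1000000;	/* effective USB_ICL vote; hypothetical */
	int target_icl = hvdcp3_max_icl_ua;

	/* VBUS (USBIN-USBIN): leave headroom for the main charger's ICL */
	if (main_icl_ua >= 0 && main_icl_ua < target_icl)
		target_icl -= main_icl_ua;

	printf("target_icl = %d uA\n", target_icl);	/* 2000000 */
	return 0;
}
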
+
 /*
  * Adapter CC Mode: ILIM over-ridden explicitly, below takes no effect.
  *
@@ -204,7 +232,7 @@ static int cp_get_parallel_mode(struct pl_data *chip, int mode)
  */
 static void cp_configure_ilim(struct pl_data *chip, const char *voter, int ilim)
 {
-	int rc, fcc, main_icl, target_icl = chip->chg_param->hvdcp3_max_icl_ua;
+	int rc, fcc, target_icl;
 	union power_supply_propval pval = {0, };
 
 	if (!is_usb_available(chip))
@@ -217,30 +245,8 @@ static void cp_configure_ilim(struct pl_data *chip, const char *voter, int ilim)
 					== POWER_SUPPLY_PL_OUTPUT_VPH)
 		return;
 
-	rc = power_supply_get_property(chip->usb_psy,
-				POWER_SUPPLY_PROP_REAL_TYPE, &pval);
-	if (rc < 0)
-		return;
-
-	/*
-	 * For HVDCP3 adapters limit max. ILIM based on DT configuration
-	 * of HVDCP3 ICL value.
-	 * Input VBUS:
-	 * target_icl = HVDCP3_ICL - main_ICL
-	 * Input VMID
-	 * target_icl = HVDCP3_ICL
-	 */
-	if (pval.intval == POWER_SUPPLY_TYPE_USB_HVDCP_3) {
-		if (((cp_get_parallel_mode(chip, PARALLEL_INPUT_MODE))
-					== POWER_SUPPLY_PL_USBIN_USBIN)) {
-			main_icl = get_effective_result_locked(
-							chip->usb_icl_votable);
-			if ((main_icl >= 0) && (main_icl < target_icl))
-				target_icl -= main_icl;
-		}
-
-		ilim = min(target_icl, ilim);
-	}
+	target_icl = get_hvdcp3_icl_limit(chip);
+	ilim = (target_icl > 0) ? min(ilim, target_icl) : ilim;
 
 	rc = power_supply_get_property(chip->cp_master_psy,
 				POWER_SUPPLY_PROP_MIN_ICL, &pval);
@@ -663,14 +669,17 @@ static void get_main_fcc_config(struct pl_data *chip, int *total_fcc)
 static void get_fcc_stepper_params(struct pl_data *chip, int main_fcc_ua,
 			int parallel_fcc_ua)
 {
-	int main_set_fcc_ua, total_fcc_ua;
+	int main_set_fcc_ua, total_fcc_ua, target_icl;
+	bool override;
 
 	if (!chip->chg_param->fcc_step_size_ua) {
 		pr_err("Invalid fcc stepper step size, value 0\n");
 		return;
 	}
 
-	if (is_override_vote_enabled_locked(chip->fcc_main_votable)) {
+	total_fcc_ua = main_fcc_ua + parallel_fcc_ua;
+	override = is_override_vote_enabled_locked(chip->fcc_main_votable);
+	if (override) {
 		/*
 		 * FCC stepper params need re-calculation in override mode
 		 * only if there is change in Main or total FCC
@@ -678,48 +687,99 @@ static void get_fcc_stepper_params(struct pl_data *chip, int main_fcc_ua,
 
 		main_set_fcc_ua = get_effective_result_locked(
 							chip->fcc_main_votable);
-		total_fcc_ua = main_fcc_ua + parallel_fcc_ua;
-
 		if ((main_set_fcc_ua != chip->override_main_fcc_ua)
 				|| (total_fcc_ua != chip->total_fcc_ua)) {
 			chip->override_main_fcc_ua = main_set_fcc_ua;
 			chip->total_fcc_ua = total_fcc_ua;
-			parallel_fcc_ua = (total_fcc_ua
-						- chip->override_main_fcc_ua);
 		} else {
 			goto skip_fcc_step_update;
 		}
 	}
 
-	/* Read current FCC of main charger */
-	chip->main_fcc_ua = get_effective_result(chip->fcc_main_votable);
-	chip->main_step_fcc_dir = (main_fcc_ua > chip->main_fcc_ua) ?
-				STEP_UP : STEP_DOWN;
-	chip->main_step_fcc_count = abs((main_fcc_ua - chip->main_fcc_ua) /
-				chip->chg_param->fcc_step_size_ua);
-	chip->main_step_fcc_residual = abs((main_fcc_ua - chip->main_fcc_ua) %
-				chip->chg_param->fcc_step_size_ua);
+	/*
+	 * If override vote is removed then start main FCC from the
+	 * last overridden value.
+	 * Clear slave_fcc if requested parallel current is 0 i.e.
+	 * parallel is disabled.
+	 */
+	if (chip->override_main_fcc_ua && !override) {
+		chip->main_fcc_ua = chip->override_main_fcc_ua;
+		chip->override_main_fcc_ua = 0;
+		if (!parallel_fcc_ua)
+			chip->slave_fcc_ua = 0;
+	} else {
+		chip->main_fcc_ua = get_effective_result_locked(
+						chip->fcc_main_votable);
+	}
 
-	chip->parallel_step_fcc_dir = (parallel_fcc_ua > chip->slave_fcc_ua) ?
-				STEP_UP : STEP_DOWN;
-	chip->parallel_step_fcc_count
-				= abs((parallel_fcc_ua - chip->slave_fcc_ua) /
-					chip->chg_param->fcc_step_size_ua);
-	chip->parallel_step_fcc_residual
-				= abs((parallel_fcc_ua - chip->slave_fcc_ua) %
-					chip->chg_param->fcc_step_size_ua);
+	/* Skip stepping if override vote is applied on main */
+	if (override) {
+		chip->main_step_fcc_count = 0;
+		chip->main_step_fcc_residual = 0;
+	} else {
+		chip->main_step_fcc_dir =
+				(main_fcc_ua > chip->main_fcc_ua) ?
+					STEP_UP : STEP_DOWN;
+		chip->main_step_fcc_count =
+				abs(main_fcc_ua - chip->main_fcc_ua) /
+					chip->chg_param->fcc_step_size_ua;
+		chip->main_step_fcc_residual =
+				abs(main_fcc_ua - chip->main_fcc_ua) %
+					chip->chg_param->fcc_step_size_ua;
+	}
 
+	/* Calculate CP_ILIM based on adapter limit and max. FCC */
+	if (!parallel_fcc_ua && is_cp_available(chip) && override) {
+		if (!chip->cp_ilim_votable)
+			chip->cp_ilim_votable = find_votable("CP_ILIM");
+
+		target_icl = get_hvdcp3_icl_limit(chip) * 2;
+		total_fcc_ua -= chip->main_fcc_ua;
+
+		/*
+		 * CP_ILIM = parallel_fcc_ua / 2.
+		 * Calculate parallel_fcc_ua as follows:
+		 * parallel_fcc_ua is the minimum of the total FCC
+		 * and the adapter's maximum allowed ICL limit (if the
+		 * adapter has a max. ICL limitation).
+		 */
+		parallel_fcc_ua = (target_icl > 0) ?
+				min(target_icl, total_fcc_ua) : total_fcc_ua;
+	}
+
+	/* Skip stepping if override vote is applied on CP */
+	if (chip->cp_ilim_votable
+		&& is_override_vote_enabled(chip->cp_ilim_votable)) {
+		chip->parallel_step_fcc_count = 0;
+		chip->parallel_step_fcc_residual = 0;
+	} else {
+		chip->parallel_step_fcc_dir =
+				(parallel_fcc_ua > chip->slave_fcc_ua) ?
+					STEP_UP : STEP_DOWN;
+		chip->parallel_step_fcc_count =
+				abs(parallel_fcc_ua - chip->slave_fcc_ua) /
+					chip->chg_param->fcc_step_size_ua;
+		chip->parallel_step_fcc_residual =
+				abs(parallel_fcc_ua - chip->slave_fcc_ua) %
+					chip->chg_param->fcc_step_size_ua;
+	}
 skip_fcc_step_update:
 	if (chip->parallel_step_fcc_count || chip->parallel_step_fcc_residual
 		|| chip->main_step_fcc_count || chip->main_step_fcc_residual)
 		chip->step_fcc = 1;
 
-	pr_debug("Main FCC Stepper parameters: main_step_direction: %d, main_step_count: %d, main_residual_fcc: %d\n",
-		chip->main_step_fcc_dir, chip->main_step_fcc_count,
-		chip->main_step_fcc_residual);
-	pr_debug("Parallel FCC Stepper parameters: parallel_step_direction: %d, parallel_step_count: %d, parallel_residual_fcc: %d\n",
+	pl_dbg(chip, PR_PARALLEL,
+		"Main FCC Stepper parameters: target_main_fcc: %d, current_main_fcc: %d main_step_direction: %d, main_step_count: %d, main_residual_fcc: %d override_main_fcc_ua: %d override: %d\n",
+		main_fcc_ua, chip->main_fcc_ua, chip->main_step_fcc_dir,
+		chip->main_step_fcc_count, chip->main_step_fcc_residual,
+		chip->override_main_fcc_ua, override);
+	pl_dbg(chip, PR_PARALLEL,
+		"Parallel FCC Stepper parameters: target_pl_fcc: %d current_pl_fcc: %d parallel_step_direction: %d, parallel_step_count: %d, parallel_residual_fcc: %d\n",
+		parallel_fcc_ua, chip->slave_fcc_ua,
 		chip->parallel_step_fcc_dir, chip->parallel_step_fcc_count,
 		chip->parallel_step_fcc_residual);
+	pl_dbg(chip, PR_PARALLEL, "FCC Stepper parameters: step_fcc=%d\n",
+		chip->step_fcc);
 }
 
 #define MINIMUM_PARALLEL_FCC_UA		500000
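
The stepper splits the distance between the present and the target FCC into fixed-size steps plus a residual. A worked example of the count/residual arithmetic (step size and currents are illustrative):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int step_ua = 100000;	/* fcc_step_size_ua; hypothetical */
	int cur_ua = 1000000;	/* present main FCC */
	int tgt_ua = 2550000;	/* requested main FCC */
	int delta = abs(tgt_ua - cur_ua);

	/* 15 full steps of 100 mA, then a 50 mA residual step */
	printf("dir=%s count=%d residual=%d\n",
	       tgt_ua > cur_ua ? "UP" : "DOWN",
	       delta / step_ua, delta % step_ua);
	return 0;
}
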
@@ -845,9 +905,8 @@ static int pl_fcc_vote_callback(struct votable *votable, void *data,
 {
 	struct pl_data *chip = data;
 	int master_fcc_ua = total_fcc_ua, slave_fcc_ua = 0;
-	int main_fcc_ua = 0, cp_fcc_ua = 0, fcc_thr_ua = 0, rc;
+	int cp_fcc_ua = 0, rc = 0;
 	union power_supply_propval pval = {0, };
-	bool is_cc_mode = false;
 
 	if (total_fcc_ua < 0)
 		return 0;
@@ -869,45 +928,30 @@ static int pl_fcc_vote_callback(struct votable *votable, void *data,
 		chip->cp_slave_disable_votable =
 			find_votable("CP_SLAVE_DISABLE");
 
-	if (!chip->usb_psy)
-		chip->usb_psy = power_supply_get_by_name("usb");
-
-	if (chip->usb_psy) {
-		rc = power_supply_get_property(chip->usb_psy,
-					POWER_SUPPLY_PROP_ADAPTER_CC_MODE,
-					&pval);
-		if (rc < 0)
-			pr_err("Couldn't get PPS CC mode status rc=%d\n", rc);
-		else
-			is_cc_mode = pval.intval;
-	}
-
-	if (chip->cp_master_psy) {
-		rc = power_supply_get_property(chip->cp_master_psy,
-					POWER_SUPPLY_PROP_MIN_ICL, &pval);
-		if (rc < 0)
-			pr_err("Couldn't get MIN ICL threshold rc=%d\n", rc);
-		else
-			fcc_thr_ua = is_cc_mode ? (3 * pval.intval) :
-							(4 * pval.intval);
-	}
-
-	if (chip->fcc_main_votable)
-		main_fcc_ua =
-			get_effective_result_locked(chip->fcc_main_votable);
-
-	if (main_fcc_ua < 0)
-		main_fcc_ua = 0;
-
-	cp_fcc_ua = total_fcc_ua - main_fcc_ua;
+	/*
+	 * CP charger current = Total FCC - Main charger's FCC.
+	 * Main charger FCC is userspace's override vote on main.
+	 */
+	cp_fcc_ua = total_fcc_ua - chip->chg_param->forced_main_fcc;
+	pl_dbg(chip, PR_PARALLEL,
+		"cp_fcc_ua=%d total_fcc_ua=%d forced_main_fcc=%d\n",
+		cp_fcc_ua, total_fcc_ua, chip->chg_param->forced_main_fcc);
 	if (cp_fcc_ua > 0) {
+		if (chip->cp_master_psy) {
+			rc = power_supply_get_property(chip->cp_master_psy,
+					POWER_SUPPLY_PROP_MIN_ICL, &pval);
+			if (rc < 0)
+				pr_err("Couldn't get MIN ICL threshold rc=%d\n",
+									rc);
+		}
+
 		if (chip->cp_slave_psy && chip->cp_slave_disable_votable) {
 			/*
 			 * Disable Slave CP if FCC share
-			 * falls below threshold.
+			 * falls below 3 * min ICL threshold.
 			 */
 			vote(chip->cp_slave_disable_votable, FCC_VOTER,
-				(cp_fcc_ua < fcc_thr_ua), 0);
+				(cp_fcc_ua < (3 * pval.intval)), 0);
 		}
 
 		if (chip->cp_disable_votable) {
@@ -1487,13 +1531,6 @@ static int pl_disable_vote_callback(struct votable *votable,
 			if (chip->step_fcc) {
 				vote(chip->pl_awake_votable, FCC_STEPPER_VOTER,
 					true, 0);
-				/*
-				 * Configure ILIM above min ILIM of CP to
-				 * ensure CP is not disabled due to ILIM vote.
-				 * Later FCC stepper will take to ILIM to
-				 * target value.
-				 */
-				cp_configure_ilim(chip, FCC_VOTER, 0);
 				schedule_delayed_work(&chip->fcc_stepper_work,
 					0);
 			}
@@ -1786,6 +1823,10 @@ static void handle_usb_change(struct pl_data *chip)
 		vote(chip->pl_disable_votable, PL_TAPER_EARLY_BAD_VOTER,
 				false, 0);
 		vote(chip->pl_disable_votable, ICL_LIMIT_VOTER, false, 0);
+		chip->override_main_fcc_ua = 0;
+		chip->total_fcc_ua = 0;
+		chip->slave_fcc_ua = 0;
+		chip->main_fcc_ua = 0;
 	}
 }
 
@@ -1918,6 +1959,11 @@ int qcom_batt_init(struct charger_param *chg_param)
 	if (!chip->pl_ws)
 		goto cleanup;
 
+	INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
+	INIT_WORK(&chip->pl_taper_work, pl_taper_work);
+	INIT_WORK(&chip->pl_disable_forever_work, pl_disable_forever_work);
+	INIT_DELAYED_WORK(&chip->fcc_stepper_work, fcc_stepper_work);
+
 	chip->fcc_main_votable = create_votable("FCC_MAIN", VOTE_MIN,
 					pl_fcc_main_vote_callback,
 					chip);
@@ -1987,11 +2033,6 @@ int qcom_batt_init(struct charger_param *chg_param)
 
 	vote(chip->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
 
-	INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
-	INIT_WORK(&chip->pl_taper_work, pl_taper_work);
-	INIT_WORK(&chip->pl_disable_forever_work, pl_disable_forever_work);
-	INIT_DELAYED_WORK(&chip->fcc_stepper_work, fcc_stepper_work);
-
 	rc = pl_register_notifier(chip);
 	if (rc < 0) {
 		pr_err("Couldn't register psy notifier rc = %d\n", rc);
diff --git a/drivers/power/supply/qcom/battery.h b/drivers/power/supply/qcom/battery.h
index fbc6b25..b25afd6 100644
--- a/drivers/power/supply/qcom/battery.h
+++ b/drivers/power/supply/qcom/battery.h
@@ -11,6 +11,7 @@ struct charger_param {
 	u32 fcc_step_size_ua;
 	u32 smb_version;
 	u32 hvdcp3_max_icl_ua;
+	u32 forced_main_fcc;
 };
 
 int qcom_batt_init(struct charger_param *param);
diff --git a/drivers/power/supply/qcom/fg-alg.c b/drivers/power/supply/qcom/fg-alg.c
index 58a4eb9..ead4839 100644
--- a/drivers/power/supply/qcom/fg-alg.c
+++ b/drivers/power/supply/qcom/fg-alg.c
@@ -1437,11 +1437,10 @@ static void ttf_work(struct work_struct *work)
 	}
 	pr_debug("TTF: charge_status:%d charge_done:%d msoc:%d\n",
 			charge_status, charge_done, msoc);
-	/*
-	 * Do not schedule ttf work when SOC is 100%
-	 * or charge terminated
-	 */
-	if ((msoc == 100) || charge_done)
+	/*
+	 * Do not schedule ttf work if charge is terminated or
+	 * SOC is 100% while charging.
+	 */
+	if (charge_done ||
+		((msoc == 100) &&
+			(charge_status == POWER_SUPPLY_STATUS_CHARGING)))
 		goto end_work;
 
 	rc =  ttf->get_ttf_param(ttf->data, TTF_IBAT, &ibatt_now);
diff --git a/drivers/power/supply/qcom/qg-core.h b/drivers/power/supply/qcom/qg-core.h
index f8e4881..4e29fb6 100644
--- a/drivers/power/supply/qcom/qg-core.h
+++ b/drivers/power/supply/qcom/qg-core.h
@@ -72,6 +72,7 @@ struct qg_dt {
 	bool			fvss_enable;
 	bool			multi_profile_load;
 	bool			tcss_enable;
+	bool			bass_enable;
 };
 
 struct qg_esr_data {
@@ -138,6 +139,7 @@ struct qpnp_qg {
 	bool			force_soc;
 	bool			fvss_active;
 	bool			tcss_active;
+	bool			bass_active;
 	int			charge_status;
 	int			charge_type;
 	int			chg_iterm_ma;
@@ -153,6 +155,8 @@ struct qpnp_qg {
 	int			ibat_tcss_entry;
 	int			soc_tcss;
 	int			tcss_entry_count;
+	int			max_fcc_limit_ma;
+	int			bsoc_bass_entry;
 	u32			fifo_done_count;
 	u32			wa_flags;
 	u32			seq_no;
diff --git a/drivers/power/supply/qcom/qg-soc.c b/drivers/power/supply/qcom/qg-soc.c
index 8454ff6..f16f2b7 100644
--- a/drivers/power/supply/qcom/qg-soc.c
+++ b/drivers/power/supply/qcom/qg-soc.c
@@ -20,11 +20,38 @@
 #include "qg-profile-lib.h"
 #include "qg-soc.h"
 
+enum soc_scaling_feature {
+	QG_FVSS = BIT(0),
+	QG_TCSS = BIT(1),
+	QG_BASS = BIT(2),
+};
+
 #define DEFAULT_UPDATE_TIME_MS			64000
 #define SOC_SCALE_HYST_MS			2000
 #define VBAT_LOW_HYST_UV			50000
 #define FULL_SOC				100
 
+static int qg_ss_feature;
+static ssize_t qg_ss_feature_show(struct device *dev, struct device_attribute
+				     *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%04x\n", qg_ss_feature);
+}
+
+static ssize_t qg_ss_feature_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int val;
+
+	if (kstrtos32(buf, 0, &val))
+		return -EINVAL;
+
+	qg_ss_feature = val;
+
+	return count;
+}
+DEVICE_ATTR_RW(qg_ss_feature);
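
qg_ss_feature is a runtime bitmask that can force-enable the SOC-scaling features independently of the DT flags (QG_FVSS=BIT(0), QG_TCSS=BIT(1), QG_BASS=BIT(2)). A small sketch of the mask test the scaling paths perform:

#include <stdio.h>

enum soc_scaling_feature {
	QG_FVSS = 1 << 0,
	QG_TCSS = 1 << 1,
	QG_BASS = 1 << 2,
};

int main(void)
{
	int qg_ss_feature = QG_FVSS | QG_BASS;	/* e.g. 0x5 written via sysfs */

	printf("fvss=%d tcss=%d bass=%d\n",
	       !!(qg_ss_feature & QG_FVSS),
	       !!(qg_ss_feature & QG_TCSS),
	       !!(qg_ss_feature & QG_BASS));	/* 1 0 1 */
	return 0;
}
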
+
 static int qg_delta_soc_interval_ms = 20000;
 static ssize_t soc_interval_ms_show(struct device *dev, struct device_attribute
 				     *attr, char *buf)
@@ -47,9 +74,25 @@ static ssize_t soc_interval_ms_store(struct device *dev,
 DEVICE_ATTR_RW(soc_interval_ms);
 
 static int qg_fvss_delta_soc_interval_ms = 10000;
-module_param_named(
-	fvss_soc_interval_ms, qg_fvss_delta_soc_interval_ms, int, 0600
-);
+static ssize_t fvss_delta_soc_interval_ms_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", qg_fvss_delta_soc_interval_ms);
+}
+
+static ssize_t fvss_delta_soc_interval_ms_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int val;
+
+	if (kstrtos32(buf, 0, &val))
+		return -EINVAL;
+
+	qg_fvss_delta_soc_interval_ms = val;
+
+	return count;
+}
+DEVICE_ATTR_RW(fvss_delta_soc_interval_ms);
 
 static int qg_delta_soc_cold_interval_ms = 4000;
 static ssize_t soc_cold_interval_ms_show(struct device *dev,
@@ -95,16 +138,32 @@ DEVICE_ATTR_RW(maint_soc_update_ms);
 
 /* FVSS scaling only based on VBAT */
 static int qg_fvss_vbat_scaling = 1;
-module_param_named(
-	fvss_vbat_scaling, qg_fvss_vbat_scaling, int, 0600
-);
+static ssize_t fvss_vbat_scaling_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", qg_fvss_vbat_scaling);
+}
+
+static ssize_t fvss_vbat_scaling_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int val;
+
+	if (kstrtos32(buf, 0, &val))
+		return -EINVAL;
+
+	qg_fvss_vbat_scaling = val;
+
+	return count;
+}
+DEVICE_ATTR_RW(fvss_vbat_scaling);
 
 static int qg_process_fvss_soc(struct qpnp_qg *chip, int sys_soc)
 {
 	int rc, vbat_uv = 0, vbat_cutoff_uv = chip->dt.vbatt_cutoff_mv * 1000;
 	int soc_vbat = 0, wt_vbat = 0, wt_sys = 0, soc_fvss = 0;
 
-	if (!chip->dt.fvss_enable)
+	if (!chip->dt.fvss_enable && !(qg_ss_feature & QG_FVSS))
 		goto exit_soc_scale;
 
 	if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING)
@@ -180,7 +239,7 @@ static int qg_process_tcss_soc(struct qpnp_qg *chip, int sys_soc)
 	int soc_ibat, wt_ibat, wt_sys;
 	union power_supply_propval prop = {0, };
 
-	if (!chip->dt.tcss_enable)
+	if (!chip->dt.tcss_enable && !(qg_ss_feature & QG_TCSS))
 		goto exit_soc_scale;
 
 	if (chip->sys_soc < (chip->dt.tcss_entry_soc * 100))
@@ -262,12 +321,60 @@ static int qg_process_tcss_soc(struct qpnp_qg *chip, int sys_soc)
 	chip->tcss_entry_count = 0;
 skip_entry_count:
 	chip->tcss_active = false;
-	qg_dbg(chip, QG_DEBUG_SOC, "TCSS: Quit - enabled=%d sys_soc=%d tcss_entry_count=%d fifo_i_ua=%d\n",
+	if (chip->dt.tcss_enable || (qg_ss_feature & QG_TCSS))
+		qg_dbg(chip, QG_DEBUG_SOC, "TCSS: Quit - enabled=%d sys_soc=%d tcss_entry_count=%d fifo_i_ua=%d\n",
 			chip->dt.tcss_enable, sys_soc, chip->tcss_entry_count,
 			chip->last_fifo_i_ua);
 	return sys_soc;
 }
 
+#define BASS_SYS_MSOC_DELTA			2
+static int qg_process_bass_soc(struct qpnp_qg *chip, int sys_soc)
+{
+	int bass_soc = sys_soc, msoc = chip->msoc;
+	int batt_soc = CAP(0, 100, DIV_ROUND_CLOSEST(chip->batt_soc, 100));
+
+	if (!chip->dt.bass_enable && !(qg_ss_feature & QG_BASS))
+		goto exit_soc_scale;
+
+	qg_dbg(chip, QG_DEBUG_SOC, "BASS Entry: fifo_i=%d sys_soc=%d msoc=%d batt_soc=%d fvss_active=%d\n",
+			chip->last_fifo_i_ua, sys_soc, msoc,
+			batt_soc, chip->fvss_active);
+
+	/* Skip BASS if FVSS is active */
+	if (chip->fvss_active)
+		goto exit_soc_scale;
+
+	if (((sys_soc - msoc) < BASS_SYS_MSOC_DELTA) ||
+				chip->last_fifo_i_ua <= 0)
+		goto exit_soc_scale;
+
+	if (!chip->bass_active) {
+		chip->bass_active = true;
+		chip->bsoc_bass_entry = batt_soc;
+	}
+
+	/* Drop the sys_soc by 1% if batt_soc has dropped */
+	if ((chip->bsoc_bass_entry - batt_soc) >= 1) {
+		bass_soc = (msoc > 0) ? msoc - 1 : 0;
+		chip->bass_active = false;
+	}
+
+	qg_dbg(chip, QG_DEBUG_SOC, "BASS Exit: fifo_i_ua=%d sys_soc=%d msoc=%d bsoc_bass_entry=%d batt_soc=%d bass_soc=%d\n",
+			chip->last_fifo_i_ua, sys_soc, msoc,
+			chip->bsoc_bass_entry, chip->batt_soc, bass_soc);
+
+	return bass_soc;
+
+exit_soc_scale:
+	chip->bass_active = false;
+	if (chip->dt.bass_enable || (qg_ss_feature & QG_BASS))
+		qg_dbg(chip, QG_DEBUG_SOC, "BASS Quit: enabled=%d fifo_i_ua=%d sys_soc=%d msoc=%d batt_soc=%d\n",
+			chip->dt.bass_enable, chip->last_fifo_i_ua,
+			sys_soc, msoc, chip->batt_soc);
+	return sys_soc;
+}
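
BASS latches the battery SOC on entry and, once it has dropped by at least 1%, pulls the reported SOC down by one point toward msoc. A worked example of that drop (values illustrative):

#include <stdio.h>

#define BASS_SYS_MSOC_DELTA	2

int main(void)
{
	int msoc = 50, sys_soc = 53;	/* reported vs system SOC, percent */
	int bsoc_entry = 48;	/* batt_soc latched on BASS entry */
	int batt_soc = 47;	/* batt_soc has since dropped by 1% */
	int bass_soc = sys_soc;

	if ((sys_soc - msoc) >= BASS_SYS_MSOC_DELTA &&
	    (bsoc_entry - batt_soc) >= 1)
		bass_soc = msoc > 0 ? msoc - 1 : 0;	/* drop by one point */

	printf("bass_soc = %d\n", bass_soc);	/* 49 */
	return 0;
}
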
+
 int qg_adjust_sys_soc(struct qpnp_qg *chip)
 {
 	int soc, vbat_uv, rc;
@@ -275,6 +382,7 @@ int qg_adjust_sys_soc(struct qpnp_qg *chip)
 
 	chip->sys_soc = CAP(QG_MIN_SOC, QG_MAX_SOC, chip->sys_soc);
 
+	/* TCSS */
 	chip->sys_soc = qg_process_tcss_soc(chip, chip->sys_soc);
 
 	if (chip->sys_soc <= 50) { /* 0.5% */
@@ -299,8 +407,12 @@ int qg_adjust_sys_soc(struct qpnp_qg *chip)
 	qg_dbg(chip, QG_DEBUG_SOC, "sys_soc=%d adjusted sys_soc=%d\n",
 					chip->sys_soc, soc);
 
+	/* FVSS */
 	soc = qg_process_fvss_soc(chip, soc);
 
+	/* BASS */
+	soc = qg_process_bass_soc(chip, soc);
+
 	chip->last_adj_ssoc = soc;
 
 	return soc;
diff --git a/drivers/power/supply/qcom/qg-soc.h b/drivers/power/supply/qcom/qg-soc.h
index 4d2003d..ba3e0f1 100644
--- a/drivers/power/supply/qcom/qg-soc.h
+++ b/drivers/power/supply/qcom/qg-soc.h
@@ -14,5 +14,8 @@ int qg_adjust_sys_soc(struct qpnp_qg *chip);
 extern struct device_attribute dev_attr_soc_interval_ms;
 extern struct device_attribute dev_attr_soc_cold_interval_ms;
 extern struct device_attribute dev_attr_maint_soc_update_ms;
+extern struct device_attribute dev_attr_fvss_delta_soc_interval_ms;
+extern struct device_attribute dev_attr_fvss_vbat_scaling;
+extern struct device_attribute dev_attr_qg_ss_feature;
 
 #endif /* __QG_SOC_H__ */
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c
index daee40e..38af5c9 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen4.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c
@@ -3213,7 +3213,7 @@ static int fg_gen4_esr_fast_calib_config(struct fg_gen4_chip *chip, bool en)
 	 * discharging when ESR fast calibration is disabled. Otherwise, keep
 	 * it enabled so that ESR pulses can happen during discharging.
 	 */
-	val = en ? BIT(6) | BIT(7) : 0;
+	val = (en || chip->dt.esr_calib_dischg) ? BIT(6) | BIT(7) : 0;
 	mask = BIT(6) | BIT(7);
 	rc = fg_sram_masked_write(fg, SYS_CONFIG_WORD,
 			SYS_CONFIG_OFFSET, mask, val, FG_IMA_DEFAULT);
@@ -5657,15 +5657,6 @@ static int fg_parse_esr_cal_params(struct fg_dev *fg)
 	struct device_node *node = fg->dev->of_node;
 	int rc, i, temp;
 
-	if (chip->dt.esr_timer_dischg_slow[TIMER_RETRY] >= 0 &&
-			chip->dt.esr_timer_dischg_slow[TIMER_MAX] >= 0) {
-		/* ESR calibration only during discharging */
-		chip->dt.esr_calib_dischg = of_property_read_bool(node,
-						"qcom,fg-esr-calib-dischg");
-		if (chip->dt.esr_calib_dischg)
-			return 0;
-	}
-
 	if (!of_find_property(node, "qcom,fg-esr-cal-soc-thresh", NULL) ||
 		!of_find_property(node, "qcom,fg-esr-cal-temp-thresh", NULL))
 		return 0;
@@ -5700,6 +5691,15 @@ static int fg_parse_esr_cal_params(struct fg_dev *fg)
 		}
 	}
 
+	if (chip->dt.esr_timer_dischg_slow[TIMER_RETRY] >= 0 &&
+			chip->dt.esr_timer_dischg_slow[TIMER_MAX] >= 0) {
+		/* ESR calibration only during discharging */
+		chip->dt.esr_calib_dischg = of_property_read_bool(node,
+						"qcom,fg-esr-calib-dischg");
+		if (chip->dt.esr_calib_dischg)
+			return 0;
+	}
+
 	chip->dt.delta_esr_disable_count = DEFAULT_ESR_DISABLE_COUNT;
 	rc = of_property_read_u32(node, "qcom,fg-delta-esr-disable-count",
 		&temp);
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index eea69fe..ed11038 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -86,6 +86,9 @@ static struct attribute *qg_attrs[] = {
 	&dev_attr_soc_interval_ms.attr,
 	&dev_attr_soc_cold_interval_ms.attr,
 	&dev_attr_maint_soc_update_ms.attr,
+	&dev_attr_fvss_delta_soc_interval_ms.attr,
+	&dev_attr_fvss_vbat_scaling.attr,
+	&dev_attr_qg_ss_feature.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(qg);
@@ -312,6 +315,7 @@ static int qg_store_soc_params(struct qpnp_qg *chip)
 	return rc;
 }
 
+#define MAX_FIFO_CNT_FOR_ESR			50
 static int qg_config_s2_state(struct qpnp_qg *chip,
 		enum s2_state requested_state, bool state_enable,
 		bool process_fifo)
@@ -369,6 +373,9 @@ static int qg_config_s2_state(struct qpnp_qg *chip,
 		return -EINVAL;
 	}
 
+	if (fifo_length)
+		qg_esr_mod_count = MAX_FIFO_CNT_FOR_ESR / fifo_length;
+
 	rc = qg_master_hold(chip, true);
 	if (rc < 0) {
 		pr_err("Failed to hold master, rc=%d\n", rc);
@@ -2980,6 +2987,13 @@ static int qg_load_battery_profile(struct qpnp_qg *chip)
 		chip->bp.fastchg_curr_ma = -EINVAL;
 	}
 
+	/*
+	 * Update the max fcc values based on QG subtype including
+	 * error margins.
+	 */
+	chip->bp.fastchg_curr_ma = min(chip->max_fcc_limit_ma,
+					chip->bp.fastchg_curr_ma);
+
 	rc = of_property_read_u32(profile_node, "qcom,qg-batt-profile-ver",
 				&chip->bp.qg_profile_version);
 	if (rc < 0) {
@@ -3354,6 +3368,8 @@ static int qg_sanitize_sdam(struct qpnp_qg *chip)
 }
 
 #define ADC_CONV_DLY_512MS		0xA
+#define IBAT_5A_FCC_MA			4800
+#define IBAT_10A_FCC_MA			9600
 static int qg_hw_init(struct qpnp_qg *chip)
 {
 	int rc, temp;
@@ -3367,6 +3383,11 @@ static int qg_hw_init(struct qpnp_qg *chip)
 		return rc;
 	}
 
+	if (chip->qg_subtype == QG_ADC_IBAT_5A)
+		chip->max_fcc_limit_ma = IBAT_5A_FCC_MA;
+	else
+		chip->max_fcc_limit_ma = IBAT_10A_FCC_MA;
+
 	rc = qg_set_wa_flags(chip);
 	if (rc < 0) {
 		pr_err("Failed to update PMIC type flags, rc=%d\n", rc);
@@ -4262,6 +4283,8 @@ static int qg_parse_dt(struct qpnp_qg *chip)
 			chip->dt.tcss_entry_soc = temp;
 	}
 
+	chip->dt.bass_enable = of_property_read_bool(node, "qcom,bass-enable");
+
 	chip->dt.multi_profile_load = of_property_read_bool(node,
 					"qcom,multi-profile-load");
 
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index 4d92ad8..827a315 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -1225,6 +1225,7 @@ static enum power_supply_property smb5_usb_main_props[] = {
 	POWER_SUPPLY_PROP_FORCE_MAIN_ICL,
 	POWER_SUPPLY_PROP_COMP_CLAMP_LEVEL,
 	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_HOT_TEMP,
 };
 
 static int smb5_usb_main_get_prop(struct power_supply *psy,
@@ -1288,6 +1289,10 @@ static int smb5_usb_main_get_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_HEALTH:
 		val->intval = smblib_get_prop_smb_health(chg);
 		break;
+	/* Use this property to report overheat status */
+	case POWER_SUPPLY_PROP_HOT_TEMP:
+		val->intval = chg->thermal_overheat;
+		break;
 	default:
 		pr_debug("get prop %d is not supported in usb-main\n", psp);
 		rc = -EINVAL;
@@ -1367,6 +1372,15 @@ static int smb5_usb_main_set_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_FORCE_MAIN_FCC:
 		vote_override(chg->fcc_main_votable, CC_MODE_VOTER,
 				(val->intval < 0) ? false : true, val->intval);
+		if (val->intval >= 0)
+			chg->chg_param.forced_main_fcc = val->intval;
+		/*
+		 * Remove low vote on FCC_MAIN, for WLS, to allow FCC_MAIN to
+		 * rise to its full value.
+		 */
+		if (val->intval < 0)
+			vote(chg->fcc_main_votable, WLS_PL_CHARGING_VOTER,
+								false, 0);
 		/* Main FCC updated re-calculate FCC */
 		rerun_election(chg->fcc_votable);
 		break;
@@ -1380,6 +1394,9 @@ static int smb5_usb_main_set_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_COMP_CLAMP_LEVEL:
 		rc = smb5_set_prop_comp_clamp_level(chg, val);
 		break;
+	case POWER_SUPPLY_PROP_HOT_TEMP:
+		rc = smblib_set_prop_thermal_overheat(chg, val->intval);
+		break;
 	default:
 		pr_err("set prop %d is not supported\n", psp);
 		rc = -EINVAL;
@@ -1400,6 +1417,7 @@ static int smb5_usb_main_prop_is_writeable(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_FORCE_MAIN_FCC:
 	case POWER_SUPPLY_PROP_FORCE_MAIN_ICL:
 	case POWER_SUPPLY_PROP_COMP_CLAMP_LEVEL:
+	case POWER_SUPPLY_PROP_HOT_TEMP:
 		rc = 1;
 		break;
 	default:
@@ -1670,7 +1688,10 @@ static int smb5_batt_get_prop(struct power_supply *psy,
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		val->intval = get_client_vote(chg->fv_votable,
-				BATT_PROFILE_VOTER);
+					      QNOVO_VOTER);
+		if (val->intval < 0)
+			val->intval = get_client_vote(chg->fv_votable,
+						      BATT_PROFILE_VOTER);
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
 		val->intval = get_client_vote_locked(chg->fv_votable,
@@ -2583,6 +2604,10 @@ static int smb5_init_hw(struct smb5 *chip)
 	/* Set HVDCP autonomous mode per DT option */
 	smblib_hvdcp_hw_inov_enable(chg, chip->dt.hvdcp_autonomous);
 
+	/* Enable HVDCP authentication algorithm for non-PD designs */
+	if (chg->pd_not_supported)
+		smblib_hvdcp_detect_enable(chg, true);
+
 	/* Disable HVDCP and authentication algorithm if specified in DT */
 	if (chg->hvdcp_disable)
 		smblib_hvdcp_detect_enable(chg, false);
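
The VOLTAGE_MAX hunk prefers the Qnovo float-voltage vote and only falls back
to the battery-profile vote when Qnovo has not voted; get_client_vote()
returns a negative value for an absent vote, which makes the fallback a
simple sign check:

  static int smb5_get_voltage_max_uv(struct smb_charger *chg)
  {
      int uv;

      uv = get_client_vote(chg->fv_votable, QNOVO_VOTER);
      if (uv < 0)     /* no Qnovo vote: report the profile float voltage */
          uv = get_client_vote(chg->fv_votable, BATT_PROFILE_VOTER);
      return uv;
  }
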
diff --git a/drivers/power/supply/qcom/smb1390-charger-psy.c b/drivers/power/supply/qcom/smb1390-charger-psy.c
index 7f48d20..0977b65 100644
--- a/drivers/power/supply/qcom/smb1390-charger-psy.c
+++ b/drivers/power/supply/qcom/smb1390-charger-psy.c
@@ -1553,8 +1553,7 @@ static int smb1390_parse_dt(struct smb1390 *chip)
 	of_property_read_u32(chip->dev->of_node, "qcom,parallel-input-mode",
 			&chip->pl_input_mode);
 
-	chip->cp_slave_thr_taper_ua = smb1390_is_adapter_cc_mode(chip) ?
-			(3 * chip->min_ilim_ua) : (4 * chip->min_ilim_ua);
+	chip->cp_slave_thr_taper_ua = 3 * chip->min_ilim_ua;
 	of_property_read_u32(chip->dev->of_node, "qcom,cp-slave-thr-taper-ua",
 			      &chip->cp_slave_thr_taper_ua);
 
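
The smb1390 change is the usual devicetree default-then-override idiom:
of_property_read_u32() leaves its output untouched on failure, so the driver
can seed a fixed default and make the call unconditionally, with no
return-value check:

  /* default first; the DT property, if present, overrides it */
  chip->cp_slave_thr_taper_ua = 3 * chip->min_ilim_ua;
  of_property_read_u32(chip->dev->of_node, "qcom,cp-slave-thr-taper-ua",
                       &chip->cp_slave_thr_taper_ua);

This also drops the CC-mode dependence of the old default, since boards that
need a different taper threshold can now set it from DT.
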
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index 0be8676..5c1be1a 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -1004,25 +1004,31 @@ static int smblib_request_dpdm(struct smb_charger *chg, bool enable)
 		}
 	}
 
+	mutex_lock(&chg->dpdm_lock);
 	if (enable) {
-		if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
+		if (chg->dpdm_reg && !chg->dpdm_enabled) {
 			smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
 			rc = regulator_enable(chg->dpdm_reg);
 			if (rc < 0)
 				smblib_err(chg,
 					"Couldn't enable dpdm regulator rc=%d\n",
 					rc);
+			else
+				chg->dpdm_enabled = true;
 		}
 	} else {
-		if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
+		if (chg->dpdm_reg && chg->dpdm_enabled) {
 			smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
 			rc = regulator_disable(chg->dpdm_reg);
 			if (rc < 0)
 				smblib_err(chg,
 					"Couldn't disable dpdm regulator rc=%d\n",
 					rc);
+			else
+				chg->dpdm_enabled = false;
 		}
 	}
+	mutex_unlock(&chg->dpdm_lock);
 
 	return rc;
 }
@@ -1200,6 +1206,7 @@ static void smblib_uusb_removal(struct smb_charger *chg)
 	chg->usb_icl_delta_ua = 0;
 	chg->pulse_cnt = 0;
 	chg->uusb_apsd_rerun_done = false;
+	chg->chg_param.forced_main_fcc = 0;
 
 	/* write back the default FLOAT charger configuration */
 	rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
@@ -1365,6 +1372,10 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
 	/* suspend if 25mA or less is requested */
 	bool suspend = (icl_ua <= USBIN_25MA);
 
+	if (chg->chg_param.smb_version == PMI632_SUBTYPE)
+		schgm_flash_torch_priority(chg, suspend ? TORCH_BOOST_MODE :
+					TORCH_BUCK_MODE);
+
 	/* Do not configure ICL from SW for DAM cables */
 	if (smblib_get_prop_typec_mode(chg) ==
 			    POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY)
@@ -4504,7 +4515,7 @@ int smblib_set_prop_pd_active(struct smb_charger *chg,
 	const struct apsd_result *apsd = smblib_get_apsd_result(chg);
 
 	int rc = 0;
-	int sec_charger;
+	int sec_charger, typec_mode;
 
 	/*
 	 * Ignore repetitive notification while PD is active, which
@@ -4574,6 +4585,14 @@ int smblib_set_prop_pd_active(struct smb_charger *chg,
 	smblib_usb_pd_adapter_allowance_override(chg,
 			!!chg->pd_active ? FORCE_5V : FORCE_NULL);
 	smblib_update_usb_type(chg);
+
+	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB &&
+			!chg->ok_to_pd) {
+		typec_mode = smblib_get_prop_typec_mode(chg);
+		if (typec_rp_med_high(chg, typec_mode))
+			vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
+	}
+
 	power_supply_changed(chg->usb_psy);
 	return rc;
 }
@@ -4863,6 +4882,33 @@ int smblib_get_charge_current(struct smb_charger *chg,
 	return 0;
 }
 
+#define IADP_OVERHEAT_UA	500000
+int smblib_set_prop_thermal_overheat(struct smb_charger *chg,
+						int therm_overheat)
+{
+	int icl_ua = 0;
+
+	if (chg->thermal_overheat == !!therm_overheat)
+		return 0;
+
+	/* Configure ICL to 500mA in case system health is Overheat */
+	if (therm_overheat)
+		icl_ua = IADP_OVERHEAT_UA;
+
+	if (!chg->cp_disable_votable)
+		chg->cp_disable_votable = find_votable("CP_DISABLE");
+
+	if (chg->cp_disable_votable) {
+		vote(chg->cp_disable_votable, OVERHEAT_LIMIT_VOTER,
+							therm_overheat, 0);
+		vote(chg->usb_icl_votable, OVERHEAT_LIMIT_VOTER,
+							therm_overheat, icl_ua);
+	}
+
+	chg->thermal_overheat = !!therm_overheat;
+	return 0;
+}
+
 /**********************
  * INTERRUPT HANDLERS *
  **********************/
@@ -5866,12 +5912,16 @@ static void typec_src_removal(struct smb_charger *chg)
 	chg->voltage_min_uv = MICRO_5V;
 	chg->voltage_max_uv = MICRO_5V;
 	chg->usbin_forced_max_uv = 0;
+	chg->chg_param.forced_main_fcc = 0;
 
 	/* Reset all CC mode votes */
 	vote(chg->fcc_main_votable, MAIN_FCC_VOTER, false, 0);
 	chg->adapter_cc_mode = 0;
+	chg->thermal_overheat = 0;
 	vote_override(chg->fcc_votable, CC_MODE_VOTER, false, 0);
 	vote_override(chg->usb_icl_votable, CC_MODE_VOTER, false, 0);
+	vote(chg->cp_disable_votable, OVERHEAT_LIMIT_VOTER, false, 0);
+	vote(chg->usb_icl_votable, OVERHEAT_LIMIT_VOTER, false, 0);
 
 	/* write back the default FLOAT charger configuration */
 	rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
@@ -6544,8 +6594,7 @@ irqreturn_t wdog_snarl_irq_handler(int irq, void *data)
 		schedule_delayed_work(&chg->thermal_regulation_work, 0);
 	}
 
-	if (chg->step_chg_enabled)
-		power_supply_changed(chg->batt_psy);
+	power_supply_changed(chg->batt_psy);
 
 	return IRQ_HANDLED;
 }
@@ -7540,6 +7589,7 @@ int smblib_init(struct smb_charger *chg)
 	mutex_init(&chg->smb_lock);
 	mutex_init(&chg->irq_status_lock);
 	mutex_init(&chg->dcin_aicl_lock);
+	mutex_init(&chg->dpdm_lock);
 	spin_lock_init(&chg->typec_pr_lock);
 	INIT_WORK(&chg->bms_update_work, bms_update_work);
 	INIT_WORK(&chg->pl_update_work, pl_update_work);
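
The smblib_request_dpdm() rework replaces regulator_is_enabled(), which
reports the regulator core's global state and can race with other consumers,
with a driver-local dpdm_enabled flag serialized by the new dpdm_lock. A
condensed sketch of the resulting idiom, names taken from the hunk:

  static int request_dpdm_locked(struct smb_charger *chg, bool enable)
  {
      int rc = 0;

      mutex_lock(&chg->dpdm_lock);
      if (enable && chg->dpdm_reg && !chg->dpdm_enabled) {
          rc = regulator_enable(chg->dpdm_reg);
          if (!rc)
              chg->dpdm_enabled = true;   /* our enable, not global state */
      } else if (!enable && chg->dpdm_reg && chg->dpdm_enabled) {
          rc = regulator_disable(chg->dpdm_reg);
          if (!rc)
              chg->dpdm_enabled = false;
      }
      mutex_unlock(&chg->dpdm_lock);
      return rc;
  }

Tracking the driver's own view keeps its enable/disable calls balanced even
when the regulator is shared with another consumer.
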
diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h
index c77e875..dd2d7e5 100644
--- a/drivers/power/supply/qcom/smb5-lib.h
+++ b/drivers/power/supply/qcom/smb5-lib.h
@@ -77,6 +77,7 @@ enum print_reason {
 #define DCIN_AICL_VOTER			"DCIN_AICL_VOTER"
 #define WLS_PL_CHARGING_VOTER		"WLS_PL_CHARGING_VOTER"
 #define ICL_CHANGE_VOTER		"ICL_CHANGE_VOTER"
+#define OVERHEAT_LIMIT_VOTER		"OVERHEAT_LIMIT_VOTER"
 
 #define BOOST_BACK_STORM_COUNT	3
 #define WEAK_CHG_STORM_COUNT	8
@@ -389,6 +390,7 @@ struct smb_charger {
 	struct mutex		dcin_aicl_lock;
 	spinlock_t		typec_pr_lock;
 	struct mutex		adc_lock;
+	struct mutex		dpdm_lock;
 
 	/* power supplies */
 	struct power_supply		*batt_psy;
@@ -409,6 +411,7 @@ struct smb_charger {
 
 	/* CC Mode */
 	int	adapter_cc_mode;
+	int	thermal_overheat;
 
 	/* regulators */
 	struct smb_regulator	*vbus_vreg;
@@ -556,6 +559,7 @@ struct smb_charger {
 	bool			dcin_aicl_done;
 	bool			hvdcp3_standalone_config;
 	bool			dcin_icl_user_set;
+	bool			dpdm_enabled;
 
 	/* workaround flag */
 	u32			wa_flags;
@@ -745,6 +749,8 @@ int smblib_get_die_health(struct smb_charger *chg,
 				union power_supply_propval *val);
 int smblib_get_prop_smb_health(struct smb_charger *chg);
 int smblib_get_prop_connector_health(struct smb_charger *chg);
+int smblib_set_prop_thermal_overheat(struct smb_charger *chg,
+			       int therm_overheat);
 int smblib_get_skin_temp_status(struct smb_charger *chg);
 int smblib_get_prop_vph_voltage_now(struct smb_charger *chg,
 				union power_supply_propval *val);
diff --git a/drivers/regulator/qcom_pm8008-regulator.c b/drivers/regulator/qcom_pm8008-regulator.c
index d7b74d5..ae7271f 100644
--- a/drivers/regulator/qcom_pm8008-regulator.c
+++ b/drivers/regulator/qcom_pm8008-regulator.c
@@ -30,10 +30,14 @@
 #define MISC_CHIP_ENABLE_REG		(MISC_BASE + 0x50)
 #define CHIP_ENABLE_BIT			BIT(0)
 
+#define MISC_SHUTDOWN_CTRL_REG		(MISC_BASE + 0x59)
+#define IGNORE_LDO_OCP_SHUTDOWN		BIT(3)
+
 #define LDO_ENABLE_REG(base)		(base + 0x46)
 #define ENABLE_BIT			BIT(7)
 
 #define LDO_STATUS1_REG(base)		(base + 0x08)
+#define VREG_OCP_BIT			BIT(5)
 #define VREG_READY_BIT			BIT(7)
 #define MODE_STATE_MASK			GENMASK(1, 0)
 #define MODE_STATE_NPM			3
@@ -50,6 +54,10 @@
 #define LDO_MODE_LPM			4
 #define FORCED_BYPASS			2
 
+#define LDO_OCP_CTL1_REG(base)		(base + 0x88)
+#define VREG_OCP_STATUS_CLR		BIT(1)
+#define LDO_OCP_BROADCAST_EN_BIT	BIT(2)
+
 #define LDO_STEPPER_CTL_REG(base)	(base + 0x3b)
 #define STEP_RATE_MASK			GENMASK(1, 0)
 
@@ -64,7 +72,7 @@ struct pm8008_chip {
 	struct regmap		*regmap;
 	struct regulator_dev	*rdev;
 	struct regulator_desc	rdesc;
-
+	int			ocp_irq;
 };
 
 struct regulator_data {
@@ -82,10 +90,12 @@ struct pm8008_regulator {
 	struct regulator	*parent_supply;
 	struct regulator	*en_supply;
 	struct device_node	*of_node;
+	struct notifier_block	nb;
 	u16			base;
 	int			hpm_min_load_ua;
 	int			min_dropout_uv;
 	int			step_rate;
+	bool			enable_ocp_broadcast;
 };
 
 static struct regulator_data reg_data[] = {
@@ -433,6 +443,60 @@ static struct regulator_ops pm8008_regulator_ops = {
 	.set_voltage_time	= pm8008_regulator_set_voltage_time,
 };
 
+static int pm8008_ldo_cb(struct notifier_block *nb, ulong event, void *data)
+{
+	struct pm8008_regulator *pm8008_reg = container_of(nb,
+						struct pm8008_regulator, nb);
+	u8 val;
+	int rc;
+
+	if (event != REGULATOR_EVENT_OVER_CURRENT)
+		return NOTIFY_OK;
+
+	rc = pm8008_read(pm8008_reg->regmap,
+			 LDO_STATUS1_REG(pm8008_reg->base), &val, 1);
+	if (rc < 0) {
+		pm8008_err(pm8008_reg,
+			"failed to read regulator status rc=%d\n", rc);
+		goto error;
+	}
+
+	if (!(val & VREG_OCP_BIT))
+		return NOTIFY_OK;
+
+	pr_err("OCP triggered on %s\n", pm8008_reg->rdesc.name);
+	/*
+	 * Toggle the OCP_STATUS_CLR bit to re-arm the OCP status for
+	 * the next OCP event
+	 */
+	rc = pm8008_masked_write(pm8008_reg->regmap,
+				 LDO_OCP_CTL1_REG(pm8008_reg->base),
+				 VREG_OCP_STATUS_CLR, VREG_OCP_STATUS_CLR);
+	if (rc < 0) {
+		pm8008_err(pm8008_reg, "failed to write OCP_STATUS_CLR rc=%d\n",
+			   rc);
+		goto error;
+	}
+
+	rc = pm8008_masked_write(pm8008_reg->regmap,
+				 LDO_OCP_CTL1_REG(pm8008_reg->base),
+				 VREG_OCP_STATUS_CLR, 0);
+	if (rc < 0) {
+		pm8008_err(pm8008_reg, "failed to write OCP_STATUS_CLR rc=%d\n",
+			   rc);
+		goto error;
+	}
+
+	/* Notify the consumers about the OCP event */
+	mutex_lock(&pm8008_reg->rdev->mutex);
+	regulator_notifier_call_chain(pm8008_reg->rdev,
+				REGULATOR_EVENT_OVER_CURRENT, NULL);
+	mutex_unlock(&pm8008_reg->rdev->mutex);
+
+error:
+	return NOTIFY_OK;
+}
+
 static int pm8008_register_ldo(struct pm8008_regulator *pm8008_reg,
 						const char *name)
 {
@@ -481,6 +545,17 @@ static int pm8008_register_ldo(struct pm8008_regulator *pm8008_reg,
 		}
 	}
 
+	if (pm8008_reg->enable_ocp_broadcast) {
+		rc = pm8008_masked_write(pm8008_reg->regmap,
+				LDO_OCP_CTL1_REG(pm8008_reg->base),
+				LDO_OCP_BROADCAST_EN_BIT,
+				LDO_OCP_BROADCAST_EN_BIT);
+		if (rc < 0) {
+			pr_err("%s: failed to configure ocp broadcast rc=%d\n",
+				name, rc);
+			return rc;
+		}
+	}
 
 	/* get slew rate */
 	rc = pm8008_read(pm8008_reg->regmap,
@@ -558,6 +633,17 @@ static int pm8008_register_ldo(struct pm8008_regulator *pm8008_reg,
 		return rc;
 	}
 
+	if (pm8008_reg->enable_ocp_broadcast) {
+		pm8008_reg->nb.notifier_call = pm8008_ldo_cb;
+		rc = devm_regulator_register_notifier(pm8008_reg->en_supply,
+						 &pm8008_reg->nb);
+		if (rc < 0) {
+			pr_err("Failed to register a regulator notifier rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
 	pr_debug("%s regulator registered\n", name);
 
 	return 0;
@@ -570,6 +656,9 @@ static int pm8008_parse_regulator(struct regmap *regmap, struct device *dev)
 	const char *name;
 	struct device_node *child;
 	struct pm8008_regulator *pm8008_reg;
+	bool ocp;
+
+	ocp = of_property_read_bool(dev->of_node, "qcom,enable-ocp-broadcast");
 
 	/* parse each subnode and register regulator for regulator child */
 	for_each_available_child_of_node(dev->of_node, child) {
@@ -580,6 +669,7 @@ static int pm8008_parse_regulator(struct regmap *regmap, struct device *dev)
 		pm8008_reg->regmap = regmap;
 		pm8008_reg->of_node = child;
 		pm8008_reg->dev = dev;
+		pm8008_reg->enable_ocp_broadcast = ocp;
 
 		rc = of_property_read_string(child, "regulator-name", &name);
 		if (rc)
@@ -694,6 +784,18 @@ static int pm8008_init_enable_regulator(struct pm8008_chip *chip)
 	return 0;
 }
 
+static irqreturn_t pm8008_ocp_irq(int irq, void *_chip)
+{
+	struct pm8008_chip *chip = _chip;
+
+	mutex_lock(&chip->rdev->mutex);
+	regulator_notifier_call_chain(chip->rdev, REGULATOR_EVENT_OVER_CURRENT,
+				      NULL);
+	mutex_unlock(&chip->rdev->mutex);
+
+	return IRQ_HANDLED;
+}
+
 static int pm8008_chip_probe(struct platform_device *pdev)
 {
 	int rc = 0;
@@ -717,6 +819,29 @@ static int pm8008_chip_probe(struct platform_device *pdev)
 		return rc;
 	}
 
+	chip->ocp_irq = of_irq_get_byname(chip->dev->of_node, "ocp");
+	if (chip->ocp_irq < 0) {
+		pr_debug("Failed to get pm8008-ocp-irq\n");
+	} else {
+		rc = devm_request_threaded_irq(chip->dev, chip->ocp_irq, NULL,
+				pm8008_ocp_irq, IRQF_ONESHOT,
+				"ocp", chip);
+		if (rc < 0) {
+			pr_err("Failed to request 'pm8008-ocp-irq' rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* Ignore PMIC shutdown for LDO OCP event */
+		rc = pm8008_masked_write(chip->regmap, MISC_SHUTDOWN_CTRL_REG,
+			IGNORE_LDO_OCP_SHUTDOWN, IGNORE_LDO_OCP_SHUTDOWN);
+		if (rc < 0) {
+			pr_err("Failed to write MISC_SHUTDOWN register rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
 	pr_debug("PM8008 chip registered\n");
 	return 0;
 }
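
The pm8008 OCP support has two halves: a chip-level threaded IRQ that
broadcasts REGULATOR_EVENT_OVER_CURRENT, and a per-LDO notifier that checks
VREG_OCP_BIT and re-arms detection by pulsing VREG_OCP_STATUS_CLR high and
back low. The re-arm step in isolation, using the driver's masked-write
helper:

  /* Toggle the clear bit: write 1 then 0 to re-arm OCP detection */
  rc = pm8008_masked_write(regmap, LDO_OCP_CTL1_REG(base),
                           VREG_OCP_STATUS_CLR, VREG_OCP_STATUS_CLR);
  if (!rc)
      rc = pm8008_masked_write(regmap, LDO_OCP_CTL1_REG(base),
                               VREG_OCP_STATUS_CLR, 0);

Re-broadcasting the event through regulator_notifier_call_chain() lets
ordinary regulator consumers react to the OCP without knowing anything about
the pm8008 IRQ wiring.
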
diff --git a/drivers/regulator/qpnp-amoled-regulator.c b/drivers/regulator/qpnp-amoled-regulator.c
index c6b3e4d..b311f3d 100644
--- a/drivers/regulator/qpnp-amoled-regulator.c
+++ b/drivers/regulator/qpnp-amoled-regulator.c
@@ -32,22 +32,13 @@
 #define OLEDB_PERIPH_TYPE		0x2C
 
 /* AB */
-#define AB_STATUS1(chip)		(chip->ab_base + 0x08)
-#define AB_LDO_SW_DBG_CTL(chip)		(chip->ab_base + 0x72)
 #define AB_LDO_PD_CTL(chip)		(chip->ab_base + 0x78)
 
-/* AB_STATUS1 */
-#define VREG_OK_BIT			BIT(6)
-#define VREG_OK_SHIFT			6
-
 /* AB_LDO_PD_CTL */
 #define PULLDN_EN_BIT			BIT(7)
 
 /* IBB */
 #define IBB_PD_CTL(chip)		(chip->ibb_base + 0x47)
-#define IBB_PS_CTL(chip)		(chip->ibb_base + 0x50)
-#define IBB_NLIMIT_DAC(chip)		(chip->ibb_base + 0x61)
-#define IBB_SMART_PS_CTL(chip)		(chip->ibb_base + 0x65)
 
 /* IBB_PD_CTL */
 #define ENABLE_PD_BIT			BIT(7)
@@ -102,7 +93,7 @@ enum reg_type {
 	IBB,
 };
 
-int qpnp_amoled_read(struct qpnp_amoled *chip,
+static int qpnp_amoled_read(struct qpnp_amoled *chip,
 			u16 addr, u8 *value, u8 count)
 {
 	int rc = 0;
@@ -494,7 +485,8 @@ static int qpnp_amoled_parse_dt(struct qpnp_amoled *chip)
 	struct device_node *temp, *node = chip->dev->of_node;
 	const __be32 *prop_addr;
 	int rc = 0;
-	u32 base, val;
+	u32 base;
+	u8 val;
 
 	for_each_available_child_of_node(node, temp) {
 		prop_addr = of_get_address(temp, 0, NULL, NULL);
@@ -504,7 +496,7 @@ static int qpnp_amoled_parse_dt(struct qpnp_amoled *chip)
 		}
 
 		base = be32_to_cpu(*prop_addr);
-		rc = regmap_read(chip->regmap, base + PERIPH_TYPE, &val);
+		rc = qpnp_amoled_read(chip, base + PERIPH_TYPE, &val, 1);
 		if (rc < 0) {
 			pr_err("Couldn't read PERIPH_TYPE for base %x\n", base);
 			return rc;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index e731af5..f20c848 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2313,12 +2313,17 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
 	struct scsi_cmnd *scmd;
 	struct Scsi_Host *shost = dev->host;
 	struct request *rq;
+	const char *string;
 	unsigned long flags;
 	int error = 0, rtn, val;
 
 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
 		return -EACCES;
 
+	string = shost->hostt->name;
+	if (!strcmp(string, "ufshcd"))
+		return -EACCES;
+
 	error = get_user(val, arg);
 	if (error)
 		return error;
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c
index 1a3046f..48fd18c 100644
--- a/drivers/scsi/ufs/ufs-qcom-ice.c
+++ b/drivers/scsi/ufs/ufs-qcom-ice.c
@@ -180,7 +180,8 @@ static void ufs_qcom_ice_cfg_work(struct work_struct *work)
 		return;
 
 	spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
-	if (!qcom_host->req_pending) {
+	if (!qcom_host->req_pending ||
+			ufshcd_is_shutdown_ongoing(qcom_host->hba)) {
 		qcom_host->work_pending = false;
 		spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
 		return;
@@ -228,7 +229,7 @@ int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host)
 	qcom_host->dbg_print_en |= UFS_QCOM_ICE_DEFAULT_DBG_PRINT_EN;
 	if (!ice_workqueue) {
 		ice_workqueue = alloc_workqueue("ice-set-key",
-			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+			WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 0);
 		if (!ice_workqueue) {
 			dev_err(ufs_dev, "%s: workqueue allocation failed.\n",
 			__func__);
@@ -660,6 +661,28 @@ int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host)
 }
 
 /**
+ * ufs_qcom_is_ice_busy() - reports whether an ICE operation is in
+ * progress in workqueue context.
+ * @qcom_host:	Pointer to a UFS QCom internal host structure.
+ *		qcom_host should be a valid pointer.
+ *
+ * Return:	1 if ICE is busy, 0 if it is free.
+ *		-EINVAL in case of error.
+ */
+int ufs_qcom_is_ice_busy(struct ufs_qcom_host *qcom_host)
+{
+	if (!qcom_host) {
+		pr_err("%s: invalid qcom_host\n", __func__);
+		return -EINVAL;
+	}
+
+	if (qcom_host->req_pending)
+		return 1;
+	else
+		return 0;
+}
+
+/**
  * ufs_qcom_ice_suspend() - suspends UFS-ICE interface and ICE device
  * @qcom_host:	Pointer to a UFS QCom internal host structure.
  *		qcom_host, qcom_host->hba and qcom_host->hba->dev should all
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.h b/drivers/scsi/ufs/ufs-qcom-ice.h
index 50a0a81..2b42459 100644
--- a/drivers/scsi/ufs/ufs-qcom-ice.h
+++ b/drivers/scsi/ufs/ufs-qcom-ice.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -84,6 +84,7 @@ int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host);
 int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host);
 int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host, int *ice_status);
 void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host);
+int ufs_qcom_is_ice_busy(struct ufs_qcom_host *qcom_host);
 #else
 inline int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host)
 {
@@ -127,6 +128,10 @@ inline int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host,
 inline void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host)
 {
 }
+static inline int ufs_qcom_is_ice_busy(struct ufs_qcom_host *qcom_host)
+{
+	return 0;
+}
 #endif /* CONFIG_SCSI_UFS_QCOM_ICE */
 
 #endif /* UFS_QCOM_ICE_H_ */
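
The header keeps callers ifdef-free: when CONFIG_SCSI_UFS_QCOM_ICE is off,
ufs_qcom_is_ice_busy() compiles to a static inline that reports "never busy",
so call sites build identically either way:

  #ifdef CONFIG_SCSI_UFS_QCOM_ICE
  int ufs_qcom_is_ice_busy(struct ufs_qcom_host *qcom_host);
  #else
  static inline int ufs_qcom_is_ice_busy(struct ufs_qcom_host *qcom_host)
  {
      return 0;   /* no ICE support compiled in, so never busy */
  }
  #endif
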
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 68f7fc1..4a0cef3 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -244,8 +244,11 @@ static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
 	if (param_size > 8)
 		return -EINVAL;
 
+	pm_runtime_get_sync(hba->dev);
 	ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
 				param_offset, desc_buf, param_size);
+	pm_runtime_put_sync(hba->dev);
+
 	if (ret)
 		return -EINVAL;
 	switch (param_size) {
@@ -591,6 +594,7 @@ static ssize_t _name##_show(struct device *dev,				\
 	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC);		\
 	if (!desc_buf)							\
 		return -ENOMEM;						\
+	pm_runtime_get_sync(hba->dev);					\
 	ret = ufshcd_query_descriptor_retry(hba,			\
 		UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE,	\
 		0, 0, desc_buf, &desc_len);				\
@@ -608,6 +612,7 @@ static ssize_t _name##_show(struct device *dev,				\
 	ret = snprintf(buf, PAGE_SIZE, "%s\n",				\
 		desc_buf + QUERY_DESC_HDR_SIZE);			\
 out:									\
+	pm_runtime_put_sync(hba->dev);					\
 	kfree(desc_buf);						\
 	return ret;							\
 }									\
@@ -638,9 +643,13 @@ static ssize_t _name##_show(struct device *dev,				\
 	struct device_attribute *attr, char *buf)			\
 {									\
 	bool flag;							\
+	int ret;							\
 	struct ufs_hba *hba = dev_get_drvdata(dev);			\
-	if (ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,		\
-		QUERY_FLAG_IDN##_uname, &flag))				\
+	pm_runtime_get_sync(hba->dev);					\
+	ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,	\
+		QUERY_FLAG_IDN##_uname, &flag);				\
+	pm_runtime_put_sync(hba->dev);					\
+	if (ret)							\
 		return -EINVAL;						\
 	return sprintf(buf, "%s\n", flag ? "true" : "false");		\
 }									\
@@ -678,8 +687,12 @@ static ssize_t _name##_show(struct device *dev,				\
 {									\
 	struct ufs_hba *hba = dev_get_drvdata(dev);			\
 	u32 value;							\
-	if (ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,		\
-		QUERY_ATTR_IDN##_uname, 0, 0, &value))			\
+	int ret;							\
+	pm_runtime_get_sync(hba->dev);					\
+	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,	\
+		QUERY_ATTR_IDN##_uname, 0, 0, &value);			\
+	pm_runtime_put_sync(hba->dev);					\
+	if (ret)							\
 		return -EINVAL;						\
 	return sprintf(buf, "0x%08X\n", value);				\
 }									\
@@ -800,10 +813,15 @@ static ssize_t dyn_cap_needed_attribute_show(struct device *dev,
 	struct scsi_device *sdev = to_scsi_device(dev);
 	struct ufs_hba *hba = shost_priv(sdev->host);
 	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
+	int ret;
 
-	if (ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
-		QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value))
+	pm_runtime_get_sync(hba->dev);
+	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+		QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value);
+	pm_runtime_put_sync(hba->dev);
+	if (ret)
 		return -EINVAL;
+
 	return sprintf(buf, "0x%08X\n", value);
 }
 static DEVICE_ATTR_RO(dyn_cap_needed_attribute);
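
Every ufs-sysfs read path now brackets the device query with a runtime-PM
get/put pair so the UFS controller is resumed before the descriptor, flag, or
attribute is read and is free to suspend again afterwards. The shape of each
fix, where idn stands for the per-attribute QUERY_ATTR_IDN constant:

  pm_runtime_get_sync(hba->dev);      /* resume the controller if suspended */
  ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
                          idn, 0, 0, &value);
  pm_runtime_put_sync(hba->dev);      /* drop the usage count again */
  if (ret)
      return -EINVAL;

Without the get/put, a sysfs read issued while the controller is
runtime-suspended could touch powered-down hardware.
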
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index dc642ff..14c3355 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -430,6 +430,8 @@ static struct ufs_dev_fix ufs_fixups[] = {
 		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
 	UFS_FIX(UFS_VENDOR_WDC, UFS_ANY_MODEL,
 		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
+	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
+		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
 	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
 	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
 		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
@@ -1475,7 +1477,8 @@ static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
 	list_for_each_entry(clki, head, list) {
 		if (!IS_ERR_OR_NULL(clki->clk)) {
 			if (scale_up && clki->max_freq) {
-				if (clki->curr_freq == clki->max_freq)
+				if ((clki->curr_freq == clki->max_freq) ||
+				   (!strcmp(clki->name, "core_clk_ice_hw_ctl")))
 					continue;
 
 				ret = clk_set_rate(clki->clk, clki->max_freq);
@@ -1493,7 +1496,8 @@ static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
 				clki->curr_freq = clki->max_freq;
 
 			} else if (!scale_up && clki->min_freq) {
-				if (clki->curr_freq == clki->min_freq)
+				if ((clki->curr_freq == clki->min_freq) ||
+				   (!strcmp(clki->name, "core_clk_ice_hw_ctl")))
 					continue;
 
 				ret = clk_set_rate(clki->clk, clki->min_freq);
@@ -8114,6 +8118,8 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
 	unsigned long flags;
 	int retries = MAX_HOST_RESET_RETRIES;
 
+	ufshcd_enable_irq(hba);
+
 	do {
 		err = ufshcd_detect_device(hba);
 	} while (err && --retries);
@@ -9822,7 +9828,8 @@ static int ufshcd_init_clocks(struct ufs_hba *hba)
 		goto out;
 
 	list_for_each_entry(clki, head, list) {
-		if (!clki->name)
+		if ((!clki->name) ||
+		   (!strcmp(clki->name, "core_clk_ice_hw_ctl")))
 			continue;
 
 		clki->clk = devm_clk_get(dev, clki->name);
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 06615db..b5f7d7f 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -749,7 +749,7 @@
 
 config QTI_RPM_STATS_LOG
 	bool "Qualcomm Technologies RPM Stats Driver"
-	depends on QCOM_RPMH
+	depends on QCOM_RPMH || MSM_RPM_SMD
 	help
 	  This option enables a driver which reads RPM messages from a shared
 	  memory location. These messages provide statistical information about
@@ -819,6 +819,14 @@
 	  This is part of a security feature in QHEE and need to be enabled by
 	  default.
 
+config QCOM_CX_IPEAK
+	bool "Common driver to handle Cx iPeak limitation"
+	help
+	  The Cx iPeak HW module limits the current drawn by various
+	  subsystem blocks on the Cx power rail. Each client sets its bit
+	  in the TCSR register when it is about to cross its own threshold.
+	  If all clients are about to cross their thresholds, the Cx iPeak
+	  HW module raises an interrupt to the cDSP block to throttle cDSP fmax.
 endmenu
 
 config QCOM_HYP_CORE_CTL
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 45ecf84..19d915e 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -96,3 +96,4 @@
 obj-$(CONFIG_ICNSS) += icnss.o
 obj-$(CONFIG_ICNSS_QMI) += icnss_qmi.o wlan_firmware_service_v01.o
 obj-$(CONFIG_RMNET_CTL) += rmnet_ctl/
+obj-$(CONFIG_QCOM_CX_IPEAK) += cx_ipeak.o
diff --git a/drivers/soc/qcom/cdsprm.c b/drivers/soc/qcom/cdsprm.c
index 31e0b27..3766a14 100644
--- a/drivers/soc/qcom/cdsprm.c
+++ b/drivers/soc/qcom/cdsprm.c
@@ -534,7 +534,7 @@ static void process_delayed_rm_request(struct work_struct *work)
 			(curr_timestamp < timestamp)) {
 		if ((timestamp - curr_timestamp) <
 		(gcdsprm.qos_max_ms * SYS_CLK_TICKS_PER_MS))
-			time_ms = (timestamp - curr_timestamp) /
+			time_ms = ((unsigned int)(timestamp - curr_timestamp)) /
 						SYS_CLK_TICKS_PER_MS;
 		else
 			break;
@@ -850,6 +850,9 @@ static int cdsp_get_cur_state(struct thermal_cooling_device *cdev,
 static int cdsp_set_cur_state(struct thermal_cooling_device *cdev,
 				unsigned long state)
 {
+	if (state > CDSP_THERMAL_MAX_STATE)
+		return -EINVAL;
+
 	if (gcdsprm.thermal_cdsp_level == state)
 		return 0;
 
@@ -883,6 +886,9 @@ static int hvx_get_cur_state(struct thermal_cooling_device *cdev,
 static int hvx_set_cur_state(struct thermal_cooling_device *cdev,
 				unsigned long state)
 {
+	if (state > HVX_THERMAL_MAX_STATE)
+		return -EINVAL;
+
 	if (gcdsprm.thermal_hvx_level == state)
 		return 0;
 
diff --git a/drivers/soc/qcom/cx_ipeak.c b/drivers/soc/qcom/cx_ipeak.c
new file mode 100644
index 0000000..543885b
--- /dev/null
+++ b/drivers/soc/qcom/cx_ipeak.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/printk.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include <soc/qcom/cx_ipeak.h>
+
+#define TCSR_CXIP_LM_VOTE_FEATURE_ENABLE_OFFSET		0x10
+
+/* v1 register set */
+#define TCSR_CXIP_LM_VOTE_BYPASS_OFFSET			0x4
+#define TCSR_CXIP_LM_VOTE_CLEAR_OFFSET			0x8
+#define TCSR_CXIP_LM_VOTE_SET_OFFSET			0xC
+#define TCSR_CXIP_LM_TRS_OFFSET				0x24
+
+/* v2 register set */
+#define TCSR_CXIP_LM_VOTE_CLIENTx_BYPASS_OFFSET		0x4
+#define TCSR_CXIP_LM_DANGER_OFFSET			0x24
+
+#define CXIP_CLIENT_OFFSET				0x1000
+#define CXIP_CLIENT10_OFFSET				0x3000
+
+#define CXIP_POLL_TIMEOUT_US (50 * 1000)
+
+struct cx_ipeak_client;
+
+struct cx_ipeak_core_ops {
+	int (*update)(struct cx_ipeak_client *client, bool vote);
+	struct cx_ipeak_client* (*register_client)(int client_id);
+};
+
+static struct cx_ipeak_device {
+	spinlock_t vote_lock;
+	void __iomem *tcsr_vptr;
+	struct cx_ipeak_core_ops *core_ops;
+} device_ipeak;
+
+struct cx_ipeak_client {
+	int vote_count;
+	unsigned int offset;
+	struct cx_ipeak_device *dev;
+};
+
+/**
+ * cx_ipeak_register() - allocate client structure and fill device private and
+ *			offset details.
+ * @dev_node: device node of the client
+ * @client_name: property name of the client
+ *
+ * Allocate client memory and fill the structure with the device private
+ * data and the client's vote bit/offset details.
+ */
+struct cx_ipeak_client *cx_ipeak_register(struct device_node *dev_node,
+		const char *client_name)
+{
+	struct of_phandle_args cx_spec;
+	struct cx_ipeak_client *client = NULL;
+	int ret;
+
+	ret = of_parse_phandle_with_fixed_args(dev_node, client_name,
+			1, 0, &cx_spec);
+	if (ret)
+		return ERR_PTR(-EINVAL);
+
+	if (!of_device_is_available(cx_spec.np))
+		return NULL;
+
+	if (device_ipeak.tcsr_vptr == NULL)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	if (cx_spec.args[0] > 31)
+		return ERR_PTR(-EINVAL);
+
+	if (device_ipeak.core_ops)
+		client = device_ipeak.core_ops->register_client(
+						cx_spec.args[0]);
+	return client;
+}
+EXPORT_SYMBOL(cx_ipeak_register);
+
+static struct cx_ipeak_client *cx_ipeak_register_v1(int client_id)
+{
+	struct cx_ipeak_client *client;
+	unsigned int reg_enable, reg_bypass;
+	void __iomem *vptr = device_ipeak.tcsr_vptr;
+
+	reg_enable = readl_relaxed(device_ipeak.tcsr_vptr +
+			TCSR_CXIP_LM_VOTE_FEATURE_ENABLE_OFFSET);
+	reg_bypass = readl_relaxed(vptr +
+			TCSR_CXIP_LM_VOTE_BYPASS_OFFSET) &
+			BIT(client_id);
+	if (!reg_enable || reg_bypass)
+		return NULL;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return ERR_PTR(-ENOMEM);
+
+	client->offset = BIT(client_id);
+	client->dev = &device_ipeak;
+
+	return client;
+}
+
+static struct cx_ipeak_client *cx_ipeak_register_v2(int client_id)
+{
+	unsigned int reg_bypass, reg_enable;
+	struct cx_ipeak_client *client;
+	unsigned int client_offset = 0;
+	void __iomem *vptr = device_ipeak.tcsr_vptr;
+	int i;
+
+	for (i = 0; i <= client_id; i++)
+		client_offset += CXIP_CLIENT_OFFSET;
+
+	if (client_id >= 10)
+		client_offset += CXIP_CLIENT10_OFFSET;
+
+	reg_enable = readl_relaxed(device_ipeak.tcsr_vptr +
+			TCSR_CXIP_LM_VOTE_FEATURE_ENABLE_OFFSET);
+	reg_bypass = readl_relaxed(vptr + client_offset +
+			TCSR_CXIP_LM_VOTE_CLIENTx_BYPASS_OFFSET) &
+			BIT(0);
+
+	if (!reg_enable || reg_bypass)
+		return NULL;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return ERR_PTR(-ENOMEM);
+
+	client->offset = client_offset;
+	client->dev = &device_ipeak;
+
+	return client;
+}
+
+/**
+ * cx_ipeak_update() - Set/Clear client vote for Cx iPeak limit
+ * manager to throttle cDSP.
+ * @client: client handle.
+ * @vote: True to set the vote and False for reset.
+ *
+ * Receives vote from each client and decides whether to throttle cDSP or not.
+ * This function is a NOP on targets that do not support TCSR Cx iPeak.
+ */
+int cx_ipeak_update(struct cx_ipeak_client *client, bool vote)
+{
+	/* Check for client and device availability and proceed */
+	if (!client)
+		return 0;
+
+	if (!client->dev || !client->dev->core_ops || !client->dev->tcsr_vptr)
+		return -EINVAL;
+
+	return client->dev->core_ops->update(client, vote);
+}
+EXPORT_SYMBOL(cx_ipeak_update);
+
+static int cx_ipeak_update_v1(struct cx_ipeak_client *client, bool vote)
+{
+	unsigned int reg_val;
+	int ret = 0;
+
+	spin_lock(&client->dev->vote_lock);
+
+	if (vote) {
+		if (client->vote_count == 0) {
+			writel_relaxed(client->offset,
+				       client->dev->tcsr_vptr +
+				       TCSR_CXIP_LM_VOTE_SET_OFFSET);
+			/*
+			 * Do a dummy read to give enough time for TRS register
+			 * to become 1 when the last client votes.
+			 */
+			readl_relaxed(client->dev->tcsr_vptr +
+				      TCSR_CXIP_LM_TRS_OFFSET);
+
+			ret = readl_poll_timeout(client->dev->tcsr_vptr +
+						 TCSR_CXIP_LM_TRS_OFFSET,
+						 reg_val, !reg_val, 0,
+						 CXIP_POLL_TIMEOUT_US);
+			if (ret) {
+				writel_relaxed(client->offset,
+					       client->dev->tcsr_vptr +
+					       TCSR_CXIP_LM_VOTE_CLEAR_OFFSET);
+				goto done;
+			}
+		}
+		client->vote_count++;
+	} else {
+		if (client->vote_count > 0) {
+			client->vote_count--;
+			if (client->vote_count == 0) {
+				writel_relaxed(client->offset,
+					       client->dev->tcsr_vptr +
+					       TCSR_CXIP_LM_VOTE_CLEAR_OFFSET);
+			}
+		} else
+			ret = -EINVAL;
+	}
+
+done:
+	spin_unlock(&client->dev->vote_lock);
+	return ret;
+}
+
+static int cx_ipeak_update_v2(struct cx_ipeak_client *client, bool vote)
+{
+	unsigned int reg_val;
+	int ret = 0;
+
+	spin_lock(&client->dev->vote_lock);
+
+	if (vote) {
+		if (client->vote_count == 0) {
+			writel_relaxed(BIT(0),
+				       client->dev->tcsr_vptr +
+				       client->offset);
+
+			ret = readl_poll_timeout(client->dev->tcsr_vptr +
+						 TCSR_CXIP_LM_DANGER_OFFSET,
+						 reg_val, !reg_val, 0,
+						 CXIP_POLL_TIMEOUT_US);
+			if (ret) {
+				writel_relaxed(0,
+					       client->dev->tcsr_vptr +
+					       client->offset);
+				goto done;
+			}
+		}
+		client->vote_count++;
+	} else {
+		if (client->vote_count > 0) {
+			client->vote_count--;
+			if (client->vote_count == 0) {
+				writel_relaxed(0,
+					       client->dev->tcsr_vptr +
+					       client->offset);
+			}
+		} else {
+			ret = -EINVAL;
+		}
+	}
+
+done:
+	spin_unlock(&client->dev->vote_lock);
+	return ret;
+}
+
+/**
+ * cx_ipeak_unregister() - unregister client
+ * @client: client address to free
+ *
+ * Free the client memory
+ */
+void cx_ipeak_unregister(struct cx_ipeak_client *client)
+{
+	kfree(client);
+}
+EXPORT_SYMBOL(cx_ipeak_unregister);
+
+struct cx_ipeak_core_ops core_ops_v1 = {
+	.update = cx_ipeak_update_v1,
+	.register_client = cx_ipeak_register_v1,
+};
+
+struct cx_ipeak_core_ops core_ops_v2 = {
+	.update = cx_ipeak_update_v2,
+	.register_client = cx_ipeak_register_v2,
+};
+
+static int cx_ipeak_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	device_ipeak.tcsr_vptr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(device_ipeak.tcsr_vptr))
+		return PTR_ERR(device_ipeak.tcsr_vptr);
+
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,cx-ipeak-v1"))
+		device_ipeak.core_ops = &core_ops_v1;
+	else if (of_device_is_compatible(pdev->dev.of_node,
+					 "qcom,cx-ipeak-v2"))
+		device_ipeak.core_ops = &core_ops_v2;
+	else
+		device_ipeak.core_ops = NULL;
+
+	spin_lock_init(&device_ipeak.vote_lock);
+	return 0;
+}
+
+static const struct of_device_id cx_ipeak_match_table[] = {
+	{ .compatible = "qcom,cx-ipeak-v1"},
+	{ .compatible = "qcom,cx-ipeak-v2"},
+	{}
+};
+
+static struct platform_driver cx_ipeak_platform_driver = {
+	.probe = cx_ipeak_probe,
+	.driver = {
+		.name  = "cx_ipeak",
+		.of_match_table = cx_ipeak_match_table,
+		.suppress_bind_attrs = true,
+	}
+};
+
+static int __init cx_ipeak_init(void)
+{
+	return platform_driver_register(&cx_ipeak_platform_driver);
+}
+
+arch_initcall(cx_ipeak_init);
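
Consumer-side usage of the new cx_ipeak API, as a hedged sketch. The property
name and DT layout are assumptions consistent with the
of_parse_phandle_with_fixed_args(..., 1, 0, ...) call above, i.e. a phandle
plus one cell carrying the client id:

  /* hypothetical client DT:  qcom,cx-ipeak = <&cx_ipeak_lm 5>; */
  struct cx_ipeak_client *client;

  client = cx_ipeak_register(pdev->dev.of_node, "qcom,cx-ipeak");
  if (IS_ERR(client))
      return PTR_ERR(client);     /* may be -EPROBE_DEFER before probe */

  /* a NULL handle means the feature is disabled; updates become NOPs */
  ret = cx_ipeak_update(client, true);   /* vote; may poll danger/TRS */
  if (!ret) {
      /* ... run the high-current work here ... */
      cx_ipeak_update(client, false);    /* clear the vote */
  }

  cx_ipeak_unregister(client);
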
diff --git a/drivers/soc/qcom/dfc_qmi.c b/drivers/soc/qcom/dfc_qmi.c
index f9c54b6..1054532 100644
--- a/drivers/soc/qcom/dfc_qmi.c
+++ b/drivers/soc/qcom/dfc_qmi.c
@@ -916,18 +916,11 @@ int dfc_bearer_flow_ctl(struct net_device *dev,
 	enable = bearer->grant_size ? true : false;
 
 	qmi_rmnet_flow_control(dev, bearer->mq_idx, enable);
-	trace_dfc_qmi_tc(dev->name, bearer->bearer_id,
-			 bearer->grant_size,
-			 0, bearer->mq_idx, enable);
 
 	/* Do not flow disable tcp ack q in tcp bidir */
 	if (bearer->ack_mq_idx != INVALID_MQ &&
-	    (enable || !bearer->tcp_bidir)) {
+	    (enable || !bearer->tcp_bidir))
 		qmi_rmnet_flow_control(dev, bearer->ack_mq_idx, enable);
-		trace_dfc_qmi_tc(dev->name, bearer->bearer_id,
-				 bearer->grant_size,
-				 0, bearer->ack_mq_idx, enable);
-	}
 
 	if (!enable && bearer->ack_req)
 		dfc_send_ack(dev, bearer->bearer_id,
@@ -968,6 +961,9 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
 	bool action = false;
 
 	itm = qmi_rmnet_get_bearer_map(qos, fc_info->bearer_id);
+	if (!itm)
+		itm = qmi_rmnet_get_bearer_noref(qos, fc_info->bearer_id);
+
 	if (itm) {
 		/* The RAT switch flag indicates the start and end of
 		 * the switch. Ignore indications in between.
diff --git a/drivers/soc/qcom/eud.c b/drivers/soc/qcom/eud.c
index a8e21ab..1204ebc 100644
--- a/drivers/soc/qcom/eud.c
+++ b/drivers/soc/qcom/eud.c
@@ -58,6 +58,8 @@
 #define UART_ID			0x90
 #define MAX_FIFO_SIZE		14
 
+#define EUD_TCSR_ENABLE_BIT	BIT(0)
+
 struct eud_chip {
 	struct device			*dev;
 	int				eud_irq;
@@ -508,6 +510,8 @@ static int msm_eud_probe(struct platform_device *pdev)
 	struct uart_port *port;
 	struct resource *res;
 	int ret;
+	bool eud_tcsr_check_state;
+	phys_addr_t eud_tcsr_check;
 
 	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
 	if (!chip) {
@@ -600,6 +604,32 @@ static int msm_eud_probe(struct platform_device *pdev)
 	port->irq = chip->eud_irq;
 	port->ops = &eud_uart_ops;
 
+	/*
+	 * Before enabling EUD, check for TCSR register
+	 * and if present, enable it.
+	 */
+	eud_tcsr_check_state = of_property_read_bool(
+		pdev->dev.of_node, "qcom,eud-tcsr-check-enable");
+
+	if (eud_tcsr_check_state) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					"eud_tcsr_check_reg");
+		if (res) {
+			eud_tcsr_check = res->start;
+			ret = scm_io_write(eud_tcsr_check,
+					EUD_TCSR_ENABLE_BIT);
+			if (ret) {
+				dev_err(&pdev->dev,
+				"TCSR scm_io_write failed with rc:%d\n", ret);
+				goto error;
+			}
+		} else {
+			dev_err(chip->dev,
+				"Failed to get resource for tcsr check!\n");
+			goto error;
+		}
+	}
+
 	ret = uart_add_one_port(&eud_uart_driver, port);
 	if (!ret) {
 		dev_err(chip->dev, "failed to add uart port!\n");
diff --git a/drivers/soc/qcom/glink_probe.c b/drivers/soc/qcom/glink_probe.c
index 622e642..1d0e7a1 100644
--- a/drivers/soc/qcom/glink_probe.c
+++ b/drivers/soc/qcom/glink_probe.c
@@ -16,6 +16,7 @@
 
 #define GLINK_PROBE_LOG_PAGE_CNT 4
 static void *glink_ilc;
+static DEFINE_MUTEX(ssr_lock);
 
 #define GLINK_INFO(x, ...)						       \
 do {									       \
@@ -117,14 +118,15 @@ static int glink_ssr_ssr_cb(struct notifier_block *this,
 {
 	struct glink_ssr_nb *nb = container_of(this, struct glink_ssr_nb, nb);
 	struct glink_ssr *ssr = nb->ssr;
-	struct device *dev = ssr->dev;
+	struct device *dev;
 	struct do_cleanup_msg msg;
 	int ret;
 
-	if (!dev || !ssr->ept)
-		return NOTIFY_DONE;
-
 	kref_get(&ssr->refcount);
+	mutex_lock(&ssr_lock);
+	dev = ssr->dev;
+	if (!dev || !ssr->ept)
+		goto out;
 
 	if (code == SUBSYS_AFTER_SHUTDOWN) {
 		ssr->seq_num++;
@@ -144,14 +146,15 @@ static int glink_ssr_ssr_cb(struct notifier_block *this,
 		if (ret) {
 			GLINK_ERR(dev, "fail to send do cleanup to %s %d\n",
 				  nb->ssr_label, ret);
-			kref_put(&ssr->refcount, glink_ssr_release);
-			return NOTIFY_DONE;
+			goto out;
 		}
 
 		ret = wait_for_completion_timeout(&ssr->completion, HZ);
 		if (!ret)
 			GLINK_ERR(dev, "timeout waiting for cleanup resp\n");
 	}
+out:
+	mutex_unlock(&ssr_lock);
 	kref_put(&ssr->refcount, glink_ssr_release);
 	return NOTIFY_DONE;
 }
@@ -264,10 +267,12 @@ static void glink_ssr_remove(struct rpmsg_device *rpdev)
 {
 	struct glink_ssr *ssr = dev_get_drvdata(&rpdev->dev);
 
+	mutex_lock(&ssr_lock);
 	ssr->dev = NULL;
 	ssr->ept = NULL;
-	dev_set_drvdata(&rpdev->dev, NULL);
+	mutex_unlock(&ssr_lock);
 
+	dev_set_drvdata(&rpdev->dev, NULL);
 	schedule_work(&ssr->unreg_work);
 }
 
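
The glink_probe changes close a teardown race: the SSR notifier used to read
ssr->dev and ssr->ept without synchronization against glink_ssr_remove(),
which NULLs them. Now both sides take the global ssr_lock, and the callback
pins the object with a kref before touching it. The resulting lifetime
pattern:

  kref_get(&ssr->refcount);       /* keep ssr alive across the callback */
  mutex_lock(&ssr_lock);
  dev = ssr->dev;                 /* re-read under the lock */
  if (!dev || !ssr->ept)
      goto out;                   /* remove() already tore us down */
  /* ... send the do_cleanup message and wait for the response ... */
  out:
  mutex_unlock(&ssr_lock);
  kref_put(&ssr->refcount, glink_ssr_release);

Funneling every exit through the out label also replaces the duplicated
kref_put()/return pairs the old error paths carried.
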
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 88ce872..a889bb2 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -44,7 +44,6 @@
 #include <soc/qcom/service-notifier.h>
 #include <soc/qcom/socinfo.h>
 #include <soc/qcom/ramdump.h>
-#include <soc/qcom/scm.h>
 #include "icnss_private.h"
 #include "icnss_qmi.h"
 
@@ -95,7 +94,6 @@ static struct icnss_clk_info icnss_clk_info[] = {
 };
 
 #define ICNSS_CLK_INFO_SIZE		ARRAY_SIZE(icnss_clk_info)
-#define ICNSS_UTIL_GET_SEC_DUMP_STATE  0x10
 
 enum icnss_pdr_cause_index {
 	ICNSS_FW_CRASH,
@@ -1419,26 +1417,6 @@ static void icnss_update_state_send_modem_shutdown(struct icnss_priv *priv,
 	}
 }
 
-static bool icnss_is_mem_dump_allowed(void)
-{
-	struct scm_desc desc = {0};
-	int ret = 0;
-
-	desc.args[0] = 0;
-	desc.arginfo = 0;
-	ret = scm_call2(
-		SCM_SIP_FNID(SCM_SVC_UTIL, ICNSS_UTIL_GET_SEC_DUMP_STATE),
-		&desc);
-
-	if (ret) {
-		icnss_pr_err("SCM DUMP_STATE call failed\n");
-		return false;
-	}
-
-	icnss_pr_dbg("Dump State: %llu\n", desc.ret[0]);
-	return (desc.ret[0] == 1);
-}
-
 static int icnss_modem_notifier_nb(struct notifier_block *nb,
 				  unsigned long code,
 				  void *data)
@@ -1453,10 +1431,9 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
 
 	if (code == SUBSYS_AFTER_SHUTDOWN &&
 	    notif->crashed == CRASH_STATUS_ERR_FATAL) {
-		if (icnss_is_mem_dump_allowed()) {
-			icnss_pr_info("Collecting msa0 segment dump\n");
-			icnss_msa0_ramdump(priv);
-		}
+		icnss_pr_info("Collecting msa0 segment dump\n");
+		icnss_msa0_ramdump(priv);
+
 		return NOTIFY_OK;
 	}
 
diff --git a/drivers/soc/qcom/memory_dump_v2.c b/drivers/soc/qcom/memory_dump_v2.c
index ab63799..1f5cc0b 100644
--- a/drivers/soc/qcom/memory_dump_v2.c
+++ b/drivers/soc/qcom/memory_dump_v2.c
@@ -81,13 +81,26 @@ static struct msm_memory_dump memdump;
 static int update_reg_dump_table(struct device *dev, u32 core_reg_num)
 {
 	int ret = 0;
-	u32 system_regs_input_index = SYSTEM_REGS_INPUT_INDEX +
-			core_reg_num * 2;
-	u32 regdump_output_byte_offset = (system_regs_input_index + 1)
-			* sizeof(uint32_t);
+	u32 system_regs_input_index;
+	u32 regdump_output_byte_offset;
 	struct reg_dump_data *p;
-	struct cpuss_dump_data *cpudata = dev_get_drvdata(dev);
+	struct cpuss_dump_data *cpudata;
 
+	if (core_reg_num * 2 < core_reg_num) {
+		ret = -EINVAL;
+		goto err1;
+	}
+	system_regs_input_index = SYSTEM_REGS_INPUT_INDEX +
+			core_reg_num * 2;
+	if (system_regs_input_index < SYSTEM_REGS_INPUT_INDEX ||
+			system_regs_input_index + 1 < system_regs_input_index) {
+		ret = -EINVAL;
+		goto err1;
+	}
+	regdump_output_byte_offset = (system_regs_input_index + 1)
+			* sizeof(uint32_t);
+
+	cpudata = dev_get_drvdata(dev);
 	mutex_lock(&cpudata->mutex);
 
 	if (regdump_output_byte_offset >= cpudata->size ||
@@ -115,6 +128,7 @@ static int update_reg_dump_table(struct device *dev, u32 core_reg_num)
 
 err:
 	mutex_unlock(&cpudata->mutex);
+err1:
 	return ret;
 }
 
@@ -478,7 +492,7 @@ static struct msm_dump_table *msm_dump_get_table(enum msm_dump_table_ids id)
 {
 	struct msm_dump_table *table = memdump.table;
 	int i;
-
+	unsigned long offset;
 	if (!table) {
 		pr_err("mem dump base table does not exist\n");
 		return ERR_PTR(-EINVAL);
@@ -493,8 +507,9 @@ static struct msm_dump_table *msm_dump_get_table(enum msm_dump_table_ids id)
 		return ERR_PTR(-EINVAL);
 	}
 
+	offset = table->entries[i].addr - memdump.table_phys;
 	/* Get the apps table pointer */
-	table = phys_to_virt(table->entries[i].addr);
+	table = (void *)memdump.table + offset;
 
 	return table;
 }
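
update_reg_dump_table() now validates its arithmetic before using it:
core_reg_num comes from outside the function, and since unsigned wraparound
is well defined in C, the tests `a * 2 < a` and `a + 1 < a` are reliable
overflow detectors. A standalone sketch of the checks (newer kernels also
provide check_add_overflow()/check_mul_overflow() for this):

  static bool regdump_index_overflows(u32 base, u32 n, u32 *index)
  {
      u32 idx;

      if (n * 2 < n)                      /* n * 2 wrapped past U32_MAX */
          return true;
      idx = base + n * 2;
      if (idx < base || idx + 1 < idx)    /* addition wrapped */
          return true;
      *index = idx;
      return false;
  }
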
diff --git a/drivers/soc/qcom/minidump_log.c b/drivers/soc/qcom/minidump_log.c
index cf3ee01..a3e2070 100644
--- a/drivers/soc/qcom/minidump_log.c
+++ b/drivers/soc/qcom/minidump_log.c
@@ -120,7 +120,7 @@ void dump_stack_minidump(u64 sp)
 	if (is_idle_task(current))
 		return;
 
-	if (sp < KIMAGE_VADDR || sp > -256UL)
+	if (sp < MODULES_END || sp > -256UL)
 		sp = current_stack_pointer;
 
 	/*
diff --git a/drivers/soc/qcom/msm_bus/Makefile b/drivers/soc/qcom/msm_bus/Makefile
index a764258..0c88112 100644
--- a/drivers/soc/qcom/msm_bus/Makefile
+++ b/drivers/soc/qcom/msm_bus/Makefile
@@ -10,10 +10,10 @@
 	obj-y += msm_bus_fabric_rpmh.o msm_bus_arb_rpmh.o msm_bus_rules.o \
 		msm_bus_bimc_rpmh.o msm_bus_noc_rpmh.o msm_bus_proxy_client.o
 	obj-$(CONFIG_OF) += msm_bus_of_rpmh.o
+	obj-$(CONFIG_DEBUG_FS) += msm_bus_dbg_rpmh.o
 else
 	obj-y += msm_bus_fabric_adhoc.o msm_bus_arb_adhoc.o msm_bus_rules.o \
-		msm_bus_bimc_adhoc.o msm_bus_noc_adhoc.o
+		msm_bus_bimc_adhoc.o msm_bus_noc_adhoc.o msm_bus_qnoc_adhoc.o
 	obj-$(CONFIG_OF) += msm_bus_of_adhoc.o
+	obj-$(CONFIG_DEBUG_FS) += msm_bus_dbg.o
 endif
-
-obj-$(CONFIG_DEBUG_FS) += msm_bus_dbg.o
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h b/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h
index 6fab4e7..866abfbd 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h
+++ b/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016, 2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
@@ -60,6 +60,23 @@ struct msm_bus_fab_device_type {
 	bool bypass_qos_prg;
 };
 
+struct msm_bus_noc_limiter {
+	uint32_t bw;
+	uint32_t sat;
+};
+
+struct msm_bus_noc_regulator {
+	uint32_t low_prio;
+	uint32_t hi_prio;
+	uint32_t bw;
+	uint32_t sat;
+};
+
+struct msm_bus_noc_regulator_mode {
+	uint32_t read;
+	uint32_t write;
+};
+
 struct qos_params_type {
 	int mode;
 	unsigned int prio_lvl;
@@ -72,6 +89,12 @@ struct qos_params_type {
 	unsigned int gp;
 	unsigned int thmp;
 	unsigned int ws;
+	unsigned int prio_dflt;
+	struct msm_bus_noc_limiter limiter;
+	bool limiter_en;
+	struct msm_bus_noc_regulator reg;
+	struct msm_bus_noc_regulator_mode reg_mode;
+	bool urg_fwd_en;
 	u64 bw_buffer;
 };
 
@@ -151,6 +174,7 @@ extern struct msm_bus_device_node_registration
 extern void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops);
 extern int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev);
 extern int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev);
+extern int msm_bus_qnoc_set_ops(struct msm_bus_node_device_type *bus_dev);
 extern int msm_bus_of_get_static_rules(struct platform_device *pdev,
 					struct bus_rule_type **static_rule);
 extern int msm_rules_update_path(struct list_head *input_list,
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c
index 96d82be..6c72d93 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016, 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "AXI: BIMC: %s(): " fmt, __func__
@@ -172,46 +172,46 @@ enum bimc_m_bke_thresh_low {
 #define M_BKE_HEALTH_0_CONFIG_ADDR(b, n) \
 	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000340)
 enum bimc_m_bke_health_0 {
-	M_BKE_HEALTH_0_CONFIG_RMSK			= 0x80000303,
+	M_BKE_HEALTH_0_CONFIG_RMSK			= 0x80000707,
 	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK		= 0x80000000,
 	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT		= 0x1f,
-	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK		= 0x300,
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK		= 0x700,
 	M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT		= 0x8,
-	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK		= 0x7,
 	M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT		= 0x0,
 };
 
 #define M_BKE_HEALTH_1_CONFIG_ADDR(b, n) \
 	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000344)
 enum bimc_m_bke_health_1 {
-	M_BKE_HEALTH_1_CONFIG_RMSK			= 0x80000303,
+	M_BKE_HEALTH_1_CONFIG_RMSK			= 0x80000707,
 	M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_BMSK		= 0x80000000,
 	M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_SHFT		= 0x1f,
-	M_BKE_HEALTH_1_CONFIG_AREQPRIO_BMSK		= 0x300,
+	M_BKE_HEALTH_1_CONFIG_AREQPRIO_BMSK		= 0x700,
 	M_BKE_HEALTH_1_CONFIG_AREQPRIO_SHFT		= 0x8,
-	M_BKE_HEALTH_1_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_1_CONFIG_PRIOLVL_BMSK		= 0x7,
 	M_BKE_HEALTH_1_CONFIG_PRIOLVL_SHFT		= 0x0,
 };
 
 #define M_BKE_HEALTH_2_CONFIG_ADDR(b, n) \
 	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000348)
 enum bimc_m_bke_health_2 {
-	M_BKE_HEALTH_2_CONFIG_RMSK			= 0x80000303,
+	M_BKE_HEALTH_2_CONFIG_RMSK			= 0x80000707,
 	M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_BMSK		= 0x80000000,
 	M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_SHFT		= 0x1f,
-	M_BKE_HEALTH_2_CONFIG_AREQPRIO_BMSK		= 0x300,
+	M_BKE_HEALTH_2_CONFIG_AREQPRIO_BMSK		= 0x700,
 	M_BKE_HEALTH_2_CONFIG_AREQPRIO_SHFT		= 0x8,
-	M_BKE_HEALTH_2_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_2_CONFIG_PRIOLVL_BMSK		= 0x7,
 	M_BKE_HEALTH_2_CONFIG_PRIOLVL_SHFT		= 0x0,
 };
 
 #define M_BKE_HEALTH_3_CONFIG_ADDR(b, n) \
 	(M_REG_BASE(b) + (0x4000 * (n)) + 0x0000034c)
 enum bimc_m_bke_health_3 {
-	M_BKE_HEALTH_3_CONFIG_RMSK			= 0x303,
-	M_BKE_HEALTH_3_CONFIG_AREQPRIO_BMSK	= 0x300,
+	M_BKE_HEALTH_3_CONFIG_RMSK			= 0x707,
+	M_BKE_HEALTH_3_CONFIG_AREQPRIO_BMSK	= 0x700,
 	M_BKE_HEALTH_3_CONFIG_AREQPRIO_SHFT	= 0x8,
-	M_BKE_HEALTH_3_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_3_CONFIG_PRIOLVL_BMSK		= 0x7,
 	M_BKE_HEALTH_3_CONFIG_PRIOLVL_SHFT		= 0x0,
 };
 
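
The BIMC change widens the PRIOLVL and AREQPRIO fields from two bits
(0x3/0x300) to three (0x7/0x700); only the *_BMSK constants move because
writers compose fields through the mask/shift convention. A sketch of a
read-modify-write under that convention (not the driver's actual helper):

  static void set_field(void __iomem *addr, u32 bmsk, u32 shft, u32 val)
  {
      u32 reg = readl_relaxed(addr);

      reg &= ~bmsk;                   /* clear the field */
      reg |= (val << shft) & bmsk;    /* insert, clipped to field width */
      writel_relaxed(reg, addr);
  }

With the wider mask, priority values 4-7 now survive the `& bmsk` clip
instead of being silently truncated to two bits.
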
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_core.h b/drivers/soc/qcom/msm_bus/msm_bus_core.h
index 4084d86..e048c8b 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_core.h
+++ b/drivers/soc/qcom/msm_bus/msm_bus_core.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2017, 2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _ARCH_ARM_MACH_MSM_BUS_CORE_H
@@ -47,6 +47,7 @@ enum msm_bus_hw_sel {
 	MSM_BUS_RPM = 0,
 	MSM_BUS_NOC,
 	MSM_BUS_BIMC,
+	MSM_BUS_QNOC,
 };
 
 struct msm_bus_arb_ops {
@@ -329,6 +330,8 @@ int msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata);
 int msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
 						u64 ab, u64 ib);
 void msm_bus_dbg_remove_client(const struct msm_bus_client_handle *pdata);
+int msm_bus_dbg_add_bcm(struct msm_bus_node_device_type *cur_bcm);
+void msm_bus_dbg_remove_bcm(struct msm_bus_node_device_type *cur_bcm);
 
 #else
 static inline void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata,
@@ -357,6 +360,17 @@ msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata)
 {
 	return 0;
 }
+
+static inline int
+msm_bus_dbg_add_bcm(struct msm_bus_node_device_type *cur_bcm)
+{
+	return 0;
+}
+
+static inline void
+msm_bus_dbg_remove_bcm(struct msm_bus_node_device_type *cur_bcm)
+{
+}
 #endif
 
 #ifdef CONFIG_CORESIGHT
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg_rpmh.c
new file mode 100644
index 0000000..efe9d23
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg_rpmh.c
@@ -0,0 +1,1070 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/rtmutex.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/hrtimer.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+#include "msm_bus_rpmh.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_msm_bus.h>
+
+#define MAX_BUFF_SIZE 4096
+#define FILL_LIMIT 128
+
+static struct dentry *clients;
+static struct dentry *dir;
+static DEFINE_MUTEX(msm_bus_dbg_fablist_lock);
+static DEFINE_RT_MUTEX(msm_bus_dbg_cllist_lock);
+static struct msm_bus_dbg_state {
+	uint32_t cl;
+	uint8_t enable;
+	uint8_t current_index;
+} clstate;
+
+struct msm_bus_cldata {
+	const struct msm_bus_scale_pdata *pdata;
+	const struct msm_bus_client_handle *handle;
+	int index;
+	uint32_t clid;
+	int size;
+	int vote_count;
+	struct dentry *file;
+	struct list_head list;
+	char buffer[MAX_BUFF_SIZE];
+};
+
+struct msm_bus_fab_list {
+	const char *name;
+	int size;
+	struct dentry *file;
+	struct list_head list;
+	char buffer[MAX_BUFF_SIZE];
+};
+
+static char *rules_buf;
+static struct msm_bus_cldata *dbg_cldata1;
+static struct msm_bus_cldata *dbg_cldata2;
+static struct msm_bus_fab_list *dbg_fablist;
+static char *dbg_buf;
+
+static LIST_HEAD(fabdata_list);
+static LIST_HEAD(cl_list);
+static LIST_HEAD(bcm_list);
+
+/*
+ * The following structures and functions are used for
+ * the test-client which can be created at run-time.
+ */
+
+static struct msm_bus_vectors init_vectors[1];
+static struct msm_bus_vectors current_vectors[1];
+static struct msm_bus_vectors requested_vectors[1];
+
+static struct msm_bus_paths shell_client_usecases[] = {
+	{
+		.num_paths = ARRAY_SIZE(init_vectors),
+		.vectors = init_vectors,
+	},
+	{
+		.num_paths = ARRAY_SIZE(current_vectors),
+		.vectors = current_vectors,
+	},
+	{
+		.num_paths = ARRAY_SIZE(requested_vectors),
+		.vectors = requested_vectors,
+	},
+};
+
+static struct msm_bus_scale_pdata shell_client = {
+	.usecase = shell_client_usecases,
+	.num_usecases = ARRAY_SIZE(shell_client_usecases),
+	.name = "test-client",
+};
+
+static void msm_bus_dbg_init_vectors(void)
+{
+	init_vectors[0].src = -1;
+	init_vectors[0].dst = -1;
+	init_vectors[0].ab = 0;
+	init_vectors[0].ib = 0;
+	current_vectors[0].src = -1;
+	current_vectors[0].dst = -1;
+	current_vectors[0].ab = 0;
+	current_vectors[0].ib = 0;
+	requested_vectors[0].src = -1;
+	requested_vectors[0].dst = -1;
+	requested_vectors[0].ab = 0;
+	requested_vectors[0].ib = 0;
+	clstate.enable = 0;
+	clstate.current_index = 0;
+}
+
+static int msm_bus_dbg_update_cl_request(uint32_t cl)
+{
+	int ret = 0;
+
+	if (clstate.current_index < 2)
+		clstate.current_index = 2;
+	else {
+		clstate.current_index = 1;
+		current_vectors[0].ab = requested_vectors[0].ab;
+		current_vectors[0].ib = requested_vectors[0].ib;
+	}
+
+	if (clstate.enable) {
+		MSM_BUS_DBG("Updating request for shell client, index: %d\n",
+			clstate.current_index);
+		ret = msm_bus_scale_client_update_request(clstate.cl,
+			clstate.current_index);
+	} else
+		MSM_BUS_DBG("Enable bit not set. Skipping update request\n");
+
+	return ret;
+}
+
+static void msm_bus_dbg_unregister_client(uint32_t cl)
+{
+	MSM_BUS_DBG("Unregistering shell client\n");
+	msm_bus_scale_unregister_client(clstate.cl);
+	clstate.cl = 0;
+}
+
+static uint32_t msm_bus_dbg_register_client(void)
+{
+	int ret = 0;
+
+	if (init_vectors[0].src != requested_vectors[0].src) {
+		MSM_BUS_DBG("Shell client master changed. Unregistering\n");
+		msm_bus_dbg_unregister_client(clstate.cl);
+	}
+	if (init_vectors[0].dst != requested_vectors[0].dst) {
+		MSM_BUS_DBG("Shell client slave changed. Unregistering\n");
+		msm_bus_dbg_unregister_client(clstate.cl);
+	}
+
+	current_vectors[0].src = init_vectors[0].src;
+	requested_vectors[0].src = init_vectors[0].src;
+	current_vectors[0].dst = init_vectors[0].dst;
+	requested_vectors[0].dst = init_vectors[0].dst;
+
+	if (!clstate.enable) {
+		MSM_BUS_DBG("Enable bit not set, skipping registration: cl %d\n"
+			, clstate.cl);
+		return 0;
+	}
+
+	if (clstate.cl) {
+		MSM_BUS_DBG("Client  registered, skipping registration\n");
+		return clstate.cl;
+	}
+
+	MSM_BUS_DBG("Registering shell client\n");
+	ret = msm_bus_scale_register_client(&shell_client);
+	return ret;
+}
+
+static int msm_bus_dbg_mas_get(void  *data, u64 *val)
+{
+	*val = init_vectors[0].src;
+	MSM_BUS_DBG("Get master: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_mas_set(void  *data, u64 val)
+{
+	init_vectors[0].src = val;
+	MSM_BUS_DBG("Set master: %llu\n", val);
+	clstate.cl = msm_bus_dbg_register_client();
+	return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(shell_client_mas_fops, msm_bus_dbg_mas_get,
+	msm_bus_dbg_mas_set, "%llu\n");
+
+static int msm_bus_dbg_slv_get(void  *data, u64 *val)
+{
+	*val = init_vectors[0].dst;
+	MSM_BUS_DBG("Get slave: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_slv_set(void  *data, u64 val)
+{
+	init_vectors[0].dst = val;
+	MSM_BUS_DBG("Set slave: %llu\n", val);
+	clstate.cl = msm_bus_dbg_register_client();
+	return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(shell_client_slv_fops, msm_bus_dbg_slv_get,
+	msm_bus_dbg_slv_set, "%llu\n");
+
+static int msm_bus_dbg_ab_get(void  *data, u64 *val)
+{
+	*val = requested_vectors[0].ab;
+	MSM_BUS_DBG("Get ab: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_ab_set(void  *data, u64 val)
+{
+	requested_vectors[0].ab = val;
+	MSM_BUS_DBG("Set ab: %llu\n", val);
+	return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(shell_client_ab_fops, msm_bus_dbg_ab_get,
+	msm_bus_dbg_ab_set, "%llu\n");
+
+static int msm_bus_dbg_ib_get(void *data, u64 *val)
+{
+	*val = requested_vectors[0].ib;
+	MSM_BUS_DBG("Get ib: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_ib_set(void *data, u64 val)
+{
+	requested_vectors[0].ib = val;
+	MSM_BUS_DBG("Set ib: %llu\n", val);
+	return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(shell_client_ib_fops, msm_bus_dbg_ib_get,
+	msm_bus_dbg_ib_set, "%llu\n");
+
+static int msm_bus_dbg_en_get(void *data, u64 *val)
+{
+	*val = clstate.enable;
+	MSM_BUS_DBG("Get enable: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_en_set(void *data, u64 val)
+{
+	int ret = 0;
+
+	clstate.enable = val;
+	if (clstate.enable) {
+		if (!clstate.cl) {
+			MSM_BUS_DBG("client: %u\n", clstate.cl);
+			clstate.cl = msm_bus_dbg_register_client();
+			if (clstate.cl)
+				ret = msm_bus_dbg_update_cl_request(clstate.cl);
+		} else {
+			MSM_BUS_DBG("update request for cl: %u\n", clstate.cl);
+			ret = msm_bus_dbg_update_cl_request(clstate.cl);
+		}
+	}
+
+	MSM_BUS_DBG("Set enable: %llu\n", val);
+	return ret;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(shell_client_en_fops, msm_bus_dbg_en_get,
+	msm_bus_dbg_en_set, "%llu\n");
+
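The five shell-client attributes above (mas/slv/ab/ib/en) all use the stock debugfs pattern for a u64 knob: a get/set pair wrapped by DEFINE_DEBUGFS_ATTRIBUTE() and exposed through debugfs_create_file(). A minimal sketch of the same pattern, with hypothetical names:

    #include <linux/debugfs.h>

    static u64 demo_value;              /* hypothetical backing store */

    static int demo_get(void *data, u64 *val)
    {
            *val = demo_value;
            return 0;
    }

    static int demo_set(void *data, u64 val)
    {
            demo_value = val;
            return 0;
    }
    DEFINE_DEBUGFS_ATTRIBUTE(demo_fops, demo_get, demo_set, "%llu\n");

    /* in an init path, under a parent dentry the caller owns: */
    debugfs_create_file("demo", 0644, parent, NULL, &demo_fops);

The getter and setter receive the void *data pointer that was handed to debugfs_create_file(), which is how a single fops pair can serve many files.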
+/*
+ * The following functions are used for viewing the client data
+ * and changing the client request at run-time.
+ */
+
+static ssize_t client_data_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	int bsize = 0;
+	uint32_t cl = (uint32_t)(uintptr_t)file->private_data;
+	struct msm_bus_cldata *cldata = NULL;
+	const struct msm_bus_client_handle *handle = file->private_data;
+	int found = 0;
+	ssize_t ret;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if ((cldata->clid == cl) ||
+			(cldata->handle && (cldata->handle == handle))) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found) {
+		rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+		return 0;
+	}
+
+	bsize = cldata->size;
+	ret = simple_read_from_buffer(buf, count, ppos,
+		cldata->buffer, bsize);
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	return ret;
+}
+
+static const struct file_operations client_data_fops = {
+	.open		= simple_open,
+	.read		= client_data_read,
+};
+
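client_data_read() above leans on two library helpers: simple_open() copies inode->i_private into file->private_data, and simple_read_from_buffer() does the offset and count bookkeeping for reads out of a kernel buffer. The skeleton of such a read-only debugfs file, as a sketch with hypothetical names:

    #include <linux/debugfs.h>
    #include <linux/fs.h>
    #include <linux/string.h>

    static char demo_buf[] = "hello\n";   /* hypothetical backing buffer */

    static ssize_t demo_read(struct file *file, char __user *ubuf,
                             size_t count, loff_t *ppos)
    {
            return simple_read_from_buffer(ubuf, count, ppos, demo_buf,
                                           strlen(demo_buf));
    }

    static const struct file_operations demo_ro_fops = {
            .open = simple_open,
            .read = demo_read,
    };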
+static struct dentry *msm_bus_dbg_create(const char *name, mode_t mode,
+	struct dentry *dent, uint32_t clid)
+{
+	if (dent == NULL) {
+		MSM_BUS_DBG("debugfs not ready yet\n");
+		return NULL;
+	}
+	return debugfs_create_file(name, mode, dent, (void *)(uintptr_t)clid,
+		&client_data_fops);
+}
+
+int msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata)
+{
+	dbg_cldata1 = kzalloc(sizeof(struct msm_bus_cldata), GFP_KERNEL);
+	if (!dbg_cldata1) {
+		MSM_BUS_DBG("Failed to allocate memory for client data\n");
+		return -ENOMEM;
+	}
+	dbg_cldata1->handle = pdata;
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_add_tail(&dbg_cldata1->list, &cl_list);
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+	return 0;
+}
+
+int msm_bus_dbg_add_bcm(struct msm_bus_node_device_type *cur_bcm)
+{
+	if (!cur_bcm) {
+		MSM_BUS_DBG("Failed to add BCM node\n");
+		return -ENOMEM;
+	}
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_add_tail(&cur_bcm->dbg_link, &bcm_list);
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+	return 0;
+}
+
+int msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
+						u64 ab, u64 ib)
+{
+	struct msm_bus_cldata *cldata;
+	int i;
+	struct timespec ts;
+	bool found = false;
+	char *buf = NULL;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->handle == pdata) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+		return -ENOENT;
+	}
+
+	if (cldata->file == NULL) {
+		if (pdata->name == NULL) {
+			MSM_BUS_DBG("Client doesn't have a name\n");
+			rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+			return -EINVAL;
+		}
+		cldata->file = debugfs_create_file(pdata->name, 0444,
+				clients, (void *)pdata, &client_data_fops);
+	}
+
+	if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT))
+		i = cldata->size;
+	else {
+		i = 0;
+		cldata->size = 0;
+	}
+	buf = cldata->buffer;
+	ts = ktime_to_timespec(ktime_get());
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%ld.%09lu\n",
+		ts.tv_sec, ts.tv_nsec);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "master: ");
+
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ", pdata->mas);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nslave : ");
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ", pdata->slv);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab     : ");
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ", ab);
+
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib     : ");
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ", ib);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
+	cldata->size = i;
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	trace_bus_update_request((int)ts.tv_sec, (int)ts.tv_nsec,
+		pdata->name, pdata->mas, pdata->slv, ab, ib);
+
+	return i;
+}
+
+void msm_bus_dbg_remove_client(const struct msm_bus_client_handle *pdata)
+{
+	struct msm_bus_cldata *cldata = NULL;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->handle == pdata) {
+			debugfs_remove(cldata->file);
+			list_del(&cldata->list);
+			kfree(cldata);
+			break;
+		}
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+}
+
+void msm_bus_dbg_remove_bcm(struct msm_bus_node_device_type *cur_bcm)
+{
+	if (!cur_bcm) {
+		MSM_BUS_DBG("Failed to remove BCM node\n");
+		return;
+	}
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_del_init(&cur_bcm->dbg_link);
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+}
+
+static int msm_bus_dbg_record_client(const struct msm_bus_scale_pdata *pdata,
+	int index, uint32_t clid, struct dentry *file)
+{
+	dbg_cldata2 = kmalloc(sizeof(struct msm_bus_cldata), GFP_KERNEL);
+	if (!dbg_cldata2) {
+		MSM_BUS_DBG("Failed to allocate memory for client data\n");
+		return -ENOMEM;
+	}
+	dbg_cldata2->pdata = pdata;
+	dbg_cldata2->index = index;
+	dbg_cldata2->clid = clid;
+	dbg_cldata2->file = file;
+	dbg_cldata2->size = 0;
+	dbg_cldata2->vote_count = 0;
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_add_tail(&dbg_cldata2->list, &cl_list);
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+	return 0;
+}
+
+static void msm_bus_dbg_free_client(uint32_t clid)
+{
+	struct msm_bus_cldata *cldata = NULL;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->clid == clid) {
+			debugfs_remove(cldata->file);
+			list_del(&cldata->list);
+			kfree(cldata);
+			break;
+		}
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+}
+
+static int msm_bus_dbg_fill_cl_buffer(const struct msm_bus_scale_pdata *pdata,
+	int index, uint32_t clid)
+{
+	int i = 0, j;
+	char *buf = NULL;
+	struct msm_bus_cldata *cldata = NULL;
+	struct timespec ts;
+	int found = 0;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->clid == clid) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found) {
+		rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+		return -ENOENT;
+	}
+
+	if (cldata->file == NULL) {
+		if (pdata->name == NULL) {
+			rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+			MSM_BUS_DBG("Client doesn't have a name\n");
+			return -EINVAL;
+		}
+		cldata->file = msm_bus_dbg_create(pdata->name, 0444,
+			clients, clid);
+	}
+
+	cldata->vote_count++;
+	if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT))
+		i = cldata->size;
+	else {
+		i = 0;
+		cldata->size = 0;
+	}
+	buf = cldata->buffer;
+	ts = ktime_to_timespec(ktime_get());
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%ld.%09lu\n",
+		ts.tv_sec, ts.tv_nsec);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "curr   : %d\n", index);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "masters: ");
+
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ",
+			pdata->usecase[index].vectors[j].src);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nslaves : ");
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ",
+			pdata->usecase[index].vectors[j].dst);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab     : ");
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ",
+			pdata->usecase[index].vectors[j].ab);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib     : ");
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ",
+			pdata->usecase[index].vectors[j].ib);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
+
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		trace_bus_update_request((int)ts.tv_sec, (int)ts.tv_nsec,
+		pdata->name,
+		pdata->usecase[index].vectors[j].src,
+		pdata->usecase[index].vectors[j].dst,
+		pdata->usecase[index].vectors[j].ab,
+		pdata->usecase[index].vectors[j].ib);
+
+	cldata->index = index;
+	cldata->size = i;
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	return i;
+}
+
+static ssize_t  msm_bus_dbg_update_request_write(struct file *file,
+	const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	struct msm_bus_cldata *cldata;
+	unsigned long index = 0;
+	int ret = 0;
+	char *chid;
+	int found = 0;
+	uint32_t clid;
+	ssize_t res = cnt;
+
+	dbg_buf = kmalloc(cnt + 1, GFP_KERNEL);
+	if (!dbg_buf)
+		return -ENOMEM;
+
+	if (cnt == 0) {
+		res = 0;
+		goto out;
+	}
+	if (copy_from_user(dbg_buf, ubuf, cnt)) {
+		res = -EFAULT;
+		goto out;
+	}
+	dbg_buf[cnt] = '\0';
+	chid = dbg_buf;
+	MSM_BUS_DBG("buffer: %s\n size: %zu\n", dbg_buf, sizeof(ubuf));
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (strnstr(chid, cldata->pdata->name, cnt)) {
+			found = 1;
+			strsep(&chid, " ");
+			if (chid) {
+				ret = kstrtoul(chid, 10, &index);
+				if (ret) {
+					MSM_BUS_DBG("Index conversion\n"
+						" failed\n");
+					rt_mutex_unlock(
+						&msm_bus_dbg_cllist_lock);
+					res = -EFAULT;
+					goto out;
+				}
+			} else {
+				MSM_BUS_DBG("Error parsing input. Index not\n"
+					" found\n");
+				found = 0;
+			}
+			if (index >= cldata->pdata->num_usecases) {
+				MSM_BUS_DBG("Invalid index!\n");
+				rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+				res = -EINVAL;
+				goto out;
+			}
+			clid = cldata->clid;
+			break;
+		}
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	if (found)
+		msm_bus_scale_client_update_request(clid, index);
+
+out:
+	kfree(dbg_buf);
+	return res;
+}
+
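msm_bus_dbg_update_request_write() expects a write of the form "<client-name> <usecase-index>"; the name is matched against registered clients and the index is applied through msm_bus_scale_client_update_request(). A minimal userspace sketch, assuming debugfs is mounted at /sys/kernel/debug and that a client named "test-client" has been registered (both assumptions, not guaranteed by this patch):

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char req[] = "test-client 1";  /* hypothetical client, usecase 1 */
            int fd = open("/sys/kernel/debug/msm-bus-dbg/client-data/update-request",
                          O_WRONLY);

            if (fd < 0)
                    return 1;
            if (write(fd, req, strlen(req)) != (ssize_t)strlen(req)) {
                    close(fd);
                    return 1;
            }
            return close(fd) ? 1 : 0;
    }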
+/*
+ * The following functions are used for viewing the commit data
+ * for each fabric.
+ */
+static ssize_t fabric_data_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	struct msm_bus_fab_list *fablist = NULL;
+	int bsize = 0;
+	ssize_t ret;
+	const char *name = file->private_data;
+	int found = 0;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		if (strcmp(fablist->name, name) == 0) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		mutex_unlock(&msm_bus_dbg_fablist_lock);
+		return -ENOENT;
+	}
+	bsize = fablist->size;
+	ret = simple_read_from_buffer(buf, count, ppos,
+		fablist->buffer, bsize);
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	return ret;
+}
+
+static const struct file_operations fabric_data_fops = {
+	.open		= simple_open,
+	.read		= fabric_data_read,
+};
+
+static ssize_t rules_dbg_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	ssize_t ret;
+
+	memset(rules_buf, 0, MAX_BUFF_SIZE);
+	print_rules_buf(rules_buf, MAX_BUFF_SIZE);
+	ret = simple_read_from_buffer(buf, count, ppos,
+		rules_buf, MAX_BUFF_SIZE);
+	return ret;
+}
+
+static const struct file_operations rules_dbg_fops = {
+	.open		= simple_open,
+	.read		= rules_dbg_read,
+};
+
+static int msm_bus_dbg_record_fabric(const char *fabname, struct dentry *file)
+{
+	int ret = 0;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	dbg_fablist = kmalloc(sizeof(struct msm_bus_fab_list), GFP_KERNEL);
+	if (!dbg_fablist) {
+		MSM_BUS_DBG("Failed to allocate memory for commit data\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	dbg_fablist->name = fabname;
+	dbg_fablist->size = 0;
+	list_add_tail(&dbg_fablist->list, &fabdata_list);
+err:
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	return ret;
+}
+
+static void msm_bus_dbg_free_fabric(const char *fabname)
+{
+	struct msm_bus_fab_list *fablist = NULL;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		if (strcmp(fablist->name, fabname) == 0) {
+			debugfs_remove(fablist->file);
+			list_del(&fablist->list);
+			kfree(fablist);
+			break;
+		}
+	}
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+}
+
+static int msm_bus_dbg_fill_fab_buffer(const char *fabname,
+	void *cdata, int nmasters, int nslaves,
+	int ntslaves)
+{
+	int i;
+	char *buf = NULL;
+	struct msm_bus_fab_list *fablist = NULL;
+	struct timespec ts;
+	int found = 0;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		if (strcmp(fablist->name, fabname) == 0) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		mutex_unlock(&msm_bus_dbg_fablist_lock);
+		return -ENOENT;
+	}
+
+	if (fablist->file == NULL) {
+		MSM_BUS_DBG("Fabric dbg entry does not exist\n");
+		mutex_unlock(&msm_bus_dbg_fablist_lock);
+		return -EFAULT;
+	}
+
+	if (fablist->size < MAX_BUFF_SIZE - 256)
+		i = fablist->size;
+	else {
+		i = 0;
+		fablist->size = 0;
+	}
+	buf = fablist->buffer;
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	ts = ktime_to_timespec(ktime_get());
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%ld.%09lu\n",
+		ts.tv_sec, ts.tv_nsec);
+
+	msm_bus_rpm_fill_cdata_buffer(&i, buf, MAX_BUFF_SIZE, cdata,
+		nmasters, nslaves, ntslaves);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	fablist->size = i;
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	return 0;
+}
+
+static const struct file_operations msm_bus_dbg_update_request_fops = {
+	.open = simple_open,
+	.write = msm_bus_dbg_update_request_write,
+};
+
+static ssize_t bcm_client_read(struct file *file,
+	char __user *buf, size_t count, loff_t *ppos)
+{
+	int i, j, cnt;
+	char msg[50];
+	struct device *dev = NULL;
+	struct link_node *lnode_list = NULL;
+	struct msm_bus_node_device_type *cur_node = NULL;
+	struct msm_bus_node_device_type *cur_bcm = NULL;
+	u64 act_ab, act_ib, dual_ab, dual_ib;
+
+	cnt = scnprintf(msg, sizeof(msg),
+		"\nDumping current BCM client votes to trace log\n");
+	if (*ppos)
+		goto exit_dump_bcm_clients_read;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cur_bcm, &bcm_list, dbg_link) {
+		for (i = 0; i < cur_bcm->num_lnodes; i++) {
+			if (!cur_bcm->lnode_list[i].in_use)
+				continue;
+
+			dev = bus_find_device(&msm_bus_type, NULL,
+				(void *)&cur_bcm->lnode_list[i].bus_dev_id,
+				msm_bus_device_match_adhoc);
+			cur_node = to_msm_bus_node(dev);
+
+			for (j = 0; j < cur_node->num_lnodes; j++) {
+				if (!cur_node->lnode_list[j].in_use)
+					continue;
+
+				lnode_list = &cur_node->lnode_list[j];
+				act_ab = lnode_list->lnode_ab[ACTIVE_CTX];
+				act_ib = lnode_list->lnode_ib[ACTIVE_CTX];
+				dual_ab = lnode_list->lnode_ab[DUAL_CTX];
+				dual_ib = lnode_list->lnode_ib[DUAL_CTX];
+
+				if (!act_ab && !act_ib && !dual_ab && !dual_ib)
+					continue;
+
+				if (!act_ab && !act_ib) {
+					act_ab = dual_ab;
+					act_ib = dual_ib;
+				}
+
+				trace_bus_bcm_client_status(
+					cur_bcm->node_info->name,
+					cur_node->lnode_list[j].cl_name,
+					act_ab, act_ib, dual_ab, dual_ib);
+
+				MSM_BUS_ERR(
+					"bcm=%s client=%s act_ab=%llu act_ib=%llu slp_ab=%llu slp_ib=%llu\n",
+					cur_bcm->node_info->name,
+					cur_node->lnode_list[j].cl_name,
+					(unsigned long long)act_ab,
+					(unsigned long long)act_ib,
+					(unsigned long long)dual_ab,
+					(unsigned long long)dual_ib);
+			}
+		}
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+exit_dump_bcm_clients_read:
+	return simple_read_from_buffer(buf, count, ppos, msg, cnt);
+}
+
+static const struct file_operations msm_bus_dbg_dump_bcm_clients_fops = {
+	.open = simple_open,
+	.read = bcm_client_read,
+};
+
+static ssize_t msm_bus_dbg_dump_clients_read(struct file *file,
+	char __user *buf, size_t count, loff_t *ppos)
+{
+	int j, cnt;
+	char msg[50];
+	struct msm_bus_cldata *cldata = NULL;
+
+	cnt = scnprintf(msg, sizeof(msg),
+		"\nDumping current client votes to trace log\n");
+	if (*ppos)
+		goto exit_dump_clients_read;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (IS_ERR_OR_NULL(cldata->pdata))
+			continue;
+		for (j = 0; j < cldata->pdata->usecase->num_paths; j++) {
+			if (cldata->index == -1)
+				continue;
+			trace_bus_client_status(
+			cldata->pdata->name,
+			cldata->pdata->usecase[cldata->index].vectors[j].src,
+			cldata->pdata->usecase[cldata->index].vectors[j].dst,
+			cldata->pdata->usecase[cldata->index].vectors[j].ab,
+			cldata->pdata->usecase[cldata->index].vectors[j].ib,
+			cldata->pdata->active_only,
+			cldata->vote_count);
+		}
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+exit_dump_clients_read:
+	return simple_read_from_buffer(buf, count, ppos, msg, cnt);
+}
+
+static const struct file_operations msm_bus_dbg_dump_clients_fops = {
+	.open		= simple_open,
+	.read		= msm_bus_dbg_dump_clients_read,
+};
+
+/**
+ * msm_bus_dbg_client_data() - Add debug data for clients
+ * @pdata: Platform data of the client
+ * @index: The current index or operation to be performed
+ * @clid: Client handle obtained during registration
+ */
+void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index,
+	uint32_t clid)
+{
+	struct dentry *file = NULL;
+
+	if (index == MSM_BUS_DBG_REGISTER) {
+		msm_bus_dbg_record_client(pdata, index, clid, file);
+		if (!pdata->name) {
+			MSM_BUS_DBG("Cannot create debugfs entry. Null name\n");
+			return;
+		}
+	} else if (index == MSM_BUS_DBG_UNREGISTER) {
+		msm_bus_dbg_free_client(clid);
+		MSM_BUS_DBG("Client %d unregistered\n", clid);
+	} else
+		msm_bus_dbg_fill_cl_buffer(pdata, index, clid);
+}
+EXPORT_SYMBOL(msm_bus_dbg_client_data);
+
+/**
+ * msm_bus_dbg_commit_data() - Add commit data from fabrics
+ * @fabname: Fabric name specified in platform data
+ * @cdata: Commit Data
+ * @nmasters: Number of masters attached to fabric
+ * @nslaves: Number of slaves attached to fabric
+ * @ntslaves: Number of tiered slaves attached to fabric
+ * @op: Operation to be performed
+ */
+void msm_bus_dbg_commit_data(const char *fabname, void *cdata,
+	int nmasters, int nslaves, int ntslaves, int op)
+{
+	struct dentry *file = NULL;
+
+	if (op == MSM_BUS_DBG_REGISTER)
+		msm_bus_dbg_record_fabric(fabname, file);
+	else if (op == MSM_BUS_DBG_UNREGISTER)
+		msm_bus_dbg_free_fabric(fabname);
+	else
+		msm_bus_dbg_fill_fab_buffer(fabname, cdata, nmasters,
+			nslaves, ntslaves);
+}
+EXPORT_SYMBOL(msm_bus_dbg_commit_data);
+
+static int __init msm_bus_debugfs_init(void)
+{
+	struct dentry *commit, *shell_client, *rules_dbg;
+	struct msm_bus_fab_list *fablist;
+	struct msm_bus_cldata *cldata = NULL;
+	static uint64_t val; /* debugfs files keep this address past init */
+
+	dir = debugfs_create_dir("msm-bus-dbg", NULL);
+	if (!dir || IS_ERR(dir)) {
+		MSM_BUS_ERR("Couldn't create msm-bus-dbg\n");
+		goto err;
+	}
+
+	clients = debugfs_create_dir("client-data", dir);
+	if (!clients || IS_ERR(clients)) {
+		MSM_BUS_ERR("Couldn't create clients\n");
+		goto err;
+	}
+
+	shell_client = debugfs_create_dir("shell-client", dir);
+	if (!shell_client || IS_ERR(shell_client)) {
+		MSM_BUS_ERR("Couldn't create shell-client\n");
+		goto err;
+	}
+
+	commit = debugfs_create_dir("commit-data", dir);
+	if (!commit || IS_ERR(commit)) {
+		MSM_BUS_ERR("Couldn't create commit\n");
+		goto err;
+	}
+
+	rules_dbg = debugfs_create_dir("rules-dbg", dir);
+	if (!rules_dbg || IS_ERR(rules_dbg)) {
+		MSM_BUS_ERR("Couldn't create rules-dbg\n");
+		goto err;
+	}
+
+	if (debugfs_create_file("print_rules", 0644,
+		rules_dbg, &val, &rules_dbg_fops) == NULL)
+		goto err;
+
+	if (debugfs_create_file("update_request", 0644,
+		shell_client, &val, &shell_client_en_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("ib", 0644, shell_client, &val,
+		&shell_client_ib_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("ab", 0644, shell_client, &val,
+		&shell_client_ab_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("slv", 0644, shell_client,
+		&val, &shell_client_slv_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("mas", 0644, shell_client,
+		&val, &shell_client_mas_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("update-request", 0644,
+		clients, NULL, &msm_bus_dbg_update_request_fops) == NULL)
+		goto err;
+
+	rules_buf = kzalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+	if (!rules_buf) {
+		MSM_BUS_ERR("Failed to alloc rules_buf");
+		goto err;
+	}
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->pdata) {
+			if (cldata->pdata->name == NULL) {
+				MSM_BUS_DBG("Client name not found\n");
+				continue;
+			}
+			cldata->file = msm_bus_dbg_create(cldata->pdata->name,
+					0444, clients, cldata->clid);
+		} else if (cldata->handle) {
+			if (cldata->handle->name == NULL) {
+				MSM_BUS_DBG("Client doesn't have a name\n");
+				continue;
+			}
+			cldata->file = debugfs_create_file(cldata->handle->name,
+							0444, clients,
+							(void *)cldata->handle,
+							&client_data_fops);
+		}
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	if (debugfs_create_file("dump_clients", 0644,
+		clients, NULL, &msm_bus_dbg_dump_clients_fops) == NULL)
+		goto err;
+
+	if (debugfs_create_file("dump_bcm_clients", 0644,
+		clients, NULL, &msm_bus_dbg_dump_bcm_clients_fops) == NULL)
+		goto err;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		fablist->file = debugfs_create_file(fablist->name, 0444,
+			commit, (void *)fablist->name, &fabric_data_fops);
+		if (fablist->file == NULL) {
+			MSM_BUS_DBG("Cannot create files for commit data\n");
+			kfree(rules_buf);
+			mutex_unlock(&msm_bus_dbg_fablist_lock);
+			goto err;
+		}
+	}
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+
+	msm_bus_dbg_init_vectors();
+	return 0;
+err:
+	debugfs_remove_recursive(dir);
+	return -ENODEV;
+}
+late_initcall(msm_bus_debugfs_init);
+
+static void __exit msm_bus_dbg_teardown(void)
+{
+	struct msm_bus_fab_list *fablist = NULL, *fablist_temp;
+	struct msm_bus_cldata *cldata = NULL, *cldata_temp;
+
+	debugfs_remove_recursive(dir);
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry_safe(cldata, cldata_temp, &cl_list, list) {
+		list_del(&cldata->list);
+		kfree(cldata);
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry_safe(fablist, fablist_temp, &fabdata_list, list) {
+		list_del(&fablist->list);
+		kfree(fablist);
+	}
+	kfree(rules_buf);
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+}
+module_exit(msm_bus_dbg_teardown);
+MODULE_DESCRIPTION("Debugfs for msm bus scaling client");
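The teardown above iterates with list_for_each_entry_safe() because each entry is freed inside the loop body; the plain iterator would read the next pointer out of memory that kfree() just released. The idiom in isolation, with hypothetical types:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {                   /* hypothetical list element */
            struct list_head link;
    };

    static void drain(struct list_head *head)
    {
            struct item *it, *tmp;  /* tmp caches the next node before kfree() */

            list_for_each_entry_safe(it, tmp, head, link) {
                    list_del(&it->link);
                    kfree(it);
            }
    }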
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c
index 6d806da..e8da538 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2014-2016, 2018, Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016, 2018-2019, Linux Foundation. All rights reserved.
  */
 
 #include <linux/clk.h>
@@ -470,6 +470,9 @@ static void msm_bus_fab_init_noc_ops(struct msm_bus_node_device_type *bus_dev)
 	case MSM_BUS_BIMC:
 		msm_bus_bimc_set_ops(bus_dev);
 		break;
+	case MSM_BUS_QNOC:
+		msm_bus_qnoc_set_ops(bus_dev);
+		break;
 	default:
 		MSM_BUS_ERR("%s: Invalid Bus type", __func__);
 	}
@@ -820,6 +823,9 @@ static int msm_bus_copy_node_info(struct msm_bus_node_device_type *pdata,
 	node_info->qos_params.mode = pdata_node_info->qos_params.mode;
 	node_info->qos_params.prio1 = pdata_node_info->qos_params.prio1;
 	node_info->qos_params.prio0 = pdata_node_info->qos_params.prio0;
+	node_info->qos_params.prio_dflt = pdata_node_info->qos_params.prio_dflt;
+	node_info->qos_params.urg_fwd_en =
+				pdata_node_info->qos_params.urg_fwd_en;
 	node_info->qos_params.reg_prio1 = pdata_node_info->qos_params.reg_prio1;
 	node_info->qos_params.reg_prio0 = pdata_node_info->qos_params.reg_prio0;
 	node_info->qos_params.prio_lvl = pdata_node_info->qos_params.prio_lvl;
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index d0afd29..13472a3 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -448,6 +448,7 @@ static int bcm_clist_add(struct msm_bus_node_device_type *cur_dev)
 		if (!cur_bcm->dirty) {
 			list_add_tail(&cur_bcm->link,
 					&cur_rsc->rscdev->bcm_clist[cur_vcd]);
+			msm_bus_dbg_add_bcm(cur_bcm);
 			cur_bcm->dirty = true;
 		}
 		cur_bcm->updated = false;
@@ -517,6 +518,7 @@ static int bcm_clist_clean(struct msm_bus_node_device_type *cur_dev)
 			cur_bcm->node_vec[ACTIVE_CTX].vec_b == 0 &&
 			!init_time) {
 			cur_bcm->dirty = false;
+			msm_bus_dbg_remove_bcm(cur_bcm);
 			list_del_init(&cur_bcm->link);
 		}
 	}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c
index 520c9c4..b6a354e 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c
@@ -183,6 +183,12 @@ static void get_qos_params(
 	of_property_read_u32(dev_node, "qcom,prio-wr",
 						&node_info->qos_params.prio_wr);
 
+	of_property_read_u32(dev_node, "qcom,prio",
+					&node_info->qos_params.prio_dflt);
+
+	node_info->qos_params.urg_fwd_en = of_property_read_bool(dev_node,
+						"qcom,forwarding");
+
 	of_property_read_u32(dev_node, "qcom,gp",
 						&node_info->qos_params.gp);
 
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_qnoc_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_qnoc_adhoc.c
new file mode 100644
index 0000000..fb8760c
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_qnoc_adhoc.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, Linux Foundation. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include "msm_bus_adhoc.h"
+#include "msm_bus_core.h"
+#include "msm_bus_noc.h"
+
+/* NOC_QOS generic */
+#define NOC_QOS_REG_BASE(b, o)		((b) + (o))
+
+#define NOC_QOS_MAINCTL_LOWn_ADDR(b, o, n, d)	\
+	(NOC_QOS_REG_BASE(b, o) + 0x8 + (d) * (n))
+enum noc_qos_id_mainctl_lown {
+	NOC_QOS_MCTL_DFLT_PRIOn_BMSK	= 0x00000070,
+	NOC_QOS_MCTL_DFLT_PRIOn_SHFT	= 0x4,
+	NOC_QOS_MCTL_URGFWD_ENn_BMSK	= 0x00000008,
+	NOC_QOS_MCTL_URGFWD_ENn_SHFT	= 0x3,
+	NOC_QOS_MCTL_LIMIT_ENn_BMSK	= 0x00000001,
+	NOC_QOS_MCTL_LIMIT_ENn_SHFT	= 0x0,
+};
+
+#define NOC_QOS_LIMITBWn_ADDR(b, o, n, d)	\
+	(NOC_QOS_REG_BASE(b, o) + 0x18 + (d) * (n))
+enum noc_qos_id_limitbwn {
+	NOC_QOS_LIMITBW_BWn_BMSK	= 0x000007FF,
+	NOC_QOS_LIMITBW_BWn_SHFT	= 0x0,
+	NOC_QOS_LIMITBW_SATn_BMSK	= 0x03FF0000,
+	NOC_QOS_LIMITBW_SATn_SHFT	= 0x11,
+};
+
+#define NOC_QOS_REGUL0CTLn_ADDR(b, o, n, d)	\
+	(NOC_QOS_REG_BASE(b, o) + 0x40 + (d) * (n))
+enum noc_qos_id_regul0ctln {
+	NOC_QOS_REGUL0CTL_HI_PRIOn_BMSK	= 0x00007000,
+	NOC_QOS_REGUL0CTL_HI_PRIOn_SHFT	= 0xC,
+	NOC_QOS_REGUL0CTL_LW_PRIOn_BMSK	= 0x00000700,
+	NOC_QOS_REGUL0CTL_LW_PRIOn_SHFT	= 0x8,
+	NOC_QOS_REGUL0CTL_WRENn_BMSK	= 0x00000002,
+	NOC_QOS_REGUL0CTL_WRENn_SHFT	= 0x1,
+	NOC_QOS_REGUL0CTL_RDENn_BMSK	= 0x00000001,
+	NOC_QOS_REGUL0CTL_RDENn_SHFT	= 0x0,
+};
+
+#define NOC_QOS_REGUL0BWn_ADDR(b, o, n, d)	\
+	(NOC_QOS_REG_BASE(b, o) + 0x48 + (d) * (n))
+enum noc_qos_id_regul0bwbwn {
+	NOC_QOS_REGUL0BW_BWn_BMSK	= 0x000007FF,
+	NOC_QOS_REGUL0BW_BWn_SHFT	= 0x0,
+	NOC_QOS_REGUL0BW_SATn_BMSK	= 0x03FF0000,
+	NOC_QOS_REGUL0BW_SATn_SHFT	= 0x11,
+};
+
+static void noc_set_qos_dflt_prio(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta,
+		uint32_t prio)
+{
+	uint32_t reg_val, val;
+
+	reg_val = readl_relaxed(NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = prio << NOC_QOS_MCTL_DFLT_PRIOn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_MCTL_DFLT_PRIOn_BMSK))) |
+		(val & NOC_QOS_MCTL_DFLT_PRIOn_BMSK)),
+		NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure qos priority is set before exiting */
+	wmb();
+}
+
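noc_set_qos_dflt_prio() above, and the limiter/regulator/forwarding helpers that follow, all repeat one read-modify-write shape: read the register, clear the field's mask, OR in the shifted-and-masked new value, write it back, then wmb() so the update is ordered before whatever the caller does next. The shape factored into a hypothetical helper (not part of this patch):

    #include <linux/io.h>

    /* mask and shift must describe the same register field */
    static void noc_qos_update_field(void __iomem *addr, u32 mask,
                                     u32 shift, u32 field_val)
    {
            u32 reg_val = readl_relaxed(addr);

            reg_val &= ~mask;
            reg_val |= (field_val << shift) & mask;
            writel_relaxed(reg_val, addr);

            /* order the QoS update before subsequent accesses */
            wmb();
    }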
+static void noc_set_qos_limiter(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta,
+		struct msm_bus_noc_limiter *lim, uint32_t lim_en)
+{
+	uint32_t reg_val, val;
+
+	reg_val = readl_relaxed(NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport,
+		qos_delta));
+
+	writel_relaxed((reg_val & (~(NOC_QOS_MCTL_LIMIT_ENn_BMSK))),
+		NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure the limiter is disabled before configuring it */
+	wmb();
+
+	reg_val = readl_relaxed(NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = lim->bw << NOC_QOS_LIMITBW_BWn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_LIMITBW_BWn_BMSK))) |
+		(val & NOC_QOS_LIMITBW_BWn_BMSK)),
+		NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = lim->sat << NOC_QOS_LIMITBW_SATn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_LIMITBW_SATn_BMSK))) |
+		(val & NOC_QOS_LIMITBW_SATn_BMSK)),
+		NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure qos limiter settings in place before possibly enabling */
+	wmb();
+
+	reg_val = readl_relaxed(NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = lim_en << NOC_QOS_MCTL_LIMIT_ENn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_MCTL_LIMIT_ENn_BMSK))) |
+		(val & NOC_QOS_MCTL_LIMIT_ENn_BMSK)),
+		NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure qos limiter writes take place before exiting */
+	wmb();
+}
+
+static void noc_set_qos_regulator(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta,
+		struct msm_bus_noc_regulator *reg,
+		struct msm_bus_noc_regulator_mode *reg_mode)
+{
+	uint32_t reg_val, val;
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport,
+		qos_delta)) & (NOC_QOS_REGUL0CTL_WRENn_BMSK |
+						NOC_QOS_REGUL0CTL_RDENn_BMSK);
+
+	writel_relaxed((reg_val & (~(NOC_QOS_REGUL0CTL_WRENn_BMSK |
+						NOC_QOS_REGUL0CTL_RDENn_BMSK))),
+		NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure qos regulator is disabled before configuring */
+	wmb();
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport,
+		qos_delta)) & NOC_QOS_REGUL0CTL_HI_PRIOn_BMSK;
+	val = reg->hi_prio << NOC_QOS_REGUL0CTL_HI_PRIOn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0CTL_HI_PRIOn_BMSK))) |
+		(val & NOC_QOS_REGUL0CTL_HI_PRIOn_BMSK)),
+		NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport,
+		qos_delta)) & NOC_QOS_REGUL0CTL_LW_PRIOn_BMSK;
+	val = reg->low_prio << NOC_QOS_REGUL0CTL_LW_PRIOn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0CTL_LW_PRIOn_BMSK))) |
+		(val & NOC_QOS_REGUL0CTL_LW_PRIOn_BMSK)),
+		NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0BWn_ADDR(base, qos_off, mport,
+		qos_delta)) & NOC_QOS_REGUL0BW_BWn_BMSK;
+	val = reg->bw << NOC_QOS_REGUL0BW_BWn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0BW_BWn_BMSK))) |
+		(val & NOC_QOS_REGUL0BW_BWn_BMSK)),
+		NOC_QOS_REGUL0BWn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0BWn_ADDR(base, qos_off, mport,
+		qos_delta)) & NOC_QOS_REGUL0BW_SATn_BMSK;
+	val = reg->sat << NOC_QOS_REGUL0BW_SATn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0BW_SATn_BMSK))) |
+		(val & NOC_QOS_REGUL0BW_SATn_BMSK)),
+		NOC_QOS_REGUL0BWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure regulator is configured before possibly enabling */
+	wmb();
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = reg_mode->write << NOC_QOS_REGUL0CTL_WRENn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0CTL_WRENn_BMSK))) |
+		(val & NOC_QOS_REGUL0CTL_WRENn_BMSK)),
+		NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = reg_mode->read << NOC_QOS_REGUL0CTL_RDENn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0CTL_RDENn_BMSK))) |
+		(val & NOC_QOS_REGUL0CTL_RDENn_BMSK)),
+		NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure regulator is ready before exiting */
+	wmb();
+}
+
+static void noc_set_qos_forwarding(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta,
+		bool urg_fwd_en)
+{
+	uint32_t reg_val, val;
+
+	reg_val = readl_relaxed(NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = (urg_fwd_en ? 1 : 0) << NOC_QOS_MCTL_URGFWD_ENn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_MCTL_URGFWD_ENn_BMSK))) |
+		(val & NOC_QOS_MCTL_URGFWD_ENn_BMSK)),
+		NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure qos priority is set before exiting */
+	wmb();
+}
+
+static int msm_bus_noc_qos_init(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base,
+				uint32_t qos_off, uint32_t qos_delta,
+				uint32_t qos_freq)
+{
+	struct qos_params_type *qos_params;
+	int ret = 0;
+	int i;
+
+	qos_params = &info->node_info->qos_params;
+
+	if (!info->node_info->qport) {
+		MSM_BUS_DBG("No QoS Ports to init\n");
+		ret = 0;
+		goto err_qos_init;
+	}
+
+	for (i = 0; i < info->node_info->num_qports; i++) {
+		noc_set_qos_dflt_prio(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					qos_params->prio_dflt);
+
+		noc_set_qos_limiter(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					&qos_params->limiter,
+					qos_params->limiter_en);
+
+		noc_set_qos_regulator(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					&qos_params->reg,
+					&qos_params->reg_mode);
+
+		noc_set_qos_forwarding(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					qos_params->urg_fwd_en);
+	}
+err_qos_init:
+	return ret;
+}
+
+int msm_bus_qnoc_set_ops(struct msm_bus_node_device_type *bus_dev)
+{
+	if (!bus_dev)
+		return -ENODEV;
+
+	bus_dev->fabdev->noc_ops.qos_init = msm_bus_noc_qos_init;
+
+	return 0;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
index 0c1e1cd..351902b 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
@@ -207,6 +207,7 @@ struct msm_bus_node_device_type {
 	struct nodevector node_vec[NUM_CTX];
 	struct list_head link;
 	struct list_head query_link;
+	struct list_head dbg_link;
 	struct nodeclk clk[NUM_CTX];
 	struct nodeclk bus_qos_clk;
 	uint32_t num_node_qos_clks;
diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c
index 5218136..420a094 100644
--- a/drivers/soc/qcom/qdss_bridge.c
+++ b/drivers/soc/qcom/qdss_bridge.c
@@ -208,7 +208,6 @@ static void qdss_buf_tbl_remove(struct qdss_bridge_drvdata *drvdata,
 static void mhi_ch_close(struct qdss_bridge_drvdata *drvdata)
 {
 	if (drvdata->mode == MHI_TRANSFER_TYPE_USB) {
-		flush_workqueue(drvdata->mhi_wq);
 		qdss_destroy_buf_tbl(drvdata);
 		qdss_destroy_read_done_list(drvdata);
 	} else if (drvdata->mode == MHI_TRANSFER_TYPE_UCI) {
@@ -256,6 +255,7 @@ static ssize_t mode_store(struct device *dev,
 				spin_unlock_bh(&drvdata->lock);
 				usb_qdss_close(drvdata->usb_ch);
 				mhi_unprepare_from_transfer(drvdata->mhi_dev);
+				flush_workqueue(drvdata->mhi_wq);
 				mhi_ch_close(drvdata);
 				drvdata->mode = MHI_TRANSFER_TYPE_UCI;
 			} else if (drvdata->opened == DISABLE) {
@@ -275,6 +275,7 @@ static ssize_t mode_store(struct device *dev,
 				spin_unlock_bh(&drvdata->lock);
 				wake_up(&drvdata->uci_wq);
 				mhi_unprepare_from_transfer(drvdata->mhi_dev);
+				flush_workqueue(drvdata->mhi_wq);
 				mhi_ch_close(drvdata);
 				drvdata->mode = MHI_TRANSFER_TYPE_USB;
 				queue_work(drvdata->mhi_wq,
@@ -586,6 +587,7 @@ static int mhi_uci_release(struct inode *inode, struct file *file)
 			spin_unlock_bh(&drvdata->lock);
 			wake_up(&drvdata->uci_wq);
 			mhi_unprepare_from_transfer(drvdata->mhi_dev);
+			flush_workqueue(drvdata->mhi_wq);
 			mhi_ch_close(drvdata);
 		} else if (drvdata->opened == SSR) {
 			spin_unlock_bh(&drvdata->lock);
@@ -825,6 +827,7 @@ static void qdss_mhi_remove(struct mhi_device *mhi_dev)
 				msleep(20);
 			} while (qdss_check_entry(drvdata));
 		}
+		flush_workqueue(drvdata->mhi_wq);
 		mhi_ch_close(drvdata);
 	} else
 		spin_unlock_bh(&drvdata->lock);
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index a391dae..ca65736 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -311,7 +311,6 @@ int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn,
 
 	memset(txn, 0, sizeof(*txn));
 
-	mutex_init(&txn->lock);
 	init_completion(&txn->completion);
 	txn->qmi = qmi;
 	txn->ei = ei;
@@ -347,17 +346,12 @@ int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout)
 
 	ret = wait_for_completion_timeout(&txn->completion, timeout);
 
-	mutex_lock(&txn->lock);
 	if (txn->result == -ENETRESET) {
-		mutex_unlock(&txn->lock);
 		return txn->result;
 	}
-	mutex_unlock(&txn->lock);
 
 	mutex_lock(&qmi->txn_lock);
-	mutex_lock(&txn->lock);
 	idr_remove(&qmi->txns, txn->id);
-	mutex_unlock(&txn->lock);
 	mutex_unlock(&qmi->txn_lock);
 
 	if (ret == 0)
@@ -376,9 +370,7 @@ void qmi_txn_cancel(struct qmi_txn *txn)
 	struct qmi_handle *qmi = txn->qmi;
 
 	mutex_lock(&qmi->txn_lock);
-	mutex_lock(&txn->lock);
 	idr_remove(&qmi->txns, txn->id);
-	mutex_unlock(&txn->lock);
 	mutex_unlock(&qmi->txn_lock);
 }
 EXPORT_SYMBOL(qmi_txn_cancel);
@@ -448,6 +440,7 @@ static void qmi_handle_net_reset(struct qmi_handle *qmi)
 	struct sockaddr_qrtr sq;
 	struct qmi_service *svc;
 	struct socket *sock;
+	long timeo = qmi->sock->sk->sk_sndtimeo;
 
 	sock = qmi_sock_create(qmi, &sq);
 	if (IS_ERR(sock))
@@ -462,11 +455,13 @@ static void qmi_handle_net_reset(struct qmi_handle *qmi)
 	/* Already qmi_handle_release() started */
 	if (!qmi->sock) {
 		sock_release(sock);
+		mutex_unlock(&qmi->sock_lock);
 		return;
 	}
 	sock_release(qmi->sock);
 	qmi->sock = sock;
 	qmi->sq = sq;
+	qmi->sock->sk->sk_sndtimeo = timeo;
 	mutex_unlock(&qmi->sock_lock);
 
 	list_for_each_entry(svc, &qmi->lookups, list_node)
@@ -505,10 +500,6 @@ static void qmi_handle_message(struct qmi_handle *qmi,
 			mutex_unlock(&qmi->txn_lock);
 			return;
 		}
-
-		mutex_lock(&txn->lock);
-		mutex_unlock(&qmi->txn_lock);
-
 		if (txn->dest && txn->ei) {
 			ret = qmi_decode_message(buf, len, txn->ei, txn->dest);
 			if (ret < 0)
@@ -516,11 +507,10 @@ static void qmi_handle_message(struct qmi_handle *qmi,
 
 			txn->result = ret;
 			complete(&txn->completion);
-		} else  {
+		} else {
 			qmi_invoke_handler(qmi, sq, txn, buf, len);
 		}
-
-		mutex_unlock(&txn->lock);
+		mutex_unlock(&qmi->txn_lock);
 	} else {
 		/* Create a txn based on the txn_id of the incoming message */
 		memset(&tmp_txn, 0, sizeof(tmp_txn));
@@ -621,6 +611,21 @@ static struct socket *qmi_sock_create(struct qmi_handle *qmi,
 }
 
 /**
+ * qmi_set_sndtimeo() - set the sk_sndtimeo of the qmi handle
+ * @qmi:	QMI client handle
+ * @timeo:	timeout in jiffies.
+ *
+ * This sets the timeout for the blocking socket send in qmi send.
+ */
+void qmi_set_sndtimeo(struct qmi_handle *qmi, long timeo)
+{
+	mutex_lock(&qmi->sock_lock);
+	qmi->sock->sk->sk_sndtimeo = timeo;
+	mutex_unlock(&qmi->sock_lock);
+}
+EXPORT_SYMBOL(qmi_set_sndtimeo);
+
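A client would typically apply the timeout right after bringing its handle up; qmi_handle_net_reset() above re-applies the saved value so a rebuilt socket keeps the same bound. A hedged usage sketch (the buffer size and bare handle are hypothetical):

    #include <linux/jiffies.h>
    #include <linux/soc/qcom/qmi.h>

    static struct qmi_handle demo_qmi;   /* hypothetical client handle */

    static int demo_qmi_up(void)
    {
            int ret;

            ret = qmi_handle_init(&demo_qmi, 4096, NULL, NULL);
            if (ret < 0)
                    return ret;

            /* bound blocking sends at 5 s instead of waiting forever */
            qmi_set_sndtimeo(&demo_qmi, msecs_to_jiffies(5000));
            return 0;
    }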
+/**
  * qmi_handle_init() - initialize a QMI client handle
  * @qmi:	QMI handle to initialize
  * @recv_buf_size: maximum size of incoming message
@@ -718,11 +723,9 @@ void qmi_handle_release(struct qmi_handle *qmi)
 
 	mutex_lock(&qmi->txn_lock);
 	idr_for_each_entry(&qmi->txns, txn, txn_id) {
-		mutex_lock(&txn->lock);
 		idr_remove(&qmi->txns, txn->id);
 		txn->result = -ENETRESET;
 		complete(&txn->completion);
-		mutex_unlock(&txn->lock);
 	}
 	mutex_unlock(&qmi->txn_lock);
 	idr_destroy(&qmi->txns);
diff --git a/drivers/soc/qcom/qmi_rmnet.c b/drivers/soc/qcom/qmi_rmnet.c
index e993241..de319e3 100644
--- a/drivers/soc/qcom/qmi_rmnet.c
+++ b/drivers/soc/qcom/qmi_rmnet.c
@@ -48,6 +48,7 @@ unsigned int rmnet_wq_frequency __read_mostly = 1000;
 
 #ifdef CONFIG_QCOM_QMI_DFC
 static unsigned int qmi_rmnet_scale_factor = 5;
+static LIST_HEAD(qos_cleanup_list);
 #endif
 
 static int
@@ -129,8 +130,7 @@ qmi_rmnet_has_pending(struct qmi_info *qmi)
 
 #ifdef CONFIG_QCOM_QMI_DFC
 static void
-qmi_rmnet_clean_flow_list(struct qmi_info *qmi, struct net_device *dev,
-			  struct qos_info *qos)
+qmi_rmnet_clean_flow_list(struct qos_info *qos)
 {
 	struct rmnet_bearer_map *bearer, *br_tmp;
 	struct rmnet_flow_map *itm, *fl_tmp;
@@ -205,6 +205,8 @@ int qmi_rmnet_flow_control(struct net_device *dev, u32 mq_idx, int enable)
 	else
 		netif_tx_stop_queue(q);
 
+	trace_dfc_qmi_tc(dev->name, mq_idx, enable);
+
 	return 0;
 }
 
@@ -238,7 +240,7 @@ static struct rmnet_bearer_map *__qmi_rmnet_bearer_get(
 
 		bearer->bearer_id = bearer_id;
 		bearer->flow_ref = 1;
-		bearer->grant_size = DEFAULT_GRANT;
+		bearer->grant_size = DEFAULT_CALL_GRANT;
 		bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
 		bearer->mq_idx = INVALID_MQ;
 		bearer->ack_mq_idx = INVALID_MQ;
@@ -266,15 +268,11 @@ static void __qmi_rmnet_bearer_put(struct net_device *dev,
 			if (reset) {
 				qmi_rmnet_reset_txq(dev, i);
 				qmi_rmnet_flow_control(dev, i, 1);
-				trace_dfc_qmi_tc(dev->name,
-					bearer->bearer_id, 0, 0, i, 1);
 
 				if (dfc_mode == DFC_MODE_SA) {
 					j = i + ACK_MQ_OFFSET;
 					qmi_rmnet_reset_txq(dev, j);
 					qmi_rmnet_flow_control(dev, j, 1);
-					trace_dfc_qmi_tc(dev->name,
-						bearer->bearer_id, 0, 0, j, 1);
 				}
 			}
 		}
@@ -312,18 +310,10 @@ static void __qmi_rmnet_update_mq(struct net_device *dev,
 
 		qmi_rmnet_flow_control(dev, itm->mq_idx,
 				       bearer->grant_size > 0 ? 1 : 0);
-		trace_dfc_qmi_tc(dev->name, itm->bearer_id,
-				 bearer->grant_size, 0, itm->mq_idx,
-				 bearer->grant_size > 0 ? 1 : 0);
 
-		if (dfc_mode == DFC_MODE_SA) {
+		if (dfc_mode == DFC_MODE_SA)
 			qmi_rmnet_flow_control(dev, bearer->ack_mq_idx,
 					bearer->grant_size > 0 ? 1 : 0);
-			trace_dfc_qmi_tc(dev->name, itm->bearer_id,
-					bearer->grant_size, 0,
-					bearer->ack_mq_idx,
-					bearer->grant_size > 0 ? 1 : 0);
-		}
 	}
 }
 
@@ -476,11 +466,19 @@ static void qmi_rmnet_query_flows(struct qmi_info *qmi)
 	}
 }
 
-#else
-static inline void qmi_rmnet_clean_flow_list(struct qos_info *qos)
+struct rmnet_bearer_map *qmi_rmnet_get_bearer_noref(struct qos_info *qos_info,
+						    u8 bearer_id)
 {
+	struct rmnet_bearer_map *bearer;
+
+	bearer = __qmi_rmnet_bearer_get(qos_info, bearer_id);
+	if (bearer)
+		bearer->flow_ref--;
+
+	return bearer;
 }
 
+#else
 static inline void
 qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm,
 			  struct rmnet_flow_map *new_map)
@@ -780,7 +778,8 @@ static bool qmi_rmnet_is_tcp_ack(struct sk_buff *skb)
 		if ((ip_hdr(skb)->protocol == IPPROTO_TCP) &&
 		    (ip_hdr(skb)->ihl == 5) &&
 		    (len == 40 || len == 52) &&
-		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
+		    ((tcp_flag_word(tcp_hdr(skb)) &
+		      cpu_to_be32(0x00FF0000)) == TCP_FLAG_ACK))
 			return true;
 		break;
 
@@ -788,7 +787,8 @@ static bool qmi_rmnet_is_tcp_ack(struct sk_buff *skb)
 	case htons(ETH_P_IPV6):
 		if ((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) &&
 		    (len == 60 || len == 72) &&
-		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
+		    ((tcp_flag_word(tcp_hdr(skb)) &
+		      cpu_to_be32(0x00FF0000)) == TCP_FLAG_ACK))
 			return true;
 		break;
 	}
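Both hunks above fix an endianness bug. tcp_flag_word() returns the raw big-endian word holding data-offset, flags, and window, so isolating the flags byte needs a byte-order-converted mask. The old host-order 0xFF00 happened to line up with the flags byte on little-endian hosts but selected part of the window field on big-endian ones, whereas cpu_to_be32(0x00FF0000) is correct on both. The corrected check in isolation:

    #include <linux/tcp.h>

    /* true when ACK is the only flag set in the segment */
    static bool tcp_is_pure_ack(struct tcphdr *th)
    {
            /* flags occupy bits 16-23 of the big-endian flag word */
            return (tcp_flag_word(th) & cpu_to_be32(0x00FF0000)) ==
                    TCP_FLAG_ACK;
    }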
@@ -896,19 +896,27 @@ void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
 }
 EXPORT_SYMBOL(qmi_rmnet_qos_init);
 
-void qmi_rmnet_qos_exit(struct net_device *dev, void *qos)
+void qmi_rmnet_qos_exit_pre(void *qos)
 {
-	void *port = rmnet_get_rmnet_port(dev);
-	struct qmi_info *qmi = rmnet_get_qmi_pt(port);
-	struct qos_info *qos_info = (struct qos_info *)qos;
-
-	if (!qmi || !qos)
+	if (!qos)
 		return;
 
-	qmi_rmnet_clean_flow_list(qmi, dev, qos_info);
-	kfree(qos);
+	list_add(&((struct qos_info *)qos)->list, &qos_cleanup_list);
 }
-EXPORT_SYMBOL(qmi_rmnet_qos_exit);
+EXPORT_SYMBOL(qmi_rmnet_qos_exit_pre);
+
+void qmi_rmnet_qos_exit_post(void)
+{
+	struct qos_info *qos, *tmp;
+
+	synchronize_rcu();
+	list_for_each_entry_safe(qos, tmp, &qos_cleanup_list, list) {
+		list_del(&qos->list);
+		qmi_rmnet_clean_flow_list(qos);
+		kfree(qos);
+	}
+}
+EXPORT_SYMBOL(qmi_rmnet_qos_exit_post);
 #endif
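qmi_rmnet_qos_exit_pre()/qmi_rmnet_qos_exit_post() split the old one-shot free into two phases so RCU readers racing with teardown never touch freed qos_info: _pre only parks the object on qos_cleanup_list, and _post frees everything once synchronize_rcu() has drained the readers. A sketch of the intended call order (the caller shown is hypothetical):

    /* teardown path for one or more rmnet devices */
    static void demo_teardown(struct net_device *dev, void *qos)
    {
            qmi_rmnet_qos_exit_pre(qos);  /* park it; readers may still run */

            /* ... unlink dev so no new reader can reach qos ... */

            qmi_rmnet_qos_exit_post();    /* synchronize_rcu(), then free */
    }

Because _pre only queues, several devices can be parked first and reaped with a single _post, paying for one grace period instead of one per device.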
 
 #ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
diff --git a/drivers/soc/qcom/qmi_rmnet_i.h b/drivers/soc/qcom/qmi_rmnet_i.h
index d94b5ea..15af985 100644
--- a/drivers/soc/qcom/qmi_rmnet_i.h
+++ b/drivers/soc/qcom/qmi_rmnet_i.h
@@ -13,6 +13,7 @@
 #define MAX_CLIENT_NUM 2
 #define MAX_FLOW_NUM 32
 #define DEFAULT_GRANT 1
+#define DEFAULT_CALL_GRANT 20480
 #define DFC_MAX_BEARERS_V01 16
 #define DEFAULT_MQ_NUM 0
 #define ACK_MQ_OFFSET (MAX_MQ_NUM - 1)
@@ -63,6 +64,7 @@ struct mq_map {
 };
 
 struct qos_info {
+	struct list_head list;
 	u8 mux_id;
 	struct net_device *real_dev;
 	struct list_head flow_head;
@@ -136,6 +138,9 @@ int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
 void dfc_qmap_client_exit(void *dfc_data);
 
 void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type);
+
+struct rmnet_bearer_map *qmi_rmnet_get_bearer_noref(struct qos_info *qos_info,
+						    u8 bearer_id);
 #else
 static inline struct rmnet_flow_map *
 qmi_rmnet_get_flow_map(struct qos_info *qos_info,
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index be34be0..82691f7 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -66,7 +66,7 @@ struct cache_req {
 struct batch_cache_req {
 	struct list_head list;
 	int count;
-	struct rpmh_request rpm_msgs[];
+	struct rpmh_request *rpm_msgs;
 };
 
 static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
@@ -175,20 +175,27 @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
 existing:
 	switch (state) {
 	case RPMH_ACTIVE_ONLY_STATE:
-		if (req->sleep_val != UINT_MAX)
+		if (req->sleep_val != UINT_MAX) {
 			req->wake_val = cmd->data;
+			ctrlr->dirty = true;
+		}
 		break;
 	case RPMH_WAKE_ONLY_STATE:
-		req->wake_val = cmd->data;
+		if (req->wake_val != cmd->data) {
+			req->wake_val = cmd->data;
+			ctrlr->dirty = true;
+		}
 		break;
 	case RPMH_SLEEP_STATE:
-		req->sleep_val = cmd->data;
+		if (req->sleep_val != cmd->data) {
+			req->sleep_val = cmd->data;
+			ctrlr->dirty = true;
+		}
 		break;
 	default:
 		break;
 	}
 
-	ctrlr->dirty = true;
 unlock:
 	spin_unlock(&ctrlr->cache_lock);
 
@@ -373,8 +380,10 @@ static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
 	struct batch_cache_req *req, *tmp;
 
 	spin_lock(&ctrlr->cache_lock);
-	list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
+	list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list) {
+		list_del(&req->list);
 		kfree(req);
+	}
 	INIT_LIST_HEAD(&ctrlr->batch_cache);
 	spin_unlock(&ctrlr->cache_lock);
 }
@@ -430,10 +439,11 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
 		return -ENOMEM;
 
 	req = ptr;
+	rpm_msgs = ptr + sizeof(*req);
 	compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);
 
 	req->count = count;
-	rpm_msgs = req->rpm_msgs;
+	req->rpm_msgs = rpm_msgs;
 
 	for (i = 0; i < count; i++) {
 		__fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
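With rpm_msgs demoted from a flexible array member to a plain pointer, the batch is still carved out of a single allocation: the header comes first, the message array starts immediately after it, and the completions follow the messages. A sketch of the same carving with hypothetical types:

    #include <linux/completion.h>
    #include <linux/slab.h>

    struct demo_msg { int payload; };      /* hypothetical message */

    struct demo_hdr {
            int count;
            struct demo_msg *msgs;         /* aimed just past the header */
    };

    static struct demo_hdr *demo_alloc(int count)
    {
            void *ptr = kzalloc(sizeof(struct demo_hdr) +
                                count * (sizeof(struct demo_msg) +
                                         sizeof(struct completion)),
                                GFP_KERNEL);
            struct demo_hdr *hdr = ptr;

            if (!ptr)
                    return NULL;
            hdr->count = count;
            hdr->msgs = ptr + sizeof(*hdr);
            return hdr;
    }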
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index 2662285..299cae9 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -425,6 +425,8 @@ const char *msm_secure_vmid_to_string(int secure_vmid)
 		return "VMID_CP_SPSS_HLOS_SHARED";
 	case VMID_INVAL:
 		return "VMID_INVAL";
+	case VMID_NAV:
+		return "VMID_NAV";
 	default:
 		return "Unknown VMID";
 	}
diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c
index b3cb9bc..8201946 100644
--- a/drivers/soc/qcom/smcinvoke.c
+++ b/drivers/soc/qcom/smcinvoke.c
@@ -1081,6 +1081,19 @@ static int prepare_send_scm_msg(const uint8_t *in_buf, phys_addr_t in_paddr,
 		if (desc.ret[0] != SMCINVOKE_RESULT_INBOUND_REQ_NEEDED)
 			break;
 
+		/*
+		 * At this point we believe this is an inbound request, but it
+		 * may be the response to an inbound request that failed and
+		 * returned an error. Ideally scm_call would have returned the
+		 * error itself, but the error arrives in ret[1], so check for
+		 * it here; otherwise we could spin in an infinite loop.
+		 */
+		if (req->result &&
+			desc.ret[0] == SMCINVOKE_RESULT_INBOUND_REQ_NEEDED) {
+			ret = req->result;
+			break;
+		}
+
 		dmac_inv_range(out_buf, out_buf + out_buf_len);
 
 		if (desc.ret[0] == SMCINVOKE_RESULT_INBOUND_REQ_NEEDED) {
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index f330ef3..c0da352 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -54,6 +54,7 @@ enum {
 	HW_PLATFORM_STP = 23,
 	HW_PLATFORM_SBC = 24,
 	HW_PLATFORM_HDK = 31,
+	HW_PLATFORM_IDP = 34,
 	HW_PLATFORM_INVALID
 };
 
@@ -75,6 +76,7 @@ const char *hw_platform[] = {
 	[HW_PLATFORM_STP] = "STP",
 	[HW_PLATFORM_SBC] = "SBC",
 	[HW_PLATFORM_HDK] = "HDK",
+	[HW_PLATFORM_IDP] = "IDP"
 };
 
 enum {
@@ -322,10 +324,17 @@ static struct msm_soc_info cpu_of_id[] = {
 
 	/* Bengal ID */
 	[417] = {MSM_CPU_BENGAL, "BENGAL"},
+	[444] = {MSM_CPU_BENGAL, "BENGAL"},
 
 	/* Lagoon ID */
 	[434] = {MSM_CPU_LAGOON, "LAGOON"},
 
+	/* Bengalp ID */
+	[445] = {MSM_CPU_BENGALP, "BENGALP"},
+
+	/* Scuba ID */
+	[441] = {MSM_CPU_SCUBA, "SCUBA"},
+
 	/* Uninitialized IDs are not known to run Linux.
 	 * MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
 	 * considered as unknown CPU.
@@ -1197,10 +1206,18 @@ static void * __init setup_dummy_socinfo(void)
 		dummy_socinfo.id = 417;
 		strlcpy(dummy_socinfo.build_id, "bengal - ",
 		sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_bengalp()) {
+		dummy_socinfo.id = 445;
+		strlcpy(dummy_socinfo.build_id, "bengalp - ",
+		sizeof(dummy_socinfo.build_id));
 	} else if (early_machine_is_lagoon()) {
 		dummy_socinfo.id = 434;
 		strlcpy(dummy_socinfo.build_id, "lagoon - ",
 		sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_scuba()) {
+		dummy_socinfo.id = 441;
+		strlcpy(dummy_socinfo.build_id, "scuba - ",
+		sizeof(dummy_socinfo.build_id));
 	} else if (early_machine_is_sdmshrike()) {
 		dummy_socinfo.id = 340;
 		strlcpy(dummy_socinfo.build_id, "sdmshrike - ",
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 62e58f9..62a224d 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -234,6 +234,7 @@ struct spcom_device {
 
 	int32_t nvm_ion_fd;
 	struct mutex ioctl_lock;
+	struct mutex create_channel_lock;
 };
 
 /* Device Driver State */
@@ -1241,6 +1242,13 @@ static int spcom_handle_write(struct spcom_channel *ch,
 		return -EINVAL;
 	}
 
+	if (cmd_id == SPCOM_CMD_SEND || cmd_id == SPCOM_CMD_SEND_MODIFIED) {
+		if (!spcom_is_channel_connected(ch)) {
+			pr_err("ch [%s] remote side not connected\n", ch->name);
+			return -ENOTCONN;
+		}
+	}
+
 	switch (cmd_id) {
 	case SPCOM_CMD_SEND:
 		if (ch->is_sharable) {
@@ -1698,12 +1706,6 @@ static ssize_t spcom_device_write(struct file *filp,
 			return -EINVAL;
 		}
 		pr_debug("control device - no channel context\n");
-	} else {
-		/* Check if remote side connect */
-		if (!spcom_is_channel_connected(ch)) {
-			pr_err("ch [%s] remote side not connect\n", ch->name);
-			return -ENOTCONN;
-		}
 	}
 	buf_size = size; /* explicit casting size_t to int */
 	buf = kzalloc(size, GFP_KERNEL);
@@ -1963,22 +1965,26 @@ static int spcom_create_channel_chardev(const char *name, bool is_sharable)
 	struct cdev *cdev;
 
 	pr_debug("Add channel [%s]\n", name);
+	mutex_lock(&spcom_dev->create_channel_lock);
 
 	ch = spcom_find_channel_by_name(name);
 	if (ch) {
 		pr_err("channel [%s] already exist\n", name);
+		mutex_unlock(&spcom_dev->create_channel_lock);
 		return -EBUSY;
 	}
 
 	ch = spcom_find_channel_by_name(""); /* find reserved channel */
 	if (!ch) {
 		pr_err("no free channel\n");
+		mutex_unlock(&spcom_dev->create_channel_lock);
 		return -ENODEV;
 	}
 
 	ret = spcom_init_channel(ch, is_sharable, name);
 	if (ret < 0) {
 		pr_err("can't init channel %d\n", ret);
+		mutex_unlock(&spcom_dev->create_channel_lock);
 		return ret;
 	}
 
@@ -2017,6 +2023,7 @@ static int spcom_create_channel_chardev(const char *name, bool is_sharable)
 	ch->cdev = cdev;
 	ch->dev = dev;
 	mutex_unlock(&ch->lock);
+	mutex_unlock(&spcom_dev->create_channel_lock);
 
 	return 0;
 
@@ -2033,6 +2040,7 @@ static int spcom_create_channel_chardev(const char *name, bool is_sharable)
 	mutex_lock(&ch->lock);
 	memset(ch->name, 0, SPCOM_CHANNEL_NAME_SIZE);
 	mutex_unlock(&ch->lock);
+	mutex_unlock(&spcom_dev->create_channel_lock);
 	return -EFAULT;
 }
 
@@ -2487,6 +2495,7 @@ static int spcom_probe(struct platform_device *pdev)
 	spin_lock_init(&spcom_dev->rx_lock);
 	spcom_dev->nvm_ion_fd = -1;
 	mutex_init(&spcom_dev->ioctl_lock);
+	mutex_init(&spcom_dev->create_channel_lock);
 
 	ret = spcom_register_chardev();
 	if (ret) {
diff --git a/drivers/soc/qcom/sysmon-qmi.c b/drivers/soc/qcom/sysmon-qmi.c
index 5a4c1ab..ee1b949 100644
--- a/drivers/soc/qcom/sysmon-qmi.c
+++ b/drivers/soc/qcom/sysmon-qmi.c
@@ -37,7 +37,7 @@
 #define QMI_SSCTL_SUBSYS_EVENT_REQ_LENGTH	40
 #define QMI_SSCTL_RESP_MSG_LENGTH		7
 #define QMI_SSCTL_EMPTY_MSG_LENGTH		0
-#define QMI_SSCTL_MAX_MSG_LENGTH		90
+#define QMI_SSCTL_MAX_MSG_LENGTH		101
 
 #define SSCTL_SERVICE_ID			0x2B
 #define SSCTL_VER_2				2
diff --git a/drivers/soc/qcom/wda_qmi.c b/drivers/soc/qcom/wda_qmi.c
index 89cb1f7..10f6672 100644
--- a/drivers/soc/qcom/wda_qmi.c
+++ b/drivers/soc/qcom/wda_qmi.c
@@ -250,7 +250,8 @@ static int wda_set_powersave_mode_req(void *wda_data, uint8_t enable)
 	return ret;
 }
 
-static int wda_set_powersave_config_req(struct qmi_handle *wda_handle)
+static int wda_set_powersave_config_req(struct qmi_handle *wda_handle,
+					int dl_marker)
 {
 	struct wda_qmi_data *data = container_of(wda_handle,
 						 struct wda_qmi_data, handle);
@@ -280,7 +281,8 @@ static int wda_set_powersave_config_req(struct qmi_handle *wda_handle)
 	req->ep_id.ep_type = data->svc.ep_type;
 	req->ep_id.iface_id = data->svc.iface_id;
 	req->req_data_cfg_valid = 1;
-	req->req_data_cfg = WDA_DATA_POWERSAVE_CONFIG_ALL_MASK_V01;
+	req->req_data_cfg = dl_marker ? WDA_DATA_POWERSAVE_CONFIG_ALL_MASK_V01 :
+					WDA_DATA_POWERSAVE_CONFIG_FLOW_CTL_V01;
 	ret = qmi_send_request(wda_handle, &data->ssctl, &txn,
 			QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01,
 			QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01_MAX_MSG_LEN,
@@ -312,11 +314,22 @@ static void wda_svc_config(struct work_struct *work)
 	struct wda_qmi_data *data = container_of(work, struct wda_qmi_data,
 						 svc_arrive);
 	struct qmi_info *qmi;
-	int rc;
+	int rc, dl_marker = 0;
+
+	while (!rtnl_trylock()) {
+		if (!data->restart_state)
+			cond_resched();
+		else
+			return;
+	}
+
+	dl_marker = rmnet_get_dlmarker_info(data->rmnet_port);
+	rtnl_unlock();
 
 	if (data->restart_state == 1)
 		return;
-	rc = wda_set_powersave_config_req(&data->handle);
+
+	rc = wda_set_powersave_config_req(&data->handle, dl_marker);
 	if (rc < 0) {
 		pr_err("%s Failed to init service, err[%d]\n", __func__, rc);
 		return;
@@ -330,6 +343,7 @@ static void wda_svc_config(struct work_struct *work)
 		else
 			return;
 	}
+
 	qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
 	if (!qmi) {
 		rtnl_unlock();
@@ -344,7 +358,8 @@ static void wda_svc_config(struct work_struct *work)
 
 	rtnl_unlock();
 
-	pr_info("Connection established with the WDA Service\n");
+	pr_info("Connection established with the WDA Service, DL Marker %s\n",
+		dl_marker ? "enabled" : "disabled");
 }
 
 static int wda_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc)
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index 0cce762..0d7bbb0 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -444,7 +444,7 @@ struct ion_page_pool {
 	gfp_t gfp_mask;
 	unsigned int order;
 	struct plist_node list;
-	struct ion_heap heap;
+	struct device *dev;
 };
 
 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index 63e9218..4d29320 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -81,7 +81,7 @@ void ion_page_pool_refill(struct ion_page_pool *pool)
 {
 	struct page *page;
 	gfp_t gfp_refill = (pool->gfp_mask | __GFP_RECLAIM) & ~__GFP_NORETRY;
-	struct device *dev = pool->heap.priv;
+	struct device *dev = pool->dev;
 
 	/* skip refilling order 0 pools */
 	if (!pool->order)
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 22ad52b..d029608 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -79,7 +79,7 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 	page = ion_page_pool_alloc(pool, from_pool);
 
 	if (pool_auto_refill_en &&
-	    pool_count_below_lowmark(pool)) {
+	    pool_count_below_lowmark(pool) && vmid <= 0) {
 		wake_up_process(heap->kworker[cached]);
 	}
 
@@ -650,7 +650,7 @@ static int ion_system_heap_create_pools(struct ion_system_heap *sys_heap,
 		pool = ion_page_pool_create(gfp_flags, orders[i], cached);
 		if (!pool)
 			goto err_create_pool;
-		pool->heap = sys_heap->heap;
+		pool->dev = sys_heap->heap.priv;
 		pools[i] = pool;
 	}
 	return 0;
@@ -723,6 +723,7 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data)
 	heap->heap.ops = &system_heap_ops;
 	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
 	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+	heap->heap.priv = data->priv;
 
 	for (i = 0; i < VMID_LAST; i++)
 		if (is_secure_vmid_valid(i))
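
The ion_page_pool change replaces an embedded copy of the heap with a
pointer to the only thing the pool actually needs from it. Assigning
pool->heap = sys_heap->heap copied the whole struct ion_heap by value at
pool-creation time, before heap.priv was even populated (note the new
heap->heap.priv = data->priv assignment in ion_system_heap_create()), so
the refill path read a stale device. Storing just the device pointer
keeps a single shared source of truth. Roughly:

  /* Sketch: borrow the owning heap's device instead of copying the heap. */
  struct ion_page_pool {
  	gfp_t gfp_mask;
  	unsigned int order;
  	struct plist_node list;
  	struct device *dev;		/* owned by the heap, not the pool */
  };

  pool->dev = sys_heap->heap.priv;	/* share, don't snapshot */
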
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index f0415de..c409961 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/thermal.h>
 #include "tsens.h"
+#include "thermal_core.h"
 #include "qcom/qti_virtual_sensor.h"
 
 LIST_HEAD(tsens_device_list);
@@ -199,6 +200,8 @@ static int get_device_tree_data(struct platform_device *pdev,
 	else
 		tmdev->min_temp_sensor_id = MIN_TEMP_DEF_OFFSET;
 
+	tmdev->tsens_reinit_wa =
+		of_property_read_bool(of_node, "tsens-reinit-wa");
 	return rc;
 }
 
@@ -253,6 +256,38 @@ static int tsens_tm_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static void tsens_therm_fwk_notify(struct work_struct *work)
+{
+	int i, rc, temp;
+	struct tsens_device *tmdev =
+		container_of(work, struct tsens_device, therm_fwk_notify);
+
+	TSENS_DBG(tmdev, "Controller %pK\n", &tmdev->phys_addr_tm);
+	for (i = 0; i < TSENS_MAX_SENSORS; i++) {
+		if (tmdev->ops->sensor_en(tmdev, i)) {
+			rc = tsens_get_temp(&tmdev->sensor[i], &temp);
+			if (rc) {
+				pr_err("%s: Error:%d reading temp sensor:%d\n",
+					__func__, rc, i);
+				continue;
+			}
+			TSENS_DBG(tmdev, "Calling trip_temp for sensor %d\n",
+					i);
+			of_thermal_handle_trip_temp(tmdev->sensor[i].tzd, temp);
+		}
+	}
+	if (tmdev->min_temp_sensor_id != MIN_TEMP_DEF_OFFSET) {
+		rc = tsens_get_temp(&tmdev->min_temp, &temp);
+		if (rc) {
+			pr_err("%s: Error:%d reading min temp sensor:%d\n",
+				   __func__, rc, tmdev->min_temp_sensor_id);
+			return;
+		}
+		TSENS_DBG(tmdev, "Calling trip_temp for min temp sensor %d\n",
+				tmdev->min_temp_sensor_id);
+		of_thermal_handle_trip_temp(tmdev->min_temp.tzd, temp);
+	}
+}
+
 int tsens_tm_probe(struct platform_device *pdev)
 {
 	struct tsens_device *tmdev = NULL;
@@ -282,6 +317,16 @@ int tsens_tm_probe(struct platform_device *pdev)
 		return rc;
 	}
 
+	snprintf(tsens_name, sizeof(tsens_name), "tsens_wq_%pa",
+		&tmdev->phys_addr_tm);
+
+	tmdev->tsens_reinit_work = alloc_workqueue(tsens_name,
+		WQ_HIGHPRI, 0);
+	if (!tmdev->tsens_reinit_work) {
+		rc = -ENOMEM;
+		return rc;
+	}
+	INIT_WORK(&tmdev->therm_fwk_notify, tsens_therm_fwk_notify);
 	rc = tsens_thermal_zone_register(tmdev);
 	if (rc) {
 		pr_err("Error registering the thermal zone\n");
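
tsens_therm_fwk_notify() is the notification half of the re-init
workaround (the detection half lives in the tsens2xxx.c hunk below): once
the controller has been re-initialized, the worker re-reads every enabled
sensor and feeds the fresh temperature to the thermal framework so trips
are re-evaluated. Each controller gets its own high-priority workqueue so
the notification cannot be starved by unrelated work. The per-sensor step,
condensed:

  /* Re-read one sensor and let the framework re-evaluate its trips. */
  static void notify_one(struct tsens_device *tmdev, int idx)
  {
  	int temp;

  	if (!tsens_get_temp(&tmdev->sensor[idx], &temp))
  		of_thermal_handle_trip_temp(tmdev->sensor[idx].tzd, temp);
  }
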
diff --git a/drivers/thermal/qcom/bcl_soc.c b/drivers/thermal/qcom/bcl_soc.c
index d46fbd1..a375f43 100644
--- a/drivers/thermal/qcom/bcl_soc.c
+++ b/drivers/thermal/qcom/bcl_soc.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
@@ -81,6 +81,9 @@ static void bcl_evaluate_soc(struct work_struct *work)
 {
 	int battery_percentage;
 
+	if (!bcl_perph->tz_dev)
+		return;
+
 	if (bcl_read_soc(NULL, &battery_percentage))
 		return;
 
diff --git a/drivers/thermal/qcom/cpu_isolate.c b/drivers/thermal/qcom/cpu_isolate.c
index 132141c..1d14aa9 100644
--- a/drivers/thermal/qcom/cpu_isolate.c
+++ b/drivers/thermal/qcom/cpu_isolate.c
@@ -170,7 +170,7 @@ static int cpu_isolate_set_cur_state(struct thermal_cooling_device *cdev,
 
 	/* Request state should be less than max_level */
 	if (state > CPU_ISOLATE_LEVEL)
-		state = CPU_ISOLATE_LEVEL;
+		return -EINVAL;
 
 	state = !!state;
 	/* Check if the old cooling action is same as new cooling action */
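
The same validation change recurs across the cooling devices in this
series (cpu_isolate, cx_ipeak, lmh_cpu_vdd, qmi_cooling, both regulator
cdevs, and the backlight cooling device further down): an out-of-range
state request is now rejected with -EINVAL instead of being silently
clamped to the maximum, so the thermal core sees the bad request rather
than having it masked. The common shape, with hypothetical names for the
per-driver limit and private data:

  static int cdev_set_cur_state(struct thermal_cooling_device *cdev,
  				unsigned long state)
  {
  	struct my_cdev *d = cdev->devdata;	/* hypothetical */

  	if (state > d->max_level)
  		return -EINVAL;			/* reject, don't clamp */

  	if (d->cur_state == state)
  		return 0;			/* no change requested */

  	return my_apply_state(d, state);	/* hypothetical hook */
  }
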
diff --git a/drivers/thermal/qcom/cx_ipeak_cdev.c b/drivers/thermal/qcom/cx_ipeak_cdev.c
index cfc45aa..ae86c56 100644
--- a/drivers/thermal/qcom/cx_ipeak_cdev.c
+++ b/drivers/thermal/qcom/cx_ipeak_cdev.c
@@ -88,7 +88,7 @@ static int cxip_lm_set_cur_state(struct thermal_cooling_device *cdev,
 	int ret = 0;
 
 	if (state > CXIP_LM_CDEV_MAX_STATE)
-		state = CXIP_LM_CDEV_MAX_STATE;
+		return -EINVAL;
 
 	if (cxip_dev->state == state)
 		return 0;
diff --git a/drivers/thermal/qcom/lmh_cpu_vdd_cdev.c b/drivers/thermal/qcom/lmh_cpu_vdd_cdev.c
index 20dfeb5..045a19e 100644
--- a/drivers/thermal/qcom/lmh_cpu_vdd_cdev.c
+++ b/drivers/thermal/qcom/lmh_cpu_vdd_cdev.c
@@ -36,7 +36,7 @@ static int lmh_cpu_vdd_set_cur_state(struct thermal_cooling_device *cdev,
 	struct lmh_cpu_vdd_cdev *vdd_cdev = cdev->devdata;
 
 	if (state > LMH_CPU_VDD_MAX_LVL)
-		state = LMH_CPU_VDD_MAX_LVL;
+		return -EINVAL;
 
 	state = !!state;
 	/* Check if the old cooling action is same as new cooling action */
diff --git a/drivers/thermal/qcom/qmi_cooling.c b/drivers/thermal/qcom/qmi_cooling.c
index ff1705a..5cd8aef 100644
--- a/drivers/thermal/qcom/qmi_cooling.c
+++ b/drivers/thermal/qcom/qmi_cooling.c
@@ -130,6 +130,14 @@ static struct qmi_dev_info device_clients[] = {
 		.type = QMI_CDEV_MAX_LIMIT_TYPE,
 	},
 	{
+		.dev_name = "cdsp_sw",
+		.type = QMI_CDEV_MAX_LIMIT_TYPE,
+	},
+	{
+		.dev_name = "cdsp_hw",
+		.type = QMI_CDEV_MAX_LIMIT_TYPE,
+	},
+	{
 		.dev_name = "cpuv_restriction_cold",
 		.type = QMI_CDEV_MIN_LIMIT_TYPE,
 	},
@@ -266,7 +274,7 @@ static int qmi_set_cur_state(struct thermal_cooling_device *cdev,
 		return 0;
 
 	if (state > qmi_cdev->max_level)
-		state = qmi_cdev->max_level;
+		return -EINVAL;
 
 	return qmi_set_cur_or_min_state(qmi_cdev, state);
 }
@@ -283,7 +291,7 @@ static int qmi_set_min_state(struct thermal_cooling_device *cdev,
 		return 0;
 
 	if (state > qmi_cdev->max_level)
-		state = qmi_cdev->max_level;
+		return -EINVAL;
 
 	/* Convert state into QMI client expects for min state */
 	state = qmi_cdev->max_level - state;
diff --git a/drivers/thermal/qcom/qti_virtual_sensor.c b/drivers/thermal/qcom/qti_virtual_sensor.c
index 09ed2f0..e11797e 100644
--- a/drivers/thermal/qcom/qti_virtual_sensor.c
+++ b/drivers/thermal/qcom/qti_virtual_sensor.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/thermal.h>
@@ -151,7 +151,28 @@ static const struct virtual_sensor_data qti_virtual_sensors[] = {
 				"cpuss-1-usr"},
 		.logic = VIRT_MAXIMUM,
 	},
-
+	{
+		.virt_zone_name = "hepta-cpu-max-step",
+		.num_sensors = 7,
+		.sensor_names = {"cpu-1-0-usr",
+				"cpu-1-1-usr",
+				"cpu-1-2-usr",
+				"cpu-1-3-usr",
+				"cpuss-0-usr",
+				"cpuss-1-usr",
+				"cpuss-2-usr"},
+		.logic = VIRT_MAXIMUM,
+	},
+	{
+		.virt_zone_name = "gpu-skin-avg-step",
+		.num_sensors = 2,
+		.sensor_names = {"skin-msm-therm-usr",
+				"gpuss-0-usr"},
+		.logic = VIRT_WEIGHTED_AVG,
+		.coefficient_ct = 2,
+		.coefficients = {30, 70},
+		.avg_denominator = 100,
+	},
 };
 
 int qti_virtual_sensor_register(struct device *dev)
diff --git a/drivers/thermal/qcom/regulator_aop_cdev.c b/drivers/thermal/qcom/regulator_aop_cdev.c
index 63a7c6e..032cc93 100644
--- a/drivers/thermal/qcom/regulator_aop_cdev.c
+++ b/drivers/thermal/qcom/regulator_aop_cdev.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -92,7 +92,7 @@ static int reg_send_min_state(struct thermal_cooling_device *cdev,
 	int ret = 0;
 
 	if (state > REG_CDEV_MAX_STATE)
-		state = REG_CDEV_MAX_STATE;
+		return -EINVAL;
 
 	if (reg_dev->min_state == state)
 		return ret;
@@ -120,6 +120,9 @@ static int reg_get_cur_state(struct thermal_cooling_device *cdev,
 static int reg_send_cur_state(struct thermal_cooling_device *cdev,
 				unsigned long state)
 {
+	if (state > REG_CDEV_MAX_STATE)
+		return -EINVAL;
+
 	return 0;
 }
 
diff --git a/drivers/thermal/qcom/regulator_cdev.c b/drivers/thermal/qcom/regulator_cdev.c
index 8634fd4..b042181 100644
--- a/drivers/thermal/qcom/regulator_cdev.c
+++ b/drivers/thermal/qcom/regulator_cdev.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -47,7 +47,7 @@ static int reg_set_min_state(struct thermal_cooling_device *cdev,
 	int ret = 0;
 
 	if (state > reg_dev->lvl_ct)
-		state = reg_dev->lvl_ct;
+		return -EINVAL;
 
 	if (reg_dev->min_reg_state == state)
 		return ret;
@@ -92,6 +92,11 @@ static int reg_get_cur_state(struct thermal_cooling_device *cdev,
 static int reg_set_cur_state(struct thermal_cooling_device *cdev,
 				 unsigned long state)
 {
+	struct reg_cooling_device *reg_dev = cdev->devdata;
+
+	if (state > reg_dev->lvl_ct)
+		return -EINVAL;
+
 	/* regulator cooling device doesn't support voltage ceil */
 	return 0;
 }
diff --git a/drivers/thermal/tsens.h b/drivers/thermal/tsens.h
index bf8768a..036ee11 100644
--- a/drivers/thermal/tsens.h
+++ b/drivers/thermal/tsens.h
@@ -211,7 +211,10 @@ struct tsens_device {
 	int				trdy_fail_ctr;
 	struct tsens_sensor		min_temp;
 	u8				min_temp_sensor_id;
-	struct tsens_sensor		sensor[0];
+	struct workqueue_struct		*tsens_reinit_work;
+	struct work_struct		therm_fwk_notify;
+	bool				tsens_reinit_wa;
+	struct tsens_sensor             sensor[0];
 };
 
 extern const struct tsens_data data_tsens2xxx, data_tsens23xx, data_tsens24xx,
diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c
index 7d040ae..ef31fcf 100644
--- a/drivers/thermal/tsens2xxx.c
+++ b/drivers/thermal/tsens2xxx.c
@@ -11,6 +11,7 @@
 #include <linux/vmalloc.h>
 #include "tsens.h"
 #include "thermal_core.h"
+#include <soc/qcom/scm.h>
 
 #define TSENS_DRIVER_NAME			"msm-tsens"
 
@@ -66,6 +67,9 @@
 #define TSENS_TM_0C_THRESHOLDS(n)		((n) + 0x1c)
 #define TSENS_MAX_READ_FAIL			50
 
+#define TSENS_INIT_ID	0x5
+#define TSENS_RECOVERY_LOOP_COUNT 5
+
 static void msm_tsens_convert_temp(int last_temp, int *temp)
 {
 	int code_mask = ~TSENS_TM_CODE_BIT_MASK;
@@ -80,10 +84,10 @@ static void msm_tsens_convert_temp(int last_temp, int *temp)
 
 static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
 {
-	struct tsens_device *tmdev = NULL;
-	unsigned int code;
+	struct tsens_device *tmdev = NULL, *tmdev_itr;
+	unsigned int code, ret, tsens_ret;
 	void __iomem *sensor_addr, *trdy;
-	int last_temp = 0, last_temp2 = 0, last_temp3 = 0;
+	int last_temp = 0, last_temp2 = 0, last_temp3 = 0, count = 0;
 
 	if (!sensor)
 		return -EINVAL;
@@ -93,22 +97,66 @@ static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
 	trdy = TSENS_TM_TRDY(tmdev->tsens_tm_addr);
 
 	code = readl_relaxed_no_log(trdy);
+
 	if (!((code & TSENS_TM_TRDY_FIRST_ROUND_COMPLETE) >>
 			TSENS_TM_TRDY_FIRST_ROUND_COMPLETE_SHIFT)) {
-		pr_err("tsens device first round not complete0x%x, ctr is %d\n",
-			code, tmdev->trdy_fail_ctr);
-		tmdev->trdy_fail_ctr++;
+		pr_err("%s: tsens device first round not complete 0x%x\n",
+			__func__, code);
+		/* Wait for 2.5 ms for tsens controller to recover */
+		do {
+			udelay(500);
+			code = readl_relaxed_no_log(trdy);
+			if (code & TSENS_TM_TRDY_FIRST_ROUND_COMPLETE) {
+				TSENS_DUMP(tmdev, "%s",
+					"tsens controller recovered\n");
+				goto sensor_read;
+			}
+		} while (++count < TSENS_RECOVERY_LOOP_COUNT);
 
-		if (tmdev->trdy_fail_ctr >= TSENS_MAX_READ_FAIL) {
+		/*
+		 * The TSENS controller did not recover; ask TZ via an
+		 * SCM call to re-initialize it.
+		 */
+		if (tmdev->tsens_reinit_wa) {
+			struct scm_desc desc = { 0 };
+
 			if (tmdev->ops->dbg)
 				tmdev->ops->dbg(tmdev, 0,
 					TSENS_DBG_LOG_BUS_ID_DATA, NULL);
+			/* Make an scm call to re-init TSENS */
+			TSENS_DBG(tmdev, "%s",
+				   "Calling TZ to re-init TSENS\n");
+			ret = scm_call2(SCM_SIP_FNID(SCM_SVC_TSENS,
+							TSENS_INIT_ID), &desc);
+			TSENS_DBG(tmdev, "%s",
+				   "return from scm call\n");
+			if (ret) {
+				pr_err("%s: scm call failed %d\n",
+					__func__, ret);
+				BUG();
+			}
+			tsens_ret = desc.ret[0];
+			if (tsens_ret) {
+				pr_err("%s: scm call failed to init tsens %d\n",
+					__func__, tsens_ret);
+				BUG();
+			}
+			/* Notify thermal fwk */
+			list_for_each_entry(tmdev_itr,
+						&tsens_device_list, list) {
+				queue_work(tmdev_itr->tsens_reinit_work,
+					&tmdev_itr->therm_fwk_notify);
+			}
+
+		} else {
+			pr_err("%s: tsens controller got reset\n", __func__);
 			BUG();
 		}
-
-		return -ENODATA;
+		return -EAGAIN;
 	}
 
+sensor_read:
+
 	tmdev->trdy_fail_ctr = 0;
 
 	code = readl_relaxed_no_log(sensor_addr +
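
The recovery path works in two stages. First it polls TRDY for up to
2.5 ms (five 500 us delays) in case the controller comes back on its own;
only if that fails, and the "tsens-reinit-wa" DT property was set, does it
ask TrustZone to re-initialize the controller and then queue the notify
worker on every registered controller. A condensed restatement of the
flow above:

  do {
  	udelay(500);
  	code = readl_relaxed_no_log(trdy);
  	if (code & TSENS_TM_TRDY_FIRST_ROUND_COMPLETE)
  		goto sensor_read;		/* recovered on its own */
  } while (++count < TSENS_RECOVERY_LOOP_COUNT);

  if (tmdev->tsens_reinit_wa) {
  	struct scm_desc desc = { 0 };

  	if (scm_call2(SCM_SIP_FNID(SCM_SVC_TSENS, TSENS_INIT_ID), &desc) ||
  	    desc.ret[0])
  		BUG();				/* re-init failed: fatal */
  	list_for_each_entry(tmdev_itr, &tsens_device_list, list)
  		queue_work(tmdev_itr->tsens_reinit_work,
  			   &tmdev_itr->therm_fwk_notify);
  }
  return -EAGAIN;				/* caller may retry the read */
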
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index ce40ff0..8e27de9 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -109,7 +109,7 @@
 #define UART_CONSOLE_CORE2X_VOTE (960)
 
 #define WAKEBYTE_TIMEOUT_MSEC	(2000)
-#define WAIT_XFER_MAX_ITER	(50)
+#define WAIT_XFER_MAX_ITER	(2)
 #define WAIT_XFER_MAX_TIMEOUT_US	(10000)
 #define WAIT_XFER_MIN_TIMEOUT_US	(9000)
 #define IPC_LOG_PWR_PAGES	(6)
@@ -197,6 +197,8 @@ static void msm_geni_serial_stop_rx(struct uart_port *uport);
 static int msm_geni_serial_runtime_resume(struct device *dev);
 static int msm_geni_serial_runtime_suspend(struct device *dev);
 static int msm_geni_serial_get_ver_info(struct uart_port *uport);
+static void msm_geni_serial_set_manual_flow(bool enable,
+				struct msm_geni_serial_port *port);
 static int uart_line_id;
 
 #define GET_DEV_PORT(uport) \
@@ -297,6 +299,12 @@ static void wait_for_transfers_inflight(struct uart_port *uport)
 {
 	int iter = 0;
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+	unsigned int geni_status;
+
+	geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
+	/* stop_rx may already have been called; nothing left to drain. */
+	if (!(geni_status & S_GENI_CMD_ACTIVE))
+		return;
 
 	while (iter < WAIT_XFER_MAX_ITER) {
 		if (check_transfers_inflight(uport)) {
@@ -623,6 +631,7 @@ static void msm_geni_serial_poll_cancel_tx(struct uart_port *uport)
 static void msm_geni_serial_abort_rx(struct uart_port *uport)
 {
 	unsigned int irq_clear = S_CMD_DONE_EN;
+	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 
 	geni_abort_s_cmd(uport->membase);
 	/* Ensure this goes through before polling. */
@@ -631,6 +640,8 @@ static void msm_geni_serial_abort_rx(struct uart_port *uport)
 	msm_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
 					S_GENI_CMD_ABORT, false);
 	geni_write_reg_nolog(irq_clear, uport->membase, SE_GENI_S_IRQ_CLEAR);
+	/* FORCE_DEFAULT drives RFR high by default, so set it low manually */
+	msm_geni_serial_set_manual_flow(true, port);
 	geni_write_reg(FORCE_DEFAULT, uport->membase, GENI_FORCE_DEFAULT_REG);
 }
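
Two independent serial fixes land here: wait_for_transfers_inflight() now
returns early when the secondary (RX) GENI command is no longer active,
since stop_rx may already have run and there is nothing left to drain
(the iteration budget also drops from 50 to 2), and the RX abort path
re-enables manual flow control so RFR is driven low again before
FORCE_DEFAULT, which would otherwise leave RFR high and stall the remote
sender. The early-out, in isolation:

  /* Skip the drain loop entirely when no RX command is active. */
  geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
  if (!(geni_status & S_GENI_CMD_ACTIVE))
  	return;		/* stop_rx already ran; nothing in flight */
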
 
diff --git a/drivers/uio/msm_sharedmem/msm_sharedmem.c b/drivers/uio/msm_sharedmem/msm_sharedmem.c
index cc15ed0..725786a 100644
--- a/drivers/uio/msm_sharedmem/msm_sharedmem.c
+++ b/drivers/uio/msm_sharedmem/msm_sharedmem.c
@@ -66,21 +66,33 @@ static int sharedmem_mmap(struct uio_info *info, struct vm_area_struct *vma)
 }
 
 /* Setup the shared ram permissions.
- * This function currently supports the mpss client only.
+ * This function currently supports the mpss client only; the nav VM
+ * may optionally be granted access as well.
  */
-static void setup_shared_ram_perms(u32 client_id, phys_addr_t addr, u32 size)
+static void setup_shared_ram_perms(u32 client_id, phys_addr_t addr, u32 size,
+				   bool vm_nav_path)
 {
 	int ret;
 	u32 source_vmlist[1] = {VMID_HLOS};
-	int dest_vmids[2] = {VMID_HLOS, VMID_MSS_MSA};
-	int dest_perms[2] = {PERM_READ|PERM_WRITE,
-			     PERM_READ|PERM_WRITE};
 
 	if (client_id != MPSS_RMTS_CLIENT_ID)
 		return;
 
-	ret = hyp_assign_phys(addr, size, source_vmlist, 1, dest_vmids,
-				dest_perms, 2);
+	if (vm_nav_path) {
+		int dest_vmids[3] = {VMID_HLOS, VMID_MSS_MSA, VMID_NAV};
+		int dest_perms[3] = {PERM_READ|PERM_WRITE,
+				     PERM_READ|PERM_WRITE,
+					PERM_READ|PERM_WRITE};
+
+		ret = hyp_assign_phys(addr, size, source_vmlist, 1, dest_vmids,
+					dest_perms, 3);
+	} else {
+		int dest_vmids[2] = {VMID_HLOS, VMID_MSS_MSA};
+		int dest_perms[2] = {PERM_READ|PERM_WRITE,
+				     PERM_READ|PERM_WRITE};
+
+		ret = hyp_assign_phys(addr, size, source_vmlist, 1, dest_vmids,
+					dest_perms, 2);
+	}
 	if (ret != 0) {
 		if (ret == -EINVAL)
 			pr_warn("hyp_assign_phys is not supported!\n");
@@ -102,6 +114,7 @@ static int msm_sharedmem_probe(struct platform_device *pdev)
 	phys_addr_t shared_mem_pyhsical = 0;
 	bool is_addr_dynamic = false;
 	bool guard_memory = false;
+	bool vm_nav_path = false;
 
 	/* Get the addresses from platform-data */
 	if (!pdev->dev.of_node) {
@@ -162,8 +175,16 @@ static int msm_sharedmem_probe(struct platform_device *pdev)
 			shared_mem_pyhsical += SZ_4K;
 	}
 
+	/*
+	 * If this DT property is set, the NAV VM is also granted access
+	 * to the shared memory region.
+	 */
+	vm_nav_path = of_property_read_bool(pdev->dev.of_node,
+			"qcom,vm-nav-path");
+
 	/* Set up the permissions for the shared ram that was allocated. */
-	setup_shared_ram_perms(client_id, shared_mem_pyhsical, shared_mem_size);
+	setup_shared_ram_perms(client_id, shared_mem_pyhsical, shared_mem_size,
+				vm_nav_path);
 
 	/* Setup device */
 	info->mmap = sharedmem_mmap; /* Custom mmap function. */
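
With the optional "qcom,vm-nav-path" DT property set, the hypervisor
assignment gains a third destination VM: HLOS, the modem (MSS MSA), and
the NAV VM all receive read/write access to the shared region. The call
shape, using the same hyp_assign_phys() helper this driver already relies
on:

  u32 src[1] = { VMID_HLOS };
  int dst[3] = { VMID_HLOS, VMID_MSS_MSA, VMID_NAV };
  int perm[3] = { PERM_READ | PERM_WRITE,
  		  PERM_READ | PERM_WRITE,
  		  PERM_READ | PERM_WRITE };

  /* Reassign the region from HLOS alone to all three VMs. */
  ret = hyp_assign_phys(addr, size, src, 1, dst, perm, 3);
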
diff --git a/drivers/usb/dwc3/debug_ipc.c b/drivers/usb/dwc3/debug_ipc.c
index 98f27a5..e00bbfc 100644
--- a/drivers/usb/dwc3/debug_ipc.c
+++ b/drivers/usb/dwc3/debug_ipc.c
@@ -139,9 +139,6 @@ void dwc3_dbg_print_reg(struct dwc3 *dwc, const char *name, int reg)
 
 void dwc3_dbg_dma_unmap(struct dwc3 *dwc, u8 ep_num, struct dwc3_request *req)
 {
-	if (ep_num < 2)
-		return;
-
 	ipc_log_string(dwc->dwc_dma_ipc_log_ctxt,
 		"%02X-%-3.3s %-25.25s 0x%pK 0x%lx %u 0x%lx %d", ep_num >> 1,
 		ep_num & 1 ? "IN":"OUT", "UNMAP", &req->request,
@@ -151,9 +148,6 @@ void dwc3_dbg_dma_unmap(struct dwc3 *dwc, u8 ep_num, struct dwc3_request *req)
 
 void dwc3_dbg_dma_map(struct dwc3 *dwc, u8 ep_num, struct dwc3_request *req)
 {
-	if (ep_num < 2)
-		return;
-
 	ipc_log_string(dwc->dwc_dma_ipc_log_ctxt,
 		"%02X-%-3.3s %-25.25s 0x%pK 0x%lx %u 0x%lx", ep_num >> 1,
 		ep_num & 1 ? "IN":"OUT", "MAP", &req->request, req->request.dma,
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index d0644d3..81d667e 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -1504,6 +1504,11 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
 
 	switch (op) {
 	case GSI_EP_OP_PREPARE_TRBS:
+		if (!dwc->pullups_connected) {
+			dbg_log_string("No Pullup\n");
+			return -ESHUTDOWN;
+		}
+
 		request = (struct usb_gsi_request *)op_data;
 		ret = gsi_prepare_trbs(ep, request);
 		break;
@@ -1512,12 +1517,22 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
 		gsi_free_trbs(ep, request);
 		break;
 	case GSI_EP_OP_CONFIG:
+		if (!dwc->pullups_connected) {
+			dbg_log_string("No Pullup\n");
+			return -ESHUTDOWN;
+		}
+
 		request = (struct usb_gsi_request *)op_data;
 		spin_lock_irqsave(&dwc->lock, flags);
 		gsi_configure_ep(ep, request);
 		spin_unlock_irqrestore(&dwc->lock, flags);
 		break;
 	case GSI_EP_OP_STARTXFER:
+		if (!dwc->pullups_connected) {
+			dbg_log_string("No Pullup\n");
+			return -ESHUTDOWN;
+		}
+
 		spin_lock_irqsave(&dwc->lock, flags);
 		ret = gsi_startxfer_for_ep(ep);
 		spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1530,6 +1545,11 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
 		gsi_store_ringbase_dbl_info(ep, request);
 		break;
 	case GSI_EP_OP_ENABLE_GSI:
+		if (!dwc->pullups_connected) {
+			dbg_log_string("No Pullup\n");
+			return -ESHUTDOWN;
+		}
+
 		gsi_enable(ep);
 		break;
 	case GSI_EP_OP_GET_CH_INFO:
@@ -1537,6 +1557,11 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
 		gsi_get_channel_info(ep, ch_info);
 		break;
 	case GSI_EP_OP_RING_DB:
+		if (!dwc->pullups_connected) {
+			dbg_log_string("No Pullup\n");
+			return -ESHUTDOWN;
+		}
+
 		request = (struct usb_gsi_request *)op_data;
 		gsi_ring_db(ep, request);
 		break;
@@ -2547,14 +2572,17 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool force_power_collapse)
 
 	/* Perform controller power collapse */
 	if (!(mdwc->in_host_mode || mdwc->in_device_mode) ||
-	      mdwc->in_restart || force_power_collapse ||
-	      (dwc->gdsc_collapse_in_host_suspend && mdwc->in_host_mode)) {
+	      mdwc->in_restart || force_power_collapse) {
 		mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
 		dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
 		dwc3_msm_config_gdsc(mdwc, 0);
 		clk_disable_unprepare(mdwc->sleep_clk);
+	} else if (dwc->gdsc_collapse_in_host_suspend && mdwc->in_host_mode) {
+		dev_dbg(mdwc->dev, "Collapse GDSC in host mode bus suspend\n");
+		dwc3_msm_config_gdsc(mdwc, 0);
 	}
 
+
 	dwc3_msm_update_bus_bw(mdwc, BUS_VOTE_NONE);
 
 	/*
@@ -2652,8 +2680,12 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
 			dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
 					__func__);
 		clk_prepare_enable(mdwc->sleep_clk);
+	} else if (dwc->gdsc_collapse_in_host_suspend && mdwc->in_host_mode) {
+		dev_dbg(mdwc->dev, "Turn on GDSC in host mode bus resume\n");
+		dwc3_msm_config_gdsc(mdwc, 1);
 	}
 
+
 	/*
 	 * Enable clocks
 	 * Turned ON iface_clk before core_clk due to FSM depedency.
@@ -3530,8 +3562,7 @@ static int dwc3_msm_probe(struct platform_device *pdev)
 	 * in avoiding race conditions between xhci_plat_resume and
 	 * xhci_runtime_resume and also between hcd disconnect and xhci_resume.
 	 */
-	mdwc->sm_usb_wq = alloc_ordered_workqueue("k_sm_usb",
-						WQ_FREEZABLE | WQ_MEM_RECLAIM);
+	mdwc->sm_usb_wq = alloc_ordered_workqueue("k_sm_usb", WQ_FREEZABLE);
 	if (!mdwc->sm_usb_wq) {
 		destroy_workqueue(mdwc->dwc3_wq);
 		return -ENOMEM;
@@ -4626,8 +4657,6 @@ static int dwc3_msm_pm_suspend(struct device *dev)
 	dev_dbg(dev, "dwc3-msm PM suspend\n");
 	dbg_event(0xFF, "PM Sus", 0);
 
-	flush_workqueue(mdwc->dwc3_wq);
-
 	/*
 	 * Check if pm_suspend can proceed irrespective of runtimePM state of
 	 * host.
@@ -4671,8 +4700,6 @@ static int dwc3_msm_pm_resume(struct device *dev)
 	dev_dbg(dev, "dwc3-msm PM resume\n");
 	dbg_event(0xFF, "PM Res", 0);
 
-	/* flush to avoid race in read/write of pm_suspended */
-	flush_workqueue(mdwc->dwc3_wq);
 	atomic_set(&mdwc->pm_suspended, 0);
 
 	if (!dwc->host_poweroff_in_pm_suspend || !mdwc->in_host_mode) {
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index ac89af3..76b3fb4 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -1054,6 +1054,7 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
 		maxpacket = dep->endpoint.maxpacket;
 		rem = req->request.length % maxpacket;
 		dwc->ep0_bounced = true;
+		dbg_ep_map(dep->number, req);
 
 		/* prepare normal TRB */
 		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
@@ -1077,6 +1078,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
 		if (ret)
 			return;
 
+		dbg_ep_map(dep->number, req);
+
 		/* prepare normal TRB */
 		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
 					 req->request.length,
@@ -1096,6 +1099,7 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
 		if (ret)
 			return;
 
+		dbg_ep_map(dep->number, req);
 		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
 				req->request.length, DWC3_TRBCTL_CONTROL_DATA,
 				false);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index b843d11..d80c410 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2873,8 +2873,19 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
 		const struct dwc3_event_depevt *event,
 		struct dwc3_request *req, int status)
 {
+	struct dwc3 *dwc = dep->dwc;
 	int ret;
 
+	/*
+	 * If the HWO bit is set, the TRB is still owned by the core.
+	 * Do not reclaim it until the core has finished processing it.
+	 */
+	if (req->trb->ctrl & DWC3_TRB_CTRL_HWO) {
+		dbg_event(0xFF, "PEND TRB", dep->number);
+		return 1;
+	}
+
 	if (req->num_pending_sgs)
 		ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
 				status);
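
The HWO check enforces dwc3's ownership rule: TRBs are reclaimed strictly
in ring order, and the first TRB still flagged DWC3_TRB_CTRL_HWO belongs
to the controller, so cleanup must stop there and resume on a later
completion event. The nonzero return is what tells the caller's reclaim
loop to break out; sketched from the caller's side:

  /* Stop reclaiming at the first TRB the core still owns. */
  list_for_each_entry_safe(req, tmp, &dep->started_list, list) {
  	ret = dwc3_gadget_ep_cleanup_completed_request(dep, event,
  						       req, status);
  	if (ret)
  		break;		/* hit an HWO TRB; wait for next event */
  }
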
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index dc73594..e1309b0 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2109,6 +2109,12 @@ void composite_disconnect(struct usb_gadget *gadget)
 	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
 	unsigned long			flags;
 
+	if (cdev == NULL) {
+		WARN(1, "%s: Calling disconnect on a gadget that is not connected\n",
+			__func__);
+		return;
+	}
+
 	/* REVISIT:  should we have config and device level
 	 * disconnect callbacks?
 	 */
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 3211960..19da5e29 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -487,10 +487,22 @@ static int ipa_connect_channels(struct gsi_data_port *d_port)
 	log_event_dbg("IN: num_bufs:=%zu, buf_len=%zu\n",
 		d_port->in_request.num_bufs, d_port->in_request.buf_len);
 
-	usb_gsi_ep_op(d_port->in_ep, &d_port->in_request,
-		GSI_EP_OP_PREPARE_TRBS);
-	usb_gsi_ep_op(d_port->in_ep, &d_port->in_request,
+	ret = usb_gsi_ep_op(d_port->in_ep, &d_port->in_request,
+			GSI_EP_OP_PREPARE_TRBS);
+	if (ret) {
+		log_event_err("%s: GSI_EP_OP_PREPARE_TRBS failed: %d\n",
+				__func__, ret);
+		return ret;
+	}
+
+	ret = usb_gsi_ep_op(d_port->in_ep, &d_port->in_request,
 			GSI_EP_OP_STARTXFER);
+	if (ret) {
+		log_event_err("%s: GSI_EP_OP_STARTXFER failed: %d\n",
+				__func__, ret);
+		goto free_trb_ep_in;
+	}
+
 	d_port->in_xfer_rsc_index = usb_gsi_ep_op(d_port->in_ep, NULL,
 			GSI_EP_OP_GET_XFER_IDX);
 
@@ -533,10 +545,22 @@ static int ipa_connect_channels(struct gsi_data_port *d_port)
 		log_event_dbg("OUT: num_bufs:=%zu, buf_len=%zu\n",
 			d_port->out_request.num_bufs,
 			d_port->out_request.buf_len);
-		usb_gsi_ep_op(d_port->out_ep, &d_port->out_request,
+		ret = usb_gsi_ep_op(d_port->out_ep, &d_port->out_request,
 			GSI_EP_OP_PREPARE_TRBS);
-		usb_gsi_ep_op(d_port->out_ep, &d_port->out_request,
+		if (ret) {
+			log_event_err("%s: GSI_EP_OP_PREPARE_TRBS failed: %d\n",
+					__func__, ret);
+			goto end_xfer_ep_in;
+		}
+
+		ret = usb_gsi_ep_op(d_port->out_ep, &d_port->out_request,
 				GSI_EP_OP_STARTXFER);
+		if (ret) {
+			log_event_err("%s: GSI_EP_OP_STARTXFER failed: %d\n",
+					__func__, ret);
+			goto free_trb_ep_out;
+		}
+
 		d_port->out_xfer_rsc_index =
 			usb_gsi_ep_op(d_port->out_ep,
 				NULL, GSI_EP_OP_GET_XFER_IDX);
@@ -612,7 +636,7 @@ static int ipa_connect_channels(struct gsi_data_port *d_port)
 					conn_params);
 	if (ret) {
 		log_event_err("%s: IPA connect failed %d", __func__, ret);
-		return ret;
+		goto end_xfer_ep_out;
 	}
 	log_event_dbg("%s: xdci_connect done", __func__);
 
@@ -640,6 +664,23 @@ static int ipa_connect_channels(struct gsi_data_port *d_port)
 		d_port->out_request.db_reg_phs_addr_msb =
 			ipa_out_channel_out_params.db_reg_phs_addr_msb;
 	}
+
+	return ret;
+
+end_xfer_ep_out:
+	if (d_port->out_ep)
+		usb_gsi_ep_op(d_port->out_ep, NULL,
+			GSI_EP_OP_ENDXFER);
+free_trb_ep_out:
+	if (d_port->out_ep)
+		usb_gsi_ep_op(d_port->out_ep, &d_port->out_request,
+			GSI_EP_OP_FREE_TRBS);
+end_xfer_ep_in:
+	usb_gsi_ep_op(d_port->in_ep, NULL,
+		GSI_EP_OP_ENDXFER);
+free_trb_ep_in:
+	usb_gsi_ep_op(d_port->in_ep, &d_port->in_request,
+		GSI_EP_OP_FREE_TRBS);
 	return ret;
 }
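
ipa_connect_channels() now checks every usb_gsi_ep_op() it issues and
unwinds in reverse order through the new goto ladder: a failure preparing
or starting the OUT endpoint ends the already-started IN transfer and
frees the IN TRBs, and a failed IPA xdci connect tears down both
directions. A skeleton of the ladder, with hypothetical helpers standing
in for the usb_gsi_ep_op() calls:

  static int connect_channels(struct gsi_data_port *d_port)
  {
  	int ret;

  	ret = prepare_in(d_port);	/* GSI_EP_OP_PREPARE_TRBS, IN */
  	if (ret)
  		return ret;
  	ret = start_in(d_port);		/* GSI_EP_OP_STARTXFER, IN */
  	if (ret)
  		goto free_trb_ep_in;
  	ret = prepare_out(d_port);	/* GSI_EP_OP_PREPARE_TRBS, OUT */
  	if (ret)
  		goto end_xfer_ep_in;
  	ret = start_out(d_port);	/* GSI_EP_OP_STARTXFER, OUT */
  	if (ret)
  		goto free_trb_ep_out;
  	ret = ipa_connect(d_port);	/* the xdci connect step */
  	if (ret)
  		goto end_xfer_ep_out;
  	return 0;

  end_xfer_ep_out:
  	end_out(d_port);
  free_trb_ep_out:
  	free_out(d_port);
  end_xfer_ep_in:
  	end_in(d_port);
  free_trb_ep_in:
  	free_in(d_port);
  	return ret;
  }
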
 
@@ -802,8 +843,10 @@ static int gsi_ep_enable(struct f_gsi *gsi)
 			return ret;
 
 		log_event_dbg("%s: Enable IN ep", __func__);
-		usb_gsi_ep_op(gsi->d_port.in_ep,
-			&gsi->d_port.in_request, GSI_EP_OP_CONFIG);
+		ret = usb_gsi_ep_op(gsi->d_port.in_ep,
+				&gsi->d_port.in_request, GSI_EP_OP_CONFIG);
+		if (ret)
+			return ret;
 	}
 
 	if (gsi->d_port.out_ep && !gsi->d_port.out_ep->desc) {
@@ -813,8 +856,13 @@ static int gsi_ep_enable(struct f_gsi *gsi)
 			return ret;
 
 		log_event_dbg("%s: Enable OUT ep", __func__);
-		usb_gsi_ep_op(gsi->d_port.out_ep,
+		ret = usb_gsi_ep_op(gsi->d_port.out_ep,
 				&gsi->d_port.out_request, GSI_EP_OP_CONFIG);
+		if (ret) {
+			usb_gsi_ep_op(gsi->d_port.in_ep,
+				&gsi->d_port.in_request, GSI_EP_OP_DISABLE);
+			return ret;
+		}
 	}
 
 	return 0;
@@ -887,7 +935,16 @@ static void ipa_work_handler(struct work_struct *w)
 				break;
 			}
 
-			ipa_connect_channels(d_port);
+			ret = ipa_connect_channels(d_port);
+			if (ret) {
+				log_event_err("%s: ipa_connect_channels failed\n",
+								__func__);
+				ipa_data_path_disable(d_port);
+				usb_gadget_autopm_put_async(d_port->gadget);
+				d_port->sm_state = STATE_INITIALIZED;
+				break;
+			}
+
 			d_port->sm_state = STATE_WAIT_FOR_IPA_RDY;
 			log_event_dbg("%s: ST_INIT_EVT_SET_ALT",
 					__func__);
@@ -1026,7 +1083,14 @@ static void ipa_work_handler(struct work_struct *w)
 			log_event_dbg("%s: get = %d", __func__,
 				atomic_read(&gad_dev->power.usage_count));
 
-			ipa_connect_channels(d_port);
+			ret = ipa_connect_channels(d_port);
+			if (ret) {
+				log_event_err("%s: ipa_connect_channels failed\n",
+								__func__);
+				usb_gadget_autopm_put_async(d_port->gadget);
+				break;
+			}
+
 			ipa_data_path_enable(d_port);
 			d_port->sm_state = STATE_CONNECTED;
 			log_event_dbg("%s: ST_HOST_NRDY_EVT_HRDY_", __func__);
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 9b86d55..b5f6190 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1235,7 +1235,7 @@ static ssize_t alsa_show(struct device *dev,
 
 	if (fi_midi && fi_midi->f) {
 		midi = func_to_midi(fi_midi->f);
-		if (midi->rmidi && midi->rmidi->card)
+		if (midi->rmidi && midi->card && midi->rmidi->card)
 			return sprintf(buf, "%d %d\n",
 			midi->rmidi->card->number, midi->rmidi->device);
 	}
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index ad6d4eb..0b3f905 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -150,6 +150,7 @@ struct mtp_dev {
 	} perf[MAX_ITERATION];
 	unsigned int dbg_read_index;
 	unsigned int dbg_write_index;
+	struct mutex  read_mutex;
 };
 
 static void *_mtp_ipc_log;
@@ -609,12 +610,11 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
 	mtp_log("(%zu) state:%d\n", count, dev->state);
 
 	/* we will block until we're online */
-	mtp_log("waiting for online state\n");
 	ret = wait_event_interruptible(dev->read_wq,
 		dev->state != STATE_OFFLINE);
 	if (ret < 0) {
 		r = ret;
-		goto done;
+		goto wait_err;
 	}
 
 	len = ALIGN(count, dev->ep_out->maxpacket);
@@ -649,6 +649,11 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
 	dev->state = STATE_BUSY;
 	spin_unlock_irq(&dev->lock);
 
+	mutex_lock(&dev->read_mutex);
+	if (dev->state == STATE_OFFLINE) {
+		r = -EIO;
+		goto done;
+	}
 requeue_req:
 	/* queue a request */
 	req = dev->rx_req[0];
@@ -693,6 +698,8 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
 		r = -EIO;
 
 done:
+	mutex_unlock(&dev->read_mutex);
+wait_err:
 	spin_lock_irq(&dev->lock);
 	if (dev->state == STATE_CANCELED)
 		r = -ECANCELED;
@@ -936,7 +943,7 @@ static void receive_file_work(struct work_struct *data)
 	if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
 		mtp_log("- count(%lld) not multiple of mtu(%d)\n",
 						count, dev->ep_out->maxpacket);
-
+	mutex_lock(&dev->read_mutex);
 	while (count > 0 || write_req) {
 		if (count > 0) {
 			/* queue a request */
@@ -1019,7 +1026,7 @@ static void receive_file_work(struct work_struct *data)
 			read_req = NULL;
 		}
 	}
-
+	mutex_unlock(&dev->read_mutex);
 	mtp_log("returning %d\n", r);
 	/* write the result */
 	dev->xfer_result = r;
@@ -1116,14 +1123,17 @@ static long mtp_send_receive_ioctl(struct file *fp, unsigned int code,
 	 * in kernel context, which is necessary for vfs_read and
 	 * vfs_write to use our buffers in the kernel address space.
 	 */
-	queue_work(dev->wq, work);
-	/* wait for operation to complete */
-	flush_workqueue(dev->wq);
-	fput(filp);
+	dev->xfer_result = 0;
+	if (dev->xfer_file_length) {
+		queue_work(dev->wq, work);
+		/* wait for operation to complete */
+		flush_workqueue(dev->wq);
 
-	/* read the result */
-	smp_rmb();
+		/* read the result */
+		smp_rmb();
+	}
 	ret = dev->xfer_result;
+	fput(filp);
 
 fail:
 	spin_lock_irq(&dev->lock);
@@ -1469,6 +1479,8 @@ mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
 	fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
 	mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
 	mtp_log("dev: %pK\n", dev);
+
+	mutex_lock(&dev->read_mutex);
 	while ((req = mtp_req_get(dev, &dev->tx_idle)))
 		mtp_request_free(req, dev->ep_in);
 	for (i = 0; i < RX_REQ_MAX; i++)
@@ -1479,6 +1491,8 @@ mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
 	dev->state = STATE_OFFLINE;
 	dev->cdev = NULL;
 	spin_unlock_irq(&dev->lock);
+	mutex_unlock(&dev->read_mutex);
+
 	kfree(f->os_desc_table);
 	f->os_desc_n = 0;
 	fi_mtp->func_inst.f = NULL;
@@ -1819,6 +1833,8 @@ struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config)
 	usb_os_desc_prepare_interf_dir(&fi_mtp->func_inst.group, 1,
 					descs, names, THIS_MODULE);
 
+	mutex_init(&fi_mtp->dev->read_mutex);
+
 	return  &fi_mtp->func_inst;
 }
 EXPORT_SYMBOL_GPL(alloc_inst_mtp_ptp);
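
The new read_mutex closes a race between the MTP read paths and function
unbind: mtp_read() and receive_file_work() dereference dev->rx_req[]
while a cable disconnect can trigger mtp_function_unbind(), which frees
those very requests. Both readers now hold the mutex across their
queue-and-wait sequences and re-check for STATE_OFFLINE after taking it,
and unbind frees the requests under the same mutex, so they cannot vanish
mid-read. (The ioctl path also skips queueing work for zero-length
transfers.) The pattern in miniature:

  /* reader */
  mutex_lock(&dev->read_mutex);
  if (dev->state == STATE_OFFLINE) {	/* unbind already ran */
  	mutex_unlock(&dev->read_mutex);
  	return -EIO;
  }
  /* ... queue and wait on dev->rx_req[] safely ... */
  mutex_unlock(&dev->read_mutex);

  /* unbind */
  mutex_lock(&dev->read_mutex);
  /* ... free rx_req[], mark the device STATE_OFFLINE ... */
  mutex_unlock(&dev->read_mutex);
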
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index c137669..f0ae181 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -238,6 +238,8 @@ static void qdss_write_complete(struct usb_ep *ep,
 	}
 
 	spin_lock_irqsave(&qdss->lock, flags);
+	if (!qdss->debug_inface_enabled)
+		list_del(&req->list);
 	list_add_tail(&req->list, list_pool);
 	if (req->length != 0) {
 		d_req->actual = req->actual;
@@ -792,6 +794,7 @@ static struct f_qdss *alloc_usb_qdss(char *channel_name)
 	INIT_LIST_HEAD(&qdss->ctrl_read_pool);
 	INIT_LIST_HEAD(&qdss->ctrl_write_pool);
 	INIT_LIST_HEAD(&qdss->data_write_pool);
+	INIT_LIST_HEAD(&qdss->queued_data_pool);
 	INIT_WORK(&qdss->connect_w, usb_qdss_connect_work);
 	INIT_WORK(&qdss->disconnect_w, usb_qdss_disconnect_work);
 
@@ -893,14 +896,14 @@ int usb_qdss_write(struct usb_qdss_ch *ch, struct qdss_request *d_req)
 	unsigned long flags;
 	struct usb_request *req = NULL;
 
-	pr_debug("usb_qdss_ctrl_write\n");
+	pr_debug("usb_qdss_data_write\n");
 
 	if (!qdss)
 		return -ENODEV;
 
 	spin_lock_irqsave(&qdss->lock, flags);
 
-	if (qdss->usb_connected == 0) {
+	if (qdss->qdss_close || qdss->usb_connected == 0) {
 		spin_unlock_irqrestore(&qdss->lock, flags);
 		return -EIO;
 	}
@@ -913,7 +916,7 @@ int usb_qdss_write(struct usb_qdss_ch *ch, struct qdss_request *d_req)
 
 	req = list_first_entry(&qdss->data_write_pool, struct usb_request,
 		list);
-	list_del(&req->list);
+	list_move_tail(&req->list, &qdss->queued_data_pool);
 	spin_unlock_irqrestore(&qdss->lock, flags);
 
 	req->buf = d_req->buf;
@@ -924,7 +927,8 @@ int usb_qdss_write(struct usb_qdss_ch *ch, struct qdss_request *d_req)
 	req->num_mapped_sgs = d_req->num_mapped_sgs;
 	if (usb_ep_queue(qdss->port.data, req, GFP_ATOMIC)) {
 		spin_lock_irqsave(&qdss->lock, flags);
-		list_add_tail(&req->list, &qdss->data_write_pool);
+		/* Remove from queued pool and add back to data pool */
+		list_move_tail(&req->list, &qdss->data_write_pool);
 		spin_unlock_irqrestore(&qdss->lock, flags);
 		pr_err("qdss usb_ep_queue failed\n");
 		return -EIO;
@@ -971,6 +975,7 @@ struct usb_qdss_ch *usb_qdss_open(const char *name, void *priv,
 	ch->priv = priv;
 	ch->notify = notify;
 	ch->app_conn = 1;
+	qdss->qdss_close = false;
 	spin_unlock_irqrestore(&qdss_lock, flags);
 
 	/* the case USB cabel was connected before qdss called qdss_open */
@@ -987,10 +992,27 @@ void usb_qdss_close(struct usb_qdss_ch *ch)
 	struct usb_gadget *gadget;
 	unsigned long flags;
 	int status;
+	struct usb_request *req;
 
 	pr_debug("%s\n", __func__);
 
 	spin_lock_irqsave(&qdss_lock, flags);
+	if (!qdss)
+		goto close;
+	qdss->qdss_close = true;
+	while (!list_empty(&qdss->queued_data_pool)) {
+		req = list_first_entry(&qdss->queued_data_pool,
+				struct usb_request, list);
+		spin_unlock_irqrestore(&qdss_lock, flags);
+		if (usb_ep_dequeue(qdss->port.data, req)) {
+			spin_lock_irqsave(&qdss_lock, flags);
+			list_move_tail(&req->list, &qdss->data_write_pool);
+			spin_unlock_irqrestore(&qdss_lock, flags);
+		}
+		spin_lock_irqsave(&qdss_lock, flags);
+	}
+	usb_qdss_free_req(ch);
+close:
 	ch->priv_usb = NULL;
 	if (!qdss || !qdss->usb_connected ||
 			!strcmp(qdss->ch.name, USB_QDSS_CH_MDM)) {
diff --git a/drivers/usb/gadget/function/f_qdss.h b/drivers/usb/gadget/function/f_qdss.h
index 477b23b..5a68f23 100644
--- a/drivers/usb/gadget/function/f_qdss.h
+++ b/drivers/usb/gadget/function/f_qdss.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _F_QDSS_H
@@ -54,6 +54,7 @@ struct f_qdss {
 
 	/* for mdm channel SW path */
 	struct list_head data_write_pool;
+	struct list_head queued_data_pool;
 
 	struct work_struct connect_w;
 	struct work_struct disconnect_w;
@@ -62,6 +63,7 @@ struct f_qdss {
 	unsigned int ctrl_in_enabled:1;
 	unsigned int ctrl_out_enabled:1;
 	struct workqueue_struct *wq;
+	bool qdss_close;
 };
 
 struct usb_qdss_opts {
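
The qdss changes pair a new queued_data_pool list with a qdss_close flag:
usb_qdss_write() parks each in-flight request on the queued pool with
list_move_tail() instead of taking it off every list, writers that race
with a close now fail with -EIO, and usb_qdss_close() can walk the queued
pool and cancel whatever the hardware still holds. The close loop,
condensed from the hunk above:

  /* Cancel every request the controller still owns. */
  while (!list_empty(&qdss->queued_data_pool)) {
  	req = list_first_entry(&qdss->queued_data_pool,
  			       struct usb_request, list);
  	spin_unlock_irqrestore(&qdss_lock, flags);
  	/* dequeue may run the completion, which re-pools req itself */
  	if (usb_ep_dequeue(qdss->port.data, req)) {
  		spin_lock_irqsave(&qdss_lock, flags);
  		list_move_tail(&req->list, &qdss->data_write_pool);
  		spin_unlock_irqrestore(&qdss_lock, flags);
  	}
  	spin_lock_irqsave(&qdss_lock, flags);
  }
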
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 9f7e540..8b73bfe 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -136,7 +136,7 @@ int xhci_halt(struct xhci_hcd *xhci)
 	xhci_quiesce(xhci);
 
 	ret = xhci_handshake(&xhci->op_regs->status,
-			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
+			STS_HALT, STS_HALT, 2 * XHCI_MAX_HALT_USEC);
 	if (ret) {
 		xhci_warn(xhci, "Host halt failed, %d\n", ret);
 		return ret;
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index df24403..4ffcee0 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -1176,7 +1176,10 @@ static void phy_msg_received(struct usbpd *pd, enum pd_sop_type sop,
 	list_add_tail(&rx_msg->entry, &pd->rx_q);
 	spin_unlock_irqrestore(&pd->rx_lock, flags);
 
-	kick_sm(pd, 0);
+	if (!work_busy(&pd->sm_work))
+		kick_sm(pd, 0);
+	else
+		usbpd_dbg(&pd->dev, "usbpd_sm already running\n");
 }
 
 static void phy_shutdown(struct usbpd *pd)
@@ -3587,7 +3590,7 @@ static void usbpd_sm(struct work_struct *w)
 	spin_unlock_irqrestore(&pd->rx_lock, flags);
 
 	/* requeue if there are any new/pending RX messages */
-	if (!ret)
+	if (!ret && !pd->sm_queued)
 		kick_sm(pd, 0);
 
 	if (!pd->sm_queued)
@@ -3756,7 +3759,12 @@ static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
 		usbpd_dbg(&pd->dev, "hard reset: typec mode:%d present:%d\n",
 			typec_mode, pd->vbus_present);
 		pd->typec_mode = typec_mode;
-		kick_sm(pd, 0);
+
+		if (!work_busy(&pd->sm_work))
+			kick_sm(pd, 0);
+		else
+			usbpd_dbg(&pd->dev, "usbpd_sm already running\n");
+
 		return 0;
 	}
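
Both producers (phy_msg_received() and the hard-reset branch of
psy_changed()) now probe work_busy() before kicking: if usbpd_sm is
already executing it drains pd->rx_q itself and requeues as needed, so an
extra zero-delay kick would only double-arm the work, and the end-of-work
requeue is likewise gated on !pd->sm_queued. The guard, common to both
call sites:

  /* Kick the state machine only if it is not already running. */
  if (!work_busy(&pd->sm_work))
  	kick_sm(pd, 0);
  else
  	usbpd_dbg(&pd->dev, "usbpd_sm already running\n");
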
 
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 09b0c88..2792ae6 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -346,6 +346,9 @@ static int bd_cdev_set_cur_brightness(struct thermal_cooling_device *cdev,
 	struct backlight_device *bd = (struct backlight_device *)cdev->devdata;
 	int brightness_lvl;
 
+	if (state > bd->props.max_brightness)
+		return -EINVAL;
+
 	brightness_lvl = bd->props.max_brightness - state;
 	if (brightness_lvl == bd->thermal_brightness_limit)
 		return 0;
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 9b561f1..2cbb9be 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -50,7 +50,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 	struct page *page = vmf->page;
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct dnode_of_data dn = { .node_changed = false };
+	struct dnode_of_data dn;
 	int err;
 
 	if (unlikely(f2fs_cp_error(sbi))) {
@@ -58,6 +58,9 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 		goto err;
 	}
 
+	/* must be done outside of any locked page */
+	f2fs_balance_fs(sbi, true);
+
 	sb_start_pagefault(inode->i_sb);
 
 	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
@@ -115,8 +118,6 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 out_sem:
 	up_read(&F2FS_I(inode)->i_mmap_sem);
 
-	f2fs_balance_fs(sbi, dn.node_changed);
-
 	sb_end_pagefault(inode->i_sb);
 err:
 	return block_page_mkwrite_return(err);
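
Moving f2fs_balance_fs() to the top of the fault handler keeps garbage
collection, which may itself lock data pages, from running while the
faulting page is held; the old call site ran after the page had been
locked and dirtied. The reordered shape:

  /* Balance (may trigger GC, which locks pages) before any page lock. */
  f2fs_balance_fs(sbi, true);

  sb_start_pagefault(inode->i_sb);
  /* ... lock vmf->page, dirty it, unlock ... */
  sb_end_pagefault(inode->i_sb);
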
diff --git a/include/crypto/ice.h b/include/crypto/ice.h
index 7932623..27b285b 100644
--- a/include/crypto/ice.h
+++ b/include/crypto/ice.h
@@ -8,6 +8,8 @@
 
 #include <linux/platform_device.h>
 #include <linux/cdev.h>
+#include <linux/atomic.h>
+#include <linux/wait.h>
 
 struct request;
 
@@ -67,7 +69,10 @@ struct ice_device {
 	struct qcom_ice_bus_vote bus_vote;
 	ktime_t			ice_reset_start_time;
 	ktime_t			ice_reset_complete_time;
-	void             *key_table;
+	void                    *key_table;
+	atomic_t		is_ice_suspended;
+	atomic_t		is_ice_busy;
+	wait_queue_head_t       block_suspend_ice_queue;
 };
 
 struct ice_crypto_setting {
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index f00a53b..d980d32 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -40,6 +40,8 @@ enum {
 	DRM_PANEL_BLANK_POWERDOWN,
 	/* panel: low power mode */
 	DRM_PANEL_BLANK_LP,
+	/* fps change */
+	DRM_PANEL_BLANK_FPS_CHANGE,
 };
 
 struct drm_panel_notifier {
diff --git a/include/dt-bindings/clock/qcom,dispcc-scuba.h b/include/dt-bindings/clock/qcom,dispcc-scuba.h
new file mode 100644
index 0000000..823d70e
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-scuba.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SCUBA_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SCUBA_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0				0
+#define DISP_CC_MDSS_AHB_CLK			1
+#define DISP_CC_MDSS_AHB_CLK_SRC		2
+#define DISP_CC_MDSS_BYTE0_CLK			3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC		4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC		5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK		6
+#define DISP_CC_MDSS_ESC0_CLK			7
+#define DISP_CC_MDSS_ESC0_CLK_SRC		8
+#define DISP_CC_MDSS_MDP_CLK			9
+#define DISP_CC_MDSS_MDP_CLK_SRC		10
+#define DISP_CC_MDSS_MDP_LUT_CLK		11
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK		12
+#define DISP_CC_MDSS_PCLK0_CLK			13
+#define DISP_CC_MDSS_PCLK0_CLK_SRC		14
+#define DISP_CC_MDSS_VSYNC_CLK			15
+#define DISP_CC_MDSS_VSYNC_CLK_SRC		16
+#define DISP_CC_SLEEP_CLK			17
+#define DISP_CC_SLEEP_CLK_SRC			18
+#define DISP_CC_XO_CLK				19
+#define DISP_CC_XO_CLK_SRC			20
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-bengal.h b/include/dt-bindings/clock/qcom,gcc-bengal.h
index 4da52fc..6963e67 100644
--- a/include/dt-bindings/clock/qcom,gcc-bengal.h
+++ b/include/dt-bindings/clock/qcom,gcc-bengal.h
@@ -66,115 +66,112 @@
 #define GCC_CFG_NOC_USB3_PRIM_AXI_CLK				56
 #define GCC_CPUSS_AHB_CLK					57
 #define GCC_CPUSS_GNOC_CLK					60
-#define GCC_CPUSS_THROTTLE_CORE_CLK				61
-#define GCC_CPUSS_THROTTLE_XO_CLK				62
-#define GCC_DISP_AHB_CLK					63
-#define GCC_DISP_GPLL0_DIV_CLK_SRC				64
-#define GCC_DISP_HF_AXI_CLK					65
-#define GCC_DISP_THROTTLE_CORE_CLK				66
-#define GCC_DISP_XO_CLK						67
-#define GCC_GP1_CLK						68
-#define GCC_GP1_CLK_SRC						69
-#define GCC_GP2_CLK						70
-#define GCC_GP2_CLK_SRC						71
-#define GCC_GP3_CLK						72
-#define GCC_GP3_CLK_SRC						73
-#define GCC_GPU_CFG_AHB_CLK					74
-#define GCC_GPU_GPLL0_CLK_SRC					75
-#define GCC_GPU_GPLL0_DIV_CLK_SRC				76
-#define GCC_GPU_IREF_CLK					77
-#define GCC_GPU_MEMNOC_GFX_CLK					78
-#define GCC_GPU_SNOC_DVM_GFX_CLK				79
-#define GCC_GPU_THROTTLE_CORE_CLK				80
-#define GCC_GPU_THROTTLE_XO_CLK					81
-#define GCC_PDM2_CLK						82
-#define GCC_PDM2_CLK_SRC					83
-#define GCC_PDM_AHB_CLK						84
-#define GCC_PDM_XO4_CLK						85
-#define GCC_PRNG_AHB_CLK					86
-#define GCC_QMIP_CAMERA_NRT_AHB_CLK				87
-#define GCC_QMIP_CAMERA_RT_AHB_CLK				88
-#define GCC_QMIP_CPUSS_CFG_AHB_CLK				89
-#define GCC_QMIP_DISP_AHB_CLK					90
-#define GCC_QMIP_GPU_CFG_AHB_CLK				91
-#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK				92
-#define GCC_QUPV3_WRAP0_CORE_2X_CLK				93
-#define GCC_QUPV3_WRAP0_CORE_CLK				94
-#define GCC_QUPV3_WRAP0_S0_CLK					95
-#define GCC_QUPV3_WRAP0_S0_CLK_SRC				96
-#define GCC_QUPV3_WRAP0_S1_CLK					97
-#define GCC_QUPV3_WRAP0_S1_CLK_SRC				98
-#define GCC_QUPV3_WRAP0_S2_CLK					99
-#define GCC_QUPV3_WRAP0_S2_CLK_SRC				100
-#define GCC_QUPV3_WRAP0_S3_CLK					101
-#define GCC_QUPV3_WRAP0_S3_CLK_SRC				102
-#define GCC_QUPV3_WRAP0_S4_CLK					103
-#define GCC_QUPV3_WRAP0_S4_CLK_SRC				104
-#define GCC_QUPV3_WRAP0_S5_CLK					105
-#define GCC_QUPV3_WRAP0_S5_CLK_SRC				106
-#define GCC_QUPV3_WRAP_0_M_AHB_CLK				107
-#define GCC_QUPV3_WRAP_0_S_AHB_CLK				108
-#define GCC_SDCC1_AHB_CLK					109
-#define GCC_SDCC1_APPS_CLK					110
-#define GCC_SDCC1_APPS_CLK_SRC					111
-#define GCC_SDCC1_ICE_CORE_CLK					112
-#define GCC_SDCC1_ICE_CORE_CLK_SRC				113
-#define GCC_SDCC2_AHB_CLK					114
-#define GCC_SDCC2_APPS_CLK					115
-#define GCC_SDCC2_APPS_CLK_SRC					116
-#define GCC_SYS_NOC_CPUSS_AHB_CLK				117
-#define GCC_SYS_NOC_UFS_PHY_AXI_CLK				118
-#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK				119
-#define GCC_UFS_PHY_AHB_CLK					120
-#define GCC_UFS_PHY_AXI_CLK					121
-#define GCC_UFS_PHY_AXI_CLK_SRC					122
-#define GCC_UFS_PHY_ICE_CORE_CLK				123
-#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				124
-#define GCC_UFS_PHY_PHY_AUX_CLK					125
-#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				126
-#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				127
-#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				128
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK				129
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				130
-#define GCC_USB30_PRIM_MASTER_CLK				131
-#define GCC_USB30_PRIM_MASTER_CLK_SRC				132
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK				133
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			134
-#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC		135
-#define GCC_USB30_PRIM_SLEEP_CLK				136
-#define GCC_USB3_PRIM_CLKREF_CLK				137
-#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				138
-#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				139
-#define GCC_USB3_PRIM_PHY_PIPE_CLK				140
-#define GCC_VCODEC0_AXI_CLK					141
-#define GCC_VENUS_AHB_CLK					142
-#define GCC_VENUS_CTL_AXI_CLK					143
-#define GCC_VIDEO_AHB_CLK					144
-#define GCC_VIDEO_AXI0_CLK					145
-#define GCC_VIDEO_THROTTLE_CORE_CLK				146
-#define GCC_VIDEO_VCODEC0_SYS_CLK				147
-#define GCC_VIDEO_VENUS_CLK_SRC					148
-#define GCC_VIDEO_VENUS_CTL_CLK					149
-#define GCC_VIDEO_XO_CLK					150
-#define GCC_AHB2PHY_CSI_CLK					151
-#define GCC_AHB2PHY_USB_CLK					152
-#define GCC_BIMC_GPU_AXI_CLK					153
-#define GCC_BOOT_ROM_AHB_CLK					154
-#define GCC_CAM_THROTTLE_NRT_CLK				155
-#define GCC_CAM_THROTTLE_RT_CLK					156
-#define GCC_CAMERA_AHB_CLK					157
-#define GCC_CAMERA_XO_CLK					158
-#define GCC_CAMSS_AXI_CLK					159
-#define GCC_CAMSS_AXI_CLK_SRC					160
-#define GCC_CAMSS_CAMNOC_ATB_CLK				161
-#define GCC_CAMSS_CAMNOC_NTS_XO_CLK				162
-#define GCC_CAMSS_CCI_0_CLK					163
-#define GCC_CAMSS_CCI_CLK_SRC					164
-#define GCC_CAMSS_CPHY_0_CLK					165
-#define GCC_CAMSS_CPHY_1_CLK					166
-#define GCC_CAMSS_CPHY_2_CLK					167
-#define GCC_UFS_CLKREF_CLK					168
-#define GCC_DISP_GPLL0_CLK_SRC					169
+#define GCC_DISP_AHB_CLK					61
+#define GCC_DISP_GPLL0_DIV_CLK_SRC				62
+#define GCC_DISP_HF_AXI_CLK					63
+#define GCC_DISP_THROTTLE_CORE_CLK				64
+#define GCC_DISP_XO_CLK						65
+#define GCC_GP1_CLK						66
+#define GCC_GP1_CLK_SRC						67
+#define GCC_GP2_CLK						68
+#define GCC_GP2_CLK_SRC						69
+#define GCC_GP3_CLK						70
+#define GCC_GP3_CLK_SRC						71
+#define GCC_GPU_CFG_AHB_CLK					72
+#define GCC_GPU_GPLL0_CLK_SRC					73
+#define GCC_GPU_GPLL0_DIV_CLK_SRC				74
+#define GCC_GPU_IREF_CLK					75
+#define GCC_GPU_MEMNOC_GFX_CLK					76
+#define GCC_GPU_SNOC_DVM_GFX_CLK				77
+#define GCC_GPU_THROTTLE_CORE_CLK				78
+#define GCC_GPU_THROTTLE_XO_CLK					79
+#define GCC_PDM2_CLK						80
+#define GCC_PDM2_CLK_SRC					81
+#define GCC_PDM_AHB_CLK						82
+#define GCC_PDM_XO4_CLK						83
+#define GCC_PRNG_AHB_CLK					84
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK				85
+#define GCC_QMIP_CAMERA_RT_AHB_CLK				86
+#define GCC_QMIP_DISP_AHB_CLK					87
+#define GCC_QMIP_GPU_CFG_AHB_CLK				88
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK				89
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK				90
+#define GCC_QUPV3_WRAP0_CORE_CLK				91
+#define GCC_QUPV3_WRAP0_S0_CLK					92
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC				93
+#define GCC_QUPV3_WRAP0_S1_CLK					94
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC				95
+#define GCC_QUPV3_WRAP0_S2_CLK					96
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC				97
+#define GCC_QUPV3_WRAP0_S3_CLK					98
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC				99
+#define GCC_QUPV3_WRAP0_S4_CLK					100
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC				101
+#define GCC_QUPV3_WRAP0_S5_CLK					102
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC				103
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK				104
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK				105
+#define GCC_SDCC1_AHB_CLK					106
+#define GCC_SDCC1_APPS_CLK					107
+#define GCC_SDCC1_APPS_CLK_SRC					108
+#define GCC_SDCC1_ICE_CORE_CLK					109
+#define GCC_SDCC1_ICE_CORE_CLK_SRC				110
+#define GCC_SDCC2_AHB_CLK					111
+#define GCC_SDCC2_APPS_CLK					112
+#define GCC_SDCC2_APPS_CLK_SRC					113
+#define GCC_SYS_NOC_CPUSS_AHB_CLK				114
+#define GCC_SYS_NOC_UFS_PHY_AXI_CLK				115
+#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK				116
+#define GCC_UFS_PHY_AHB_CLK					117
+#define GCC_UFS_PHY_AXI_CLK					118
+#define GCC_UFS_PHY_AXI_CLK_SRC					119
+#define GCC_UFS_PHY_ICE_CORE_CLK				120
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				121
+#define GCC_UFS_PHY_PHY_AUX_CLK					122
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				123
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				124
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				125
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK				126
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				127
+#define GCC_USB30_PRIM_MASTER_CLK				128
+#define GCC_USB30_PRIM_MASTER_CLK_SRC				129
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK				130
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			131
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC		132
+#define GCC_USB30_PRIM_SLEEP_CLK				133
+#define GCC_USB3_PRIM_CLKREF_CLK				134
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				135
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				136
+#define GCC_USB3_PRIM_PHY_PIPE_CLK				137
+#define GCC_VCODEC0_AXI_CLK					138
+#define GCC_VENUS_AHB_CLK					139
+#define GCC_VENUS_CTL_AXI_CLK					140
+#define GCC_VIDEO_AHB_CLK					141
+#define GCC_VIDEO_AXI0_CLK					142
+#define GCC_VIDEO_THROTTLE_CORE_CLK				143
+#define GCC_VIDEO_VCODEC0_SYS_CLK				144
+#define GCC_VIDEO_VENUS_CLK_SRC					145
+#define GCC_VIDEO_VENUS_CTL_CLK					146
+#define GCC_VIDEO_XO_CLK					147
+#define GCC_AHB2PHY_CSI_CLK					148
+#define GCC_AHB2PHY_USB_CLK					149
+#define GCC_BIMC_GPU_AXI_CLK					150
+#define GCC_BOOT_ROM_AHB_CLK					151
+#define GCC_CAM_THROTTLE_NRT_CLK				152
+#define GCC_CAM_THROTTLE_RT_CLK					153
+#define GCC_CAMERA_AHB_CLK					154
+#define GCC_CAMERA_XO_CLK					155
+#define GCC_CAMSS_AXI_CLK					156
+#define GCC_CAMSS_AXI_CLK_SRC					157
+#define GCC_CAMSS_CAMNOC_ATB_CLK				158
+#define GCC_CAMSS_CAMNOC_NTS_XO_CLK				159
+#define GCC_CAMSS_CCI_0_CLK					160
+#define GCC_CAMSS_CCI_CLK_SRC					161
+#define GCC_CAMSS_CPHY_0_CLK					162
+#define GCC_CAMSS_CPHY_1_CLK					163
+#define GCC_CAMSS_CPHY_2_CLK					164
+#define GCC_UFS_CLKREF_CLK					165
+#define GCC_DISP_GPLL0_CLK_SRC					166
 
 /* GCC resets */
 #define GCC_QUSB2PHY_PRIM_BCR					0
diff --git a/include/dt-bindings/clock/qcom,gcc-scuba.h b/include/dt-bindings/clock/qcom,gcc-scuba.h
new file mode 100644
index 0000000..3394a2a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-scuba.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SCUBA_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SCUBA_H
+
+/* GCC clocks */
+#define GPLL0						0
+#define GPLL0_OUT_AUX2					1
+#define GPLL1						2
+#define GPLL10						3
+#define GPLL11						4
+#define GPLL3						5
+#define GPLL3_OUT_MAIN					6
+#define GPLL4						7
+#define GPLL5						8
+#define GPLL6						9
+#define GPLL6_OUT_MAIN					10
+#define GPLL7						11
+#define GPLL8						12
+#define GPLL8_OUT_MAIN					13
+#define GPLL9						14
+#define GPLL9_OUT_MAIN					15
+#define GCC_AHB2PHY_CSI_CLK				16
+#define GCC_AHB2PHY_USB_CLK				17
+#define GCC_APC_VS_CLK					18
+#define GCC_BIMC_GPU_AXI_CLK				19
+#define GCC_BOOT_ROM_AHB_CLK				20
+#define GCC_CAM_THROTTLE_NRT_CLK			21
+#define GCC_CAM_THROTTLE_RT_CLK				22
+#define GCC_CAMERA_AHB_CLK				23
+#define GCC_CAMERA_XO_CLK				24
+#define GCC_CAMSS_AXI_CLK				25
+#define GCC_CAMSS_AXI_CLK_SRC				26
+#define GCC_CAMSS_CAMNOC_ATB_CLK			27
+#define GCC_CAMSS_CAMNOC_NTS_XO_CLK			28
+#define GCC_CAMSS_CCI_0_CLK				29
+#define GCC_CAMSS_CCI_CLK_SRC				30
+#define GCC_CAMSS_CPHY_0_CLK				31
+#define GCC_CAMSS_CPHY_1_CLK				32
+#define GCC_CAMSS_CSI0PHYTIMER_CLK			33
+#define GCC_CAMSS_CSI0PHYTIMER_CLK_SRC			34
+#define GCC_CAMSS_CSI1PHYTIMER_CLK			35
+#define GCC_CAMSS_CSI1PHYTIMER_CLK_SRC			36
+#define GCC_CAMSS_MCLK0_CLK				37
+#define GCC_CAMSS_MCLK0_CLK_SRC				38
+#define GCC_CAMSS_MCLK1_CLK				39
+#define GCC_CAMSS_MCLK1_CLK_SRC				40
+#define GCC_CAMSS_MCLK2_CLK				41
+#define GCC_CAMSS_MCLK2_CLK_SRC				42
+#define GCC_CAMSS_MCLK3_CLK				43
+#define GCC_CAMSS_MCLK3_CLK_SRC				44
+#define GCC_CAMSS_NRT_AXI_CLK				45
+#define GCC_CAMSS_OPE_AHB_CLK				46
+#define GCC_CAMSS_OPE_AHB_CLK_SRC			47
+#define GCC_CAMSS_OPE_CLK				48
+#define GCC_CAMSS_OPE_CLK_SRC				49
+#define GCC_CAMSS_RT_AXI_CLK				50
+#define GCC_CAMSS_TFE_0_CLK				51
+#define GCC_CAMSS_TFE_0_CLK_SRC				52
+#define GCC_CAMSS_TFE_0_CPHY_RX_CLK			53
+#define GCC_CAMSS_TFE_0_CSID_CLK			54
+#define GCC_CAMSS_TFE_0_CSID_CLK_SRC			55
+#define GCC_CAMSS_TFE_1_CLK				56
+#define GCC_CAMSS_TFE_1_CLK_SRC				57
+#define GCC_CAMSS_TFE_1_CPHY_RX_CLK			58
+#define GCC_CAMSS_TFE_1_CSID_CLK			59
+#define GCC_CAMSS_TFE_1_CSID_CLK_SRC			60
+#define GCC_CAMSS_TFE_CPHY_RX_CLK_SRC			61
+#define GCC_CAMSS_TOP_AHB_CLK				62
+#define GCC_CAMSS_TOP_AHB_CLK_SRC			63
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK			64
+#define GCC_CPUSS_AHB_CLK				65
+#define GCC_CPUSS_AHB_CLK_SRC				66
+#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC			67
+#define GCC_CPUSS_GNOC_CLK				68
+#define GCC_CPUSS_THROTTLE_CORE_CLK			69
+#define GCC_CPUSS_THROTTLE_XO_CLK			70
+#define GCC_DISP_AHB_CLK				71
+#define GCC_DISP_GPLL0_DIV_CLK_SRC			72
+#define GCC_DISP_HF_AXI_CLK				73
+#define GCC_DISP_THROTTLE_CORE_CLK			74
+#define GCC_DISP_XO_CLK					75
+#define GCC_GP1_CLK					76
+#define GCC_GP1_CLK_SRC					77
+#define GCC_GP2_CLK					78
+#define GCC_GP2_CLK_SRC					79
+#define GCC_GP3_CLK					80
+#define GCC_GP3_CLK_SRC					81
+#define GCC_GPU_BIMC_AXI_CLK_SRC			82
+#define GCC_GPU_CFG_AHB_CLK				83
+#define GCC_GPU_GPLL0_CLK_SRC				84
+#define GCC_GPU_GPLL0_DIV_CLK_SRC			85
+#define GCC_GPU_IREF_CLK				86
+#define GCC_GPU_MEMNOC_GFX_CLK				87
+#define GCC_GPU_SNOC_DVM_GFX_CLK			88
+#define GCC_GPU_THROTTLE_CORE_CLK			89
+#define GCC_GPU_THROTTLE_XO_CLK				90
+#define GCC_MSS_VS_CLK					91
+#define GCC_PDM2_CLK					92
+#define GCC_PDM2_CLK_SRC				93
+#define GCC_PDM_AHB_CLK					94
+#define GCC_PDM_XO4_CLK					95
+#define GCC_PWM0_XO512_CLK				96
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK			97
+#define GCC_QMIP_CAMERA_RT_AHB_CLK			98
+#define GCC_QMIP_CPUSS_CFG_AHB_CLK			99
+#define GCC_QMIP_DISP_AHB_CLK				100
+#define GCC_QMIP_GPU_CFG_AHB_CLK			101
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK			102
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK			103
+#define GCC_QUPV3_WRAP0_CORE_CLK			104
+#define GCC_QUPV3_WRAP0_S0_CLK				105
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC			106
+#define GCC_QUPV3_WRAP0_S1_CLK				107
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC			108
+#define GCC_QUPV3_WRAP0_S2_CLK				109
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC			110
+#define GCC_QUPV3_WRAP0_S3_CLK				111
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC			112
+#define GCC_QUPV3_WRAP0_S4_CLK				113
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC			114
+#define GCC_QUPV3_WRAP0_S5_CLK				115
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC			116
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK			117
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK			118
+#define GCC_SDCC1_AHB_CLK				119
+#define GCC_SDCC1_APPS_CLK				120
+#define GCC_SDCC1_APPS_CLK_SRC				121
+#define GCC_SDCC1_ICE_CORE_CLK				122
+#define GCC_SDCC1_ICE_CORE_CLK_SRC			123
+#define GCC_SDCC2_AHB_CLK				124
+#define GCC_SDCC2_APPS_CLK				125
+#define GCC_SDCC2_APPS_CLK_SRC				126
+#define GCC_SYS_NOC_CPUSS_AHB_CLK			127
+#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK			128
+#define GCC_USB30_PRIM_MASTER_CLK			129
+#define GCC_USB30_PRIM_MASTER_CLK_SRC			130
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK			131
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC		132
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV		133
+#define GCC_USB30_PRIM_SLEEP_CLK			134
+#define GCC_USB3_PRIM_CLKREF_CLK			135
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC			136
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK			137
+#define GCC_USB3_PRIM_PHY_PIPE_CLK			138
+#define GCC_VCODEC0_AXI_CLK				139
+#define GCC_VDDA_VS_CLK					140
+#define GCC_VDDCX_VS_CLK				141
+#define GCC_VDDMX_VS_CLK				142
+#define GCC_VENUS_AHB_CLK				143
+#define GCC_VENUS_CTL_AXI_CLK				144
+#define GCC_VIDEO_AHB_CLK				145
+#define GCC_VIDEO_AXI0_CLK				146
+#define GCC_VIDEO_THROTTLE_CORE_CLK			147
+#define GCC_VIDEO_VCODEC0_SYS_CLK			148
+#define GCC_VIDEO_VENUS_CLK_SRC				149
+#define GCC_VIDEO_VENUS_CTL_CLK				150
+#define GCC_VIDEO_XO_CLK				151
+#define GCC_VS_CTRL_AHB_CLK				152
+#define GCC_VS_CTRL_CLK					153
+#define GCC_VS_CTRL_CLK_SRC				154
+#define GCC_VSENSOR_CLK_SRC				155
+#define GCC_WCSS_VS_CLK					156
+
+/* GCC resets */
+#define GCC_CAMSS_OPE_BCR				0
+#define GCC_CAMSS_TFE_BCR				1
+#define GCC_CAMSS_TOP_BCR				2
+#define GCC_GPU_BCR					3
+#define GCC_MMSS_BCR					4
+#define GCC_PDM_BCR					5
+#define GCC_QUPV3_WRAPPER_0_BCR				6
+#define GCC_QUPV3_WRAPPER_1_BCR				7
+#define GCC_QUSB2PHY_PRIM_BCR				8
+#define GCC_QUSB2PHY_SEC_BCR				9
+#define GCC_SDCC1_BCR					10
+#define GCC_SDCC2_BCR					11
+#define GCC_USB30_PRIM_BCR				12
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR			13
+#define GCC_VCODEC0_BCR					14
+#define GCC_VENUS_BCR					15
+#define GCC_VIDEO_INTERFACE_BCR				16
+#define GCC_VS_BCR					17
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-scuba.h b/include/dt-bindings/clock/qcom,gpucc-scuba.h
new file mode 100644
index 0000000..cb5211c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-scuba.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SCUBA_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SCUBA_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0				0
+#define GPU_CC_AHB_CLK				1
+#define GPU_CC_CRC_AHB_CLK			2
+#define GPU_CC_CX_APB_CLK			3
+#define GPU_CC_CX_GFX3D_CLK			4
+#define GPU_CC_CX_GFX3D_SLV_CLK			5
+#define GPU_CC_CX_GMU_CLK			6
+#define GPU_CC_CX_SNOC_DVM_CLK			7
+#define GPU_CC_CXO_AON_CLK			8
+#define GPU_CC_CXO_CLK				9
+#define GPU_CC_GMU_CLK_SRC			10
+#define GPU_CC_GX_CXO_CLK			11
+#define GPU_CC_GX_GFX3D_CLK			12
+#define GPU_CC_SLEEP_CLK			13
+
+#endif
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
index 7bfba5b..835fb0c 100644
--- a/include/dt-bindings/msm/msm-bus-ids.h
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -44,6 +44,7 @@
 #define	MSM_BUS_FAB_GPU_VIRT 6159
 #define	MSM_BUS_FAB_MMNRT_VIRT 6160
 #define	MSM_BUS_FAB_MMRT_VIRT 6161
+#define	MSM_BUS_FAB_CLK_VIRT 6162
 
 #define	MSM_BUS_FAB_MC_VIRT_DISPLAY 26000
 #define	MSM_BUS_FAB_MEM_NOC_DISPLAY 26001
@@ -292,6 +293,8 @@
 #define	MSM_BUS_MASTER_SNOC_BIMC_NRT 179
 #define	MSM_BUS_MASTER_GPU_CDSP_PROC 180
 #define	MSM_BUS_MASTER_ANOC_SNOC 181
+#define	MSM_BUS_MASTER_CAMNOC_ICP_UNCOMP 182
+#define	MSM_BUS_MASTER_NPU_PROC 183
 
 #define	MSM_BUS_MASTER_LLCC_DISPLAY 20000
 #define	MSM_BUS_MASTER_MNOC_HF_MEM_NOC_DISPLAY 20001
@@ -695,6 +698,7 @@
 #define	MSM_BUS_SLAVE_SNOC_BIMC_RT 833
 #define	MSM_BUS_SLAVE_ANOC_SNOC 834
 #define	MSM_BUS_SLAVE_GPU_CDSP_BIMC 835
+#define	MSM_BUS_SLAVE_AHB2PHY_2 836
 
 #define	MSM_BUS_SLAVE_EBI_CH0_DISPLAY 20512
 #define	MSM_BUS_SLAVE_LLCC_DISPLAY 20513
@@ -1152,23 +1156,23 @@
 #define	ICBID_SLAVE_PCNOC_S_10 245
 #define	ICBID_SLAVE_PCNOC_S_11 246
 #define	ICBID_SLAVE_LPASS_ANOC_BIMC 247
-#define	ICBID_SLAVE_SNOC_BIMC_NRT 248
-#define	ICBID_SLAVE_SNOC_BIMC_RT 249
-#define	ICBID_SLAVE_QUP_0 250
-#define	ICBID_SLAVE_UFS_MEM_CFG 251
-#define	ICBID_SLAVE_VSENSE_CTRL_CFG 252
-#define	ICBID_SLAVE_QUP_CORE_0 253
-#define	ICBID_SLAVE_QUP_CORE_1 254
-#define	ICBID_SLAVE_GPU_CDSP_BIMC 255
-#define	ICBID_SLAVE_AHB2PHY_USB 256
-#define	ICBID_SLAVE_APSS_THROTTLE_CFG 257
-#define	ICBID_SLAVE_CAMERA_NRT_THROTTLE_CFG 258
-#define	ICBID_SLAVE_CDSP_THROTTLE_CFG 259
-#define	ICBID_SLAVE_DDR_PHY_CFG 260
-#define	ICBID_SLAVE_DDR_SS_CFG 261
-#define	ICBID_SLAVE_GPU_CFG 262
-#define	ICBID_SLAVE_GPU_THROTTLE_CFG 263
-#define	ICBID_SLAVE_MAPSS 264
-#define	ICBID_SLAVE_MDSP_MPU_CFG 265
-#define	ICBID_SLAVE_CAMERA_RT_THROTTLE_CFG 266
+#define	ICBID_SLAVE_SNOC_BIMC_NRT 259
+#define	ICBID_SLAVE_SNOC_BIMC_RT 260
+#define	ICBID_SLAVE_QUP_0 261
+#define	ICBID_SLAVE_UFS_MEM_CFG 262
+#define	ICBID_SLAVE_VSENSE_CTRL_CFG 263
+#define	ICBID_SLAVE_QUP_CORE_0 264
+#define	ICBID_SLAVE_QUP_CORE_1 265
+#define	ICBID_SLAVE_GPU_CDSP_BIMC 266
+#define	ICBID_SLAVE_AHB2PHY_USB 268
+#define	ICBID_SLAVE_APSS_THROTTLE_CFG 270
+#define	ICBID_SLAVE_CAMERA_NRT_THROTTLE_CFG 271
+#define	ICBID_SLAVE_CDSP_THROTTLE_CFG 272
+#define	ICBID_SLAVE_DDR_PHY_CFG 273
+#define	ICBID_SLAVE_DDR_SS_CFG 274
+#define	ICBID_SLAVE_GPU_CFG 275
+#define	ICBID_SLAVE_GPU_THROTTLE_CFG 276
+#define	ICBID_SLAVE_MAPSS 277
+#define	ICBID_SLAVE_MDSP_MPU_CFG 278
+#define	ICBID_SLAVE_CAMERA_RT_THROTTLE_CFG 279
 #endif
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 2cf6fa6..b0b43b5 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -142,10 +142,10 @@
  * a new RANGE of SSIDs to the msg_mask_tbl.
  */
 #define MSG_MASK_TBL_CNT		26
-#define APPS_EVENT_LAST_ID		0xCB4
+#define APPS_EVENT_LAST_ID		0xCB7
 
 #define MSG_SSID_0			0
-#define MSG_SSID_0_LAST			130
+#define MSG_SSID_0_LAST			131
 #define MSG_SSID_1			500
 #define MSG_SSID_1_LAST			506
 #define MSG_SSID_2			1000
@@ -348,13 +348,15 @@ static const uint32_t msg_bld_masks_0[] = {
 	MSG_LVL_MED,
 	MSG_LVL_HIGH,
 	MSG_LVL_LOW,
-	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+		MSG_LVL_FATAL,
 	MSG_LVL_HIGH,
 	MSG_LVL_LOW,
 	MSG_LVL_MED,
 	MSG_LVL_MED,
 	MSG_LVL_HIGH,
-	MSG_LVL_HIGH
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR
 };
 
 static const uint32_t msg_bld_masks_1[] = {
@@ -916,7 +918,7 @@ static const uint32_t msg_bld_masks_25[] = {
 /* LOG CODES */
 static const uint32_t log_code_last_tbl[] = {
 	0x0,	/* EQUIP ID 0 */
-	0x1CB2,	/* EQUIP ID 1 */
+	0x1CC0,	/* EQUIP ID 1 */
 	0x0,	/* EQUIP ID 2 */
 	0x0,	/* EQUIP ID 3 */
 	0x4910,	/* EQUIP ID 4 */
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 64214f3..fd7652e 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -82,6 +82,11 @@ int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
 void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
 
+int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
+			   u64 size);
+
+int iommu_dma_enable_best_fit_algo(struct device *dev);
+
 #else
 
 struct iommu_domain;
@@ -115,6 +120,17 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he
 {
 }
 
+static inline int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
+					 u64 size)
+{
+	return -ENODEV;
+}
+
+static inline int iommu_dma_enable_best_fit_algo(struct device *dev)
+{
+	return -ENODEV;
+}
+
 #endif	/* CONFIG_IOMMU_DMA */
 #endif	/* __KERNEL__ */
 #endif	/* __DMA_IOMMU_H */
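
Illustrative usage of the two calls added above (a sketch, not a
documented sequence: the probe function, IOVA base and window size are
made-up values, and both calls resolve to the -ENODEV stubs when
CONFIG_IOMMU_DMA is disabled):

  static int example_probe(struct device *dev)
  {
          int ret;

          /* Carve a fixed IOVA window out of the device's DMA domain. */
          ret = iommu_dma_reserve_iova(dev, 0x10000000, 0x1000000);
          if (ret)
                  return ret;

          /* Switch the device's IOVA allocator to best-fit mode. */
          return iommu_dma_enable_best_fit_algo(dev);
  }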
diff --git a/include/linux/iova.h b/include/linux/iova.h
index c9fb031..8c48bfa 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -97,6 +97,7 @@ struct iova_domain {
 						   flush-queues */
 	atomic_t fq_timer_on;			/* 1 when timer is active, 0
 						   when not */
+	bool best_fit;
 };
 
 static inline unsigned long iova_size(struct iova *iova)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b0d0b51..6d87d04 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -157,6 +157,7 @@ struct lockdep_map {
 #ifdef CONFIG_LOCK_STAT
 	int				cpu;
 	unsigned long			ip;
+	unsigned long			ip_caller;
 #endif
 };
 
@@ -233,6 +234,7 @@ struct held_lock {
 	 */
 	u64				prev_chain_key;
 	unsigned long			acquire_ip;
+	unsigned long			acquire_ip_caller;
 	struct lockdep_map		*instance;
 	struct lockdep_map		*nest_lock;
 #ifdef CONFIG_LOCK_STAT
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 1f8be08..48b9b27 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -449,6 +449,11 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
 	for (i = 0, rgn = &memblock_type->regions[0];			\
 	     i < memblock_type->cnt;					\
 	     i++, rgn = &memblock_type->regions[i])
+#define for_each_memblock_rev(memblock_type, region)	\
+	for (region = memblock.memblock_type.regions + \
+			memblock.memblock_type.cnt - 1;	\
+	     region >= memblock.memblock_type.regions;	\
+	     region--)
 
 #ifdef CONFIG_MEMTEST
 extern void early_memtest(phys_addr_t start, phys_addr_t end);
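
As an aside, the reverse iterator added above walks a memblock type from
the highest region downwards. A minimal sketch (the first argument must
name a member of the global memblock, e.g. memory or reserved):

  struct memblock_region *rgn;

  /* Visit reserved regions from highest to lowest base address. */
  for_each_memblock_rev(reserved, rgn)
          pr_info("reserved: base=%pa size=%pa\n", &rgn->base, &rgn->size);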
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 83768b3..a8ffa7f 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -12,6 +12,9 @@ struct image_info;
 struct bhi_vec_entry;
 struct mhi_timesync;
 struct mhi_buf_info;
+struct mhi_sfr_info;
+
+#define REG_WRITE_QUEUE_LEN 1024
 
 /**
  * enum MHI_CB - MHI callback
@@ -186,6 +189,19 @@ struct file_info {
 };
 
 /**
+ * struct reg_write_info - offload reg write info
+ * @reg_addr: register address
+ * @val: value to be written to the register
+ * @valid: entry is valid or not
+ */
+struct reg_write_info {
+	void __iomem *reg_addr;
+	u32 val;
+	bool valid;
+};
+
+/**
  * struct mhi_controller - Master controller structure for external modem
  * @dev: Device associated with this controller
  * @of_node: DT that has MHI configuration information
@@ -303,6 +319,7 @@ struct mhi_controller {
 
 	/* caller should grab pm_mutex for suspend/resume operations */
 	struct mutex pm_mutex;
+	struct mutex tsync_mutex;
 	bool pre_init;
 	rwlock_t pm_lock;
 	u32 pm_state;
@@ -352,6 +369,8 @@ struct mhi_controller {
 	void (*tsync_log)(struct mhi_controller *mhi_cntrl, u64 remote_time);
 	int (*bw_scale)(struct mhi_controller *mhi_cntrl,
 			struct mhi_link_info *link_info);
+	void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *base,
+			u32 offset, u32 val);
 
 	/* channel to control DTR messaging */
 	struct mhi_device *dtr_dev;
@@ -366,6 +385,10 @@ struct mhi_controller {
 	u64 local_timer_freq;
 	u64 remote_timer_freq;
 
+	/* subsystem failure reason retrieval feature */
+	struct mhi_sfr_info *mhi_sfr;
+	size_t sfr_len;
+
 	/* kernel log level */
 	enum MHI_DEBUG_LEVEL klog_lvl;
 
@@ -373,11 +396,21 @@ struct mhi_controller {
 	enum MHI_DEBUG_LEVEL log_lvl;
 
 	/* controller specific data */
+	const char *name;
 	bool power_down;
+	bool need_force_m3;
+	bool force_m3_done;
 	void *priv_data;
 	void *log_buf;
 	struct dentry *dentry;
 	struct dentry *parent;
+
+	/* for reg write offload */
+	struct workqueue_struct *offload_wq;
+	struct work_struct reg_write_work;
+	struct reg_write_info *reg_write_q;
+	atomic_t write_idx;
+	u32 read_idx;
 };
 
 /**
@@ -794,6 +827,12 @@ void mhi_control_error(struct mhi_controller *mhi_cntrl);
  */
 void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl);
 
+/**
+ * mhi_get_restart_reason - retrieve the subsystem failure reason
+ * @name: controller name
+ */
+char *mhi_get_restart_reason(const char *name);
+
 #ifndef CONFIG_ARCH_QCOM
 
 #ifdef CONFIG_MHI_DEBUG
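
A hedged sketch of the register-write offload added above: a controller
implementing the new write_reg hook might queue the access and let
reg_write_work drain it in process context. The producer below is
illustrative only; queue-full handling and the consumer side are omitted,
and the index wrap assumes REG_WRITE_QUEUE_LEN remains a power of two:

  static void example_write_reg(struct mhi_controller *mhi_cntrl,
                                void __iomem *base, u32 offset, u32 val)
  {
          u32 q_idx = atomic_inc_return(&mhi_cntrl->write_idx) &
                      (REG_WRITE_QUEUE_LEN - 1);
          struct reg_write_info *info = &mhi_cntrl->reg_write_q[q_idx];

          info->reg_addr = base + offset;
          info->val = val;
          /* Publish the entry before kicking the worker. */
          smp_wmb();
          info->valid = true;

          queue_work(mhi_cntrl->offload_wq, &mhi_cntrl->reg_write_work);
  }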
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2d6695f..cbc05d1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -574,16 +574,16 @@ unsigned long vmalloc_to_pfn(const void *addr);
  * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
  * is no special casing required.
  */
-static inline bool is_vmalloc_addr(const void *x)
-{
-#ifdef CONFIG_MMU
-	unsigned long addr = (unsigned long)x;
 
-	return addr >= VMALLOC_START && addr < VMALLOC_END;
+#ifdef CONFIG_MMU
+extern int is_vmalloc_addr(const void *x);
 #else
-	return false;
-#endif
+static inline int is_vmalloc_addr(const void *x)
+{
+	return 0;
 }
+#endif
+
 #ifdef CONFIG_MMU
 extern int is_vmalloc_or_module_addr(const void *x);
 #else
diff --git a/include/linux/msm_pcie.h b/include/linux/msm_pcie.h
index b0ad58e..077883e 100644
--- a/include/linux/msm_pcie.h
+++ b/include/linux/msm_pcie.h
@@ -66,6 +66,26 @@ static inline int msm_msi_init(struct device *dev)
 #ifdef CONFIG_PCI_MSM
 
 /**
+ * msm_pcie_allow_l1 - allow PCIe link to re-enter L1
+ * @pci_dev:		client's pci device structure
+ *
+ * This function lets PCIe clients allow the link to re-enter L1. It should
+ * only be used after msm_pcie_prevent_l1() has been called.
+ */
+void msm_pcie_allow_l1(struct pci_dev *pci_dev);
+
+/**
+ * msm_pcie_prevent_l1 - keeps PCIe link out of L1
+ * @pci_dev:		client's pci device structure
+ *
+ * This function forces the link out of L1 and prevents it from re-entering
+ * L1 until msm_pcie_allow_l1() is called.
+ *
+ * Return: 0 on success, negative value on error.
+ */
+int msm_pcie_prevent_l1(struct pci_dev *pci_dev);
+
+/**
  * msm_pcie_set_link_bandwidth - updates the number of lanes and speed of PCIe
  * link.
  * @pci_dev:		client's pci device structure
@@ -202,6 +222,15 @@ static inline int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr,
 	return -ENODEV;
 }
 
+static inline void msm_pcie_allow_l1(struct pci_dev *pci_dev)
+{
+}
+
+static inline int msm_pcie_prevent_l1(struct pci_dev *pci_dev)
+{
+	return -ENODEV;
+}
+
 static inline int msm_pcie_l1ss_timeout_disable(struct pci_dev *pci_dev)
 {
 	return -ENODEV;
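
The prevent/allow pair documented above is meant to bracket
latency-sensitive accesses. An illustrative client (the function name and
register offset are made up):

  static int example_low_latency_write(struct pci_dev *pdev,
                                       void __iomem *bar, u32 val)
  {
          int ret;

          /* Take the link out of L1 and hold it there. */
          ret = msm_pcie_prevent_l1(pdev);
          if (ret)
                  return ret;

          writel(val, bar + 0x40);        /* hypothetical register */

          /* Let the link re-enter L1 again. */
          msm_pcie_allow_l1(pdev);
          return 0;
  }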
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3949bcf..b736707 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -825,7 +825,7 @@ struct task_struct {
 	struct list_head grp_list;
 	u64 cpu_cycles;
 	bool misfit;
-	u8 unfilter;
+	u32 unfilter;
 #endif
 
 #ifdef CONFIG_CGROUP_SCHED
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 817cbd3..5b76a3c7 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -47,7 +47,7 @@ extern unsigned int sysctl_sched_min_task_util_for_boost;
 extern unsigned int sysctl_sched_min_task_util_for_colocation;
 extern unsigned int sysctl_sched_asym_cap_sibling_freq_match_pct;
 extern unsigned int sysctl_sched_coloc_downmigrate_ns;
-extern unsigned int sysctl_sched_task_unfilter_nr_windows;
+extern unsigned int sysctl_sched_task_unfilter_period;
 extern unsigned int sysctl_sched_busy_hyst_enable_cpus;
 extern unsigned int sysctl_sched_busy_hyst;
 extern unsigned int sysctl_sched_coloc_busy_hyst_enable_cpus;
@@ -55,6 +55,7 @@ extern unsigned int sysctl_sched_coloc_busy_hyst;
 extern unsigned int sysctl_sched_coloc_busy_hyst_max_ms;
 extern unsigned int sysctl_sched_window_stats_policy;
 extern unsigned int sysctl_sched_ravg_window_nr_ticks;
+extern unsigned int sysctl_sched_dynamic_ravg_window_enable;
 
 extern int
 walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h
index 2fb53c7..a56c3ea 100644
--- a/include/linux/soc/qcom/qmi.h
+++ b/include/linux/soc/qcom/qmi.h
@@ -87,6 +87,7 @@ struct qmi_elem_info {
 #define QMI_ERR_INTERNAL_V01			3
 #define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01	5
 #define QMI_ERR_INVALID_ID_V01			41
+#define QMI_ERR_NETWORK_NOT_READY_V01		53
 #define QMI_ERR_ENCODING_V01			58
 #define QMI_ERR_DISABLED_V01			69
 #define QMI_ERR_INCOMPATIBLE_STATE_V01		90
@@ -158,7 +159,6 @@ struct qmi_ops {
  * struct qmi_txn - transaction context
  * @qmi:	QMI handle this transaction is associated with
  * @id:		transaction id
- * @lock:	for synchronization between handler and waiter of messages
  * @completion:	completion object as the transaction receives a response
  * @result:	result code for the completed transaction
  * @ei:		description of the QMI encoded response (optional)
@@ -169,7 +169,6 @@ struct qmi_txn {
 
 	u16 id;
 
-	struct mutex lock;
 	struct completion completion;
 	int result;
 
@@ -268,5 +267,6 @@ int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn,
 		 struct qmi_elem_info *ei, void *c_struct);
 int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout);
 void qmi_txn_cancel(struct qmi_txn *txn);
+void qmi_set_sndtimeo(struct qmi_handle *qmi, long timeo);
 
 #endif
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 316efa3..d8c61db 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -21,6 +21,8 @@ struct notifier_block;		/* in notifier.h */
 #define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
 #define VM_NO_GUARD		0x00000040      /* don't add guard page */
 #define VM_KASAN		0x00000080      /* has allocated kasan shadow memory */
+#define VM_LOWMEM		0x00000100      /* Tracking of direct mapped lowmem */
+
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
@@ -179,6 +181,13 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
 extern struct list_head vmap_area_list;
 extern __init void vm_area_add_early(struct vm_struct *vm);
 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
+extern __init int vm_area_check_early(struct vm_struct *vm);
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+extern void mark_vmalloc_reserved_area(void *addr, unsigned long size);
+#else
+static inline void mark_vmalloc_reserved_area(void *addr, unsigned long size)
+{ }
+#endif
 
 #ifdef CONFIG_SMP
 # ifdef CONFIG_MMU
@@ -204,7 +213,12 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 #endif
 
 #ifdef CONFIG_MMU
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+extern unsigned long total_vmalloc_size;
+#define VMALLOC_TOTAL total_vmalloc_size
+#else
 #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
+#endif
 #else
 #define VMALLOC_TOTAL 0UL
 #endif
diff --git a/include/soc/qcom/lpm_levels.h b/include/soc/qcom/lpm_levels.h
index 137a757..871ac79 100644
--- a/include/soc/qcom/lpm_levels.h
+++ b/include/soc/qcom/lpm_levels.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __SOC_QCOM_LPM_LEVEL_H__
@@ -16,9 +16,11 @@ struct system_pm_ops {
 
 #ifdef CONFIG_MSM_PM
 uint32_t register_system_pm_ops(struct system_pm_ops *pm_ops);
+void update_ipi_history(int cpu);
 #else
 static inline uint32_t register_system_pm_ops(struct system_pm_ops *pm_ops)
 { return -ENODEV; }
+static inline void update_ipi_history(int cpu) {}
 #endif
 
 #endif
diff --git a/include/soc/qcom/qmi_rmnet.h b/include/soc/qcom/qmi_rmnet.h
index d7ab66e..9d00f2a 100644
--- a/include/soc/qcom/qmi_rmnet.h
+++ b/include/soc/qcom/qmi_rmnet.h
@@ -45,7 +45,8 @@ qmi_rmnet_all_flows_enabled(struct net_device *dev)
 
 #ifdef CONFIG_QCOM_QMI_DFC
 void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id);
-void qmi_rmnet_qos_exit(struct net_device *dev, void *qos);
+void qmi_rmnet_qos_exit_pre(void *qos);
+void qmi_rmnet_qos_exit_post(void);
 void qmi_rmnet_burst_fc_check(struct net_device *dev,
 			      int ip_type, u32 mark, unsigned int len);
 int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb);
@@ -56,7 +57,11 @@ qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
 	return NULL;
 }
 
-static inline void qmi_rmnet_qos_exit(struct net_device *dev, void *qos)
+static inline void qmi_rmnet_qos_exit_pre(void *qos)
+{
+}
+
+static inline void qmi_rmnet_qos_exit_post(void)
 {
 }
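
The single qmi_rmnet_qos_exit() is split above into a per-device pre step
and a global post step. A sketch of the ordering this suggests for a
rmnet teardown path (priv->qos_info and the unregister call are
assumptions about the caller):

  qmi_rmnet_qos_exit_pre(priv->qos_info);  /* drop per-device QoS state */
  unregister_netdevice(dev);               /* no new users after this */
  qmi_rmnet_qos_exit_post();               /* final deferred cleanup */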
 
diff --git a/include/soc/qcom/rmnet_qmi.h b/include/soc/qcom/rmnet_qmi.h
index ffcef3f..843c557d 100644
--- a/include/soc/qcom/rmnet_qmi.h
+++ b/include/soc/qcom/rmnet_qmi.h
@@ -25,6 +25,7 @@ void rmnet_clear_powersave_format(void *port);
 void rmnet_get_packets(void *port, u64 *rx, u64 *tx);
 int rmnet_get_powersave_notif(void *port);
 struct net_device *rmnet_get_real_dev(void *port);
+int rmnet_get_dlmarker_info(void *port);
 #else
 static inline void *rmnet_get_qmi_pt(void *port)
 {
@@ -81,5 +82,10 @@ static inline struct net_device *rmnet_get_real_dev(void *port)
 {
 	return NULL;
 }
+
+static inline int rmnet_get_dlmarker_info(void *port)
+{
+	return 0;
+}
 #endif /* CONFIG_QCOM_QMI_RMNET */
 #endif /*_RMNET_QMI_H*/
diff --git a/include/soc/qcom/scm.h b/include/soc/qcom/scm.h
index b334bcc..a8d1b15 100644
--- a/include/soc/qcom/scm.h
+++ b/include/soc/qcom/scm.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __MACH_SCM_H
@@ -26,6 +26,7 @@
 #define SCM_SVC_SMMU_PROGRAM		0x15
 #define SCM_SVC_QDSS			0x16
 #define SCM_SVC_RTIC			0x19
+#define SCM_SVC_TSENS			0x1E
 #define SCM_SVC_TZSCHEDULER		0xFC
 
 #define SCM_FUSE_READ			0x7
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
index 82845d9..07bd544 100644
--- a/include/soc/qcom/secure_buffer.h
+++ b/include/soc/qcom/secure_buffer.h
@@ -33,6 +33,7 @@ enum vmid {
 	VMID_CP_SPSS_SP_SHARED = 0x22,
 	VMID_CP_SPSS_HLOS_SHARED = 0x24,
 	VMID_CP_CDSP = 0x2A,
+	VMID_NAV = 0x2B,
 	VMID_LAST,
 	VMID_INVAL = -1
 };
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index 0dec7c5..a808e7d 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -58,8 +58,12 @@
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,lito")
 #define early_machine_is_bengal()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,bengal")
+#define early_machine_is_bengalp()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,bengalp")
 #define early_machine_is_lagoon()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,lagoon")
+#define early_machine_is_scuba()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,scuba")
 #define early_machine_is_sdmshrike()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdmshrike")
 #define early_machine_is_sm6150()	\
@@ -93,7 +97,9 @@
 #define early_machine_is_kona()		0
 #define early_machine_is_lito()		0
 #define early_machine_is_bengal()	0
+#define early_machine_is_bengalp()	0
 #define early_machine_is_lagoon()	0
+#define early_machine_is_scuba()	0
 #define early_machine_is_sdmshrike()	0
 #define early_machine_is_sm6150()	0
 #define early_machine_is_qcs405()	0
@@ -124,7 +130,9 @@ enum msm_cpu {
 	MSM_CPU_KONA,
 	MSM_CPU_LITO,
 	MSM_CPU_BENGAL,
+	MSM_CPU_BENGALP,
 	MSM_CPU_LAGOON,
+	MSM_CPU_SCUBA,
 	MSM_CPU_SDMSHRIKE,
 	MSM_CPU_SM6150,
 	MSM_CPU_QCS405,
diff --git a/include/trace/events/dfc.h b/include/trace/events/dfc.h
index fb092bb..c0856ca 100644
--- a/include/trace/events/dfc.h
+++ b/include/trace/events/dfc.h
@@ -12,33 +12,25 @@
 
 TRACE_EVENT(dfc_qmi_tc,
 
-	TP_PROTO(const char *name, u8 bearer_id, u32 grant,
-		 int qlen, u32 tcm_handle, int enable),
+	TP_PROTO(const char *name, u32 txq, int enable),
 
-	TP_ARGS(name, bearer_id, grant, qlen, tcm_handle, enable),
+	TP_ARGS(name, txq, enable),
 
 	TP_STRUCT__entry(
 		__string(dev_name, name)
-		__field(u8, bid)
-		__field(u32, grant)
-		__field(int, qlen)
-		__field(u32, tcm_handle)
+		__field(u32, txq)
 		__field(int, enable)
 	),
 
 	TP_fast_assign(
 		__assign_str(dev_name, name);
-		__entry->bid = bearer_id;
-		__entry->grant = grant;
-		__entry->qlen = qlen;
-		__entry->tcm_handle = tcm_handle;
+		__entry->txq = txq;
 		__entry->enable = enable;
 	),
 
-	TP_printk("dev=%s bearer_id=%u grant=%u len=%d mq=%u %s",
+	TP_printk("dev=%s txq=%u %s",
 		__get_str(dev_name),
-		__entry->bid, __entry->grant, __entry->qlen,
-		__entry->tcm_handle,
+		__entry->txq,
 		__entry->enable ? "enable" : "disable")
 );
 
@@ -131,7 +123,7 @@ TRACE_EVENT(dfc_flow_info,
 		__entry->action = add;
 	),
 
-	TP_printk("%s: dev=%s bearer_id=%u flow_id=%u ip_type=%d mq=%d",
+	TP_printk("%s: dev=%s bearer_id=%u flow_id=%u ip_type=%d txq=%d",
 		__entry->action ? "add flow" : "delete flow",
 		__get_str(dev_name),
 		__entry->bid, __entry->fid, __entry->ip, __entry->handle)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 1406e29..dabbb87 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -1234,7 +1234,7 @@ TRACE_EVENT(sched_task_util,
 		__field(bool,		is_rtg)
 		__field(bool,		rtg_skip_min)
 		__field(int,		start_cpu)
-		__field(int,		unfilter)
+		__field(u32,		unfilter)
 	),
 
 	TP_fast_assign(
@@ -1260,7 +1260,7 @@ TRACE_EVENT(sched_task_util,
 #endif
 	),
 
-	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d candidates=%#lx best_energy_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d latency=%llu stune_boosted=%d is_rtg=%d rtg_skip_min=%d start_cpu=%d unfilter=%d",
+	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d candidates=%#lx best_energy_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d latency=%llu stune_boosted=%d is_rtg=%d rtg_skip_min=%d start_cpu=%d unfilter=%u",
 		__entry->pid, __entry->comm, __entry->util, __entry->prev_cpu,
 		__entry->candidates, __entry->best_energy_cpu, __entry->sync,
 		__entry->need_idle, __entry->fastpath, __entry->placement_boost,
diff --git a/include/trace/events/trace_msm_bus.h b/include/trace/events/trace_msm_bus.h
index 285f967..60058f4 100644
--- a/include/trace/events/trace_msm_bus.h
+++ b/include/trace/events/trace_msm_bus.h
@@ -202,6 +202,42 @@ TRACE_EVENT(bus_client_status,
 		__entry->vote_count)
 );
 
+TRACE_EVENT(bus_bcm_client_status,
+
+	TP_PROTO(const char *bcm, const char *client,
+		unsigned long long act_ab, unsigned long long act_ib,
+		unsigned long long slp_ab, unsigned long long slp_ib),
+
+	TP_ARGS(bcm, client, act_ab, act_ib, slp_ab, slp_ib),
+
+	TP_STRUCT__entry(
+		__string(bcm, bcm)
+		__string(client, client)
+		__field(u64, act_ab)
+		__field(u64, act_ib)
+		__field(u64, slp_ab)
+		__field(u64, slp_ib)
+	),
+
+	TP_fast_assign(
+		__assign_str(bcm, bcm);
+		__assign_str(client, client);
+		__entry->act_ab = act_ab;
+		__entry->act_ib = act_ib;
+		__entry->slp_ab = slp_ab;
+		__entry->slp_ib = slp_ib;
+	),
+
+	TP_printk(
+		"bcm=%s cl=%s act_ab=%llu act_ib=%llu slp_ab=%llu slp_ib=%llu",
+		__get_str(bcm),
+		__get_str(client),
+		(unsigned long long)__entry->act_ab,
+		(unsigned long long)__entry->act_ib,
+		(unsigned long long)__entry->slp_ab,
+		(unsigned long long)__entry->slp_ib)
+);
+
 TRACE_EVENT(bus_agg_bw,
 
 	TP_PROTO(unsigned int node_id, int rpm_id, int ctx_set,
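
For reference, the new bus_bcm_client_status event above would be emitted
from the bus scaling driver along these lines (a sketch; the bcm, cl and
agg structures are placeholders):

  trace_bus_bcm_client_status(bcm->name, cl->name,
                              agg->act_ab, agg->act_ib,
                              agg->slp_ab, agg->slp_ib);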
diff --git a/include/trace/events/walt.h b/include/trace/events/walt.h
index 14c15ac..f474351 100644
--- a/include/trace/events/walt.h
+++ b/include/trace/events/walt.h
@@ -204,10 +204,10 @@ TRACE_EVENT(sched_get_task_cpu_cycles,
 TRACE_EVENT(sched_update_task_ravg,
 
 	TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
-		 u64 wallclock, u64 irqtime, u64 cycles, u64 exec_time,
+		 u64 wallclock, u64 irqtime,
 		 struct group_cpu_time *cpu_time),
 
-	TP_ARGS(p, rq, evt, wallclock, irqtime, cycles, exec_time, cpu_time),
+	TP_ARGS(p, rq, evt, wallclock, irqtime, cpu_time),
 
 	TP_STRUCT__entry(
 		__array(char,			comm, TASK_COMM_LEN)
@@ -250,7 +250,7 @@ TRACE_EVENT(sched_update_task_ravg,
 		__entry->evt            = evt;
 		__entry->cpu            = rq->cpu;
 		__entry->cur_pid        = rq->curr->pid;
-		__entry->cur_freq       = cpu_cycles_to_freq(cycles, exec_time);
+		__entry->cur_freq       = rq->task_exec_scale;
 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
 		__entry->pid            = p->pid;
 		__entry->mark_start     = p->ravg.mark_start;
@@ -301,10 +301,10 @@ TRACE_EVENT(sched_update_task_ravg,
 TRACE_EVENT(sched_update_task_ravg_mini,
 
 	TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
-		 u64 wallclock, u64 irqtime, u64 cycles, u64 exec_time,
+		 u64 wallclock, u64 irqtime,
 		 struct group_cpu_time *cpu_time),
 
-	TP_ARGS(p, rq, evt, wallclock, irqtime, cycles, exec_time, cpu_time),
+	TP_ARGS(p, rq, evt, wallclock, irqtime, cpu_time),
 
 	TP_STRUCT__entry(
 		__array(char,			comm, TASK_COMM_LEN)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 1e272f6..1df8348 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -52,6 +52,7 @@
 #include <linux/nmi.h>
 
 #include <asm/sections.h>
+#include <asm/stacktrace.h>
 
 #include "lockdep_internals.h"
 
@@ -72,6 +73,30 @@ module_param(lock_stat, int, 0644);
 #define lock_stat 0
 #endif
 
+static int lockdep_log = 1;
+
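+/*
+ * One-shot reporting: lockdep_logging_off() returns 1 only for the
+ * first reported problem (and only when reports are not silenced);
+ * later problems are suppressed without turning lock debugging off.
+ */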
+static int lockdep_logging_off(void)
+{
+	if (lockdep_log && xchg(&lockdep_log, 0)) {
+		if (!debug_locks_silent)
+			return 1;
+	}
+	return 0;
+}
+
+#define MAX_ITR 20
+#define lockdep_warn_on(cond)						\
+({									\
+	int __ret = 0;							\
+									\
+	if (!oops_in_progress && unlikely(cond)) {                      \
+		if (lockdep_logging_off() && !debug_locks_silent)       \
+			WARN(1, "lockdep_warn_on(%s)", #cond);          \
+		__ret = 1;						\
+	}								\
+	__ret;								\
+})
+
 /*
  * lockdep_lock: protects the lockdep graph, the hashes and the
  *               class/list/hash allocators.
@@ -107,7 +132,7 @@ static inline int graph_unlock(void)
 		 * The lockdep graph lock isn't locked while we expect it to
 		 * be, we're confused now, bye!
 		 */
-		return DEBUG_LOCKS_WARN_ON(1);
+		return lockdep_warn_on(1);
 	}
 
 	current->lockdep_recursion--;
@@ -115,6 +140,14 @@ static inline int graph_unlock(void)
 	return 0;
 }
 
+static int lockdep_logging_off_graph_unlock(void)
+{
+	int ret = lockdep_logging_off();
+
+	graph_unlock();
+	return ret;
+}
+
 /*
  * Turn lock debugging off and return with 0 if it was off already,
  * and also release the graph lock:
@@ -146,7 +179,7 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 		/*
 		 * Someone passed in garbage, we give up.
 		 */
-		DEBUG_LOCKS_WARN_ON(1);
+		lockdep_warn_on(1);
 		return NULL;
 	}
 	return lock_classes + hlock->class_idx - 1;
@@ -413,7 +446,7 @@ static int save_trace(struct stack_trace *trace)
 	nr_stack_trace_entries += trace->nr_entries;
 
 	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
-		if (!debug_locks_off_graph_unlock())
+		if (!lockdep_logging_off_graph_unlock())
 			return 0;
 
 		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
@@ -720,10 +753,8 @@ static bool assign_lock_key(struct lockdep_map *lock)
 		lock->key = (void *)lock;
 	else {
 		/* Debug-check: all keys must be persistent! */
-		debug_locks_off();
 		pr_err("INFO: trying to register non-static key.\n");
 		pr_err("the code is fine but needs lockdep annotation.\n");
-		pr_err("turning off the locking correctness validator.\n");
 		dump_stack();
 		return false;
 	}
@@ -1193,7 +1224,7 @@ static noinline int print_circular_bug(struct lock_list *this,
 	struct lock_list *first_parent;
 	int depth;
 
-	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+	if (!lockdep_logging_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
 	if (!save_trace(&this->trace))
@@ -1508,7 +1539,7 @@ print_bad_irq_dependency(struct task_struct *curr,
 			 enum lock_usage_bit bit2,
 			 const char *irqclass)
 {
-	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+	if (!lockdep_logging_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
 	pr_warn("\n");
@@ -1737,7 +1768,7 @@ static int
 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 		   struct held_lock *next)
 {
-	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+	if (!lockdep_logging_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
 	pr_warn("\n");
@@ -2000,7 +2031,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 	}
 	return 1;
 out_bug:
-	if (!debug_locks_off_graph_unlock())
+	if (!lockdep_logging_off_graph_unlock())
 		return 0;
 
 	/*
@@ -2130,7 +2161,7 @@ static int check_no_collision(struct task_struct *curr,
 
 	i = get_first_held_lock(curr, hlock);
 
-	if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
+	if (lockdep_warn_on(chain->depth != curr->lockdep_depth - (i - 1))) {
 		print_collision(curr, hlock, chain);
 		return 0;
 	}
@@ -2138,7 +2169,7 @@ static int check_no_collision(struct task_struct *curr,
 	for (j = 0; j < chain->depth - 1; j++, i++) {
 		id = curr->held_locks[i].class_idx - 1;
 
-		if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
+		if (lockdep_warn_on(chain_hlocks[chain->base + j] != id)) {
 			print_collision(curr, hlock, chain);
 			return 0;
 		}
@@ -2399,7 +2430,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
 		int ret = check_deadlock(curr, hlock, lock, hlock->read);
 
 		if (!ret)
-			return 0;
+			return 1;
 		/*
 		 * Mark recursive read, as we jump over it when
 		 * building dependencies (just like we jump over
@@ -2413,7 +2444,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
 		 */
 		if (!chain_head && ret != 2) {
 			if (!check_prevs_add(curr, hlock))
-				return 0;
+				return 1;
 		}
 
 		graph_unlock();
@@ -2507,7 +2538,7 @@ static int
 print_usage_bug(struct task_struct *curr, struct held_lock *this,
 		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
 {
-	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+	if (!lockdep_logging_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
 	pr_warn("\n");
@@ -2572,7 +2603,7 @@ print_irq_inversion_bug(struct task_struct *curr,
 	struct lock_list *middle = NULL;
 	int depth;
 
-	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+	if (!lockdep_logging_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
 	pr_warn("\n");
@@ -2992,40 +3023,41 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
 			if (curr->hardirq_context)
 				if (!mark_lock(curr, hlock,
 						LOCK_USED_IN_HARDIRQ_READ))
-					return 0;
+					goto out;
 			if (curr->softirq_context)
 				if (!mark_lock(curr, hlock,
 						LOCK_USED_IN_SOFTIRQ_READ))
-					return 0;
+					goto out;
 		} else {
 			if (curr->hardirq_context)
 				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
-					return 0;
+					goto out;
 			if (curr->softirq_context)
 				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
-					return 0;
+					goto out;
 		}
 	}
 	if (!hlock->hardirqs_off) {
 		if (hlock->read) {
 			if (!mark_lock(curr, hlock,
 					LOCK_ENABLED_HARDIRQ_READ))
-				return 0;
+				goto out;
 			if (curr->softirqs_enabled)
 				if (!mark_lock(curr, hlock,
 						LOCK_ENABLED_SOFTIRQ_READ))
-					return 0;
+					goto out;
 		} else {
 			if (!mark_lock(curr, hlock,
 					LOCK_ENABLED_HARDIRQ))
-				return 0;
+				goto out;
 			if (curr->softirqs_enabled)
 				if (!mark_lock(curr, hlock,
 						LOCK_ENABLED_SOFTIRQ))
-					return 0;
+					goto out;
 		}
 	}
 
+out:
 	return 1;
 }
 
@@ -3132,7 +3164,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 		debug_atomic_dec(nr_unused_locks);
 		break;
 	default:
-		if (!debug_locks_off_graph_unlock())
+		if (!lockdep_logging_off_graph_unlock())
 			return 0;
 		WARN_ON(1);
 		return 0;
@@ -3171,7 +3203,7 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
 	/*
 	 * Can't be having no nameless bastards around this place!
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!name)) {
+	if (lockdep_warn_on(!name)) {
 		lock->name = "NULL";
 		return;
 	}
@@ -3181,17 +3213,22 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
 	/*
 	 * No key, no joy, we need to hash something.
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!key))
+	if (lockdep_warn_on(!key))
 		return;
 	/*
 	 * Sanity check, the lock-class key must be persistent:
 	 */
 	if (!static_obj(key)) {
+		/*
+		 * to make sure register_lock_class returns NULL
+		 * for a non static key
+		 */
+		lock->key = key;
 		printk("BUG: key %px not in .data!\n", key);
 		/*
 		 * What it says above ^^^^^, I suggest you read it.
 		 */
-		DEBUG_LOCKS_WARN_ON(1);
+		lockdep_warn_on(1);
 		return;
 	}
 	lock->key = key;
@@ -3202,7 +3239,7 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
 	if (subclass) {
 		unsigned long flags;
 
-		if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
+		if (lockdep_warn_on(current->lockdep_recursion))
 			return;
 
 		raw_local_irq_save(flags);
@@ -3228,9 +3265,7 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
 				struct held_lock *hlock,
 				unsigned long ip)
 {
-	if (!debug_locks_off())
-		return 0;
-	if (debug_locks_silent)
+	if (!lockdep_logging_off() || debug_locks_silent)
 		return 0;
 
 	pr_warn("\n");
@@ -3259,6 +3294,33 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
 
 static int __lock_is_held(const struct lockdep_map *lock, int read);
 
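+/*
+ * Unwind the current stack until @addr is found and return the PC of
+ * the frame that called it, or 0 if @addr is not found within MAX_ITR
+ * frames or the unwind fails.
+ */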
+static unsigned long lockdep_walk_stack(unsigned long addr)
+{
+	int ret;
+	struct stackframe frame;
+	struct task_struct *tsk = current;
+	bool end_search = false;
+	int counter = 0;
+
+	frame.fp = (unsigned long)__builtin_frame_address(0);
+	frame.pc = (unsigned long)lockdep_walk_stack;
+
+	while (counter < MAX_ITR) {
+		counter++;
+		if (frame.pc == addr)
+			end_search = true;
+
+		ret = unwind_frame(tsk, &frame);
+		if ((ret < 0) || end_search)
+			break;
+	}
+
+	if (unlikely(ret < 0) || unlikely(counter == MAX_ITR))
+		return 0;
+
+	return frame.pc;
+}
+
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
  * We maintain the dependency maps and validate the locking attempt:
@@ -3266,7 +3328,8 @@ static int __lock_is_held(const struct lockdep_map *lock, int read);
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 			  int trylock, int read, int check, int hardirqs_off,
 			  struct lockdep_map *nest_lock, unsigned long ip,
-			  int references, int pin_count)
+			  int references, int pin_count,
+			  unsigned long ip_caller)
 {
 	struct task_struct *curr = current;
 	struct lock_class *class = NULL;
@@ -3347,7 +3410,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	 * Plain impossible, we just registered it and checked it weren't no
 	 * NULL like.. I bet this mushroom I ate was good!
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!class))
+	if (lockdep_warn_on(!class))
 		return 0;
 	hlock->class_idx = class_idx;
 	hlock->acquire_ip = ip;
@@ -3364,13 +3427,16 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	hlock->holdtime_stamp = lockstat_clock();
 #endif
 	hlock->pin_count = pin_count;
+	if (!ip_caller)
+		hlock->acquire_ip_caller = lockdep_walk_stack(ip);
+	else
+		hlock->acquire_ip_caller = ip_caller;
 
 	if (check && !mark_irqflags(curr, hlock))
 		return 0;
 
 	/* mark it as used: */
-	if (!mark_lock(curr, hlock, LOCK_USED))
-		return 0;
+	mark_lock(curr, hlock, LOCK_USED);
 
 	/*
 	 * Calculate the chain hash: it's the combined hash of all the
@@ -3393,7 +3459,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		/*
 		 * How can we have a chain hash when we ain't got no keys?!
 		 */
-		if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
+		if (lockdep_warn_on(chain_key != 0))
 			return 0;
 		chain_head = 1;
 	}
@@ -3441,9 +3507,7 @@ static int
 print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 			   unsigned long ip)
 {
-	if (!debug_locks_off())
-		return 0;
-	if (debug_locks_silent)
+	if (!lockdep_logging_off() || debug_locks_silent)
 		return 0;
 
 	pr_warn("\n");
@@ -3492,7 +3556,7 @@ static int match_held_lock(const struct held_lock *hlock,
 		 * State got messed up, follow the sites that change ->references
 		 * and try to make sense of it.
 		 */
-		if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
+		if (lockdep_warn_on(!hlock->nest_lock))
 			return 0;
 
 		if (hlock->class_idx == class - lock_classes + 1)
@@ -3550,7 +3614,8 @@ static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
 				    hlock->read, hlock->check,
 				    hlock->hardirqs_off,
 				    hlock->nest_lock, hlock->acquire_ip,
-				    hlock->references, hlock->pin_count))
+				    hlock->references, hlock->pin_count,
+				    hlock->acquire_ip_caller))
 			return 1;
 	}
 	return 0;
@@ -3572,7 +3637,7 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
 	 * This function is about (re)setting the class of a held lock,
 	 * yet we're not actually holding any locks. Naughty user!
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!depth))
+	if (lockdep_warn_on(!depth))
 		return 0;
 
 	hlock = find_held_lock(curr, lock, depth, &i);
@@ -3581,6 +3646,9 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
 
 	lockdep_init_map(lock, name, key, 0);
 	class = register_lock_class(lock, subclass, 0);
+	if (!class)
+		return 0;
+
 	hlock->class_idx = class - lock_classes + 1;
 
 	curr->lockdep_depth = i;
@@ -3593,7 +3661,7 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
 	 * I took it apart and put it back together again, except now I have
 	 * these 'spare' parts.. where shall I put them.
 	 */
-	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
+	if (lockdep_warn_on(curr->lockdep_depth != depth))
 		return 0;
 	return 1;
 }
@@ -3613,7 +3681,7 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
 	 * This function is about (re)setting the class of a held lock,
 	 * yet we're not actually holding any locks. Naughty user!
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!depth))
+	if (lockdep_warn_on(!depth))
 		return 0;
 
 	hlock = find_held_lock(curr, lock, depth, &i);
@@ -3623,9 +3691,12 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
 	curr->lockdep_depth = i;
 	curr->curr_chain_key = hlock->prev_chain_key;
 
-	WARN(hlock->read, "downgrading a read lock");
+	if (lockdep_logging_off())
+		WARN(hlock->read, "downgrading a read lock");
+
 	hlock->read = 1;
 	hlock->acquire_ip = ip;
+	hlock->acquire_ip_caller = lockdep_walk_stack(ip);
 
 	if (reacquire_held_locks(curr, depth, i))
 		return 0;
@@ -3634,7 +3705,7 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
 	 * I took it apart and put it back together again, except now I have
 	 * these 'spare' parts.. where shall I put them.
 	 */
-	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
+	if (lockdep_warn_on(curr->lockdep_depth != depth))
 		return 0;
 	return 1;
 }
@@ -3662,8 +3733,8 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	 * So we're all set to release this lock.. wait what lock? We don't
 	 * own any locks, you've been drinking again?
 	 */
-	if (DEBUG_LOCKS_WARN_ON(depth <= 0))
-		 return print_unlock_imbalance_bug(curr, lock, ip);
+	if (lockdep_warn_on(depth <= 0))
+		return print_unlock_imbalance_bug(curr, lock, ip);
 
 	/*
 	 * Check whether the lock exists in the current stack
@@ -3676,7 +3747,8 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	if (hlock->instance == lock)
 		lock_release_holdtime(hlock);
 
-	WARN(hlock->pin_count, "releasing a pinned lock\n");
+	if (lockdep_logging_off())
+		WARN(hlock->pin_count, "releasing a pinned lock\n");
 
 	if (hlock->references) {
 		hlock->references--;
@@ -3706,7 +3778,7 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	 * We had N bottles of beer on the wall, we drank one, but now
 	 * there's not N-1 bottles of beer left on the wall...
 	 */
-	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
+	if (lockdep_warn_on(curr->lockdep_depth != depth - 1))
 		return 0;
 
 	return 1;
@@ -3755,7 +3827,9 @@ static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
 		}
 	}
 
-	WARN(1, "pinning an unheld lock\n");
+	if (lockdep_logging_off())
+		WARN(1, "pinning an unheld lock\n");
+
 	return cookie;
 }
 
@@ -3776,7 +3850,8 @@ static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie
 		}
 	}
 
-	WARN(1, "pinning an unheld lock\n");
+	if (lockdep_logging_off())
+		WARN(1, "pinning an unheld lock\n");
 }
 
 static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
@@ -3791,19 +3866,26 @@ static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie
 		struct held_lock *hlock = curr->held_locks + i;
 
 		if (match_held_lock(hlock, lock)) {
-			if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
+			if (!hlock->pin_count) {
+				if (lockdep_logging_off())
+					WARN(1, "unpinning an unpinned lock\n");
 				return;
+			}
 
 			hlock->pin_count -= cookie.val;
 
-			if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
+			if ((int)hlock->pin_count < 0) {
+				if (lockdep_logging_off())
+					WARN(1, "pin count corrupted\n");
 				hlock->pin_count = 0;
+			}
 
 			return;
 		}
 	}
 
-	WARN(1, "unpinning an unheld lock\n");
+	if (lockdep_logging_off())
+		WARN(1, "unpinning an unheld lock\n");
 }
 
 /*
@@ -3901,7 +3983,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	current->lockdep_recursion = 1;
 	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
 	__lock_acquire(lock, subclass, trylock, read, check,
-		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
+		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0, 0);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
@@ -4005,9 +4087,7 @@ static int
 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
 			   unsigned long ip)
 {
-	if (!debug_locks_off())
-		return 0;
-	if (debug_locks_silent)
+	if (!lockdep_logging_off() || debug_locks_silent)
 		return 0;
 
 	pr_warn("\n");
@@ -4044,7 +4124,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 	 * Whee, we contended on this lock, except it seems we're not
 	 * actually trying to acquire anything much at all..
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!depth))
+	if (lockdep_warn_on(!depth))
 		return;
 
 	hlock = find_held_lock(curr, lock, depth, &i);
@@ -4086,7 +4166,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 	 * Yay, we acquired ownership of this lock we didn't try to
 	 * acquire, how the heck did that happen?
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!depth))
+	if (lockdep_warn_on(!depth))
 		return;
 
 	hlock = find_held_lock(curr, lock, depth, &i);
@@ -4119,6 +4199,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 
 	lock->cpu = cpu;
 	lock->ip = ip;
+	lock->ip_caller = lockdep_walk_stack(ip);
 }
 
 void lock_contended(struct lockdep_map *lock, unsigned long ip)
@@ -4298,7 +4379,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 				match |= class == lock->class_cache[j];
 
 			if (unlikely(match)) {
-				if (debug_locks_off_graph_unlock()) {
+				if (lockdep_logging_off_graph_unlock()) {
 					/*
 					 * We all just reset everything, how did it match?
 					 */
@@ -4347,9 +4428,7 @@ static void
 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
 		     const void *mem_to, struct held_lock *hlock)
 {
-	if (!debug_locks_off())
-		return;
-	if (debug_locks_silent)
+	if (!lockdep_logging_off() || debug_locks_silent)
 		return;
 
 	pr_warn("\n");
@@ -4405,9 +4484,7 @@ EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
 static void print_held_locks_bug(void)
 {
-	if (!debug_locks_off())
-		return;
-	if (debug_locks_silent)
+	if (!lockdep_logging_off() || debug_locks_silent)
 		return;
 
 	pr_warn("\n");
@@ -4474,7 +4551,7 @@ asmlinkage __visible void lockdep_sys_exit(void)
 	struct task_struct *curr = current;
 
 	if (unlikely(curr->lockdep_depth)) {
-		if (!debug_locks_off())
+		if (!lockdep_logging_off())
 			return;
 		pr_warn("\n");
 		pr_warn("================================================\n");
@@ -4497,6 +4574,9 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 {
 	struct task_struct *curr = current;
 
+	if (!lockdep_logging_off())
+		return;
+
 	/* Note: the following can be executed concurrently, so be careful. */
 	pr_warn("\n");
 	pr_warn("=============================\n");
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 0b2c2ad..062d278 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -595,14 +595,14 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
 	struct rcu_node *rnp;
 
 	synchronize_sched_expedited_wait(rsp);
-	rcu_exp_gp_seq_end(rsp);
-	trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
 
-	/*
-	 * Switch over to wakeup mode, allowing the next GP, but -only- the
-	 * next GP, to proceed.
+	/*
+	 * Switch over to wakeup mode, allowing the next GP to proceed.
+	 * End the previous grace period only after acquiring the mutex
+	 * to ensure that only one GP runs concurrently with wakeups.
 	 */
 	mutex_lock(&rsp->exp_wake_mutex);
+	rcu_exp_gp_seq_end(rsp);
+	trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
 
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
@@ -613,7 +613,7 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
 			spin_unlock(&rnp->exp_lock);
 		}
 		smp_mb(); /* All above changes before wakeup. */
-		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rsp->expedited_sequence) & 0x3]);
+		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
 	}
 	trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
 	mutex_unlock(&rsp->exp_wake_mutex);
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index d541d5e..e9da7954 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -267,7 +267,7 @@ static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
 
 	if (use_pelt())
 		sg_policy->work_in_progress = true;
-	irq_work_queue(&sg_policy->irq_work);
+	sched_irq_work_queue(&sg_policy->irq_work);
 }
 
 #define TARGET_LOAD 80
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 785c211..30d04f3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7619,7 +7619,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
 	struct perf_domain *pd;
 	struct sched_domain *sd;
 	cpumask_t *candidates;
-	bool is_rtg;
+	bool is_rtg, curr_is_rtg;
 	struct find_best_target_env fbt_env;
 	bool need_idle = wake_to_idle(p);
 	int placement_boost = task_boost_policy(p);
@@ -7633,6 +7633,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
 		goto eas_not_ready;
 
 	is_rtg = task_in_related_thread_group(p);
+	curr_is_rtg = task_in_related_thread_group(cpu_rq(cpu)->curr);
 
 	fbt_env.fastpath = 0;
 
@@ -7643,7 +7644,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
 	candidates = this_cpu_ptr(&energy_cpus);
 	cpumask_clear(candidates);
 
-	if (need_idle)
+	if (sync && (need_idle || (is_rtg && curr_is_rtg)))
 		sync = 0;
 
 	if (sysctl_sched_sync_hint_enable && sync &&
@@ -9386,6 +9387,11 @@ group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
 	if (sgs->sum_nr_running <= sgs->group_weight)
 		return false;
 
+#ifdef CONFIG_SCHED_WALT
+	if (env->idle != CPU_NOT_IDLE && walt_rotation_enabled)
+		return true;
+#endif
+
 	if ((sgs->group_capacity * 100) <
 			(sgs->group_util * env->sd->imbalance_pct))
 		return true;
@@ -11544,6 +11550,20 @@ static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle
 static inline void nohz_newidle_balance(struct rq *this_rq) { }
 #endif /* CONFIG_NO_HZ_COMMON */
 
+static bool silver_has_big_tasks(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		if (!is_min_capacity_cpu(cpu))
+			break;
+		if (cpu_rq(cpu)->walt_stats.nr_big_tasks)
+			return true;
+	}
+
+	return false;
+}
+
 /*
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
@@ -11555,6 +11575,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 	struct sched_domain *sd;
 	int pulled_task = 0;
 	u64 curr_cost = 0;
+	u64 avg_idle = this_rq->avg_idle;
 
 	if (cpu_isolated(this_cpu))
 		return 0;
@@ -11570,7 +11591,9 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 	 */
 	if (!cpu_active(this_cpu))
 		return 0;
-
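+	/*
+	 * When a big CPU goes newly idle while the minimum-capacity
+	 * cluster still has big tasks queued (and this rq has no iowait
+	 * pending), ignore avg_idle so the balance below can pull them.
+	 */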
+	if (!is_min_capacity_cpu(this_cpu) && silver_has_big_tasks() &&
+	    atomic_read(&this_rq->nr_iowait) == 0)
+		avg_idle = ULLONG_MAX;
+
 	/*
 	 * This is OK, because current is on_cpu, which avoids it being picked
 	 * for load-balance and preemption/IRQs are still disabled avoiding
@@ -11579,7 +11602,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 	 */
 	rq_unpin_lock(this_rq, rf);
 
-	if (this_rq->avg_idle < sysctl_sched_migration_cost ||
+	if (avg_idle < sysctl_sched_migration_cost ||
 	    !READ_ONCE(this_rq->rd->overload)) {
 
 		rcu_read_lock();
@@ -11604,7 +11627,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 		if (!(sd->flags & SD_LOAD_BALANCE))
 			continue;
 
-		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
+		if (avg_idle < curr_cost + sd->max_newidle_lb_cost) {
 			update_next_balance(sd, &next_balance);
 			break;
 		}
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 4170afd..d78f53b 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -619,7 +619,8 @@ void psi_emergency_trigger(void)
 
 		/* Generate an event */
 		if (cmpxchg(&t->event, 0, 1) == 0) {
-			mod_timer(&t->wdog_timer, (unsigned long)t->win.size);
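+			/* Re-arm the watchdog two windows from now. */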
+			mod_timer(&t->wdog_timer, jiffies +
+					  nsecs_to_jiffies(2 * t->win.size));
 			wake_up_interruptible(&t->event_wait);
 		}
 		t->last_event_time = now;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9b1b472..102bda8 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -108,11 +108,6 @@ struct walt_sched_stats {
 	u64 pred_demands_sum_scaled;
 };
 
-struct cpu_cycle {
-	u64 cycles;
-	u64 time;
-};
-
 struct group_cpu_time {
 	u64 curr_runnable_sum;
 	u64 prev_runnable_sum;
@@ -998,7 +993,7 @@ struct rq {
 	u64			avg_irqload;
 	u64			irqload_ts;
 	struct task_struct	*ed_task;
-	struct cpu_cycle	cc;
+	u64			task_exec_scale;
 	u64			old_busy_time, old_busy_time_group;
 	u64			old_estimated_time;
 	u64			curr_runnable_sum;
@@ -3096,3 +3091,13 @@ struct sched_avg_stats {
 	int nr_scaled;
 };
 extern void sched_get_nr_running_avg(struct sched_avg_stats *stats);
+
+#ifdef CONFIG_SMP
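+/*
+ * Queue irq_work on the local CPU when it is online, otherwise on any
+ * online CPU, so the work is not stranded on a CPU that is going down.
+ */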
+static inline void sched_irq_work_queue(struct irq_work *work)
+{
+	if (likely(cpu_online(raw_smp_processor_id())))
+		irq_work_queue(work);
+	else
+		irq_work_queue_on(work, cpumask_any(cpu_online_mask));
+}
+#endif
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 9ae415a..be7ed1b 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -123,13 +123,18 @@ static __read_mostly unsigned int sched_io_is_busy = 1;
 __read_mostly unsigned int sysctl_sched_window_stats_policy =
 	WINDOW_STATS_MAX_RECENT_AVG;
 
-__read_mostly unsigned int sysctl_sched_ravg_window_nr_ticks =
+unsigned int sysctl_sched_ravg_window_nr_ticks = (HZ / NR_WINDOWS_PER_SEC);
+
+static unsigned int display_sched_ravg_window_nr_ticks =
 	(HZ / NR_WINDOWS_PER_SEC);
 
+unsigned int sysctl_sched_dynamic_ravg_window_enable = (HZ == 250);
+
 /* Window size (in ns) */
 __read_mostly unsigned int sched_ravg_window = MIN_SCHED_RAVG_WINDOW;
 __read_mostly unsigned int new_sched_ravg_window = MIN_SCHED_RAVG_WINDOW;
 
+static DEFINE_SPINLOCK(sched_ravg_window_lock);
 u64 sched_ravg_window_change_time;
 /*
  * An after-boot constant divisor for cpu_util_freq_walt() to apply the load
@@ -982,7 +987,7 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
 	if (!same_freq_domain(new_cpu, task_cpu(p))) {
 		src_rq->notif_pending = true;
 		dest_rq->notif_pending = true;
-		irq_work_queue(&walt_migration_irq_work);
+		sched_irq_work_queue(&walt_migration_irq_work);
 	}
 
 	if (is_ed_enabled()) {
@@ -1376,7 +1381,7 @@ static void rollover_task_window(struct task_struct *p, bool full_window)
 		p->ravg.curr_window_cpu[i] = 0;
 	}
 
-	if (p->ravg.active_time < NEW_TASK_ACTIVE_TIME)
+	if (is_new_task(p))
 		p->ravg.active_time += p->ravg.last_win_size;
 }
 
@@ -1430,14 +1435,7 @@ static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
 
 static inline u64 scale_exec_time(u64 delta, struct rq *rq)
 {
-	u32 freq;
-
-	freq = cpu_cycles_to_freq(rq->cc.cycles, rq->cc.time);
-	delta = DIV64_U64_ROUNDUP(delta * freq, max_possible_freq);
-	delta *= rq->cluster->exec_scale_factor;
-	delta >>= 10;
-
-	return delta;
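+	/*
+	 * task_exec_scale is a 1024-based factor combining the current
+	 * frequency (relative to the cluster fmax) with the CPU's
+	 * capacity, so the shift scales delta to fmax-equivalent time.
+	 */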
+	return (delta * rq->task_exec_scale) >> 10;
 }
 
 /* Convert busy time to frequency equivalent
@@ -1515,8 +1513,6 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
 	if (new_window)
 		full_window = (window_start - mark_start) >= window_size;
 
-	new_task = is_new_task(p);
-
 	/*
 	 * Handle per-task window rollover. We don't care about the idle
 	 * task or exiting tasks.
@@ -1526,6 +1522,8 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
 			rollover_task_window(p, full_window);
 	}
 
+	new_task = is_new_task(p);
+
 	if (p_is_curr_task && new_window) {
 		rollover_cpu_window(rq, full_window);
 		rollover_top_tasks(rq, full_window);
@@ -1792,7 +1790,7 @@ account_busy_for_task_demand(struct rq *rq, struct task_struct *p, int event)
 	return 1;
 }
 
-unsigned int sysctl_sched_task_unfilter_nr_windows = 10;
+unsigned int sysctl_sched_task_unfilter_period = 200000000;
 
 /*
  * Called when new window is starting for a task, to record cpu usage over
@@ -1875,11 +1873,11 @@ static void update_history(struct rq *rq, struct task_struct *p,
 	p->ravg.pred_demand_scaled = pred_demand_scaled;
 
 	if (demand_scaled > sched_task_filter_util)
-		p->unfilter = sysctl_sched_task_unfilter_nr_windows;
+		p->unfilter = sysctl_sched_task_unfilter_period;
 	else
 		if (p->unfilter)
-			p->unfilter = p->unfilter - 1;
-
+			p->unfilter = max_t(int, 0,
+				p->unfilter - p->ravg.last_win_size);
 done:
 	trace_sched_update_history(rq, p, runtime, samples, event);
 }
@@ -2014,13 +2012,16 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
 			  u64 wallclock, u64 irqtime)
 {
 	u64 cur_cycles;
+	u64 cycles_delta;
+	u64 time_delta;
 	int cpu = cpu_of(rq);
 
 	lockdep_assert_held(&rq->lock);
 
 	if (!use_cycle_counter) {
-		rq->cc.cycles = cpu_cur_freq(cpu);
-		rq->cc.time = 1;
+		rq->task_exec_scale = DIV64_U64_ROUNDUP(cpu_cur_freq(cpu) *
+				topology_get_cpu_scale(NULL, cpu),
+				rq->cluster->max_possible_freq);
 		return;
 	}
 
@@ -2035,10 +2036,10 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
 	 */
 	if (!is_idle_task(rq->curr) || irqtime) {
 		if (unlikely(cur_cycles < p->cpu_cycles))
-			rq->cc.cycles = cur_cycles + (U64_MAX - p->cpu_cycles);
+			cycles_delta = cur_cycles + (U64_MAX - p->cpu_cycles);
 		else
-			rq->cc.cycles = cur_cycles - p->cpu_cycles;
-		rq->cc.cycles = rq->cc.cycles * NSEC_PER_MSEC;
+			cycles_delta = cur_cycles - p->cpu_cycles;
+		cycles_delta = cycles_delta * NSEC_PER_MSEC;
 
 		if (event == IRQ_UPDATE && is_idle_task(p))
 			/*
@@ -2046,20 +2047,24 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
 			 * entry time is CPU cycle counter stall period.
 			 * Upon IRQ handler entry sched_account_irqstart()
 			 * replenishes idle task's cpu cycle counter so
-			 * rq->cc.cycles now represents increased cycles during
+			 * cycles_delta now represents increased cycles during
 			 * IRQ handler rather than time between idle entry and
 			 * IRQ exit.  Thus use irqtime as time delta.
 			 */
-			rq->cc.time = irqtime;
+			time_delta = irqtime;
 		else
-			rq->cc.time = wallclock - p->ravg.mark_start;
-		SCHED_BUG_ON((s64)rq->cc.time < 0);
+			time_delta = wallclock - p->ravg.mark_start;
+		SCHED_BUG_ON((s64)time_delta < 0);
+
+		rq->task_exec_scale = DIV64_U64_ROUNDUP(cycles_delta *
+				topology_get_cpu_scale(NULL, cpu),
+				time_delta * rq->cluster->max_possible_freq);
 	}
 
 	p->cpu_cycles = cur_cycles;
 
 	trace_sched_get_task_cpu_cycles(cpu, event,
-					rq->cc.cycles, rq->cc.time, p);
+					cycles_delta, time_delta, p);
 }
 
 static inline void run_walt_irq_work(u64 old_window_start, struct rq *rq)
@@ -2072,7 +2077,7 @@ static inline void run_walt_irq_work(u64 old_window_start, struct rq *rq)
 	result = atomic64_cmpxchg(&walt_irq_work_lastq_ws, old_window_start,
 				   rq->window_start);
 	if (result == old_window_start)
-		irq_work_queue(&walt_cpufreq_irq_work);
+		sched_irq_work_queue(&walt_cpufreq_irq_work);
 }
 
 /* Reflect task activity on its demand and cpu's busy time statistics */
@@ -2103,9 +2108,9 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
 		goto done;
 
 	trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime,
-				rq->cc.cycles, rq->cc.time, &rq->grp_time);
+				&rq->grp_time);
 	trace_sched_update_task_ravg_mini(p, rq, event, wallclock, irqtime,
-				rq->cc.cycles, rq->cc.time, &rq->grp_time);
+				&rq->grp_time);
 
 done:
 	p->ravg.mark_start = wallclock;
@@ -2161,7 +2166,7 @@ void init_new_task_load(struct task_struct *p)
 	for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
 		p->ravg.sum_history[i] = init_load_windows;
 	p->misfit = false;
-	p->unfilter = sysctl_sched_task_unfilter_nr_windows;
+	p->unfilter = sysctl_sched_task_unfilter_period;
 }
 
 /*
@@ -2223,11 +2228,10 @@ void mark_task_starting(struct task_struct *p)
 }
 
 #define pct_to_min_scaled(tunable) \
-		div64_u64(((u64)sched_ravg_window * tunable *	\
-			  cluster_max_freq(sched_cluster[0]) *	\
-			  sched_cluster[0]->efficiency),	\
-			  ((u64)max_possible_freq *		\
-			  max_possible_efficiency * 100))
+		div64_u64(((u64)sched_ravg_window * tunable *		\
+			 topology_get_cpu_scale(NULL,			\
+			 cluster_first_cpu(sched_cluster[0]))),	\
+			 ((u64)SCHED_CAPACITY_SCALE * 100))
 
 static inline void walt_update_group_thresholds(void)
 {
@@ -3362,6 +3366,7 @@ void walt_irq_work(struct irq_work *irq_work)
 	bool is_migration = false, is_asym_migration = false;
 	u64 total_grp_load = 0, min_cluster_grp_load = 0;
 	int level = 0;
+	unsigned long flags;
 
 	/* Am I the window rollover work or the migration work? */
 	if (irq_work == &walt_migration_irq_work)
@@ -3459,6 +3464,8 @@ void walt_irq_work(struct irq_work *irq_work)
 	 * change sched_ravg_window since all rq locks are acquired.
 	 */
 	if (!is_migration) {
+		spin_lock_irqsave(&sched_ravg_window_lock, flags);
+
 		if (sched_ravg_window != new_sched_ravg_window) {
 			sched_ravg_window_change_time = sched_ktime_clock();
 			printk_deferred("ALERT: changing window size from %u to %u at %lu\n",
@@ -3468,6 +3475,7 @@ void walt_irq_work(struct irq_work *irq_work)
 			sched_ravg_window = new_sched_ravg_window;
 			walt_tunables_fixup();
 		}
+		spin_unlock_irqrestore(&sched_ravg_window_lock, flags);
 	}
 
 	for_each_cpu(cpu, cpu_possible_mask)
@@ -3620,8 +3628,7 @@ void walt_sched_init_rq(struct rq *rq)
 	rq->cur_irqload = 0;
 	rq->avg_irqload = 0;
 	rq->irqload_ts = 0;
-	rq->cc.cycles = 1;
-	rq->cc.time = 1;
+	rq->task_exec_scale = 1024;
 
 	/*
 	 * All cpus part of same cluster by default. This avoids the
@@ -3678,29 +3685,39 @@ int walt_proc_user_hint_handler(struct ctl_table *table,
 	return ret;
 }
 
-static inline void sched_window_nr_ticks_change(int new_nr_ticks)
+static inline void sched_window_nr_ticks_change(void)
 {
-	new_sched_ravg_window = new_nr_ticks * (NSEC_PER_SEC / HZ);
+	int new_ticks;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sched_ravg_window_lock, flags);
+
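+	/* The smaller of the display-driven and sysctl tick counts wins. */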
+	new_ticks = min(display_sched_ravg_window_nr_ticks,
+			sysctl_sched_ravg_window_nr_ticks);
+
+	new_sched_ravg_window = new_ticks * (NSEC_PER_SEC / HZ);
+	spin_unlock_irqrestore(&sched_ravg_window_lock, flags);
 }
 
 int sched_ravg_window_handler(struct ctl_table *table,
 				int write, void __user *buffer, size_t *lenp,
 				loff_t *ppos)
 {
-	int ret;
+	int ret = -EPERM;
 	static DEFINE_MUTEX(mutex);
 	unsigned int prev_value;
 
 	mutex_lock(&mutex);
 
-	prev_value = sysctl_sched_ravg_window_nr_ticks;
-	ret = proc_douintvec_ravg_window(table, write, buffer, lenp, ppos);
-	if (ret || !write ||
-			(prev_value == sysctl_sched_ravg_window_nr_ticks) ||
-			(sysctl_sched_ravg_window_nr_ticks == 0))
+	if (write && (HZ != 250 || !sysctl_sched_dynamic_ravg_window_enable))
 		goto unlock;
 
-	sched_window_nr_ticks_change(sysctl_sched_ravg_window_nr_ticks);
+	prev_value = sysctl_sched_ravg_window_nr_ticks;
+	ret = proc_douintvec_ravg_window(table, write, buffer, lenp, ppos);
+	if (ret || !write || (prev_value == sysctl_sched_ravg_window_nr_ticks))
+		goto unlock;
+
+	sched_window_nr_ticks_change();
 
 unlock:
 	mutex_unlock(&mutex);
@@ -3709,16 +3726,15 @@ int sched_ravg_window_handler(struct ctl_table *table,
 
 void sched_set_refresh_rate(enum fps fps)
 {
-	int new_nr_ticks;
-
-	if (HZ == 250) {
+	if (HZ == 250 && sysctl_sched_dynamic_ravg_window_enable) {
 		if (fps > FPS90)
-			new_nr_ticks = 2;
+			display_sched_ravg_window_nr_ticks = 2;
 		else if (fps == FPS90)
-			new_nr_ticks = 3;
+			display_sched_ravg_window_nr_ticks = 3;
 		else
-			new_nr_ticks = 5;
-		sched_window_nr_ticks_change(new_nr_ticks);
+			display_sched_ravg_window_nr_ticks = 5;
+
+		sched_window_nr_ticks_change();
 	}
 }
 EXPORT_SYMBOL(sched_set_refresh_rate);
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index 5a150f5a..96762372 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -194,7 +194,7 @@ scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq)
 
 static inline bool is_new_task(struct task_struct *p)
 {
-	return p->ravg.active_time <= NEW_TASK_ACTIVE_TIME;
+	return p->ravg.active_time < NEW_TASK_ACTIVE_TIME;
 }
 
 static inline void clear_top_tasks_table(u8 *table)
@@ -375,8 +375,7 @@ static inline void walt_rq_dump(int cpu)
 	SCHED_PRINT(rq->nt_curr_runnable_sum);
 	SCHED_PRINT(rq->nt_prev_runnable_sum);
 	SCHED_PRINT(rq->cum_window_demand_scaled);
-	SCHED_PRINT(rq->cc.time);
-	SCHED_PRINT(rq->cc.cycles);
+	SCHED_PRINT(rq->task_exec_scale);
 	SCHED_PRINT(rq->grp_time.curr_runnable_sum);
 	SCHED_PRINT(rq->grp_time.prev_runnable_sum);
 	SCHED_PRINT(rq->grp_time.nt_curr_runnable_sum);
diff --git a/kernel/signal.c b/kernel/signal.c
index 4a321c6..c5f2cf1 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1372,7 +1372,8 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
 		check_panic_on_foreground_kill(p);
 		ret = do_send_sig_info(sig, info, p, type);
 		if (capable(CAP_KILL) && sig == SIGKILL) {
-			add_to_oom_reaper(p);
+			if (!strcmp(current->comm, ULMK_MAGIC))
+				add_to_oom_reaper(p);
 			ulmk_update_last_kill();
 		}
 	}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index e4d61ff..04b713e 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -141,6 +141,7 @@ static int ten_thousand = 10000;
 static int six_hundred_forty_kb = 640 * 1024;
 #endif
 static int two_hundred_fifty_five = 255;
+static int __maybe_unused two_hundred_million = 200000000;
 
 #ifdef CONFIG_SCHED_WALT
 const int sched_user_hint_max = 1000;
@@ -467,13 +468,13 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= proc_douintvec_minmax,
 	},
 	{
-		.procname	= "sched_task_unfilter_nr_windows",
-		.data		= &sysctl_sched_task_unfilter_nr_windows,
+		.procname	= "sched_task_unfilter_period",
+		.data		= &sysctl_sched_task_unfilter_period,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
 		.proc_handler   = proc_dointvec_minmax,
 		.extra1         = &one,
-		.extra2		= &two_hundred_fifty_five,
+		.extra2		= &two_hundred_million,
 	},
 	{
 		.procname	= "sched_busy_hysteresis_enable_cpus",
@@ -528,6 +529,15 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= sched_ravg_window_handler,
 	},
 	{
+		.procname	= "sched_dynamic_ravg_window_enable",
+		.data		= &sysctl_sched_dynamic_ravg_window_enable,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+	{
 		.procname	= "sched_upmigrate",
 		.data		= &sysctl_sched_capacity_margin_up,
 		.maxlen		= sizeof(unsigned int) * MAX_MARGIN_LEVELS,
@@ -3562,7 +3572,7 @@ static int do_proc_douintvec_rwin(bool *negp, unsigned long *lvalp,
 				  int *valp, int write, void *data)
 {
 	if (write) {
-		if (*lvalp == 0 || *lvalp == 2 || *lvalp == 5)
+		if ((*lvalp >= 2 && *lvalp <= 5) || *lvalp == 8)
 			*valp = *lvalp;
 		else
 			return -EINVAL;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3f4fd25..02439f1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4376,6 +4376,10 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
 
 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
 {
+	if ((mask == TRACE_ITER_RECORD_TGID) ||
+	    (mask == TRACE_ITER_RECORD_CMD))
+		lockdep_assert_held(&event_mutex);
+
 	/* do nothing if flag is already set */
 	if (!!(tr->trace_flags & mask) == !!enabled)
 		return 0;
@@ -4441,6 +4445,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
 		cmp += 2;
 	}
 
+	mutex_lock(&event_mutex);
 	mutex_lock(&trace_types_lock);
 
 	ret = match_string(trace_options, -1, cmp);
@@ -4451,6 +4456,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
 		ret = set_tracer_flag(tr, 1 << ret, !neg);
 
 	mutex_unlock(&trace_types_lock);
+	mutex_unlock(&event_mutex);
 
 	/*
 	 * If the first trailing whitespace is replaced with '\0' by strstrip,
@@ -7467,9 +7473,11 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	if (val != 0 && val != 1)
 		return -EINVAL;
 
+	mutex_lock(&event_mutex);
 	mutex_lock(&trace_types_lock);
 	ret = set_tracer_flag(tr, 1 << index, val);
 	mutex_unlock(&trace_types_lock);
+	mutex_unlock(&event_mutex);
 
 	if (ret < 0)
 		return ret;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 234725d..0726ab8 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -327,7 +327,8 @@ void trace_event_enable_cmd_record(bool enable)
 	struct trace_event_file *file;
 	struct trace_array *tr;
 
-	mutex_lock(&event_mutex);
+	lockdep_assert_held(&event_mutex);
+
 	do_for_each_event_file(tr, file) {
 
 		if (!(file->flags & EVENT_FILE_FL_ENABLED))
@@ -341,7 +342,6 @@ void trace_event_enable_cmd_record(bool enable)
 			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
 		}
 	} while_for_each_event_file();
-	mutex_unlock(&event_mutex);
 }
 
 void trace_event_enable_tgid_record(bool enable)
@@ -349,7 +349,8 @@ void trace_event_enable_tgid_record(bool enable)
 	struct trace_event_file *file;
 	struct trace_array *tr;
 
-	mutex_lock(&event_mutex);
+	lockdep_assert_held(&event_mutex);
+
 	do_for_each_event_file(tr, file) {
 		if (!(file->flags & EVENT_FILE_FL_ENABLED))
 			continue;
@@ -363,7 +364,6 @@ void trace_event_enable_tgid_record(bool enable)
 				  &file->flags);
 		}
 	} while_for_each_event_file();
-	mutex_unlock(&event_mutex);
 }
 
 static int __ftrace_event_enable_disable(struct trace_event_file *file,
diff --git a/mm/memory.c b/mm/memory.c
index 4e60e15..8fec1a4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1319,6 +1319,9 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 		if (pte_none(ptent))
 			continue;
 
+		if (need_resched())
+			break;
+
 		if (pte_present(ptent)) {
 			struct page *page;
 
@@ -1417,8 +1420,11 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	if (force_flush) {
 		force_flush = 0;
 		tlb_flush_mmu_free(tlb);
-		if (addr != end)
-			goto again;
+	}
+
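+	/* The loop exits early for need_resched(); yield, then resume. */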
+	if (addr != end) {
+		cond_resched();
+		goto again;
 	}
 
 	return addr;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 117a8e0..e6c8969 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -90,11 +90,12 @@ module_param(ulmk_dbg_policy, uint, 0644);
 
 static atomic64_t ulmk_wdog_expired = ATOMIC64_INIT(0);
 static atomic64_t ulmk_kill_jiffies = ATOMIC64_INIT(INITIAL_JIFFIES);
+static atomic64_t ulmk_watchdog_pet_jiffies = ATOMIC64_INIT(INITIAL_JIFFIES);
 static unsigned long psi_emergency_jiffies = INITIAL_JIFFIES;
 /* Prevents contention on the mutex_trylock in psi_emergency_jiffies */
 static DEFINE_MUTEX(ulmk_retry_lock);
 
-static bool ulmk_kill_possible(void)
+static bool __maybe_unused ulmk_kill_possible(void)
 {
 	struct task_struct *tsk;
 	bool ret = false;
@@ -129,7 +130,7 @@ static bool ulmk_kill_possible(void)
  */
 bool should_ulmk_retry(gfp_t gfp_mask)
 {
-	unsigned long now, last_kill;
+	unsigned long now, last_kill, last_wdog_pet;
 	bool ret = true;
 	bool wdog_expired, trigger_active;
 
@@ -155,13 +156,50 @@ bool should_ulmk_retry(gfp_t gfp_mask)
 
 	now = jiffies;
 	last_kill = atomic64_read(&ulmk_kill_jiffies);
+	last_wdog_pet = atomic64_read(&ulmk_watchdog_pet_jiffies);
 	wdog_expired = atomic64_read(&ulmk_wdog_expired);
 	trigger_active = psi_is_trigger_active();
 
+	/*
+	 * Returning true causes direct reclaim to be retried, while
+	 * returning false causes the OOM path to be taken.
+	 * The conditions checked are as below:
+	 * a) If there was a kill after the previous update of
+	 *    psi_emergency_jiffies, then system kills are happening
+	 *    properly. Update psi_emergency_jiffies with the current
+	 *    time and return true.
+	 *
+	 * b) If no kill has happened in the last ULMK_TIMEOUT and LMKD
+	 *    has also been stuck for the last ULMK_TIMEOUT, the system
+	 *    kill logic is not responding despite the PSI events sent
+	 *    from the kernel. Return false.
+	 *
+	 * c) Cond1: trigger = !active && wdog_expired = false:
+	 *    Give ULMK a chance by raising the emergency trigger, which
+	 *    also registers a watchdog timer with a timeout of
+	 *    2 * trigger's ->win.size. Further processes entering here
+	 *    are then returned true.
+	 *
+	 *    Cond2: trigger = active && wdog_expired = true:
+	 *    The previously raised event was not consumed by ULMK within
+	 *    the 2*HZ timeout. In this case we rely on the OOM killer to
+	 *    select and kill a positive-adj task. If the OOM killer
+	 *    fails to find a +ve adj task, we return false.
+	 *
+	 *    Cond3: trigger = !active && wdog_expired = true:
+	 *    Events raised earlier are yet to be consumed by ULMK, so
+	 *    only this process is asked to raise the trigger; the
+	 *    subsequent ones within the trigger's ->win.size fall back
+	 *    to OOM.
+	 *
+	 *    Cond4: trigger = active && wdog_expired = false:
+	 *    ULMK is perfectly working fine.
+	 */
 	if (time_after(last_kill, psi_emergency_jiffies)) {
 		psi_emergency_jiffies = now;
 		ret = true;
-	} else if (time_after(now, psi_emergency_jiffies + ULMK_TIMEOUT)) {
+	} else if (time_after(now, psi_emergency_jiffies + ULMK_TIMEOUT) &&
+		   time_after(now, last_wdog_pet + ULMK_TIMEOUT)) {
 		ret = false;
 	} else if (!trigger_active) {
 		BUG_ON(ulmk_dbg_policy & ULMK_DBG_POLICY_TRIGGER);
@@ -172,9 +210,6 @@ bool should_ulmk_retry(gfp_t gfp_mask)
 		ret = out_of_memory(&oc);
 		mutex_unlock(&oom_lock);
 		BUG_ON(!ret && ulmk_dbg_policy & ULMK_DBG_POLICY_POSITIVE_ADJ);
-	} else if (!ulmk_kill_possible()) {
-		BUG_ON(ulmk_dbg_policy & ULMK_DBG_POLICY_POSITIVE_ADJ);
-		ret = false;
 	}
 
 	mutex_unlock(&ulmk_retry_lock);
@@ -191,6 +226,7 @@ void ulmk_watchdog_pet(struct timer_list *t)
 {
 	del_timer_sync(t);
 	atomic64_set(&ulmk_wdog_expired, 0);
+	atomic64_set(&ulmk_watchdog_pet_jiffies, jiffies);
 }
 
 void ulmk_update_last_kill(void)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f52fb42..9a50141 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3628,7 +3628,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		 *  3) ALLOC_HARDER - Allow (__GFP_ATOMIC && !__GFP_NOMEMALLOC),
 		 *			of the others.
 		 */
-		if (unlikely(!order && (alloc_flags & ALLOC_WMARK_MIN) &&
+		if (unlikely(!order && !(alloc_flags & ALLOC_WMARK_MASK) &&
 		     (alloc_flags & (ALLOC_HARDER | ALLOC_HIGH)))) {
 			mark = zone->_watermark[WMARK_MIN];
 		}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 7bc3a22..ddb6f19 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -404,6 +404,57 @@ unsigned long vmalloc_nr_pages(void)
 	return atomic_long_read(&nr_vmalloc_pages);
 }
 
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
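+/*
+ * Lowmem between PAGE_OFFSET and VMALLOC_END can be handed to vmalloc;
+ * reserved (linearly mapped) ranges are recorded in a bitmap so that
+ * is_vmalloc_addr() can exclude them from the extended range.
+ */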
+#define POSSIBLE_VMALLOC_START	PAGE_OFFSET
+
+#define VMALLOC_BITMAP_SIZE	((VMALLOC_END - PAGE_OFFSET) >> \
+					PAGE_SHIFT)
+#define VMALLOC_TO_BIT(addr)	(((addr) - PAGE_OFFSET) >> PAGE_SHIFT)
+#define BIT_TO_VMALLOC(i)	(PAGE_OFFSET + ((i) * PAGE_SIZE))
+
+unsigned long total_vmalloc_size;
+unsigned long vmalloc_reserved;
+
+DECLARE_BITMAP(possible_areas, VMALLOC_BITMAP_SIZE);
+
+void mark_vmalloc_reserved_area(void *x, unsigned long size)
+{
+	unsigned long addr = (unsigned long)x;
+
+	bitmap_set(possible_areas, VMALLOC_TO_BIT(addr), size >> PAGE_SHIFT);
+	vmalloc_reserved += size;
+}
+
+int is_vmalloc_addr(const void *x)
+{
+	unsigned long addr = (unsigned long)x;
+
+	if (addr < POSSIBLE_VMALLOC_START || addr >= VMALLOC_END)
+		return 0;
+
+	if (test_bit(VMALLOC_TO_BIT(addr), possible_areas))
+		return 0;
+
+	return 1;
+}
+
+static void calc_total_vmalloc_size(void)
+{
+	total_vmalloc_size = VMALLOC_END - POSSIBLE_VMALLOC_START -
+		vmalloc_reserved;
+}
+#else
+int is_vmalloc_addr(const void *x)
+{
+	unsigned long addr = (unsigned long)x;
+
+	return addr >= VMALLOC_START && addr < VMALLOC_END;
+}
+
+static void calc_total_vmalloc_size(void) { }
+#endif
+EXPORT_SYMBOL(is_vmalloc_addr);
+
 static struct vmap_area *__find_vmap_area(unsigned long addr)
 {
 	struct rb_node *n = vmap_area_root.rb_node;
@@ -1744,6 +1795,33 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 EXPORT_SYMBOL(vm_map_ram);
 
 static struct vm_struct *vmlist __initdata;
+
+/**
+ * vm_area_check_early - check if vmap area is already mapped
+ * @vm: vm_struct to be checked
+ *
+ * This function is used to check if the vmap area has already been
+ * mapped. @vm->addr, @vm->size and @vm->flags should contain proper
+ * values.
+ *
+ * Returns 1 if the area overlaps an existing mapping, 0 otherwise.
+ */
+int __init vm_area_check_early(struct vm_struct *vm)
+{
+	struct vm_struct *tmp, **p;
+
+	BUG_ON(vmap_initialized);
+	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
+		if (tmp->addr >= vm->addr) {
+			if (tmp->addr < vm->addr + vm->size)
+				return 1;
+		} else {
+			if (tmp->addr + tmp->size > vm->addr)
+				return 1;
+		}
+	}
+	return 0;
+}
+
 /**
  * vm_area_add_early - add vmap area early during boot
  * @vm: vm_struct to add
@@ -1876,6 +1954,7 @@ void __init vmalloc_init(void)
 	 * Now we can initialize a free vmap space.
 	 */
 	vmap_init_free_space();
+	calc_total_vmalloc_size();
 	vmap_initialized = true;
 }
 
@@ -2039,16 +2118,27 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
  */
 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+	return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END,
+				  NUMA_NO_NODE, GFP_KERNEL,
+				  __builtin_return_address(0));
+#else
 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
 				  NUMA_NO_NODE, GFP_KERNEL,
 				  __builtin_return_address(0));
+#endif
 }
 
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
 				const void *caller)
 {
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+	return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END,
+				  NUMA_NO_NODE, GFP_KERNEL, caller);
+#else
 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
 				  NUMA_NO_NODE, GFP_KERNEL, caller);
+#endif
 }
 
 /**
@@ -3369,6 +3459,9 @@ static int s_show(struct seq_file *m, void *p)
 	if (is_vmalloc_addr(v->pages))
 		seq_puts(m, " vpages");
 
+	if (v->flags & VM_LOWMEM)
+		seq_puts(m, " lowmem");
+
 	show_numa_info(m, v);
 	seq_putc(m, '\n');
 	return 0;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0ec269f..91071a4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3206,7 +3206,7 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
 	 * committing a transaction where throttling it could forcing other
 	 * processes to block on log_wait_commit().
 	 */
-	if (current->flags & PF_KTHREAD)
+	if ((current->flags & PF_KTHREAD) || !strcmp(current->comm, ULMK_MAGIC))
 		goto out;
 
 	/*
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 5a81dfb4..70c607b 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -135,6 +135,15 @@ static DECLARE_RWSEM(qrtr_node_lock);
 static DEFINE_IDR(qrtr_ports);
 static DEFINE_MUTEX(qrtr_port_lock);
 
+/* backup buffers */
+#define QRTR_BACKUP_HI_NUM	5
+#define QRTR_BACKUP_HI_SIZE	SZ_16K
+#define QRTR_BACKUP_LO_NUM	20
+#define QRTR_BACKUP_LO_SIZE	SZ_1K
+static struct sk_buff_head qrtr_backup_lo;
+static struct sk_buff_head qrtr_backup_hi;
+static struct work_struct qrtr_backup_work;
+
 /**
  * struct qrtr_node - endpoint node
  * @ep_lock: lock for endpoint management and callbacks
@@ -346,8 +355,8 @@ static void __qrtr_node_release(struct kref *kref)
 			sock_put(waiter->sk);
 			kfree(waiter);
 		}
-		kfree(flow);
 		radix_tree_delete(&node->qrtr_tx_flow, iter.index);
+		kfree(flow);
 	}
 	mutex_unlock(&node->qrtr_tx_lock);
 
@@ -684,6 +693,54 @@ int qrtr_peek_pkt_size(const void *data)
 }
 EXPORT_SYMBOL(qrtr_peek_pkt_size);
 
+static void qrtr_alloc_backup(struct work_struct *work)
+{
+	struct sk_buff *skb;
+
+	while (skb_queue_len(&qrtr_backup_lo) < QRTR_BACKUP_LO_NUM) {
+		skb = alloc_skb(QRTR_BACKUP_LO_SIZE, GFP_KERNEL);
+		if (!skb)
+			break;
+		skb_queue_tail(&qrtr_backup_lo, skb);
+	}
+	while (skb_queue_len(&qrtr_backup_hi) < QRTR_BACKUP_HI_NUM) {
+		skb = alloc_skb(QRTR_BACKUP_HI_SIZE, GFP_KERNEL);
+		if (!skb)
+			break;
+		skb_queue_tail(&qrtr_backup_hi, skb);
+	}
+}
+
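+/*
+ * Hand out a preallocated skb from the smallest size class that fits
+ * and kick the worker to replenish the pool.
+ */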
+static struct sk_buff *qrtr_get_backup(size_t len)
+{
+	struct sk_buff *skb = NULL;
+
+	if (len < QRTR_BACKUP_LO_SIZE)
+		skb = skb_dequeue(&qrtr_backup_lo);
+	else if (len < QRTR_BACKUP_HI_SIZE)
+		skb = skb_dequeue(&qrtr_backup_hi);
+
+	if (skb)
+		queue_work(system_unbound_wq, &qrtr_backup_work);
+
+	return skb;
+}
+
+static void qrtr_backup_init(void)
+{
+	skb_queue_head_init(&qrtr_backup_lo);
+	skb_queue_head_init(&qrtr_backup_hi);
+	INIT_WORK(&qrtr_backup_work, qrtr_alloc_backup);
+	queue_work(system_unbound_wq, &qrtr_backup_work);
+}
+
+static void qrtr_backup_deinit(void)
+{
+	cancel_work_sync(&qrtr_backup_work);
+	skb_queue_purge(&qrtr_backup_lo);
+	skb_queue_purge(&qrtr_backup_hi);
+}
+
 /**
  * qrtr_endpoint_post() - post incoming data
  * @ep: endpoint handle
@@ -708,8 +765,13 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 		return -EINVAL;
 
 	skb = alloc_skb_with_frags(sizeof(*v1), len, 0, &errcode, GFP_ATOMIC);
-	if (!skb)
-		return -ENOMEM;
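+	/* Atomic allocation failed; fall back to the preallocated pool. */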
+	if (!skb) {
+		skb = qrtr_get_backup(len);
+		if (!skb) {
+			pr_err("qrtr: Unable to get skb with len: %zu\n", len);
+			return -ENOMEM;
+		}
+	}
 
 	skb_reserve(skb, sizeof(*v1));
 	cb = (struct qrtr_cb *)skb->cb;
@@ -1864,6 +1926,8 @@ static int __init qrtr_proto_init(void)
 		proto_unregister(&qrtr_proto);
 	}
 
+	qrtr_backup_init();
+
 	return rc;
 }
 postcore_initcall(qrtr_proto_init);
@@ -1873,6 +1937,8 @@ static void __exit qrtr_proto_fini(void)
 	rtnl_unregister(PF_QIPCRTR, RTM_NEWADDR);
 	sock_unregister(qrtr_family.family);
 	proto_unregister(&qrtr_proto);
+
+	qrtr_backup_deinit();
 }
 module_exit(qrtr_proto_fini);
 
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 15dd58a..30db3d9 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -61,7 +61,7 @@
 modulesymfile := $(firstword $(KBUILD_EXTMOD))/Module.symvers
 
 # Step 1), find all modules listed in $(MODVERDIR)/
-MODLISTCMD := find $(MODVERDIR) -name '*.mod' | xargs -r grep -h '\.ko$$' | sort -u
+MODLISTCMD := find $(MODVERDIR) -name '*.mod' -exec grep -h '\.ko$$' {} \; | sort -u
 __modules := $(shell $(MODLISTCMD))
 modules   := $(patsubst %.o,%.ko, $(wildcard $(__modules:.ko=.o)))
 
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 44816ee..3d1cff2 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -691,40 +691,38 @@ static struct avc_node *avc_insert(struct selinux_avc *avc,
 	struct avc_node *pos, *node = NULL;
 	int hvalue;
 	unsigned long flag;
+	spinlock_t *lock;
+	struct hlist_head *head;
 
 	if (avc_latest_notif_update(avc, avd->seqno, 1))
-		goto out;
+		return NULL;
 
 	node = avc_alloc_node(avc);
-	if (node) {
-		struct hlist_head *head;
-		spinlock_t *lock;
-		int rc = 0;
+	if (!node)
+		return NULL;
 
-		hvalue = avc_hash(ssid, tsid, tclass);
-		avc_node_populate(node, ssid, tsid, tclass, avd);
-		rc = avc_xperms_populate(node, xp_node);
-		if (rc) {
-			kmem_cache_free(avc_node_cachep, node);
-			return NULL;
-		}
-		head = &avc->avc_cache.slots[hvalue];
-		lock = &avc->avc_cache.slots_lock[hvalue];
-
-		spin_lock_irqsave(lock, flag);
-		hlist_for_each_entry(pos, head, list) {
-			if (pos->ae.ssid == ssid &&
-			    pos->ae.tsid == tsid &&
-			    pos->ae.tclass == tclass) {
-				avc_node_replace(avc, node, pos);
-				goto found;
-			}
-		}
-		hlist_add_head_rcu(&node->list, head);
-found:
-		spin_unlock_irqrestore(lock, flag);
+	avc_node_populate(node, ssid, tsid, tclass, avd);
+	if (avc_xperms_populate(node, xp_node)) {
+		avc_node_kill(avc, node);
+		return NULL;
 	}
-out:
+
+	hvalue = avc_hash(ssid, tsid, tclass);
+	head = &avc->avc_cache.slots[hvalue];
+	lock = &avc->avc_cache.slots_lock[hvalue];
+	spin_lock_irqsave(lock, flag);
+	hlist_for_each_entry(pos, head, list) {
+		if (pos->ae.ssid == ssid &&
+		    pos->ae.tsid == tsid &&
+		    pos->ae.tclass == tclass) {
+			avc_node_replace(avc, node, pos);
+			goto found;
+		}
+	}
+
+	hlist_add_head_rcu(&node->list, head);
+found:
+	spin_unlock_irqrestore(lock, flag);
 	return node;
 }
 
@@ -915,7 +913,7 @@ static int avc_update_node(struct selinux_avc *avc,
 	if (orig->ae.xp_node) {
 		rc = avc_xperms_populate(node, orig->ae.xp_node);
 		if (rc) {
-			kmem_cache_free(avc_node_cachep, node);
+			avc_node_kill(avc, node);
 			goto out_unlock;
 		}
 	}
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 24389b5..a0112b9 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -817,7 +817,8 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
 	if (substream->ops->hw_free)
 		result = substream->ops->hw_free(substream);
 	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
-	pm_qos_remove_request(&substream->latency_pm_qos_req);
+	if (pm_qos_request_active(&substream->latency_pm_qos_req))
+		pm_qos_remove_request(&substream->latency_pm_qos_req);
 	return result;
 }
 
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index 6270771..5d4758c 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -887,10 +887,8 @@ static void uaudio_disconnect_cb(struct snd_usb_audio *chip)
 				QMI_UAUDIO_STREAM_IND_MSG_V01_MAX_MSG_LEN,
 				qmi_uaudio_stream_ind_msg_v01_ei,
 				&disconnect_ind);
-		if (ret < 0) {
-			uaudio_err("qmi send failed wiht err: %d\n", ret);
-			return;
-		}
+		if (ret < 0)
+			uaudio_err("qmi send failed with err: %d\n", ret);
 
 		ret = wait_event_interruptible(dev->disconnect_wq,
 				!atomic_read(&dev->in_use));