Merge "mhi: core: Trigger host resume if client requests device vote"
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 2345f8a..ba12088 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -720,7 +720,8 @@
 perf_event_paranoid:
 
 Controls use of the performance events system by unprivileged
-users (without CAP_SYS_ADMIN).  The default value is 2.
+users (without CAP_SYS_ADMIN).  The default value is 3 if
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT is set, or 2 otherwise.
 
  -1: Allow use of (almost) all events by all users
      Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK
@@ -728,6 +729,7 @@
      Disallow raw tracepoint access by users without CAP_SYS_ADMIN
 >=1: Disallow CPU event access by users without CAP_SYS_ADMIN
 >=2: Disallow kernel profiling by users without CAP_SYS_ADMIN
+>=3: Disallow all event access by users without CAP_SYS_ADMIN
 
 ==============================================================
 
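A quick way to observe the >=3 level from userspace is to try opening a
software event and inspect the errno; a minimal sketch (perf_event_open(2)
has no glibc wrapper, so the syscall is wrapped by hand):

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu,
                           group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.type = PERF_TYPE_SOFTWARE;
            attr.size = sizeof(attr);
            attr.config = PERF_COUNT_SW_CPU_CLOCK;

            /* with perf_event_paranoid >= 3, this fails with EACCES
             * for callers without CAP_SYS_ADMIN
             */
            if (perf_event_open(&attr, 0, -1, -1, 0) < 0)
                    printf("perf_event_open: %s\n", strerror(errno));
            return 0;
    }
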
diff --git a/arch/arm/configs/vendor/bengal-perf_defconfig b/arch/arm/configs/vendor/bengal-perf_defconfig
index 47882c3..76befaf 100644
--- a/arch/arm/configs/vendor/bengal-perf_defconfig
+++ b/arch/arm/configs/vendor/bengal-perf_defconfig
@@ -577,6 +577,7 @@
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_LSM_MMAP_MIN_ADDR=4096
 CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/arm/configs/vendor/bengal_defconfig b/arch/arm/configs/vendor/bengal_defconfig
index 4eb59dd..803b096 100644
--- a/arch/arm/configs/vendor/bengal_defconfig
+++ b/arch/arm/configs/vendor/bengal_defconfig
@@ -627,6 +627,7 @@
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_LSM_MMAP_MIN_ADDR=4096
 CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/arm64/configs/gki_defconfig b/arch/arm64/configs/gki_defconfig
index 0de07fe..b3532e3 100644
--- a/arch/arm64/configs/gki_defconfig
+++ b/arch/arm64/configs/gki_defconfig
@@ -451,6 +451,7 @@
 CONFIG_NLS_MAC_TURKISH=y
 CONFIG_NLS_UTF8=y
 CONFIG_UNICODE=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_SECURITYFS=y
 CONFIG_SECURITY_NETWORK=y
diff --git a/arch/arm64/configs/vendor/bengal-perf_defconfig b/arch/arm64/configs/vendor/bengal-perf_defconfig
index dbeee56..e8934af 100644
--- a/arch/arm64/configs/vendor/bengal-perf_defconfig
+++ b/arch/arm64/configs/vendor/bengal-perf_defconfig
@@ -80,7 +80,6 @@
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
 CONFIG_ARM_QCOM_CPUFREQ_HW=y
-CONFIG_MSM_TZ_LOG=y
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
 CONFIG_CRYPTO_SHA2_ARM64_CE=y
@@ -392,6 +391,7 @@
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
 CONFIG_REGULATOR_QCOM_SMD_RPM=y
 CONFIG_REGULATOR_QPNP_LCDB=y
@@ -616,6 +616,7 @@
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_HARDENED_USERCOPY_PAGESPAN=y
diff --git a/arch/arm64/configs/vendor/bengal_defconfig b/arch/arm64/configs/vendor/bengal_defconfig
index c7449a1..26a53b3 100644
--- a/arch/arm64/configs/vendor/bengal_defconfig
+++ b/arch/arm64/configs/vendor/bengal_defconfig
@@ -404,6 +404,7 @@
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
 CONFIG_REGULATOR_QCOM_SMD_RPM=y
 CONFIG_REGULATOR_QPNP_LCDB=y
@@ -642,6 +643,7 @@
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_HARDENED_USERCOPY_PAGESPAN=y
diff --git a/arch/arm64/configs/vendor/debugfs.config b/arch/arm64/configs/vendor/debugfs.config
new file mode 100644
index 0000000..059013b
--- /dev/null
+++ b/arch/arm64/configs/vendor/debugfs.config
@@ -0,0 +1,2 @@
+CONFIG_PAGE_OWNER=n
+CONFIG_DEBUG_FS=n
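This fragment is meant to be merged on top of a base defconfig to force
page-owner tracking and debugfs off in such builds; the kernel's
scripts/kconfig/merge_config.sh is the usual mechanism for consuming a
fragment like this, e.g. "merge_config.sh -m gki_defconfig debugfs.config"
(how the vendor build system actually applies the fragment is not shown in
this patch).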
diff --git a/arch/arm64/configs/vendor/kona-iot-perf_defconfig b/arch/arm64/configs/vendor/kona-iot-perf_defconfig
index 086d497..cd7797f 100644
--- a/arch/arm64/configs/vendor/kona-iot-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-iot-perf_defconfig
@@ -663,6 +663,7 @@
 CONFIG_SDCARD_FS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_FORTIFY_SOURCE=y
diff --git a/arch/arm64/configs/vendor/kona-iot_defconfig b/arch/arm64/configs/vendor/kona-iot_defconfig
index 5158d35..9a0731d 100644
--- a/arch/arm64/configs/vendor/kona-iot_defconfig
+++ b/arch/arm64/configs/vendor/kona-iot_defconfig
@@ -697,6 +697,7 @@
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_HARDENED_USERCOPY_PAGESPAN=y
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index 449e50f..38391e2 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -81,7 +81,6 @@
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
 CONFIG_ARM_QCOM_CPUFREQ_HW=y
-CONFIG_MSM_TZ_LOG=y
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
 CONFIG_CRYPTO_SHA2_ARM64_CE=y
@@ -596,6 +595,7 @@
 CONFIG_QCOM_MINIDUMP=y
 CONFIG_QCOM_FSA4480_I2C=y
 CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_INITIAL_LOGBUF=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QCOM_BUS_SCALING=y
 CONFIG_QCOM_BUS_CONFIG_RPMH=y
@@ -673,6 +673,7 @@
 CONFIG_SDCARD_FS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_FORTIFY_SOURCE=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index fdd3b59..5dae27e 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -625,6 +625,7 @@
 CONFIG_MSM_CORE_HANG_DETECT=y
 CONFIG_QCOM_FSA4480_I2C=y
 CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_INITIAL_LOGBUF=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QCOM_WDOG_IPI_ENABLE=y
 CONFIG_QCOM_BUS_SCALING=y
@@ -708,6 +709,7 @@
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_HARDENED_USERCOPY_PAGESPAN=y
diff --git a/arch/arm64/configs/vendor/lito-perf_defconfig b/arch/arm64/configs/vendor/lito-perf_defconfig
index 851dbb5..c194cd1 100644
--- a/arch/arm64/configs/vendor/lito-perf_defconfig
+++ b/arch/arm64/configs/vendor/lito-perf_defconfig
@@ -79,7 +79,6 @@
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
 CONFIG_ARM_QCOM_CPUFREQ_HW=y
 CONFIG_ARM_QCOM_CPUFREQ_HW_DEBUG=y
-CONFIG_MSM_TZ_LOG=y
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
 CONFIG_CRYPTO_SHA2_ARM64_CE=y
@@ -589,6 +588,7 @@
 CONFIG_QCOM_MINIDUMP=y
 CONFIG_QCOM_FSA4480_I2C=y
 CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_INITIAL_LOGBUF=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QCOM_BUS_SCALING=y
 CONFIG_QCOM_BUS_CONFIG_RPMH=y
@@ -658,6 +658,7 @@
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_HARDENED_USERCOPY_PAGESPAN=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index c3eb6a6..51eae7d 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -608,6 +608,7 @@
 CONFIG_MSM_GLADIATOR_HANG_DETECT=y
 CONFIG_QCOM_FSA4480_I2C=y
 CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_INITIAL_LOGBUF=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QCOM_BUS_SCALING=y
 CONFIG_QCOM_BUS_CONFIG_RPMH=y
@@ -679,6 +680,7 @@
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_HARDENED_USERCOPY_PAGESPAN=y
diff --git a/arch/arm64/configs/vendor/sdm660-perf_defconfig b/arch/arm64/configs/vendor/sdm660-perf_defconfig
index 6cb4ea0..67089f8 100644
--- a/arch/arm64/configs/vendor/sdm660-perf_defconfig
+++ b/arch/arm64/configs/vendor/sdm660-perf_defconfig
@@ -609,6 +609,7 @@
 CONFIG_SDCARD_FS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_HARDENED_USERCOPY_PAGESPAN=y
diff --git a/arch/arm64/configs/vendor/sdm660_defconfig b/arch/arm64/configs/vendor/sdm660_defconfig
index 79c5f50..6e561f2 100644
--- a/arch/arm64/configs/vendor/sdm660_defconfig
+++ b/arch/arm64/configs/vendor/sdm660_defconfig
@@ -614,6 +614,7 @@
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_HARDENED_USERCOPY_PAGESPAN=y
diff --git a/arch/x86/configs/gki_defconfig b/arch/x86/configs/gki_defconfig
index 9e056bd..0f5ab5e 100644
--- a/arch/x86/configs/gki_defconfig
+++ b/arch/x86/configs/gki_defconfig
@@ -393,6 +393,7 @@
 CONFIG_NLS_MAC_TURKISH=y
 CONFIG_NLS_UTF8=y
 CONFIG_UNICODE=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_SECURITYFS=y
 CONFIG_SECURITY_NETWORK=y
diff --git a/drivers/bus/mhi/core/mhi_boot.c b/drivers/bus/mhi/core/mhi_boot.c
index e720028..14c780e 100644
--- a/drivers/bus/mhi/core/mhi_boot.c
+++ b/drivers/bus/mhi/core/mhi_boot.c
@@ -428,17 +428,22 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
 }
 
 void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
-			 struct image_info *image_info)
+			 struct image_info **image_info)
 {
 	int i;
-	struct mhi_buf *mhi_buf = image_info->mhi_buf;
+	struct mhi_buf *mhi_buf = (*image_info)->mhi_buf;
 
-	for (i = 0; i < image_info->entries; i++, mhi_buf++)
+	if (mhi_cntrl->img_pre_alloc)
+		return;
+
+	for (i = 0; i < (*image_info)->entries; i++, mhi_buf++)
 		mhi_free_contig_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
 				  mhi_buf->dma_addr);
 
-	kfree(image_info->mhi_buf);
-	kfree(image_info);
+	kfree((*image_info)->mhi_buf);
+	kfree(*image_info);
+
+	*image_info = NULL;
 }
 
 int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
@@ -452,6 +457,9 @@ int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
 	struct image_info *img_info;
 	struct mhi_buf *mhi_buf;
 
+	if (mhi_cntrl->img_pre_alloc)
+		return 0;
+
 	MHI_CNTRL_LOG("Allocating bytes:%zu seg_size:%zu total_seg:%u\n",
 			alloc_size, seg_size, segments);
 
@@ -670,9 +678,15 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
 	return;
 
 error_read:
-	mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
-	mhi_cntrl->fbc_image = NULL;
+	mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image);
 
 error_alloc_fw_table:
 	release_firmware(firmware);
 }
+
+void mhi_perform_soc_reset(struct mhi_controller *mhi_cntrl)
+{
+	mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->regs,
+			     MHI_SOC_RESET_REQ_OFFSET,
+			     MHI_SOC_RESET_REQ);
+}
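The signature change above (struct image_info ** rather than *) lets
mhi_free_bhie_table() clear the caller's pointer itself, which is what
removes the repeated "... = NULL" statements at every call site in the
mhi_init.c and mhi_pm.c hunks below. A minimal userspace sketch of the same
idiom, with a hypothetical type standing in for the MHI structures:

    #include <stdlib.h>

    struct image_info { void *buf; };

    /* Freeing through a double pointer lets the callee also clear the
     * caller's reference, so no call site is left holding a dangling
     * pointer.
     */
    static void free_image_info(struct image_info **info)
    {
            if (!*info)
                    return;
            free((*info)->buf);
            free(*info);
            *info = NULL;
    }
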
diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c
index 32d6285..6bc6fd4 100644
--- a/drivers/bus/mhi/core/mhi_init.c
+++ b/drivers/bus/mhi/core/mhi_init.c
@@ -346,6 +346,9 @@ void mhi_destroy_sysfs(struct mhi_controller *mhi_cntrl)
 		}
 		spin_unlock(&mhi_tsync->lock);
 
+		if (mhi_tsync->db_response_pending)
+			complete(&mhi_tsync->db_completion);
+
 		kfree(mhi_cntrl->mhi_tsync);
 		mhi_cntrl->mhi_tsync = NULL;
 		mutex_unlock(&mhi_cntrl->tsync_mutex);
@@ -520,6 +523,12 @@ static int mhi_init_debugfs_mhi_vote_open(struct inode *inode, struct file *fp)
 	return single_open(fp, mhi_debugfs_mhi_vote_show, inode->i_private);
 }
 
+static int mhi_init_debugfs_mhi_regdump_open(struct inode *inode,
+					     struct file *fp)
+{
+	return single_open(fp, mhi_debugfs_mhi_regdump_show, inode->i_private);
+}
+
 static const struct file_operations debugfs_state_ops = {
 	.open = mhi_init_debugfs_mhi_states_open,
 	.release = single_release,
@@ -544,9 +553,18 @@ static const struct file_operations debugfs_vote_ops = {
 	.read = seq_read,
 };
 
+static const struct file_operations debugfs_regdump_ops = {
+	.open = mhi_init_debugfs_mhi_regdump_open,
+	.release = single_release,
+	.read = seq_read,
+};
+
 DEFINE_DEBUGFS_ATTRIBUTE(debugfs_trigger_reset_fops, NULL,
 			 mhi_debugfs_trigger_reset, "%llu\n");
 
+DEFINE_DEBUGFS_ATTRIBUTE(debugfs_trigger_soc_reset_fops, NULL,
+			 mhi_debugfs_trigger_soc_reset, "%llu\n");
+
 void mhi_init_debugfs(struct mhi_controller *mhi_cntrl)
 {
 	struct dentry *dentry;
@@ -573,6 +591,11 @@ void mhi_init_debugfs(struct mhi_controller *mhi_cntrl)
 				   &debugfs_vote_ops);
 	debugfs_create_file_unsafe("reset", 0444, dentry, mhi_cntrl,
 				   &debugfs_trigger_reset_fops);
+	debugfs_create_file_unsafe("regdump", 0444, dentry, mhi_cntrl,
+				   &debugfs_regdump_ops);
+	debugfs_create_file_unsafe("soc_reset", 0444, dentry, mhi_cntrl,
+				   &debugfs_trigger_soc_reset_fops);
+
 	mhi_cntrl->dentry = dentry;
 }
 
@@ -1770,10 +1793,8 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
 	return 0;
 
 bhie_error:
-	if (mhi_cntrl->rddm_image) {
-		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
-		mhi_cntrl->rddm_image = NULL;
-	}
+	if (mhi_cntrl->rddm_image)
+		mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image);
 
 error_dev_ctxt:
 	mutex_unlock(&mhi_cntrl->pm_mutex);
@@ -1784,15 +1805,11 @@ EXPORT_SYMBOL(mhi_prepare_for_power_up);
 
 void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
 {
-	if (mhi_cntrl->fbc_image) {
-		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
-		mhi_cntrl->fbc_image = NULL;
-	}
+	if (mhi_cntrl->fbc_image)
+		mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image);
 
-	if (mhi_cntrl->rddm_image) {
-		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
-		mhi_cntrl->rddm_image = NULL;
-	}
+	if (mhi_cntrl->rddm_image)
+		mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image);
 
 	mhi_deinit_dev_ctxt(mhi_cntrl);
 	mhi_cntrl->pre_init = false;
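Two things to note in this file: the teardown path now completes a pending
db_completion so a caller blocked in mhi_get_remote_time_sync() (see the
mhi_main.c hunks below) is released before mhi_tsync is freed, and the
per-controller debugfs directory gains two nodes, "regdump" (read-side dump
of the MHI/BHI/BHIe registers, implemented in mhi_main.c) and "soc_reset"
(write-side trigger that writes MHI_SOC_RESET_REQ to
MHI_SOC_RESET_REQ_OFFSET via mhi_perform_soc_reset()). Both new nodes are
created with mode 0444, matching the existing "reset" node, even though
"soc_reset" only implements the write side; whether that mode is intended
is not clear from this patch.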
diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h
index c2b99e8..99e7b23 100644
--- a/drivers/bus/mhi/core/mhi_internal.h
+++ b/drivers/bus/mhi/core/mhi_internal.h
@@ -727,8 +727,10 @@ struct mhi_timesync {
 	void __iomem *time_reg;
 	u32 int_sequence;
 	u64 local_time;
+	u64 remote_time;
 	bool db_support;
 	bool db_response_pending;
+	struct completion db_completion;
 	spinlock_t lock; /* list protection */
 	struct list_head head;
 };
@@ -749,16 +751,20 @@ struct mhi_bus {
 
 /* default MHI timeout */
 #define MHI_TIMEOUT_MS (1000)
+#define MHI_FORCE_WAKE_DELAY_US (100)
+
 extern struct mhi_bus mhi_bus;
 
 struct mhi_controller *find_mhi_controller_by_name(const char *name);
 
 /* debug fs related functions */
+int mhi_debugfs_mhi_regdump_show(struct seq_file *m, void *d);
 int mhi_debugfs_mhi_vote_show(struct seq_file *m, void *d);
 int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d);
 int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d);
 int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d);
 int mhi_debugfs_trigger_reset(void *data, u64 val);
+int mhi_debugfs_trigger_soc_reset(void *data, u64 val);
 
 void mhi_deinit_debugfs(struct mhi_controller *mhi_cntrl);
 void mhi_init_debugfs(struct mhi_controller *mhi_cntrl);
@@ -936,7 +942,7 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl);
 int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
 			 struct image_info **image_info, size_t alloc_size);
 void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
-			 struct image_info *image_info);
+			 struct image_info **image_info);
 
 int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
 			 struct mhi_buf_info *buf_info);
@@ -965,6 +971,7 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
 			struct mhi_chan *mhi_chan);
 void mhi_reset_reg_write_q(struct mhi_controller *mhi_cntrl);
 void mhi_force_reg_write(struct mhi_controller *mhi_cntrl);
+void mhi_perform_soc_reset(struct mhi_controller *mhi_cntrl);
 
 /* isr handlers */
 irqreturn_t mhi_msi_handlr(int irq_number, void *dev);
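For orientation, the handshake that the two new mhi_timesync fields
(remote_time, db_completion) implement, condensed from the mhi_main.c hunks
that follow (fragments, not a complete function):

    /* requester (mhi_get_remote_time), before ringing the time DB: */
    mhi_tsync->db_response_pending = true;
    init_completion(&mhi_tsync->db_completion);

    /* event-ring handler (mhi_process_tsync_ev_ring), on a response: */
    mhi_tsync->db_response_pending = false;
    mhi_tsync->remote_time = remote_time;
    complete(&mhi_tsync->db_completion);

    /* a later sync caller (mhi_get_remote_time_sync), if a DB request
     * is already pending:
     */
    if (!wait_for_completion_timeout(&mhi_tsync->db_completion,
                          msecs_to_jiffies(mhi_cntrl->timeout_ms)))
            return -EAGAIN;
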
diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c
index f09c333..498239f 100644
--- a/drivers/bus/mhi/core/mhi_main.c
+++ b/drivers/bus/mhi/core/mhi_main.c
@@ -1402,6 +1402,10 @@ int mhi_process_tsync_ev_ring(struct mhi_controller *mhi_cntrl,
 	if (unlikely(mhi_tsync->int_sequence != sequence)) {
 		MHI_ASSERT(1, "Unexpected response:0x%llx Expected:0x%llx\n",
 			   sequence, mhi_tsync->int_sequence);
+
+		mhi_device_put(mhi_cntrl->mhi_dev,
+			       MHI_VOTE_DEVICE | MHI_VOTE_BUS);
+
 		mutex_unlock(&mhi_cntrl->tsync_mutex);
 		goto exit_tsync_process;
 	}
@@ -1427,6 +1431,11 @@ int mhi_process_tsync_ev_ring(struct mhi_controller *mhi_cntrl,
 	} while (true);
 
 	mhi_tsync->db_response_pending = false;
+	mhi_tsync->remote_time = remote_time;
+	complete(&mhi_tsync->db_completion);
+
+	mhi_device_put(mhi_cntrl->mhi_dev, MHI_VOTE_DEVICE | MHI_VOTE_BUS);
+
 	mutex_unlock(&mhi_cntrl->tsync_mutex);
 
 exit_tsync_process:
@@ -1484,13 +1493,10 @@ int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
 	read_unlock_bh(&mhi_cntrl->pm_lock);
 	spin_unlock_bh(&mhi_event->lock);
 
-	atomic_inc(&mhi_cntrl->pending_pkts);
 	ret = mhi_device_get_sync(mhi_cntrl->mhi_dev,
 				  MHI_VOTE_DEVICE | MHI_VOTE_BUS);
-	if (ret) {
-		atomic_dec(&mhi_cntrl->pending_pkts);
+	if (ret)
 		goto exit_bw_scale_process;
-	}
 
 	mutex_lock(&mhi_cntrl->pm_mutex);
 
@@ -1508,7 +1514,6 @@ int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
 	read_unlock_bh(&mhi_cntrl->pm_lock);
 
 	mhi_device_put(mhi_cntrl->mhi_dev, MHI_VOTE_DEVICE | MHI_VOTE_BUS);
-	atomic_dec(&mhi_cntrl->pending_pkts);
 
 	mutex_unlock(&mhi_cntrl->pm_mutex);
 
@@ -2108,6 +2113,64 @@ static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
 	mutex_unlock(&mhi_chan->mutex);
 }
 
+int mhi_debugfs_mhi_regdump_show(struct seq_file *m, void *d)
+{
+	struct mhi_controller *mhi_cntrl = m->private;
+	enum mhi_dev_state state;
+	enum mhi_ee ee;
+	int i, ret;
+	u32 val;
+	void __iomem *mhi_base = mhi_cntrl->regs;
+	void __iomem *bhi_base = mhi_cntrl->bhi;
+	void __iomem *bhie_base = mhi_cntrl->bhie;
+	void __iomem *wake_db = mhi_cntrl->wake_db;
+	struct {
+		const char *name;
+		int offset;
+		void __iomem *base;
+	} debug_reg[] = {
+		{ "MHI_CNTRL", MHICTRL, mhi_base},
+		{ "MHI_STATUS", MHISTATUS, mhi_base},
+		{ "MHI_WAKE_DB", 0, wake_db},
+		{ "BHI_EXECENV", BHI_EXECENV, bhi_base},
+		{ "BHI_STATUS", BHI_STATUS, bhi_base},
+		{ "BHI_ERRCODE", BHI_ERRCODE, bhi_base},
+		{ "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base},
+		{ "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base},
+		{ "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base},
+		{ "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base},
+		{ "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base},
+		{ "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base},
+		{ "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base},
+		{ NULL },
+	};
+
+	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+		return -EIO;
+
+	seq_printf(m, "host pm_state:%s dev_state:%s ee:%s\n",
+		   to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		   TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		   TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	state = mhi_get_mhi_state(mhi_cntrl);
+	ee = mhi_get_exec_env(mhi_cntrl);
+
+	seq_printf(m, "device ee:%s dev_state:%s\n", TO_MHI_EXEC_STR(ee),
+		   TO_MHI_STATE_STR(state));
+
+	for (i = 0; debug_reg[i].name; i++) {
+		if (!debug_reg[i].base)
+			continue;
+		ret = mhi_read_reg(mhi_cntrl, debug_reg[i].base,
+				   debug_reg[i].offset, &val);
+		seq_printf(m, "reg:%s val:0x%x, ret:%d\n", debug_reg[i].name,
+			   val, ret);
+	}
+
+	return 0;
+}
+
 int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d)
 {
 	struct mhi_controller *mhi_cntrl = m->private;
@@ -2502,13 +2565,37 @@ int mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
 {
 	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
 	struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync;
+	u64 local_time;
 	int ret;
 
-	mutex_lock(&mhi_cntrl->tsync_mutex);
 	/* not all devices support time features */
-	if (!mhi_tsync) {
-		ret = -EIO;
-		goto error_unlock;
+	if (!mhi_tsync)
+		return -EINVAL;
+
+	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+		MHI_ERR("MHI is not in active state, pm_state:%s\n",
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		return -EIO;
+	}
+
+	mutex_lock(&mhi_cntrl->tsync_mutex);
+
+	/* if an async DB request is pending, wait for it and return its times */
+	if (mhi_tsync->db_response_pending) {
+		local_time = mhi_tsync->local_time;
+		mutex_unlock(&mhi_cntrl->tsync_mutex);
+
+		ret = wait_for_completion_timeout(&mhi_tsync->db_completion,
+				       msecs_to_jiffies(mhi_cntrl->timeout_ms));
+		if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || !ret) {
+			MHI_ERR("Pending DB request did not complete, abort\n");
+			return -EAGAIN;
+		}
+
+		*t_host = local_time;
+		*t_dev = mhi_tsync->remote_time;
+
+		return 0;
 	}
 
 	/* bring to M0 state */
@@ -2581,14 +2668,13 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev,
 	int ret = 0;
 
 	/* not all devices support all time features */
-	mutex_lock(&mhi_cntrl->tsync_mutex);
-	if (!mhi_tsync || !mhi_tsync->db_support) {
-		ret = -EIO;
-		goto error_unlock;
-	}
+	if (!mhi_tsync || !mhi_tsync->db_support)
+		return -EINVAL;
 
-	/* tsync db can only be rung in M0 state */
-	ret = __mhi_device_get_sync(mhi_cntrl);
+	mutex_lock(&mhi_cntrl->tsync_mutex);
+
+	ret = mhi_device_get_sync(mhi_cntrl->mhi_dev,
+				  MHI_VOTE_DEVICE | MHI_VOTE_BUS);
 	if (ret)
 		goto error_unlock;
 
@@ -2656,21 +2742,21 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev,
 	MHI_VERB("time DB request with seq:0x%llx\n", mhi_tsync->int_sequence);
 
 	mhi_tsync->db_response_pending = true;
+	init_completion(&mhi_tsync->db_completion);
 
 skip_tsync_db:
 	spin_lock(&mhi_tsync->lock);
 	list_add_tail(&tsync_node->node, &mhi_tsync->head);
 	spin_unlock(&mhi_tsync->lock);
 
-	ret = 0;
+	mutex_unlock(&mhi_cntrl->tsync_mutex);
+
+	return 0;
 
 error_invalid_state:
-	if (ret)
-		kfree(tsync_node);
+	kfree(tsync_node);
 error_no_mem:
-	read_lock_bh(&mhi_cntrl->pm_lock);
-	mhi_cntrl->wake_put(mhi_cntrl, false);
-	read_unlock_bh(&mhi_cntrl->pm_lock);
+	mhi_device_put(mhi_cntrl->mhi_dev, MHI_VOTE_DEVICE | MHI_VOTE_BUS);
 error_unlock:
 	mutex_unlock(&mhi_cntrl->tsync_mutex);
 	return ret;
@@ -2690,7 +2776,7 @@ void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl)
 	struct {
 		const char *name;
 		int offset;
-		void *base;
+		void __iomem *base;
 	} debug_reg[] = {
 		{ "MHI_CNTRL", MHICTRL, mhi_base},
 		{ "MHI_STATUS", MHISTATUS, mhi_base},
@@ -2720,6 +2806,8 @@ void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl)
 		TO_MHI_STATE_STR(state));
 
 	for (i = 0; debug_reg[i].name; i++) {
+		if (!debug_reg[i].base)
+			continue;
 		ret = mhi_read_reg(mhi_cntrl, debug_reg[i].base,
 				   debug_reg[i].offset, &val);
 		MHI_LOG("reg:%s val:0x%x, ret:%d\n", debug_reg[i].name, val,
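Two behavioral changes in this file are easy to miss: the time APIs now
return -EINVAL (rather than -EIO) when the device lacks timesync support,
and mhi_get_remote_time_sync() can return -EAGAIN when it piggybacks on a
previously queued async DB request that never completes. A hypothetical
client call, with argument names inferred from this patch rather than a
published header:

    u64 t_host, t_dev;
    int ret;

    ret = mhi_get_remote_time_sync(mhi_dev, &t_host, &t_dev);
    if (ret == -EAGAIN) {
            /* a pending async time request timed out; retry later */
    }
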
diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c
index 6316379..e9a7a5a 100644
--- a/drivers/bus/mhi/core/mhi_pm.c
+++ b/drivers/bus/mhi/core/mhi_pm.c
@@ -405,35 +405,42 @@ void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
 	enum MHI_PM_STATE state;
 
 	write_lock_irq(&mhi_cntrl->pm_lock);
+	/* Just check if we are racing with device_wake assertion */
+	if (atomic_read(&mhi_cntrl->dev_wake))
+		MHI_VERB("M2 transition request post dev_wake:%d\n",
+			 atomic_read(&mhi_cntrl->dev_wake));
+
 	/* if it fails, means we transition to M3 */
 	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
-	if (state == MHI_PM_M2) {
-		MHI_VERB("Entered M2 State\n");
-		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
-		mhi_cntrl->dev_state = MHI_STATE_M2;
-		mhi_cntrl->M2++;
-
+	if (state != MHI_PM_M2) {
+		/* Nothing to be done, handle M3 transition later */
 		write_unlock_irq(&mhi_cntrl->pm_lock);
-		wake_up_all(&mhi_cntrl->state_event);
-
-		/* transfer pending, exit M2 immediately */
-		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
-			     atomic_read(&mhi_cntrl->dev_wake))) {
-			MHI_VERB(
-				 "Exiting M2 Immediately, pending_pkts:%d dev_wake:%d\n",
-				 atomic_read(&mhi_cntrl->pending_pkts),
-				 atomic_read(&mhi_cntrl->dev_wake));
-			read_lock_bh(&mhi_cntrl->pm_lock);
-			mhi_cntrl->wake_get(mhi_cntrl, true);
-			mhi_cntrl->wake_put(mhi_cntrl, true);
-			read_unlock_bh(&mhi_cntrl->pm_lock);
-		} else {
-			mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
-					     MHI_CB_IDLE);
-		}
-	} else {
-		write_unlock_irq(&mhi_cntrl->pm_lock);
+		return;
 	}
+
+	MHI_VERB("Entered M2 State\n");
+	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
+	mhi_cntrl->dev_state = MHI_STATE_M2;
+	mhi_cntrl->M2++;
+
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+	wake_up_all(&mhi_cntrl->state_event);
+
+	/* transfer pending, exit M2 immediately */
+	if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
+		     atomic_read(&mhi_cntrl->dev_wake))) {
+		MHI_VERB(
+			 "Exiting M2 Immediately, pending_pkts:%d dev_wake:%d\n",
+			 atomic_read(&mhi_cntrl->pending_pkts),
+			 atomic_read(&mhi_cntrl->dev_wake));
+		read_lock_bh(&mhi_cntrl->pm_lock);
+		mhi_cntrl->wake_get(mhi_cntrl, true);
+		mhi_cntrl->wake_put(mhi_cntrl, true);
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+		return;
+	}
+
+	mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data, MHI_CB_IDLE);
 }
 
 int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
@@ -716,6 +723,17 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 	mutex_unlock(&mhi_cntrl->pm_mutex);
 }
 
+int mhi_debugfs_trigger_soc_reset(void *data, u64 val)
+{
+	struct mhi_controller *mhi_cntrl = data;
+
+	MHI_LOG("Trigger MHI SOC Reset\n");
+
+	mhi_perform_soc_reset(mhi_cntrl);
+
+	return 0;
+}
+
 int mhi_debugfs_trigger_reset(void *data, u64 val)
 {
 	struct mhi_controller *mhi_cntrl = data;
@@ -1099,10 +1117,9 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
 
 	if (!mhi_cntrl->pre_init) {
 		/* free all allocated resources */
-		if (mhi_cntrl->fbc_image) {
-			mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
-			mhi_cntrl->fbc_image = NULL;
-		}
+		if (mhi_cntrl->fbc_image)
+			mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image);
+
 		mhi_deinit_dev_ctxt(mhi_cntrl);
 	}
 }
@@ -1241,6 +1258,7 @@ int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_client)
 	int ret;
 	enum MHI_PM_STATE new_state;
 	struct mhi_chan *itr, *tmp;
+	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
 
 	read_lock_bh(&mhi_cntrl->pm_lock);
 	if (mhi_cntrl->pm_state == MHI_PM_DISABLE) {
@@ -1255,7 +1273,8 @@ int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_client)
 	read_unlock_bh(&mhi_cntrl->pm_lock);
 
 	/* do a quick check to see if any pending votes to keep us busy */
-	if (atomic_read(&mhi_cntrl->pending_pkts)) {
+	if (atomic_read(&mhi_cntrl->pending_pkts) ||
+	    atomic_read(&mhi_dev->bus_vote)) {
 		MHI_VERB("Busy, aborting M3\n");
 		return -EBUSY;
 	}
@@ -1274,7 +1293,8 @@ int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_client)
 	 * Check the votes once more to see if we should abort
 	 * suspend.
 	 */
-	if (atomic_read(&mhi_cntrl->pending_pkts)) {
+	if (atomic_read(&mhi_cntrl->pending_pkts) ||
+	    atomic_read(&mhi_dev->bus_vote)) {
 		MHI_VERB("Busy, aborting M3\n");
 		ret = -EBUSY;
 		goto error_suspend;
@@ -1619,6 +1639,57 @@ int mhi_device_get_sync(struct mhi_device *mhi_dev, int vote)
 }
 EXPORT_SYMBOL(mhi_device_get_sync);
 
+int mhi_device_get_sync_atomic(struct mhi_device *mhi_dev, int timeout_us)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+		return -EIO;
+	}
+
+	mhi_cntrl->wake_get(mhi_cntrl, true);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	atomic_inc(&mhi_dev->dev_vote);
+	pm_wakeup_hard_event(&mhi_cntrl->mhi_dev->dev);
+	mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+
+	/* Return if client doesn't want us to wait */
+	if (!timeout_us) {
+		if (mhi_cntrl->pm_state != MHI_PM_M0)
+			MHI_ERR("Return without waiting for M0\n");
+
+		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+		return 0;
+	}
+
+	while (mhi_cntrl->pm_state != MHI_PM_M0 &&
+			!MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) &&
+			timeout_us > 0) {
+		udelay(MHI_FORCE_WAKE_DELAY_US);
+		timeout_us -= MHI_FORCE_WAKE_DELAY_US;
+	}
+
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || timeout_us <= 0) {
+		MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n",
+			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		read_lock_bh(&mhi_cntrl->pm_lock);
+		mhi_cntrl->wake_put(mhi_cntrl, false);
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+		atomic_dec(&mhi_dev->dev_vote);
+		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+		return -ETIMEDOUT;
+	}
+
+	mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_device_get_sync_atomic);
+
 void mhi_device_put(struct mhi_device *mhi_dev, int vote)
 {
 	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
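mhi_device_get_sync_atomic() above is the atomic-context counterpart of
mhi_device_get_sync(): it takes the device-wake vote and then busy-waits in
MHI_FORCE_WAKE_DELAY_US steps for M0 instead of sleeping (a timeout_us of 0
returns immediately without waiting). A hedged usage sketch; pairing the
call with mhi_device_put(..., MHI_VOTE_DEVICE) mirrors the dev_vote
accounting used elsewhere in this driver and is an assumption here:

    ret = mhi_device_get_sync_atomic(mhi_dev, 5000 /* us */);
    if (ret)
            return ret;     /* -EIO in error state, -ETIMEDOUT if no M0 */

    /* ... access the device while M0 is guaranteed ... */

    mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);
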
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 16a704b..fc7f09d 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -850,12 +850,23 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
 {
 	struct fastrpc_apps *me = &gfa;
 	struct fastrpc_file *fl;
-	int vmid;
+	int vmid, cid = -1, err = 0;
 	struct fastrpc_session_ctx *sess;
 
 	if (!map)
 		return;
 	fl = map->fl;
+	if (fl && !(map->flags == ADSP_MMAP_HEAP_ADDR ||
+				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)) {
+		cid = fl->cid;
+		VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
+		if (err) {
+			err = -ECHRNG;
+			pr_err("adsprpc: ERROR:%s, Invalid channel id: %d, err:%d\n",
+				__func__, cid, err);
+			return;
+		}
+	}
 	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
 				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
 		map->refs--;
@@ -2110,8 +2121,16 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
 {
 	struct smq_msg *msg = &ctx->msg;
 	struct fastrpc_file *fl = ctx->fl;
-	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
-	int err = 0;
+	struct fastrpc_channel_ctx *channel_ctx = NULL;
+	int err = 0, cid = -1;
+
+	cid = fl->cid;
+	VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
+	if (err) {
+		err = -ECHRNG;
+		goto bail;
+	}
+	channel_ctx = &fl->apps->channel[cid];
 
 	mutex_lock(&channel_ctx->smd_mutex);
 	msg->pid = fl->tgid;
@@ -2308,10 +2327,23 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 {
 	struct smq_invoke_ctx *ctx = NULL;
 	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
-	int err = 0, interrupted = 0, cid = fl->cid;
+	int err = 0, interrupted = 0, cid = -1;
 	struct timespec64 invoket = {0};
 	int64_t *perf_counter = NULL;
 
+	cid = fl->cid;
+	VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
+	if (err) {
+		err = -ECHRNG;
+		goto bail;
+	}
+	VERIFY(err, fl->sctx != NULL);
+	if (err) {
+		pr_err("adsprpc: ERROR: %s: user application %s domain is not set\n",
+			__func__, current->comm);
+		err = -EBADR;
+		goto bail;
+	}
 	if (fl->profile) {
 		perf_counter = getperfcounter(fl, PERF_COUNT);
 		ktime_get_real_ts64(&invoket);
@@ -2329,15 +2361,6 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 		}
 	}
 
-	VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS &&
-		fl->sctx != NULL);
-	if (err) {
-		pr_err("adsprpc: ERROR: %s: kernel session not initialized yet for %s\n",
-			__func__, current->comm);
-		err = EBADR;
-		goto bail;
-	}
-
 	if (!kernel) {
 		err = context_restore_interrupted(fl, inv, &ctx);
 		if (err)
@@ -3919,7 +3942,7 @@ static const struct file_operations debugfs_fops = {
 static int fastrpc_channel_open(struct fastrpc_file *fl)
 {
 	struct fastrpc_apps *me = &gfa;
-	int cid, err = 0;
+	int cid = -1, err = 0;
 
 	VERIFY(err, fl && fl->sctx && fl->cid >= 0 && fl->cid < NUM_CHANNELS);
 	if (err) {
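The common thread in these adsprpc hunks is validating fl->cid against
[ADSP_DOMAIN_ID, NUM_CHANNELS) before it is used to index the per-channel
arrays, failing with -ECHRNG ("channel number out of range") instead of
reading out of bounds. The relocated check in fastrpc_internal_invoke()
also fixes the old "err = EBADR" (a positive value) to a proper -EBADR.
The repeated pattern, as used throughout this file (VERIFY() appears to
record a failed condition into err):

    cid = fl->cid;
    VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
    if (err) {
            err = -ECHRNG;
            goto bail;
    }
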
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 880cbeb..b829251 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -59,6 +59,8 @@
 #define DIAG_CTRL_MSG_LOG_MS_MASK	37
 #define DIAG_CTRL_MSG_EVENT_MS_MASK	38
 
+#define NON_HDLC_VERSION	1
+#define NON_HDLC_HEADER_SIZE	4
 #define CONTROL_CHAR	0x7E
 
 #define DIAG_ID_ROOT_STRING "root"
@@ -834,6 +836,7 @@ struct diagchar_dev {
 	unsigned char *buf_feature_mask_update;
 	uint8_t hdlc_disabled;
 	uint8_t p_hdlc_disabled[NUM_MD_SESSIONS];
+	uint8_t proc_hdlc_disabled[NUM_DIAG_MD_DEV];
 	struct mutex hdlc_disable_mutex;
 	struct mutex hdlc_recovery_mutex;
 	struct timer_list hdlc_reset_timer;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 2111a5a..40c818e 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -1030,11 +1030,13 @@ static int diag_send_raw_data_remote(int proc, void *buf, int len,
 	int max_len = 0;
 	uint8_t retry_count = 0;
 	uint8_t max_retries = 50;
-	uint16_t payload = 0;
+	uint16_t payload_len = 0;
 	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
 	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
 	int bridge_index = proc - 1;
-	uint8_t hdlc_disabled = 0;
+	unsigned char non_hdlc_header[NON_HDLC_HEADER_SIZE] = {
+		CONTROL_CHAR, NON_HDLC_VERSION, 0, 0 };
+	unsigned char end_byte[1] = { CONTROL_CHAR };
 
 	if (!buf)
 		return -EINVAL;
@@ -1059,39 +1061,6 @@ static int diag_send_raw_data_remote(int proc, void *buf, int len,
 
 	if (driver->hdlc_encode_buf_len != 0)
 		return -EAGAIN;
-	mutex_lock(&driver->hdlc_disable_mutex);
-	hdlc_disabled = driver->p_hdlc_disabled[APPS_DATA];
-	mutex_unlock(&driver->hdlc_disable_mutex);
-	if (hdlc_disabled) {
-		if (len < 4) {
-			pr_err("diag: In %s, invalid len: %d of non_hdlc pkt\n",
-			__func__, len);
-			return -EBADMSG;
-		}
-		payload = *(uint16_t *)(buf + 2);
-		if (payload > DIAG_MAX_HDLC_BUF_SIZE) {
-			pr_err("diag: Dropping packet, payload size is %d\n",
-				payload);
-			return -EBADMSG;
-		}
-		driver->hdlc_encode_buf_len = payload;
-		/*
-		 * Adding 5 bytes for start (1 byte), version (1 byte),
-		 * payload (2 bytes) and end (1 byte)
-		 */
-		if (len == (payload + 5)) {
-			/*
-			 * Adding 4 bytes for start (1 byte), version (1 byte)
-			 * and payload (2 bytes)
-			 */
-			memcpy(driver->hdlc_encode_buf, buf + 4, payload);
-			goto send_data;
-		} else {
-			pr_err("diag: In %s, invalid len: %d of non_hdlc pkt\n",
-			__func__, len);
-			return -EBADMSG;
-		}
-	}
 
 	if (hdlc_flag) {
 		if (len > DIAG_MAX_HDLC_BUF_SIZE) {
@@ -1104,29 +1073,48 @@ static int diag_send_raw_data_remote(int proc, void *buf, int len,
 		goto send_data;
 	}
 
-	/*
-	 * The worst case length will be twice as the incoming packet length.
-	 * Add 3 bytes for CRC bytes (2 bytes) and delimiter (1 byte)
-	 */
-	max_len = (2 * len) + 3;
-	if (max_len > DIAG_MAX_HDLC_BUF_SIZE) {
-		pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
-		       max_len);
-		return -EBADMSG;
+	if (driver->proc_hdlc_disabled[proc]) {
+		/*
+		 * Prepend a 4-byte header: start (1 byte), version (1 byte),
+		 * payload length (2 bytes); append 1 end byte after payload
+		 */
+		payload_len = len;
+		memcpy(non_hdlc_header+2, &payload_len, 2);
+		memcpy(driver->hdlc_encode_buf,
+			non_hdlc_header, NON_HDLC_HEADER_SIZE);
+		memcpy(driver->hdlc_encode_buf +
+			NON_HDLC_HEADER_SIZE, buf, len);
+		memcpy((driver->hdlc_encode_buf +
+			NON_HDLC_HEADER_SIZE + len), end_byte, 1);
+		driver->hdlc_encode_buf_len =
+			len + NON_HDLC_HEADER_SIZE + 1;
+		goto send_data;
+	} else {
+		/*
+		 * The worst case length will be twice as the incoming packet
+		 * length.
+		 * Add 3 bytes for CRC bytes (2 bytes) and delimiter (1 byte)
+		 */
+		max_len = (2 * len) + 3;
+		if (max_len > DIAG_MAX_HDLC_BUF_SIZE) {
+			pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
+				   max_len);
+			return -EBADMSG;
+		}
+
+		/* Perform HDLC encoding on incoming data */
+		send.state = DIAG_STATE_START;
+		send.pkt = (void *)(buf);
+		send.last = (void *)(buf + len - 1);
+		send.terminate = 1;
+
+		enc.dest = driver->hdlc_encode_buf;
+		enc.dest_last = (void *)(driver->hdlc_encode_buf + max_len - 1);
+		diag_hdlc_encode(&send, &enc);
+		driver->hdlc_encode_buf_len = (int)(enc.dest -
+			(void *)driver->hdlc_encode_buf);
 	}
 
-	/* Perform HDLC encoding on incoming data */
-	send.state = DIAG_STATE_START;
-	send.pkt = (void *)(buf);
-	send.last = (void *)(buf + len - 1);
-	send.terminate = 1;
-
-	enc.dest = driver->hdlc_encode_buf;
-	enc.dest_last = (void *)(driver->hdlc_encode_buf + max_len - 1);
-	diag_hdlc_encode(&send, &enc);
-	driver->hdlc_encode_buf_len = (int)(enc.dest -
-					(void *)driver->hdlc_encode_buf);
-
 send_data:
 	err = diagfwd_bridge_write(bridge_index, driver->hdlc_encode_buf,
 				   driver->hdlc_encode_buf_len);
@@ -2269,10 +2257,13 @@ static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
 	mutex_lock(&driver->hdlc_disable_mutex);
 	mutex_lock(&driver->md_session_lock);
 	session_info = diag_md_session_get_pid(current->tgid);
-	if (session_info)
+	if (session_info) {
 		session_info->hdlc_disabled = hdlc_support;
-	else
+		driver->proc_hdlc_disabled[DIAG_LOCAL_PROC] =
+			hdlc_support;
+	} else {
 		driver->hdlc_disabled = hdlc_support;
+	}
 
 	peripheral =
 		diag_md_session_match_pid_peripheral(DIAG_LOCAL_PROC,
@@ -3026,6 +3017,8 @@ long diagchar_compat_ioctl(struct file *filp,
 			   unsigned int iocmd, unsigned long ioarg)
 {
 	int result = -EINVAL;
+	uint8_t hdlc_support, i;
+	struct diag_md_session_t *session_info = NULL;
 
 	if (iocmd == DIAG_IOCTL_COMMAND_REG) {
 		result = diag_ioctl_cmd_reg_compat(ioarg);
@@ -3049,6 +3042,18 @@ long diagchar_compat_ioctl(struct file *filp,
 	} else if (iocmd >= DIAG_IOCTL_QUERY_PD_FEATUREMASK &&
 			iocmd <= DIAG_IOCTL_PASSTHRU_CONTROL) {
 		result = diagchar_ioctl_hw_accel(filp, iocmd, ioarg);
+	} else if (iocmd == DIAG_IOCTL_MDM_HDLC_TOGGLE) {
+		if (copy_from_user(&hdlc_support, (void __user *)ioarg,
+				   sizeof(uint8_t)))
+			return -EFAULT;
+		mutex_lock(&driver->md_session_lock);
+		session_info = diag_md_session_get_pid(current->tgid);
+		if (session_info) {
+			for (i = DIAG_LOCAL_PROC + 1; i < NUM_DIAG_MD_DEV; i++)
+				driver->proc_hdlc_disabled[i] = hdlc_support;
+		}
+		mutex_unlock(&driver->md_session_lock);
+		result = 0;
 	} else {
 		result = -EINVAL;
 	}
@@ -3060,6 +3065,8 @@ long diagchar_ioctl(struct file *filp,
 			   unsigned int iocmd, unsigned long ioarg)
 {
 	int result = -EINVAL;
+	uint8_t hdlc_support, i;
+	struct diag_md_session_t *session_info = NULL;
 
 	if (iocmd == DIAG_IOCTL_COMMAND_REG) {
 		result = diag_ioctl_cmd_reg(ioarg);
@@ -3083,6 +3090,18 @@ long diagchar_ioctl(struct file *filp,
 	} else if (iocmd >= DIAG_IOCTL_QUERY_PD_FEATUREMASK &&
 			iocmd <= DIAG_IOCTL_PASSTHRU_CONTROL) {
 		result = diagchar_ioctl_hw_accel(filp, iocmd, ioarg);
+	} else if (iocmd == DIAG_IOCTL_MDM_HDLC_TOGGLE) {
+		if (copy_from_user(&hdlc_support, (void __user *)ioarg,
+				   sizeof(uint8_t)))
+			return -EFAULT;
+		mutex_lock(&driver->md_session_lock);
+		session_info = diag_md_session_get_pid(current->tgid);
+		if (session_info) {
+			for (i = DIAG_LOCAL_PROC + 1; i < NUM_DIAG_MD_DEV; i++)
+				driver->proc_hdlc_disabled[i] = hdlc_support;
+		}
+		mutex_unlock(&driver->md_session_lock);
+		result = 0;
 	} else {
 		result = -EINVAL;
 	}
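For reference, the non-HDLC framing this file now applies to remote-proc
data is: start byte (0x7E), version (1), 16-bit payload length, raw
payload, closing 0x7E. A standalone sketch of the same layout, mirroring
the memcpy sequence in diag_send_raw_data_remote() (the unconverted length
copy matches the driver's behavior on its little-endian ARM targets):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define CONTROL_CHAR            0x7E
    #define NON_HDLC_VERSION        1
    #define NON_HDLC_HEADER_SIZE    4

    /* dst must have room for len + NON_HDLC_HEADER_SIZE + 1 bytes */
    static size_t build_non_hdlc_frame(uint8_t *dst,
                                       const uint8_t *payload, uint16_t len)
    {
            dst[0] = CONTROL_CHAR;
            dst[1] = NON_HDLC_VERSION;
            memcpy(&dst[2], &len, 2);     /* little-endian host assumed */
            memcpy(&dst[NON_HDLC_HEADER_SIZE], payload, len);
            dst[NON_HDLC_HEADER_SIZE + len] = CONTROL_CHAR;
            return NON_HDLC_HEADER_SIZE + len + 1;
    }
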
diff --git a/drivers/clk/qcom/camcc-lagoon.c b/drivers/clk/qcom/camcc-lagoon.c
index a735541..7ec55d0 100644
--- a/drivers/clk/qcom/camcc-lagoon.c
+++ b/drivers/clk/qcom/camcc-lagoon.c
@@ -179,6 +179,7 @@ static struct pll_vco fabia_vco[] = {
 /* 600MHz configuration */
 static const struct alpha_pll_config cam_cc_pll0_config = {
 	.l = 0x1F,
+	.cal_l = 0x22,
 	.alpha = 0x4000,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002067,
@@ -234,6 +235,7 @@ static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
 /* 808MHz configuration */
 static const struct alpha_pll_config cam_cc_pll1_config = {
 	.l = 0x2A,
+	.cal_l = 0x2C,
 	.alpha = 0x1555,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002067,
@@ -357,6 +359,7 @@ static struct clk_alpha_pll_postdiv cam_cc_pll2_out_main = {
 /* 384MHz configuration */
 static const struct alpha_pll_config cam_cc_pll3_config = {
 	.l = 0x14,
+	.cal_l = 0x16,
 	.alpha = 0x0,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002067,
diff --git a/drivers/clk/qcom/dispcc-lagoon.c b/drivers/clk/qcom/dispcc-lagoon.c
index fb88c51..4b02003 100644
--- a/drivers/clk/qcom/dispcc-lagoon.c
+++ b/drivers/clk/qcom/dispcc-lagoon.c
@@ -120,7 +120,7 @@ static struct pll_vco fabia_vco[] = {
 
 static const struct alpha_pll_config disp_cc_pll0_config = {
 	.l = 0x3A,
-	.cal_l = 0x3F,
+	.cal_l = 0x31,
 	.alpha = 0x5555,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002067,
diff --git a/drivers/clk/qcom/gcc-bengal.c b/drivers/clk/qcom/gcc-bengal.c
index 95137f6..6bb7c90 100644
--- a/drivers/clk/qcom/gcc-bengal.c
+++ b/drivers/clk/qcom/gcc-bengal.c
@@ -1082,7 +1082,7 @@ static const struct freq_tbl ftbl_gcc_camss_ope_clk_src[] = {
 	F(200000000, P_GPLL8_OUT_MAIN, 2, 0, 0),
 	F(266600000, P_GPLL8_OUT_MAIN, 1, 0, 0),
 	F(465000000, P_GPLL8_OUT_MAIN, 1, 0, 0),
-	F(580000000, P_GPLL8_OUT_EARLY, 1, 0, 0),
+	F(576000000, P_GPLL9_OUT_MAIN, 1, 0, 0),
 	{ }
 };
 
diff --git a/drivers/clk/qcom/gdsc-regulator.c b/drivers/clk/qcom/gdsc-regulator.c
index e16a9e5..94ab961 100644
--- a/drivers/clk/qcom/gdsc-regulator.c
+++ b/drivers/clk/qcom/gdsc-regulator.c
@@ -146,6 +146,9 @@ static int gdsc_is_enabled(struct regulator_dev *rdev)
 	if (!sc->toggle_logic)
 		return !sc->resets_asserted;
 
+	if (sc->skip_disable_before_enable)
+		return false;
+
 	if (sc->parent_regulator) {
 		/*
 		 * The parent regulator for the GDSC is required to be on to
@@ -209,6 +212,9 @@ static int gdsc_enable(struct regulator_dev *rdev)
 	uint32_t regval, hw_ctrl_regval = 0x0;
 	int i, ret = 0;
 
+	if (sc->skip_disable_before_enable)
+		return 0;
+
 	if (sc->parent_regulator) {
 		ret = regulator_set_voltage(sc->parent_regulator,
 				RPMH_REGULATOR_LEVEL_LOW_SVS, INT_MAX);
@@ -367,7 +373,6 @@ static int gdsc_enable(struct regulator_dev *rdev)
 		clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
 
 	sc->is_gdsc_enabled = true;
-	sc->skip_disable_before_enable = false;
 end:
 	if (ret && sc->bus_handle) {
 		msm_bus_scale_client_update_request(sc->bus_handle, 0);
@@ -386,16 +391,6 @@ static int gdsc_disable(struct regulator_dev *rdev)
 	uint32_t regval;
 	int i, ret = 0;
 
-	/*
-	 * Protect GDSC against late_init disabling when the GDSC is enabled
-	 * by an entity outside external to HLOS.
-	 */
-	if (sc->skip_disable_before_enable) {
-		dev_dbg(&rdev->dev, "Skip Disabling: %s\n", sc->rdesc.name);
-		sc->skip_disable_before_enable = false;
-		return 0;
-	}
-
 	if (sc->force_root_en)
 		clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
 
diff --git a/drivers/clk/qcom/gpucc-lagoon.c b/drivers/clk/qcom/gpucc-lagoon.c
index ec1bbcf..1c74ecb 100644
--- a/drivers/clk/qcom/gpucc-lagoon.c
+++ b/drivers/clk/qcom/gpucc-lagoon.c
@@ -135,7 +135,7 @@ static struct clk_fixed_factor crc_div = {
 /* 514MHz Configuration*/
 static const struct alpha_pll_config gpu_cc_pll1_config = {
 	.l = 0x1A,
-	.cal_l = 0x3F,
+	.cal_l = 0x3D,
 	.alpha = 0xC555,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002067,
diff --git a/drivers/clk/qcom/npucc-lagoon.c b/drivers/clk/qcom/npucc-lagoon.c
index cd5adaae..99c28e6 100644
--- a/drivers/clk/qcom/npucc-lagoon.c
+++ b/drivers/clk/qcom/npucc-lagoon.c
@@ -110,6 +110,7 @@ static struct pll_vco fabia_vco[] = {
 /* 537.60MHz Configuration */
 static struct alpha_pll_config npu_cc_pll0_config = {
 	.l = 0x1C,
+	.cal_l = 0x3F,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002067,
 	.test_ctl_val = 0x40000000,
@@ -147,6 +148,7 @@ static struct clk_alpha_pll npu_cc_pll0 = {
 /* 300MHz Configuration */
 static struct alpha_pll_config npu_cc_pll1_config = {
 	.l = 0xF,
+	.cal_l = 0x33,
 	.alpha = 0xA000,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002067,
@@ -182,6 +184,7 @@ static struct clk_alpha_pll npu_cc_pll1 = {
 /* 250MHz Configuration */
 static struct alpha_pll_config npu_q6ss_pll_config = {
 	.l = 0xD,
+	.cal_l = 0x1E,
 	.alpha = 0x555,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002067,
@@ -228,11 +231,11 @@ static struct clk_fixed_factor npu_cc_crc_div = {
 
 static const struct freq_tbl ftbl_npu_cc_cal_hm0_clk_src[] = {
 	F(100000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(192000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
 	F(268800000, P_NPU_CC_CRC_DIV, 1, 0, 0),
 	F(403200000, P_NPU_CC_CRC_DIV, 1, 0, 0),
 	F(515000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
-	F(650000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
-	F(850000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(748800000, P_NPU_CC_CRC_DIV, 1, 0, 0),
 	{ }
 };
 
@@ -253,11 +256,11 @@ static struct clk_rcg2 npu_cc_cal_hm0_clk_src = {
 		.num_rate_max = VDD_NUM,
 		.rate_max = (unsigned long[VDD_NUM]) {
 			[VDD_MIN] = 100000000,
-			[VDD_LOWER] = 268800000,
-			[VDD_LOW] = 403200000,
-			[VDD_LOW_L1] = 515000000,
-			[VDD_NOMINAL] = 650000000,
-			[VDD_HIGH] = 850000000},
+			[VDD_LOWER] = 192000000,
+			[VDD_LOW] = 268800000,
+			[VDD_LOW_L1] = 403200000,
+			[VDD_NOMINAL] = 515000000,
+			[VDD_HIGH] = 748800000},
 	},
 };
 
diff --git a/drivers/clk/qcom/videocc-lagoon.c b/drivers/clk/qcom/videocc-lagoon.c
index 32838a6..0f49c37 100644
--- a/drivers/clk/qcom/videocc-lagoon.c
+++ b/drivers/clk/qcom/videocc-lagoon.c
@@ -127,7 +127,6 @@ static struct clk_alpha_pll_postdiv video_pll0_out_even = {
 };
 
 static const struct freq_tbl ftbl_video_cc_iris_clk_src[] = {
-	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(133250000, P_VIDEO_PLL0_OUT_EVEN, 2, 0, 0),
 	F(240000000, P_VIDEO_PLL0_OUT_EVEN, 1.5, 0, 0),
 	F(300000000, P_VIDEO_PLL0_OUT_EVEN, 1, 0, 0),
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a4f9557..ae115e6 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -456,6 +456,16 @@
 	  16 to 32 channels for peripheral to memory or memory to memory
 	  transfers.
 
+config QCOM_SPS_DMA
+	tristate "Qualcomm Technologies Inc. DMA driver for sps-BAM"
+	depends on ARCH_QCOM
+	select DMA_ENGINE
+	help
+	  Enable support for the Qualcomm Technologies Inc. BAM DMA engine.
+	  This DMA engine driver is a wrapper around the sps-BAM library:
+	  the DMA engine callbacks are implemented using sps-BAM
+	  functionality to access the HW.
+
 config SIRF_DMA
 	tristate "CSR SiRFprimaII/SiRFmarco DMA support"
 	depends on ARCH_SIRF
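Clients reach the new driver through a four-cell DT dma specifier
(QBAM_OF_SLAVE_N_ARGS): channel/pipe index, descriptor count, sps_connect
flags, and sps_register_event flags, parsed by qbam_dma_xlate() in the new
file below; the qcom,managed-locally and qcom,summing-threshold properties
configure the BAM itself.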
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index c91702d..68ec7a6 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -72,6 +72,7 @@
 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
 obj-$(CONFIG_ZX_DMA) += zx_dma.o
 obj-$(CONFIG_ST_FDMA) += st_fdma.o
+obj-$(CONFIG_QCOM_SPS_DMA) += qcom-sps-dma.o
 
 obj-y += mediatek/
 obj-y += qcom/
diff --git a/drivers/dma/qcom-sps-dma.c b/drivers/dma/qcom-sps-dma.c
new file mode 100644
index 0000000..631516b
--- /dev/null
+++ b/drivers/dma/qcom-sps-dma.c
@@ -0,0 +1,711 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2015,2017-2018, 2020, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * Qualcomm Technologies Inc. DMA API for BAM (Bus Access Manager).
+ * This DMA driver uses sps-BAM API to access the HW, thus it is effectively a
+ * DMA engine wrapper of the sps-BAM API.
+ *
+ * Client channel configuration example:
+ * struct dma_slave_config config {
+ *    .direction = DMA_MEM_TO_DEV;
+ * };
+ *
+ * chan = dma_request_slave_channel(client_dev, "rx");
+ * dmaengine_slave_config(chan, &config);
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/list.h>
+#include <linux/msm-sps.h>
+#include "dmaengine.h"
+
+#define QBAM_OF_SLAVE_N_ARGS	(4)
+#define QBAM_OF_MANAGE_LOCAL	"qcom,managed-locally"
+#define QBAM_OF_SUM_THRESHOLD	"qcom,summing-threshold"
+#define QBAM_MAX_DESCRIPTORS	(0x100)
+#define QBAM_MAX_CHANNELS	(32)
+
+/*
+ * qbam_async_tx_descriptor - dma descriptor plus a list of xfer_bufs
+ *
+ * @sgl scatterlist of transfer buffers
+ * @sg_len size of that list
+ * @flags dma xfer flags
+ */
+struct qbam_async_tx_descriptor {
+	struct dma_async_tx_descriptor	dma_desc;
+	struct scatterlist		*sgl;
+	unsigned int			sg_len;
+	unsigned long			flags;
+};
+
+#define DMA_TO_QBAM_ASYNC_DESC(dma_async_desc) \
+	container_of(dma_async_desc, struct qbam_async_tx_descriptor, dma_desc)
+
+struct qbam_channel;
+/*
+ * qbam_device - top level device of current driver
+ * @handle bam sps handle.
+ * @regs bam register space virtual base address.
+ * @mem_resource bam register space resource.
+ * @deregister_required if bam is registered by this driver it needs to be
+ *   unregistered by this driver.
+ * @manage is bam managed locally or remotely.
+ * @summing_threshold event threshold.
+ * @irq bam interrupt line.
+ * @channels has the same channels as qbam_dev->dma_dev.channels but
+ *   supports fast access by pipe index.
+ */
+struct qbam_device {
+	struct dma_device		dma_dev;
+	void __iomem			*regs;
+	struct resource			*mem_resource;
+	ulong				handle;
+	bool				deregister_required;
+	u32				summing_threshold;
+	u32				manage;
+	int				irq;
+	struct qbam_channel		*channels[QBAM_MAX_CHANNELS];
+};
+
+/* qbam_pipe: aggregate of bam pipe related entries of qbam_channel */
+struct qbam_pipe {
+	u32				index;
+	struct sps_pipe			*handle;
+	struct sps_connect		cfg;
+	u32				num_descriptors;
+	u32				sps_connect_flags;
+	u32				sps_register_event_flags;
+};
+
+/*
+ * qbam_channel - dma channel plus bam pipe info and current pending transfers
+ *
+ * @direction is a producer or consumer (MEM => DEV or DEV => MEM)
+ * @pending_desc next set of transfer to process
+ * @error last error that took place on the current pending_desc
+ */
+struct qbam_channel {
+	struct qbam_pipe		bam_pipe;
+
+	struct dma_chan			chan;
+	enum dma_transfer_direction	direction;
+	struct qbam_async_tx_descriptor	pending_desc;
+
+	struct qbam_device		*qbam_dev;
+	struct mutex			lock;
+	int				error;
+};
+#define DMA_TO_QBAM_CHAN(dma_chan) \
+			container_of(dma_chan, struct qbam_channel, chan)
+#define qbam_err(qbam_dev, fmt ...) dev_err(qbam_dev->dma_dev.dev, fmt)
+
+/*  qbam_disconnect_chan - disconnect a channel */
+static int qbam_disconnect_chan(struct qbam_channel *qbam_chan)
+{
+	struct qbam_device  *qbam_dev    = qbam_chan->qbam_dev;
+	struct sps_pipe     *pipe_handle = qbam_chan->bam_pipe.handle;
+	struct sps_connect   pipe_config_no_irq = {.options = SPS_O_POLL};
+	int ret;
+
+	/*
+	 * SW workaround:
+	 * When disconnecting BAM pipe a spurious interrupt sometimes appears.
+	 * To avoid that, we change the pipe setting from interrupt (default)
+ * to polling (SPS_O_POLL) before disconnecting the pipe.
+	 */
+	ret = sps_set_config(pipe_handle, &pipe_config_no_irq);
+	if (ret)
+		qbam_err(qbam_dev,
+			"error:%d sps_set_config(pipe:%d) before disconnect\n",
+			ret, qbam_chan->bam_pipe.index);
+
+	ret = sps_disconnect(pipe_handle);
+	if (ret)
+		qbam_err(qbam_dev, "error:%d sps_disconnect(pipe:%d)\n",
+			 ret, qbam_chan->bam_pipe.index);
+
+	return ret;
+}
+
+/*  qbam_free_chan - disconnect channel and free its resources */
+static void qbam_free_chan(struct dma_chan *chan)
+{
+	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(chan);
+	struct qbam_device  *qbam_dev  = qbam_chan->qbam_dev;
+
+	mutex_lock(&qbam_chan->lock);
+	if (qbam_disconnect_chan(qbam_chan))
+		qbam_err(qbam_dev,
+			"error free_chan() failed to disconnect(pipe:%d)\n",
+			qbam_chan->bam_pipe.index);
+	qbam_chan->pending_desc.sgl = NULL;
+	qbam_chan->pending_desc.sg_len = 0;
+	mutex_unlock(&qbam_chan->lock);
+}
+
+static struct dma_chan *qbam_dma_xlate(struct of_phandle_args *dma_spec,
+							struct of_dma *of)
+{
+	struct qbam_device  *qbam_dev  = of->of_dma_data;
+	struct qbam_channel *qbam_chan;
+	u32 channel_index;
+	u32 num_descriptors;
+
+	if (dma_spec->args_count != QBAM_OF_SLAVE_N_ARGS) {
+		qbam_err(qbam_dev,
+			"invalid number of dma arguments, expect:%d got:%d\n",
+			QBAM_OF_SLAVE_N_ARGS, dma_spec->args_count);
+		return NULL;
+	}
+
+	channel_index = dma_spec->args[0];
+
+	if (channel_index >= QBAM_MAX_CHANNELS) {
+		qbam_err(qbam_dev,
+			"error: channel_index:%d out of bounds",
+			channel_index);
+		return NULL;
+	}
+	qbam_chan = qbam_dev->channels[channel_index];
+	/* return qbam_chan if it exists, otherwise create one */
+	if (qbam_chan) {
+		qbam_chan->chan.client_count = 1;
+		return &qbam_chan->chan;
+	}
+
+	num_descriptors = dma_spec->args[1];
+	if (!num_descriptors || (num_descriptors > QBAM_MAX_DESCRIPTORS)) {
+		qbam_err(qbam_dev,
+			"invalid number of descriptors, range[1..%d] got:%d\n",
+			QBAM_MAX_DESCRIPTORS, num_descriptors);
+		return NULL;
+	}
+
+	/* allocate a channel */
+	qbam_chan = kzalloc(sizeof(*qbam_chan), GFP_KERNEL);
+	if (!qbam_chan)
+		return NULL;
+
+	/* allocate BAM resources for that channel */
+	qbam_chan->bam_pipe.handle = sps_alloc_endpoint();
+	if (!qbam_chan->bam_pipe.handle) {
+		qbam_err(qbam_dev, "error: sps_alloc_endpoint() return NULL\n");
+		kfree(qbam_chan);
+		return NULL;
+	}
+
+	/* init dma_chan */
+	qbam_chan->chan.device = &qbam_dev->dma_dev;
+	dma_cookie_init(&qbam_chan->chan);
+	qbam_chan->chan.client_count                 = 1;
+	/* init qbam_chan */
+	qbam_chan->bam_pipe.index                    = channel_index;
+	qbam_chan->bam_pipe.num_descriptors          = num_descriptors;
+	qbam_chan->bam_pipe.sps_connect_flags        = dma_spec->args[2];
+	qbam_chan->bam_pipe.sps_register_event_flags = dma_spec->args[3];
+	qbam_chan->qbam_dev                          = qbam_dev;
+	mutex_init(&qbam_chan->lock);
+
+	/* add to dma_device list of channels */
+	list_add(&qbam_chan->chan.device_node, &qbam_dev->dma_dev.channels);
+	qbam_dev->channels[channel_index] = qbam_chan;
+
+	return &qbam_chan->chan;
+}
+
+static enum dma_status qbam_tx_status(struct dma_chan *chan,
+			dma_cookie_t cookie, struct dma_tx_state *state)
+{
+	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(chan);
+	struct qbam_async_tx_descriptor	*qbam_desc = &qbam_chan->pending_desc;
+	enum dma_status ret;
+
+	mutex_lock(&qbam_chan->lock);
+
+	if (qbam_chan->error) {
+		mutex_unlock(&qbam_chan->lock);
+		return DMA_ERROR;
+	}
+
+	ret = dma_cookie_status(chan, cookie, state);
+	if (ret == DMA_IN_PROGRESS) {
+		struct scatterlist *sg;
+		int i;
+		u32 transfer_size = 0;
+
+		for_each_sg(qbam_desc->sgl, sg, qbam_desc->sg_len, i)
+			transfer_size += sg_dma_len(sg);
+
+		dma_set_residue(state, transfer_size);
+	}
+	mutex_unlock(&qbam_chan->lock);
+
+	return ret;
+}
+
+/*
+ * qbam_init_bam_handle - find or create bam handle.
+ *
+ * BAM device needs to be registered for each BLSP once and only once. If it
+ * was registered, then we find the handle to the registered bam and return
+ * it, otherwise we register it here.
+ * The module which registered BAM is responsible for deregistering it.
+ */
+static int qbam_init_bam_handle(struct qbam_device *qbam_dev)
+{
+	int ret = 0;
+	struct sps_bam_props bam_props = {0};
+
+	/*
+	 * Check if BAM is already registered with SPS on the current
+	 * BLSP. If it isn't, then go ahead and register it.
+	 */
+	ret = sps_phy2h(qbam_dev->mem_resource->start, &qbam_dev->handle);
+	if (qbam_dev->handle)
+		return 0;
+
+	qbam_dev->regs = devm_ioremap_resource(qbam_dev->dma_dev.dev,
+					       qbam_dev->mem_resource);
+	if (IS_ERR(qbam_dev->regs)) {
+		qbam_err(qbam_dev, "error:%ld ioremap(phy:0x%lx len:0x%lx)\n",
+			 PTR_ERR(qbam_dev->regs),
+			 (ulong) qbam_dev->mem_resource->start,
+			 (ulong) resource_size(qbam_dev->mem_resource));
+		return PTR_ERR(qbam_dev->regs);
+	}
+
+	bam_props.phys_addr		= qbam_dev->mem_resource->start;
+	bam_props.virt_addr		= qbam_dev->regs;
+	bam_props.summing_threshold	= qbam_dev->summing_threshold;
+	bam_props.manage		= qbam_dev->manage;
+	bam_props.irq			= qbam_dev->irq;
+
+	ret = sps_register_bam_device(&bam_props, &qbam_dev->handle);
+	if (ret)
+		qbam_err(qbam_dev, "error:%d sps_register_bam_device\n"
+			 "(phy:0x%lx virt:0x%lx irq:%d)\n",
+			 ret, (ulong) bam_props.phys_addr,
+			 (ulong) bam_props.virt_addr, qbam_dev->irq);
+	else
+		qbam_dev->deregister_required = true;
+
+	return ret;
+}
+
+static int qbam_alloc_chan(struct dma_chan *chan)
+{
+	return 0;
+}
+
+static void qbam_eot_callback(struct sps_event_notify *notify)
+{
+	struct qbam_async_tx_descriptor *qbam_desc = notify->data.transfer.user;
+	struct dma_async_tx_descriptor  *dma_desc  = &qbam_desc->dma_desc;
+	dma_async_tx_callback callback	= dma_desc->callback;
+	void *param			= dma_desc->callback_param;
+
+	if (callback)
+		callback(param);
+}
+
+static void qbam_error_callback(struct sps_event_notify *notify)
+{
+	struct qbam_channel *qbam_chan	= notify->user;
+
+	qbam_err(qbam_chan->qbam_dev, "error: %s(pipe:%d)\n",
+		 __func__, qbam_chan->bam_pipe.index);
+}
+
+static int qbam_connect_chan(struct qbam_channel *qbam_chan)
+{
+	int ret = 0;
+	struct qbam_device       *qbam_dev = qbam_chan->qbam_dev;
+	struct sps_register_event bam_eot_event = {
+		.mode		= SPS_TRIGGER_CALLBACK,
+		.options	= qbam_chan->bam_pipe.sps_register_event_flags,
+		.callback	= qbam_eot_callback,
+		};
+	struct sps_register_event bam_error_event = {
+		.mode		= SPS_TRIGGER_CALLBACK,
+		.options	= SPS_O_ERROR,
+		.callback	= qbam_error_callback,
+		.user		= qbam_chan,
+		};
+
+	ret = sps_connect(qbam_chan->bam_pipe.handle, &qbam_chan->bam_pipe.cfg);
+	if (ret) {
+		qbam_err(qbam_dev, "error:%d sps_connect(pipe:%d)\n", ret,
+			 qbam_chan->bam_pipe.index);
+		return ret;
+	}
+
+	ret = sps_register_event(qbam_chan->bam_pipe.handle, &bam_eot_event);
+	if (ret) {
+		qbam_err(qbam_dev, "error:%d sps_register_event(eot@pipe:%d)\n",
+			 ret, qbam_chan->bam_pipe.index);
+		goto need_disconnect;
+	}
+
+	ret = sps_register_event(qbam_chan->bam_pipe.handle, &bam_error_event);
+	if (ret) {
+		qbam_err(qbam_dev, "error:%d sps_register_event(err@pipe:%d)\n",
+			 ret, qbam_chan->bam_pipe.index);
+		goto need_disconnect;
+	}
+
+	return 0;
+
+need_disconnect:
+	ret = sps_disconnect(qbam_chan->bam_pipe.handle);
+	if (ret)
+		qbam_err(qbam_dev, "error:%d sps_disconnect(pipe:%d)\n", ret,
+			 qbam_chan->bam_pipe.index);
+	return ret;
+}
+
+/*
+ * qbam_slave_cfg - configure and connect a BAM pipe
+ *
+ * @cfg only cares about cfg->direction
+ */
+static int qbam_slave_cfg(struct dma_chan *chan,
+						struct dma_slave_config *cfg)
+{
+	int ret = 0;
+	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(chan);
+	struct qbam_device *qbam_dev = qbam_chan->qbam_dev;
+	struct sps_connect *pipe_cfg = &qbam_chan->bam_pipe.cfg;
+
+	if (!qbam_dev->handle) {
+		ret = qbam_init_bam_handle(qbam_dev);
+		if (ret)
+			return ret;
+	}
+
+	if (qbam_chan->bam_pipe.cfg.desc.base)
+		goto cfg_done;
+
+	ret = sps_get_config(qbam_chan->bam_pipe.handle,
+						&qbam_chan->bam_pipe.cfg);
+	if (ret) {
+		qbam_err(qbam_dev, "error:%d sps_get_config(0x%p)\n",
+			 ret, qbam_chan->bam_pipe.handle);
+		return ret;
+	}
+
+	qbam_chan->direction = cfg->direction;
+	if (cfg->direction == DMA_MEM_TO_DEV) {
+		pipe_cfg->source          = SPS_DEV_HANDLE_MEM;
+		pipe_cfg->destination     = qbam_dev->handle;
+		pipe_cfg->mode            = SPS_MODE_DEST;
+		pipe_cfg->src_pipe_index  = 0;
+		pipe_cfg->dest_pipe_index = qbam_chan->bam_pipe.index;
+	} else {
+		pipe_cfg->source          = qbam_dev->handle;
+		pipe_cfg->destination     = SPS_DEV_HANDLE_MEM;
+		pipe_cfg->mode            = SPS_MODE_SRC;
+		pipe_cfg->src_pipe_index  = qbam_chan->bam_pipe.index;
+		pipe_cfg->dest_pipe_index = 0;
+	}
+	pipe_cfg->options   =  qbam_chan->bam_pipe.sps_connect_flags;
+	pipe_cfg->desc.size = (qbam_chan->bam_pipe.num_descriptors + 1) *
+						 sizeof(struct sps_iovec);
+	/* managed dma_alloc_coherent() */
+	pipe_cfg->desc.base = dmam_alloc_coherent(qbam_dev->dma_dev.dev,
+						  pipe_cfg->desc.size,
+						  &pipe_cfg->desc.phys_base,
+						  GFP_KERNEL);
+	if (!pipe_cfg->desc.base) {
+		qbam_err(qbam_dev,
+			"error dma_alloc_coherent(desc-sz:%llu * n-descs:%d)\n",
+			(u64) sizeof(struct sps_iovec),
+			qbam_chan->bam_pipe.num_descriptors);
+		return -ENOMEM;
+	}
+cfg_done:
+	ret = qbam_connect_chan(qbam_chan);
+	if (ret)
+		dmam_free_coherent(qbam_dev->dma_dev.dev, pipe_cfg->desc.size,
+				 pipe_cfg->desc.base, pipe_cfg->desc.phys_base);
+
+	return ret;
+}
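+
+/*
+ * Client-side sketch (illustrative assumption): a client sets the pipe
+ * direction through the standard dmaengine call before preparing
+ * transfers; as noted above, only cfg->direction is honored here:
+ *
+ *	struct dma_slave_config cfg = { .direction = DMA_MEM_TO_DEV };
+ *
+ *	ret = dmaengine_slave_config(chan, &cfg);
+ */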
+
+static int qbam_flush_chan(struct dma_chan *chan)
+{
+	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(chan);
+	int ret = qbam_disconnect_chan(qbam_chan);
+
+	if (ret) {
+		qbam_err(qbam_chan->qbam_dev,
+			 "error: disconnect flush(pipe:%d\n)",
+			 qbam_chan->bam_pipe.index);
+		return ret;
+	}
+	ret = qbam_connect_chan(qbam_chan);
+	if (ret)
+		qbam_err(qbam_chan->qbam_dev,
+			 "error: reconnect flush(pipe:%d\n)",
+			 qbam_chan->bam_pipe.index);
+	return ret;
+}
+
+/* qbam_tx_submit - assign a cookie to the pending descriptor */
+static dma_cookie_t qbam_tx_submit(struct dma_async_tx_descriptor *dma_desc)
+{
+	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(dma_desc->chan);
+	dma_cookie_t ret;
+
+	mutex_lock(&qbam_chan->lock);
+
+	ret = dma_cookie_assign(dma_desc);
+
+	mutex_unlock(&qbam_chan->lock);
+
+	return ret;
+}
+
+/*
+ * qbam_prep_slave_sg - populate the pending descriptor from a scatterlist
+ *
+ * @chan: dma channel
+ * @sgl: scatter gather list
+ * @sg_len: length of sg
+ * @direction: DMA transfer direction
+ * @flags: DMA flags
+ * @context: transfer context (unused)
+ * @return the newly created descriptor or negative ERR_PTR() on error
+ */
+static struct dma_async_tx_descriptor *qbam_prep_slave_sg(struct dma_chan *chan,
+	struct scatterlist *sgl, unsigned int sg_len,
+	enum dma_transfer_direction direction, unsigned long flags,
+	void *context)
+{
+	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(chan);
+	struct qbam_device *qbam_dev = qbam_chan->qbam_dev;
+	struct qbam_async_tx_descriptor *qbam_desc = &qbam_chan->pending_desc;
+
+	if (qbam_chan->direction != direction) {
+		qbam_err(qbam_dev,
+			"invalid dma transfer direction expected:%d given:%d\n",
+			qbam_chan->direction, direction);
+		return ERR_PTR(-EINVAL);
+	}
+
+	qbam_desc->dma_desc.chan	= &qbam_chan->chan;
+	qbam_desc->dma_desc.tx_submit	= qbam_tx_submit;
+	qbam_desc->sgl			= sgl;
+	qbam_desc->sg_len		= sg_len;
+	qbam_desc->flags		= flags;
+	return &qbam_desc->dma_desc;
+}
+
+/*
+ * qbam_issue_pending - queue pending descriptor to BAM
+ *
+ * Iterate over the transfers of the pending descriptor and push them to bam
+ */
+static void qbam_issue_pending(struct dma_chan *chan)
+{
+	int i;
+	int ret = 0;
+	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(chan);
+	struct qbam_device  *qbam_dev  = qbam_chan->qbam_dev;
+	struct qbam_async_tx_descriptor *qbam_desc = &qbam_chan->pending_desc;
+	struct scatterlist		*sg;
+
+	mutex_lock(&qbam_chan->lock);
+	if (!qbam_chan->pending_desc.sgl) {
+		qbam_err(qbam_dev,
+		   "error %s() no pending descriptor pipe:%d\n",
+		   __func__, qbam_chan->bam_pipe.index);
+		mutex_unlock(&qbam_chan->lock);
+		return;
+	}
+
+	for_each_sg(qbam_desc->sgl, sg, qbam_desc->sg_len, i) {
+		/* Add BAM flags only on the last buffer */
+		bool is_last_buf = (i == ((qbam_desc->sg_len) - 1));
+
+		ret = sps_transfer_one(qbam_chan->bam_pipe.handle,
+					sg_dma_address(sg), sg_dma_len(sg),
+					qbam_desc,
+					(is_last_buf ? qbam_desc->flags : 0));
+		if (ret < 0) {
+			qbam_chan->error = ret;
+
+			qbam_err(qbam_dev, "erorr:%d sps_transfer_one\n"
+				"(addr:0x%lx len:%d flags:0x%lx pipe:%d)\n",
+				ret, (ulong) sg_dma_address(sg), sg_dma_len(sg),
+				qbam_desc->flags, qbam_chan->bam_pipe.index);
+			break;
+		}
+	}
+
+	dma_cookie_complete(&qbam_desc->dma_desc);
+	qbam_chan->error = 0;
+	qbam_desc->sgl = NULL;
+	qbam_desc->sg_len = 0;
+	mutex_unlock(&qbam_chan->lock);
+}
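+
+/*
+ * End-to-end flow sketch (hypothetical client; the EOT flag choice is an
+ * assumption -- whatever flags the descriptor carries are passed to
+ * sps_transfer_one() for the last scatterlist entry only):
+ *
+ *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
+ *				       DMA_MEM_TO_DEV, SPS_IOVEC_FLAG_EOT);
+ *	desc->callback = my_done_cb;
+ *	cookie = dmaengine_submit(desc);
+ *	dma_async_issue_pending(chan);
+ */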
+
+static int qbam_deregister_bam_dev(struct qbam_device *qbam_dev)
+{
+	int ret;
+
+	if (!qbam_dev->handle)
+		return 0;
+
+	ret = sps_deregister_bam_device(qbam_dev->handle);
+	if (ret)
+		qbam_err(qbam_dev,
+			"error:%d sps_deregister_bam_device(hndl:0x%lx) failed",
+			ret, qbam_dev->handle);
+	return ret;
+}
+
+static void qbam_pipes_free(struct qbam_device *qbam_dev)
+{
+	struct qbam_channel *qbam_chan_cur, *qbam_chan_next;
+
+	list_for_each_entry_safe(qbam_chan_cur, qbam_chan_next,
+			&qbam_dev->dma_dev.channels, chan.device_node) {
+		mutex_lock(&qbam_chan_cur->lock);
+		qbam_free_chan(&qbam_chan_cur->chan);
+		sps_free_endpoint(qbam_chan_cur->bam_pipe.handle);
+		list_del(&qbam_chan_cur->chan.device_node);
+		mutex_unlock(&qbam_chan_cur->lock);
+		kfree(qbam_chan_cur);
+	}
+}
+
+static int qbam_probe(struct platform_device *pdev)
+{
+	struct qbam_device *qbam_dev;
+	int ret;
+	bool managed_locally;
+	struct device_node *of_node = pdev->dev.of_node;
+
+	qbam_dev = devm_kzalloc(&pdev->dev, sizeof(*qbam_dev), GFP_KERNEL);
+	if (!qbam_dev)
+		return -ENOMEM;
+
+	qbam_dev->dma_dev.dev = &pdev->dev;
+	platform_set_drvdata(pdev, qbam_dev);
+
+	qbam_dev->mem_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!qbam_dev->mem_resource) {
+		qbam_err(qbam_dev, "missing 'reg' DT entry");
+		return -ENODEV;
+	}
+
+	qbam_dev->irq = platform_get_irq(pdev, 0);
+	if (qbam_dev->irq < 0) {
+		qbam_err(qbam_dev, "missing DT IRQ resource entry");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(of_node, QBAM_OF_SUM_THRESHOLD,
+				   &qbam_dev->summing_threshold);
+	if (ret) {
+		qbam_err(qbam_dev, "missing '%s' DT entry",
+			 QBAM_OF_SUM_THRESHOLD);
+		return ret;
+	}
+
+	/* read from DT and set sps_bam_props.manage */
+	managed_locally = of_property_read_bool(of_node, QBAM_OF_MANAGE_LOCAL);
+	qbam_dev->manage = managed_locally ? SPS_BAM_MGR_LOCAL :
+					     SPS_BAM_MGR_DEVICE_REMOTE;
+
+	/* Init channels */
+	INIT_LIST_HEAD(&qbam_dev->dma_dev.channels);
+
+	/* Set capabilities */
+	dma_cap_zero(qbam_dev->dma_dev.cap_mask);
+	dma_cap_set(DMA_SLAVE,		qbam_dev->dma_dev.cap_mask);
+	dma_cap_set(DMA_PRIVATE,	qbam_dev->dma_dev.cap_mask);
+
+	/* Initialize dmaengine callback apis */
+	qbam_dev->dma_dev.device_alloc_chan_resources	= qbam_alloc_chan;
+	qbam_dev->dma_dev.device_free_chan_resources	= qbam_free_chan;
+	qbam_dev->dma_dev.device_prep_slave_sg		= qbam_prep_slave_sg;
+	qbam_dev->dma_dev.device_terminate_all		= qbam_flush_chan;
+	qbam_dev->dma_dev.device_config			= qbam_slave_cfg;
+	qbam_dev->dma_dev.device_issue_pending		= qbam_issue_pending;
+	qbam_dev->dma_dev.device_tx_status		= qbam_tx_status;
+
+	/*
+	 * Register with the DMA framework. The return value is deliberately
+	 * ignored so as not to break the existing way of requesting
+	 * channels.
+	 */
+	dma_async_device_register(&qbam_dev->dma_dev);
+
+	ret = of_dma_controller_register(of_node, qbam_dma_xlate, qbam_dev);
+	if (ret) {
+		qbam_err(qbam_dev, "error:%d of_dma_controller_register()\n",
+			 ret);
+		goto err_unregister_dma;
+	}
+	return 0;
+
+err_unregister_dma:
+	dma_async_device_unregister(&qbam_dev->dma_dev);
+	if (qbam_dev->deregister_required)
+		return qbam_deregister_bam_dev(qbam_dev);
+
+	return ret;
+}
+
+static int qbam_remove(struct platform_device *pdev)
+{
+	struct qbam_device *qbam_dev = platform_get_drvdata(pdev);
+
+	dma_async_device_unregister(&qbam_dev->dma_dev);
+
+	/* free BAM pipes resources */
+	qbam_pipes_free(qbam_dev);
+
+	if (qbam_dev->deregister_required)
+		return qbam_deregister_bam_dev(qbam_dev);
+
+	return 0;
+}
+
+static const struct of_device_id qbam_of_match[] = {
+	{ .compatible = "qcom,sps-dma" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, qbam_of_match);
+
+static struct platform_driver qbam_driver = {
+	.probe = qbam_probe,
+	.remove = qbam_remove,
+	.driver = {
+		.name = "qcom-sps-dma",
+		.of_match_table = qbam_of_match,
+	},
+};
+
+module_platform_driver(qbam_driver);
+
+MODULE_DESCRIPTION("DMA-API driver for qcom BAM");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-sps-dma");
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 22cbca7..5947176 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -279,6 +279,7 @@ static const char *const gpi_cb_event_str[MSM_GPI_QUP_MAX_EVENT] = {
 	[MSM_GPI_QUP_NOTIFY] = "NOTIFY",
 	[MSM_GPI_QUP_ERROR] = "GLOBAL ERROR",
 	[MSM_GPI_QUP_CH_ERROR] = "CHAN ERROR",
+	[MSM_GPI_QUP_FW_ERROR] = "UNHANDLED ERROR",
 	[MSM_GPI_QUP_PENDING_EVENT] = "PENDING EVENT",
 	[MSM_GPI_QUP_EOT_DESC_MISMATCH] = "EOT/DESC MISMATCH",
 	[MSM_GPI_QUP_SW_ERROR] = "SW ERROR",
@@ -2238,6 +2239,10 @@ int gpi_terminate_all(struct dma_chan *chan)
 		if (ret) {
 			GPII_ERR(gpii, gpii_chan->chid,
 				 "Error resetting channel ret:%d\n", ret);
+			if (!gpii->reg_table_dump) {
+				gpi_dump_debug_reg(gpii);
+				gpii->reg_table_dump = true;
+			}
 			goto terminate_exit;
 		}
 
diff --git a/drivers/gpu/drm/bridge/lt9611uxc.c b/drivers/gpu/drm/bridge/lt9611uxc.c
index 95da862..6598129d 100644
--- a/drivers/gpu/drm/bridge/lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lt9611uxc.c
@@ -24,13 +24,14 @@
 #include <linux/regulator/consumer.h>
 #include <linux/firmware.h>
 #include <linux/hdmi.h>
+#include <linux/string.h>
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_crtc_helper.h>
-#include <linux/string.h>
+#include <drm/drm_client.h>
 
 #define CFG_HPD_INTERRUPTS BIT(0)
 #define CFG_EDID_INTERRUPTS BIT(1)
@@ -183,6 +184,11 @@ void lt9611_hpd_work(struct work_struct *work)
 	envp[4] = NULL;
 	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
 			   envp);
+
+	if (dev->mode_config.funcs->output_poll_changed)
+		dev->mode_config.funcs->output_poll_changed(dev);
+
+	drm_client_dev_hotplug(dev);
 }
 
 static struct lt9611 *bridge_to_lt9611(struct drm_bridge *bridge)
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 7720bd1..704891c 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -3050,7 +3050,7 @@ void adreno_spin_idle_debug(struct adreno_device *adreno_dev,
 	struct kgsl_device *device = &adreno_dev->dev;
 	unsigned int rptr, wptr;
 	unsigned int status, status3, intstatus;
-	unsigned int hwfault;
+	unsigned int hwfault, cx_status;
 
 	dev_err(device->dev, str);
 
@@ -3062,21 +3062,31 @@ void adreno_spin_idle_debug(struct adreno_device *adreno_dev,
 	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &intstatus);
 	adreno_readreg(adreno_dev, ADRENO_REG_CP_HW_FAULT, &hwfault);
 
-	dev_err(device->dev,
-		"rb=%d pos=%X/%X rbbm_status=%8.8X/%8.8X int_0_status=%8.8X\n",
-		adreno_dev->cur_rb->id, rptr, wptr, status, status3, intstatus);
-
-	dev_err(device->dev, " hwfault=%8.8X\n", hwfault);
 
 	/*
 	 * If CP is stuck, gmu may not perform as expected. So force a gmu
 	 * snapshot which captures entire state as well as sets the gmu fault
 	 * because things need to be reset anyway.
 	 */
-	if (gmu_core_isenabled(device))
+	if (gmu_core_isenabled(device)) {
+		gmu_core_regread(device,
+				A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &cx_status);
+		dev_err(device->dev,
+				"rb=%d pos=%X/%X rbbm_status=%8.8X/%8.8X int_0_status=%8.8X cx_busy_status:%8.8X\n",
+				adreno_dev->cur_rb->id, rptr, wptr, status,
+				status3, intstatus, cx_status);
+
+		dev_err(device->dev, " hwfault=%8.8X\n", hwfault);
 		gmu_core_snapshot(device);
-	else
+	} else {
+		dev_err(device->dev,
+				"rb=%d pos=%X/%X rbbm_status=%8.8X/%8.8X int_0_status=%8.8X\n",
+				adreno_dev->cur_rb->id, rptr, wptr, status,
+				status3, intstatus);
+
+		dev_err(device->dev, " hwfault=%8.8X\n", hwfault);
 		kgsl_device_snapshot(device, NULL, false);
+	}
 }
 
 /**
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index b3cd958..6f01901 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -88,10 +88,15 @@ static u32 a6xx_ifpc_pwrup_reglist[] = {
 	A6XX_CP_AHB_CNTL,
 };
 
-/* a620 and a650 need to program A6XX_CP_PROTECT_REG_47 for the infinite span */
+/* Applicable to a620 and a650 */
 static u32 a650_pwrup_reglist[] = {
 	A6XX_RBBM_GBIF_CLIENT_QOS_CNTL,
-	A6XX_CP_PROTECT_REG + 47,
+	A6XX_CP_PROTECT_REG + 47,         /* Programmed for infinite span */
+	A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0,
+	A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
+	A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
+	A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
+	A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
 };
 
 /* Applicable to a640 and a680 */
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index ba1520f..b889764 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -1819,7 +1819,7 @@ void a6xx_snapshot(struct adreno_device *adreno_dev,
 		adreno_snapshot_registers(device, snapshot,
 			a6xx_rscc_snapshot_registers,
 			ARRAY_SIZE(a6xx_rscc_snapshot_registers) / 2);
-	} else if (adreno_is_a610(adreno_dev)) {
+	} else if (adreno_is_a610(adreno_dev) || adreno_is_a702(adreno_dev)) {
 		adreno_snapshot_registers(device, snapshot,
 			a6xx_gmu_wrapper_registers,
 			ARRAY_SIZE(a6xx_gmu_wrapper_registers) / 2);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index ca22326..56e8da9 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -313,6 +313,22 @@ void kgsl_pwrctrl_buslevel_update(struct kgsl_device *device,
 EXPORT_SYMBOL(kgsl_pwrctrl_buslevel_update);
 
 #if IS_ENABLED(CONFIG_QCOM_CX_IPEAK)
+static int kgsl_pwr_cx_ipeak_freq_limit(void *ptr, unsigned int freq)
+{
+	struct kgsl_pwr_limit *cx_ipeak_pwr_limit = ptr;
+
+	if (IS_ERR_OR_NULL(cx_ipeak_pwr_limit))
+		return -EINVAL;
+
+	/* CX-ipeak safe interrupt to remove freq limit */
+	if (freq == 0) {
+		kgsl_pwr_limits_set_default(cx_ipeak_pwr_limit);
+		return 0;
+	}
+
+	return kgsl_pwr_limits_set_freq(cx_ipeak_pwr_limit, freq);
+}
+
 static int kgsl_pwrctrl_cx_ipeak_vote(struct kgsl_device *device,
 		u64 old_freq, u64 new_freq)
 {
@@ -425,6 +441,29 @@ static int kgsl_pwrctrl_cx_ipeak_init(struct kgsl_device *device)
 		++i;
 	}
 
+	/* cx_ipeak limits for GPU freq throttling */
+	pwr->cx_ipeak_pwr_limit = kgsl_pwr_limits_add(KGSL_DEVICE_3D0);
+	if (IS_ERR_OR_NULL(pwr->cx_ipeak_pwr_limit)) {
+		dev_err(device->dev,
+				"Failed to get cx_ipeak power limit\n");
+		of_node_put(child);
+		goto error;
+	}
+
+	cx_ipeak_client = &pwr->gpu_ipeak_client[0];
+	if (!IS_ERR_OR_NULL(cx_ipeak_client->client)) {
+		ret = cx_ipeak_victim_register(cx_ipeak_client->client,
+				kgsl_pwr_cx_ipeak_freq_limit,
+				pwr->cx_ipeak_pwr_limit);
+		if (ret) {
+			dev_err(device->dev,
+					"Failed to register GPU-CX-Ipeak victim\n");
+			kgsl_pwr_limits_del(pwr->cx_ipeak_pwr_limit);
+			of_node_put(child);
+			goto error;
+		}
+	}
+
 	of_node_put(node);
 	return 0;
 
@@ -2379,6 +2418,9 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
 		}
 	}
 
+	INIT_LIST_HEAD(&pwr->limits);
+	spin_lock_init(&pwr->limits_lock);
+
 	result = kgsl_pwrctrl_cx_ipeak_init(device);
 	if (result)
 		goto error_cleanup_bus_ib;
@@ -2386,8 +2428,6 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
 	INIT_WORK(&pwr->thermal_cycle_ws, kgsl_thermal_cycle);
 	timer_setup(&pwr->thermal_timer, kgsl_thermal_timer, 0);
 
-	INIT_LIST_HEAD(&pwr->limits);
-	spin_lock_init(&pwr->limits_lock);
 	pwr->sysfs_pwr_limit = kgsl_pwr_limits_add(KGSL_DEVICE_3D0);
 
 	kgsl_pwrctrl_vbif_init(device);
@@ -2418,6 +2458,12 @@ void kgsl_pwrctrl_close(struct kgsl_device *device)
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
 	int i;
 
+	kgsl_pwr_limits_del(pwr->cx_ipeak_pwr_limit);
+	pwr->cx_ipeak_pwr_limit = NULL;
+
+	if (!IS_ERR_OR_NULL(pwr->gpu_ipeak_client[0].client))
+		cx_ipeak_victim_unregister(pwr->gpu_ipeak_client[0].client);
+
 	for (i = 0; i < ARRAY_SIZE(pwr->gpu_ipeak_client); i++) {
 		if (!IS_ERR_OR_NULL(pwr->gpu_ipeak_client[i].client)) {
 			cx_ipeak_unregister(pwr->gpu_ipeak_client[i].client);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 15addf7..7283064 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2010-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2020, The Linux Foundation. All rights reserved.
  */
 #ifndef __KGSL_PWRCTRL_H
 #define __KGSL_PWRCTRL_H
@@ -177,6 +177,7 @@ struct gpu_cx_ipeak_client {
  * @limits - list head for limits
  * @limits_lock - spin lock to protect limits list
  * @sysfs_pwr_limit - pointer to the sysfs limits node
+ * @cx_ipeak_pwr_limit - pointer to the cx_ipeak limits node
  * isense_clk_indx - index of isense clock, 0 if no isense
  * isense_clk_on_level - isense clock rate is XO rate below this level.
  * tzone_name - pointer to thermal zone name of GPU temperature sensor
@@ -236,6 +237,7 @@ struct kgsl_pwrctrl {
 	struct list_head limits;
 	spinlock_t limits_lock;
 	struct kgsl_pwr_limit *sysfs_pwr_limit;
+	struct kgsl_pwr_limit *cx_ipeak_pwr_limit;
 	unsigned int gpu_bimc_int_clk_freq;
 	bool gpu_bimc_interface_enabled;
 	const char *tzone_name;
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index 38b5c88..a90b9e3 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/file.h>
@@ -309,8 +309,7 @@ int kgsl_sync_timeline_create(struct kgsl_context *context)
 	}
 
 	kref_init(&ktimeline->kref);
-	snprintf(ktimeline->name, sizeof(ktimeline->name),
-		"%s_%d-%.15s(%d)-%.15s(%d)",
+	ktimeline->name = kasprintf(GFP_KERNEL, "%s_%d-%.15s(%d)-%.15s(%d)",
 		context->device->name, context->id,
 		current->group_leader->comm, current->group_leader->pid,
 		current->comm, current->pid);
@@ -354,7 +353,10 @@ static void kgsl_sync_timeline_signal(struct kgsl_sync_timeline *ktimeline,
 
 void kgsl_sync_timeline_destroy(struct kgsl_context *context)
 {
-	kfree(context->ktimeline);
+	struct kgsl_sync_timeline *ktimeline = context->ktimeline;
+
+	kfree(ktimeline->name);
+	kfree(ktimeline);
 }
 
 static void kgsl_sync_timeline_release(struct kref *kref)
diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h
index 989658d..43209b1 100644
--- a/drivers/gpu/msm/kgsl_sync.h
+++ b/drivers/gpu/msm/kgsl_sync.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2012-2014,2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014,2018-2020 The Linux Foundation. All rights reserved.
  */
 #ifndef __KGSL_SYNC_H
 #define __KGSL_SYNC_H
@@ -21,7 +21,7 @@
  */
 struct kgsl_sync_timeline {
 	struct kref kref;
-	char name[32];
+	char *name;
 
 	u64 fence_context;
 
diff --git a/drivers/hwtracing/coresight/coresight-byte-cntr.c b/drivers/hwtracing/coresight/coresight-byte-cntr.c
index 6353106..36cac86 100644
--- a/drivers/hwtracing/coresight/coresight-byte-cntr.c
+++ b/drivers/hwtracing/coresight/coresight-byte-cntr.c
@@ -360,7 +360,7 @@ static int usb_transfer_small_packet(struct qdss_request *usb_req,
 				devm_kfree(tmcdrvdata->dev, usb_req);
 				usb_req = NULL;
 				drvdata->usb_req = NULL;
-				dev_err(tmcdrvdata->dev,
+				dev_err_ratelimited(tmcdrvdata->dev,
 					"Write data failed:%d\n", ret);
 				goto out;
 			}
@@ -442,7 +442,8 @@ static void usb_read_work_fn(struct work_struct *work)
 							usb_req->sg);
 					devm_kfree(tmcdrvdata->dev, usb_req);
 					usb_req = NULL;
-					dev_err(tmcdrvdata->dev, "No data in ETR\n");
+					dev_err_ratelimited(tmcdrvdata->dev,
+						 "No data in ETR\n");
 					return;
 				}
 
@@ -473,7 +474,7 @@ static void usb_read_work_fn(struct work_struct *work)
 					devm_kfree(tmcdrvdata->dev, usb_req);
 					usb_req = NULL;
 					drvdata->usb_req = NULL;
-					dev_err(tmcdrvdata->dev,
+					dev_err_ratelimited(tmcdrvdata->dev,
 						"Write data failed:%d\n", ret);
 					if (ret == -EAGAIN)
 						continue;
@@ -517,6 +518,13 @@ void usb_bypass_notifier(void *priv, unsigned int event,
 	if (!drvdata)
 		return;
 
+	if (tmcdrvdata->out_mode != TMC_ETR_OUT_MODE_USB
+				|| tmcdrvdata->mode == CS_MODE_DISABLED) {
+		dev_err(&tmcdrvdata->csdev->dev,
+		"%s: ETR is not USB mode, or ETR is disabled.\n", __func__);
+		return;
+	}
+
 	switch (event) {
 	case USB_QDSS_CONNECT:
 		usb_qdss_alloc_req(ch, USB_BUF_NUM);
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 7284e28..1c07657 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -2168,15 +2168,19 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
 		goto out;
 	}
 
+	drvdata->reading = true;
+
 	/* Disable the TMC if we are trying to read from a running session */
 	if (drvdata->mode == CS_MODE_SYSFS) {
 		spin_unlock_irqrestore(&drvdata->spinlock, flags);
+		mutex_unlock(&drvdata->mem_lock);
 		coresight_disable_all_source_link();
+		mutex_lock(&drvdata->mem_lock);
 		spin_lock_irqsave(&drvdata->spinlock, flags);
 
 		__tmc_etr_disable_hw(drvdata);
 	}
-	drvdata->reading = true;
+
 out:
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 	mutex_unlock(&drvdata->mem_lock);
@@ -2204,10 +2208,6 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
 		 * be NULL.
 		 */
 		__tmc_etr_enable_hw(drvdata);
-
-		spin_unlock_irqrestore(&drvdata->spinlock, flags);
-		coresight_enable_all_source_link();
-		spin_lock_irqsave(&drvdata->spinlock, flags);
 	} else {
 		/*
 		 * The ETR is not tracing and the buffer was just read.
@@ -2224,5 +2224,9 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
 		tmc_etr_free_sysfs_buf(sysfs_buf);
 
 	mutex_unlock(&drvdata->mem_lock);
+
+	if (drvdata->mode == CS_MODE_SYSFS)
+		coresight_enable_all_source_link();
+
 	return 0;
 }
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 5e98878..5b083e5 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -1199,7 +1199,6 @@ static ssize_t enable_sink_store(struct device *dev,
 	int ret;
 	unsigned long val;
 	struct coresight_device *csdev = to_coresight_device(dev);
-	struct coresight_device *sink = NULL;
 
 	ret = kstrtoul(buf, 10, &val);
 	if (ret)
@@ -1207,10 +1206,7 @@ static ssize_t enable_sink_store(struct device *dev,
 	mutex_lock(&coresight_mutex);
 
 	if (val) {
-		sink = activated_sink ? activated_sink :
-			coresight_get_enabled_sink(false);
-		if (sink && strcmp(dev_name(&sink->dev),
-				dev_name(&csdev->dev)))
+		if (activated_sink)
 			goto err;
 		csdev->activated = true;
 	} else {
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index f90553b..b71b457 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -360,6 +360,7 @@ static void gi2c_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb_str,
 	case MSM_GPI_QUP_MAX_EVENT:
 		/* fall through to stall impacted channel */
 	case MSM_GPI_QUP_CH_ERROR:
+	case MSM_GPI_QUP_FW_ERROR:
 	case MSM_GPI_QUP_PENDING_EVENT:
 	case MSM_GPI_QUP_EOT_DESC_MISMATCH:
 		break;
@@ -377,9 +378,9 @@ static void gi2c_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb_str,
 	}
 	if (cb_str->cb_event != MSM_GPI_QUP_NOTIFY)
 		GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
-			"GSI QN err:0x%x, status:0x%x, err:%d slv_addr: 0x%x R/W: %d\n",
+			"GSI QN err:0x%x, status:0x%x, err:%d\n",
 			cb_str->error_log.error_code, m_stat,
-			cb_str->cb_event, gi2c->cur->addr, gi2c->cur->flags);
+			cb_str->cb_event);
 }
 
 static void gi2c_gsi_cb_err(struct msm_gpi_dma_async_tx_cb_param *cb,
@@ -512,6 +513,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 		struct msm_gpi_tre *go_t = &gi2c->go_t;
 		struct device *rx_dev = gi2c->wrapper_dev;
 		struct device *tx_dev = gi2c->wrapper_dev;
+		reinit_completion(&gi2c->xfer);
 
 		gi2c->cur = &msgs[i];
 
@@ -731,7 +733,6 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 	int i, ret = 0, timeout = 0;
 
 	gi2c->err = 0;
-	reinit_completion(&gi2c->xfer);
 
 	/* Client to respect system suspend */
 	if (!pm_runtime_enabled(gi2c->dev)) {
@@ -873,6 +874,8 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 				geni_abort_m_cmd(gi2c->base);
 			}
 		}
+		gi2c->cur_wr = 0;
+		gi2c->cur_rd = 0;
 
 		if (mode == SE_DMA) {
 			if (gi2c->err) {
@@ -905,8 +908,6 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 
 	pm_runtime_mark_last_busy(gi2c->dev);
 	pm_runtime_put_autosuspend(gi2c->dev);
-	gi2c->cur_wr = 0;
-	gi2c->cur_rd = 0;
 	gi2c->cur = NULL;
 	gi2c->err = 0;
 	GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
diff --git a/drivers/input/touchscreen/nt36xxx/nt36xxx.c b/drivers/input/touchscreen/nt36xxx/nt36xxx.c
index de496cc..0ade7d1 100644
--- a/drivers/input/touchscreen/nt36xxx/nt36xxx.c
+++ b/drivers/input/touchscreen/nt36xxx/nt36xxx.c
@@ -1754,6 +1754,12 @@ static int32_t nvt_ts_suspend(struct device *dev)
 	buf[0] = EVENT_MAP_HOST_CMD;
 	buf[1] = 0x11;
 	CTP_I2C_WRITE(ts->client, I2C_FW_Address, buf, 2);
+
+	nvt_set_page(I2C_FW_Address, 0x11a50);
+	buf[0] = 0x11a50 & 0xff;
+	buf[1] = 0x11;
+	CTP_I2C_WRITE(ts->client, I2C_FW_Address, buf, 2);
+
 #endif // WAKEUP_GESTURE
 
 	mutex_unlock(&ts->lock);
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index b1e48ba..c3f1d26 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/dma-contiguous.h>
@@ -117,197 +117,69 @@ static struct dma_fast_smmu_mapping *dev_get_mapping(struct device *dev)
 	return domain->iova_cookie;
 }
 
-/*
- * Checks if the allocated range (ending at @end) covered the upcoming
- * stale bit.  We don't need to know exactly where the range starts since
- * we already know where the candidate search range started.  If, starting
- * from the beginning of the candidate search range, we had to step over
- * (or landed directly on top of) the upcoming stale bit, then we return
- * true.
- *
- * Due to wrapping, there are two scenarios we'll need to check: (1) if the
- * range [search_start, upcoming_stale] spans 0 (i.e. search_start >
- * upcoming_stale), and, (2) if the range: [search_start, upcoming_stale]
- * does *not* span 0 (i.e. search_start <= upcoming_stale).  And for each
- * of those two scenarios we need to handle three cases: (1) the bit was
- * found before wrapping or
- */
-static bool __bit_covered_stale(unsigned long upcoming_stale,
-				unsigned long search_start,
-				unsigned long end)
-{
-	if (search_start > upcoming_stale) {
-		if (end >= search_start) {
-			/*
-			 * We started searching above upcoming_stale and we
-			 * didn't wrap, so we couldn't have crossed
-			 * upcoming_stale.
-			 */
-			return false;
-		}
-		/*
-		 * We wrapped. Did we cross (or land on top of)
-		 * upcoming_stale?
-		 */
-		return end >= upcoming_stale;
-	}
-
-	if (search_start <= upcoming_stale) {
-		if (end >= search_start) {
-			/*
-			 * We didn't wrap.  Did we cross (or land on top
-			 * of) upcoming_stale?
-			 */
-			return end >= upcoming_stale;
-		}
-		/*
-		 * We wrapped. So we must have crossed upcoming_stale
-		 * (since we started searching below it).
-		 */
-		return true;
-	}
-
-	/* we should have covered all logical combinations... */
-	WARN_ON(1);
-	return true;
-}
-
 static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
 					 unsigned long attrs,
 					 size_t size)
 {
-	unsigned long bit, prev_search_start, nbits = size >> FAST_PAGE_SHIFT;
+	unsigned long bit, nbits = size >> FAST_PAGE_SHIFT;
 	unsigned long align = (1 << get_order(size)) - 1;
 
-	bit = bitmap_find_next_zero_area(
-		mapping->bitmap, mapping->num_4k_pages, mapping->next_start,
-		nbits, align);
+	bit = bitmap_find_next_zero_area(mapping->clean_bitmap,
+					  mapping->num_4k_pages,
+					  mapping->next_start, nbits, align);
 	if (unlikely(bit > mapping->num_4k_pages)) {
 		/* try wrapping */
 		bit = bitmap_find_next_zero_area(
-			mapping->bitmap, mapping->num_4k_pages, 0, nbits,
+			mapping->clean_bitmap, mapping->num_4k_pages, 0, nbits,
 			align);
-		if (unlikely(bit > mapping->num_4k_pages))
-			return DMA_ERROR_CODE;
+		if (unlikely(bit > mapping->num_4k_pages)) {
+			/*
+			 * If we just re-allocated a VA whose TLB hasn't been
+			 * invalidated since it was last used and unmapped, we
+			 * need to invalidate it here.  We actually invalidate
+			 * the entire TLB so that we don't have to invalidate
+			 * the TLB again until we wrap back around.
+			 */
+			if (mapping->have_stale_tlbs) {
+				bool skip_sync = (attrs &
+						  DMA_ATTR_SKIP_CPU_SYNC);
+				struct iommu_domain_geometry *geometry =
+					&(mapping->domain->geometry);
+
+				iommu_tlbiall(mapping->domain);
+				bitmap_copy(mapping->clean_bitmap,
+					    mapping->bitmap,
+					    mapping->num_4k_pages);
+				mapping->have_stale_tlbs = false;
+				av8l_fast_clear_stale_ptes(mapping->pgtbl_ops,
+						geometry->aperture_start,
+						mapping->base,
+						mapping->base +
+						mapping->size - 1,
+						skip_sync);
+				bit = bitmap_find_next_zero_area(
+							mapping->clean_bitmap,
+							mapping->num_4k_pages,
+							0, nbits, align);
+				if (unlikely(bit > mapping->num_4k_pages))
+					return DMA_ERROR_CODE;
+
+			} else {
+				return DMA_ERROR_CODE;
+			}
+		}
 	}
 
 	bitmap_set(mapping->bitmap, bit, nbits);
-	prev_search_start = mapping->next_start;
+	bitmap_set(mapping->clean_bitmap, bit, nbits);
 	mapping->next_start = bit + nbits;
 	if (unlikely(mapping->next_start >= mapping->num_4k_pages))
 		mapping->next_start = 0;
 
-	/*
-	 * If we just re-allocated a VA whose TLB hasn't been invalidated
-	 * since it was last used and unmapped, we need to invalidate it
-	 * here.  We actually invalidate the entire TLB so that we don't
-	 * have to invalidate the TLB again until we wrap back around.
-	 */
-	if (mapping->have_stale_tlbs &&
-	    __bit_covered_stale(mapping->upcoming_stale_bit,
-				prev_search_start,
-				bit + nbits - 1)) {
-		bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
-
-		iommu_tlbiall(mapping->domain);
-		mapping->have_stale_tlbs = false;
-		av8l_fast_clear_stale_ptes(mapping->pgtbl_ops,
-				mapping->domain->geometry.aperture_start,
-				mapping->base,
-				mapping->base + mapping->size - 1,
-				skip_sync);
-	}
-
 	return (bit << FAST_PAGE_SHIFT) + mapping->base;
 }
 
-/*
- * Checks whether the candidate bit will be allocated sooner than the
- * current upcoming stale bit.  We can say candidate will be upcoming
- * sooner than the current upcoming stale bit if it lies between the
- * starting bit of the next search range and the upcoming stale bit
- * (allowing for wrap-around).
- *
- * Stated differently, we're checking the relative ordering of three
- * unsigned numbers.  So we need to check all 6 (i.e. 3!) permutations,
- * namely:
- *
- *     0 |---A---B---C---| TOP (Case 1)
- *     0 |---A---C---B---| TOP (Case 2)
- *     0 |---B---A---C---| TOP (Case 3)
- *     0 |---B---C---A---| TOP (Case 4)
- *     0 |---C---A---B---| TOP (Case 5)
- *     0 |---C---B---A---| TOP (Case 6)
- *
- * Note that since we're allowing numbers to wrap, the following three
- * scenarios are all equivalent for Case 1:
- *
- *     0 |---A---B---C---| TOP
- *     0 |---C---A---B---| TOP (C has wrapped. This is Case 5.)
- *     0 |---B---C---A---| TOP (C and B have wrapped. This is Case 4.)
- *
- * In any of these cases, if we start searching from A, we will find B
- * before we find C.
- *
- * We can also find two equivalent cases for Case 2:
- *
- *     0 |---A---C---B---| TOP
- *     0 |---B---A---C---| TOP (B has wrapped. This is Case 3.)
- *     0 |---C---B---A---| TOP (B and C have wrapped. This is Case 6.)
- *
- * In any of these cases, if we start searching from A, we will find C
- * before we find B.
- */
-static bool __bit_is_sooner(unsigned long candidate,
-			    struct dma_fast_smmu_mapping *mapping)
-{
-	unsigned long A = mapping->next_start;
-	unsigned long B = candidate;
-	unsigned long C = mapping->upcoming_stale_bit;
-
-	if ((A < B && B < C) ||	/* Case 1 */
-	    (C < A && A < B) ||	/* Case 5 */
-	    (B < C && C < A))	/* Case 4 */
-		return true;
-
-	if ((A < C && C < B) ||	/* Case 2 */
-	    (B < A && A < C) ||	/* Case 3 */
-	    (C < B && B < A))	/* Case 6 */
-		return false;
-
-	/*
-	 * For simplicity, we've been ignoring the possibility of any of
-	 * our three numbers being equal.  Handle those cases here (they
-	 * shouldn't happen very often, (I think?)).
-	 */
-
-	/*
-	 * If candidate is the next bit to be searched then it's definitely
-	 * sooner.
-	 */
-	if (A == B)
-		return true;
-
-	/*
-	 * If candidate is the next upcoming stale bit we'll return false
-	 * to avoid doing `upcoming = candidate' in the caller (which would
-	 * be useless since they're already equal)
-	 */
-	if (B == C)
-		return false;
-
-	/*
-	 * If next start is the upcoming stale bit then candidate can't
-	 * possibly be sooner.  The "soonest" bit is already selected.
-	 */
-	if (A == C)
-		return false;
-
-	/* We should have covered all logical combinations. */
-	WARN(1, "Well, that's awkward. A=%ld, B=%ld, C=%ld\n", A, B, C);
-	return true;
-}
-
 #ifdef CONFIG_ARM64
 static int __init atomic_pool_init(void)
 {
@@ -381,12 +253,8 @@ static void __fast_smmu_free_iova(struct dma_fast_smmu_mapping *mapping,
 	/*
 	 * We don't invalidate TLBs on unmap.  We invalidate TLBs on map
 	 * when we're about to re-allocate a VA that was previously
-	 * unmapped but hasn't yet been invalidated.  So we need to keep
-	 * track of which bit is the closest to being re-allocated here.
+	 * unmapped but hasn't yet been invalidated.
 	 */
-	if (__bit_is_sooner(start_bit, mapping))
-		mapping->upcoming_stale_bit = start_bit;
-
 	bitmap_clear(mapping->bitmap, start_bit, nbits);
 	mapping->have_stale_tlbs = true;
 }
@@ -1107,6 +975,14 @@ static struct dma_fast_smmu_mapping *__fast_smmu_create_mapping_sized(
 	if (!fast->bitmap)
 		goto err2;
 
+	fast->clean_bitmap = kzalloc(fast->bitmap_size, GFP_KERNEL |
+				     __GFP_NOWARN | __GFP_NORETRY);
+	if (!fast->clean_bitmap)
+		fast->clean_bitmap = vzalloc(fast->bitmap_size);
+
+	if (!fast->clean_bitmap)
+		goto err3;
+
 	spin_lock_init(&fast->lock);
 
 	fast->iovad = kzalloc(sizeof(*fast->iovad), GFP_KERNEL);
@@ -1118,6 +994,8 @@ static struct dma_fast_smmu_mapping *__fast_smmu_create_mapping_sized(
 	return fast;
 
 err_free_bitmap:
+	kvfree(fast->clean_bitmap);
+err3:
 	kvfree(fast->bitmap);
 err2:
 	kfree(fast);
@@ -1184,6 +1062,9 @@ void fast_smmu_put_dma_cookie(struct iommu_domain *domain)
 	if (fast->bitmap)
 		kvfree(fast->bitmap);
 
+	if (fast->clean_bitmap)
+		kvfree(fast->clean_bitmap);
+
 	kfree(fast);
 	domain->iova_cookie = NULL;
 }
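The clean_bitmap scheme above reduces TLB maintenance to one full invalidate
per wrap of the IOVA space. A minimal standalone sketch (an illustration,
not the driver code; tlb_invalidate_all() is a hypothetical stand-in for
iommu_tlbiall()): bitmap tracks live IOVAs, while clean_bitmap also keeps
bits set for freed-but-not-yet-invalidated VAs, so allocation only searches
known-clean space until it runs out:

	static long alloc_bits(unsigned long *bitmap,
			       unsigned long *clean_bitmap,
			       unsigned long total, unsigned long nbits,
			       bool *have_stale)
	{
		/* Search only VAs whose TLB entries are known clean. */
		unsigned long bit = bitmap_find_next_zero_area(clean_bitmap,
							total, 0, nbits, 0);

		if (bit > total) {
			if (!*have_stale)
				return -ENOSPC;
			/* One invalidate makes every freed VA clean again. */
			tlb_invalidate_all();
			bitmap_copy(clean_bitmap, bitmap, total);
			*have_stale = false;
			bit = bitmap_find_next_zero_area(clean_bitmap, total,
							 0, nbits, 0);
			if (bit > total)
				return -ENOSPC;
		}
		bitmap_set(bitmap, bit, nbits);
		bitmap_set(clean_bitmap, bit, nbits);
		return bit;
	}

Freeing clears only bitmap and sets *have_stale, mirroring
__fast_smmu_free_iova() above.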
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 19259ae..329205b 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -212,8 +212,9 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	struct rb_node *curr, *prev;
 	struct iova *curr_iova;
 	unsigned long flags;
-	unsigned long new_pfn;
+	unsigned long new_pfn, low_pfn_new;
 	unsigned long align_mask = ~0UL;
+	unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;
 
 	if (size_aligned)
 		align_mask <<= limit_align(iovad, fls_long(size - 1));
@@ -222,15 +223,25 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	curr = __get_cached_rbnode(iovad, limit_pfn);
 	curr_iova = rb_entry(curr, struct iova, node);
+	low_pfn_new = curr_iova->pfn_hi + 1;
+
+retry:
 	do {
-		limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
-		new_pfn = (limit_pfn - size) & align_mask;
+		high_pfn = min(high_pfn, curr_iova->pfn_lo);
+		new_pfn = (high_pfn - size) & align_mask;
 		prev = curr;
 		curr = rb_prev(curr);
 		curr_iova = rb_entry(curr, struct iova, node);
-	} while (curr && new_pfn <= curr_iova->pfn_hi);
+	} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);
 
-	if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+	if (high_pfn < size || new_pfn < low_pfn) {
+		if (low_pfn == iovad->start_pfn && low_pfn_new < limit_pfn) {
+			high_pfn = limit_pfn;
+			low_pfn = low_pfn_new;
+			curr = &iovad->anchor.node;
+			curr_iova = rb_entry(curr, struct iova, node);
+			goto retry;
+		}
 		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 		return -ENOMEM;
 	}
@@ -521,6 +532,7 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
 		flush_rcache = false;
 		for_each_online_cpu(cpu)
 			free_cpu_cached_iovas(cpu, iovad);
+		free_global_cached_iovas(iovad);
 		goto retry;
 	}
 
@@ -1134,5 +1146,27 @@ void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
 	}
 }
 
+/*
+ * Free all cached IOVA ranges from the global (depot) cache.
+ */
+void free_global_cached_iovas(struct iova_domain *iovad)
+{
+	struct iova_rcache *rcache;
+	unsigned long flags;
+	int i, j;
+
+	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+		rcache = &iovad->rcaches[i];
+		spin_lock_irqsave(&rcache->lock, flags);
+		for (j = 0; j < rcache->depot_size; ++j) {
+			iova_magazine_free_pfns(rcache->depot[j], iovad);
+			iova_magazine_free(rcache->depot[j]);
+			rcache->depot[j] = NULL;
+		}
+		rcache->depot_size = 0;
+		spin_unlock_irqrestore(&rcache->lock, flags);
+	}
+}
+
 MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
 MODULE_LICENSE("GPL");
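The two-window retry in __alloc_and_insert_iova_range() above compensates
for the cached-node optimization: the first pass searches downward from the
cached node and never sees free space above it. A condensed sketch of the
control flow (search_down() is a hypothetical helper; the real code walks
the rbtree in place):

	/* Pass 1: top-down from the cached node, bounded by start_pfn. */
	pfn = search_down(iovad, cached_node, limit_pfn,
			  iovad->start_pfn, size);
	if (pfn < 0 && cached_top + 1 < limit_pfn)
		/* Pass 2: restart from the anchor, covering only the
		 * window above the cached node that pass 1 skipped.
		 */
		pfn = search_down(iovad, anchor, limit_pfn,
				  cached_top + 1, size);

Together with free_global_cached_iovas(), a failed allocation in
alloc_iova_fast() now reclaims both the per-CPU caches and the global depot
before its final retry.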
diff --git a/drivers/media/radio/rtc6226/radio-rtc6226-common.c b/drivers/media/radio/rtc6226/radio-rtc6226-common.c
index 2e50c4f..967e16f6 100644
--- a/drivers/media/radio/rtc6226/radio-rtc6226-common.c
+++ b/drivers/media/radio/rtc6226/radio-rtc6226-common.c
@@ -1485,60 +1485,53 @@ int rtc6226_power_up(struct rtc6226_device *radio)
  **************************************************************************/
 
 /*
- * rtc6226_fops_read - read RDS data
+ * rtc6226_fops_read - read event data
  */
-static ssize_t rtc6226_fops_read(struct file *file, char __user *buf,
+static ssize_t rtc6226_fops_read(struct file *file, char __user *buffer,
 		size_t count, loff_t *ppos)
 {
-	struct rtc6226_device *radio = video_drvdata(file);
-	int retval = 0;
-	unsigned int block_count = 0;
+	struct rtc6226_device *radio = video_get_drvdata(video_devdata(file));
+	enum rtc6226_buf_t buf_type = -1;
+	u8 buf_fifo[STD_BUF_SIZE] = {0};
+	struct kfifo *data_fifo = NULL;
+	int len = 0, retval = -1;
+	u32 bytesused = 0;
 
-	/* switch on rds reception */
-	mutex_lock(&radio->lock);
-	/* if RDS is not on, then turn on RDS */
-	if ((radio->registers[SYSCFG] & SYSCFG_CSR0_RDS_EN) == 0)
-		rtc6226_rds_on(radio);
-
-	/* block if no new data available */
-	while (radio->wr_index == radio->rd_index) {
-		if (file->f_flags & O_NONBLOCK) {
-			retval = -EWOULDBLOCK;
-			goto done;
-		}
-		if (wait_event_interruptible(radio->read_queue,
-				radio->wr_index != radio->rd_index) < 0) {
-			retval = -EINTR;
-			goto done;
-		}
+	if ((radio == NULL) || (buffer == NULL)) {
+		FMDERR("%s radio/buffer is NULL\n", __func__);
+		return -ENXIO;
 	}
 
-	/* calculate block count from byte count */
-	count /= 3;
-	FMDBG("%s : count = %zu\n", __func__, count);
+	buf_type = count;
+	len = STD_BUF_SIZE;
+	FMDBG("%s: requesting buffer %d\n", __func__, buf_type);
 
-	/* copy RDS block out of internal buffer and to user buffer */
-	while (block_count < count) {
-		if (radio->rd_index == radio->wr_index)
-			break;
-		/* always transfer rds complete blocks */
-		if (copy_to_user(buf, &radio->buffer[radio->rd_index], 3))
-			/* retval = -EFAULT; */
-			break;
-		/* increment and wrap read pointer */
-		radio->rd_index += 3;
-		if (radio->rd_index >= radio->buf_size)
-			radio->rd_index = 0;
-		/* increment counters */
-		block_count++;
-		buf += 3;
-		retval += 3;
-		FMDBG("%s : block_count = %d, count = %zu\n", __func__,
-			block_count, count);
+	if ((buf_type < RTC6226_FM_BUF_MAX) && (buf_type >= 0)) {
+		data_fifo = &radio->data_buf[buf_type];
+		if (buf_type == RTC6226_FM_BUF_EVENTS) {
+			if (wait_event_interruptible(radio->event_queue,
+						kfifo_len(data_fifo)) < 0) {
+				return -EINTR;
+			}
+		}
+	} else {
+		FMDERR("%s invalid buffer type\n", __func__);
+		return -EINVAL;
 	}
-
-done:
-	mutex_unlock(&radio->lock);
+	if (len <= STD_BUF_SIZE) {
+		bytesused = kfifo_out_locked(data_fifo, &buf_fifo[0],
+				len, &radio->buf_lock[buf_type]);
+	} else {
+		FMDERR("%s kfifo_out_locked can not use len more than 128\n",
+			__func__);
+		return -EINVAL;
+	}
+	retval = copy_to_user(buffer, &buf_fifo[0], bytesused);
+	if (retval > 0) {
+		FMDERR("%s Failed to copy %d bytes data\n", __func__, retval);
+		return -EAGAIN;
+	}
+	retval = bytesused;
 	return retval;
 }
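A userspace sketch of the new read() contract (assumptions: the buffer-type
constant comes from the driver's uapi header and its value here is
illustrative; STD_BUF_SIZE is 128, as the error path above implies):

	#include <unistd.h>

	#define RTC6226_FM_BUF_EVENTS	1	/* assumed uapi value */
	#define STD_BUF_SIZE		128

	/* read() count selects which kfifo to drain, not the byte count */
	static int read_fm_events(int fd, unsigned char buf[STD_BUF_SIZE])
	{
		return read(fd, buf, RTC6226_FM_BUF_EVENTS); /* bytesused */
	}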
 
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 72d286c..274a711 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -162,7 +162,8 @@ void mmc_gpiod_request_cd_irq(struct mmc_host *host)
 			ctx->cd_gpio_isr = mmc_gpio_cd_irqt;
 		ret = devm_request_threaded_irq(host->parent, irq,
 			NULL, ctx->cd_gpio_isr,
-			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+			IRQF_ONESHOT | IRQF_SHARED,
 			ctx->cd_label, host);
 		if (ret < 0)
 			irq = ret;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9459b77..ff7f9b2 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -892,12 +892,6 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
 	if (!data && !cmd->busy_timeout)
 		return 0xE;
 
-	/* During initialization, don't use max timeout as the clock is slow */
-	if ((host->quirks2 & SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT) &&
-		(host->clock > 400000)) {
-		return 0xF;
-	}
-
 	/* timeout in us */
 	target_timeout = sdhci_target_timeout(host, cmd, data);
 
@@ -927,9 +921,9 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
 			break;
 	}
 
-	if (count >= 0xF) {
-		if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) ||
-		    !(host->quirks2 & SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT))
+	if (count >= 0xF &&
+		!(host->quirks2 & SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT)) {
+		if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
 			DBG("Too large timeout 0x%x requested for CMD%d!\n",
 			    count, cmd->opcode);
 		count = 0xE;
diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c
index 9002866..489a2d0 100644
--- a/drivers/net/wireless/cnss2/debug.c
+++ b/drivers/net/wireless/cnss2/debug.c
@@ -904,17 +904,21 @@ int cnss_debugfs_create(struct cnss_plat_data *plat_priv)
 out:
 	return ret;
 }
-#else
-int cnss_debugfs_create(struct cnss_plat_data *plat_priv)
-{
-	return 0;
-}
-#endif
 
 void cnss_debugfs_destroy(struct cnss_plat_data *plat_priv)
 {
 	debugfs_remove_recursive(plat_priv->root_dentry);
 }
+#else
+int cnss_debugfs_create(struct cnss_plat_data *plat_priv)
+{
+	return 0;
+}
+
+void cnss_debugfs_destroy(struct cnss_plat_data *plat_priv)
+{
+}
+#endif
 
 int cnss_debug_init(void)
 {
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index 6757ea9..848f2ab 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -178,6 +178,7 @@ int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
 	case CNSS_BUS_WIDTH_MEDIUM:
 	case CNSS_BUS_WIDTH_HIGH:
 	case CNSS_BUS_WIDTH_VERY_HIGH:
+	case CNSS_BUS_WIDTH_LOW_LATENCY:
 		ret = msm_bus_scale_client_update_request
 			(bus_bw_info->bus_client, bandwidth);
 		if (!ret)
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index e7dc9ff..46cc594c 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -150,6 +150,18 @@ static int cnss_wlfw_ind_register_send_sync(struct cnss_plat_data *plat_priv)
 	return ret;
 }
 
+#ifdef CONFIG_CNSS2_DEBUG
+static inline u32 cnss_get_host_build_type(void)
+{
+	return QMI_HOST_BUILD_TYPE_PRIMARY_V01;
+}
+#else
+static inline u32 cnss_get_host_build_type(void)
+{
+	return QMI_HOST_BUILD_TYPE_SECONDARY_V01;
+}
+#endif
+
 static int cnss_wlfw_host_cap_send_sync(struct cnss_plat_data *plat_priv)
 {
 	struct wlfw_host_cap_req_msg_v01 *req;
@@ -209,6 +221,9 @@ static int cnss_wlfw_host_cap_send_sync(struct cnss_plat_data *plat_priv)
 			    req->ddr_range[0].start, req->ddr_range[0].size);
 	}
 
+	req->host_build_type_valid = 1;
+	req->host_build_type = cnss_get_host_build_type();
+
 	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
 			   wlfw_host_cap_resp_msg_v01_ei, resp);
 	if (ret < 0) {
diff --git a/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.c b/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.c
index 1899d4a..09c7e8d 100644
--- a/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.c
+++ b/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.c
@@ -464,8 +464,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = {
 		.elem_size      = sizeof(u8),
 		.array_type       = NO_ARRAY,
 		.tlv_type       = 0x10,
-		.offset         = offsetof(struct
-					   wlfw_ind_register_req_msg_v01,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
 					   fw_ready_enable_valid),
 	},
 	{
@@ -766,6 +765,24 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = {
 					   m3_dump_upload_req_enable),
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x21,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   wfc_call_twt_config_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x21,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   wfc_call_twt_config_enable),
+	},
+	{
 		.data_type      = QMI_EOTI,
 		.array_type       = NO_ARRAY,
 		.tlv_type       = QMI_COMMON_TLV_TYPE,
@@ -2713,6 +2730,24 @@ struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
 		.ei_array      = wlfw_host_ddr_range_s_v01_ei,
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x20,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   host_build_type_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_host_build_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x20,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   host_build_type),
+	},
+	{
 		.data_type      = QMI_EOTI,
 		.array_type       = NO_ARRAY,
 		.tlv_type       = QMI_COMMON_TLV_TYPE,
@@ -3530,6 +3565,26 @@ struct qmi_elem_info wlfw_qdss_trace_mode_req_msg_v01_ei[] = {
 					   option),
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct
+					   wlfw_qdss_trace_mode_req_msg_v01,
+					   hw_trc_disable_override_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_qmi_param_value_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct
+					   wlfw_qdss_trace_mode_req_msg_v01,
+					   hw_trc_disable_override),
+	},
+	{
 		.data_type      = QMI_EOTI,
 		.array_type       = NO_ARRAY,
 		.tlv_type       = QMI_COMMON_TLV_TYPE,
@@ -3754,6 +3809,126 @@ struct qmi_elem_info wlfw_wfc_call_status_req_msg_v01_ei[] = {
 					   wfc_call_status),
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   wlfw_wfc_call_status_req_msg_v01,
+					   wfc_call_active_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   wlfw_wfc_call_status_req_msg_v01,
+					   wfc_call_active),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+					   wlfw_wfc_call_status_req_msg_v01,
+					   all_wfc_calls_held_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+					   wlfw_wfc_call_status_req_msg_v01,
+					   all_wfc_calls_held),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct
+					   wlfw_wfc_call_status_req_msg_v01,
+					   is_wfc_emergency_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct
+					   wlfw_wfc_call_status_req_msg_v01,
+					   is_wfc_emergency),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct
+					   wlfw_wfc_call_status_req_msg_v01,
+					   twt_ims_start_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u64),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct
+					   wlfw_wfc_call_status_req_msg_v01,
+					   twt_ims_start),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct
+					   wlfw_wfc_call_status_req_msg_v01,
+					   twt_ims_int_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct
+					   wlfw_wfc_call_status_req_msg_v01,
+					   twt_ims_int),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct
+					   wlfw_wfc_call_status_req_msg_v01,
+					   media_quality_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_wfc_media_quality_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct
+					   wlfw_wfc_call_status_req_msg_v01,
+					   media_quality),
+	},
+	{
 		.data_type      = QMI_EOTI,
 		.array_type       = NO_ARRAY,
 		.tlv_type       = QMI_COMMON_TLV_TYPE,
@@ -4138,3 +4313,131 @@ struct qmi_elem_info wlfw_exit_power_save_resp_msg_v01_ei[] = {
 		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
+
+struct qmi_elem_info wlfw_wfc_call_twt_config_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         =
+		offsetof(struct wlfw_wfc_call_twt_config_ind_msg_v01,
+			 twt_sta_start_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u64),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         =
+		offsetof(struct wlfw_wfc_call_twt_config_ind_msg_v01,
+			 twt_sta_start),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         =
+		offsetof(struct wlfw_wfc_call_twt_config_ind_msg_v01,
+			 twt_sta_int_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         =
+		offsetof(struct wlfw_wfc_call_twt_config_ind_msg_v01,
+			 twt_sta_int),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         =
+		offsetof(struct wlfw_wfc_call_twt_config_ind_msg_v01,
+			 twt_sta_upo_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         =
+		offsetof(struct wlfw_wfc_call_twt_config_ind_msg_v01,
+			 twt_sta_upo),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         =
+		offsetof(struct wlfw_wfc_call_twt_config_ind_msg_v01,
+			 twt_sta_sp_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         =
+		offsetof(struct wlfw_wfc_call_twt_config_ind_msg_v01,
+			 twt_sta_sp),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         =
+		offsetof(struct wlfw_wfc_call_twt_config_ind_msg_v01,
+			 twt_sta_dl_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         =
+		offsetof(struct wlfw_wfc_call_twt_config_ind_msg_v01,
+			 twt_sta_dl),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         =
+		offsetof(struct wlfw_wfc_call_twt_config_ind_msg_v01,
+			 twt_sta_config_changed_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         =
+		offsetof(struct wlfw_wfc_call_twt_config_ind_msg_v01,
+			 twt_sta_config_changed),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
diff --git a/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.h
index e105cd4..4c2e5d8 100644
--- a/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.h
+++ b/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.h
@@ -84,6 +84,7 @@
 #define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033
 #define QMI_WLFW_EXIT_POWER_SAVE_RESP_V01 0x0050
 #define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036
+#define QMI_WLFW_WFC_CALL_TWT_CONFIG_IND_V01 0x0051
 #define QMI_WLFW_VBATT_RESP_V01 0x0032
 #define QMI_WLFW_MSA_INFO_REQ_V01 0x002D
 #define QMI_WLFW_QDSS_TRACE_FREE_IND_V01 0x0046
@@ -153,6 +154,7 @@ enum wlfw_mem_type_enum_v01 {
 	QMI_WLFW_MEM_CAL_V01 = 4,
 	QMI_WLFW_MEM_DPD_V01 = 5,
 	QMI_WLFW_MEM_QDSS_V01 = 6,
+	QMI_WLFW_MEM_HANG_DATA_V01 = 7,
 	WLFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX,
 };
 
@@ -163,6 +165,15 @@ enum wlfw_qdss_trace_mode_enum_v01 {
 	WLFW_QDSS_TRACE_MODE_ENUM_MAX_VAL_V01 = INT_MAX,
 };
 
+enum wlfw_wfc_media_quality_v01 {
+	WLFW_WFC_MEDIA_QUALITY_MIN_VAL_V01 = INT_MIN,
+	QMI_WLFW_WFC_MEDIA_QUAL_NOT_AVAILABLE_V01 = 0,
+	QMI_WLFW_WFC_MEDIA_QUAL_BAD_V01 = 1,
+	QMI_WLFW_WFC_MEDIA_QUAL_GOOD_V01 = 2,
+	QMI_WLFW_WFC_MEDIA_QUAL_EXCELLENT_V01 = 3,
+	WLFW_WFC_MEDIA_QUALITY_MAX_VAL_V01 = INT_MAX,
+};
+
 enum wlfw_soc_wake_enum_v01 {
 	WLFW_SOC_WAKE_ENUM_MIN_VAL_V01 = INT_MIN,
 	QMI_WLFW_WAKE_REQUEST_V01 = 0,
@@ -170,6 +181,22 @@ enum wlfw_soc_wake_enum_v01 {
 	WLFW_SOC_WAKE_ENUM_MAX_VAL_V01 = INT_MAX,
 };
 
+enum wlfw_host_build_type_v01 {
+	WLFW_HOST_BUILD_TYPE_MIN_VAL_V01 = INT_MIN,
+	QMI_HOST_BUILD_TYPE_UNSPECIFIED_V01 = 0,
+	QMI_HOST_BUILD_TYPE_PRIMARY_V01 = 1,
+	QMI_HOST_BUILD_TYPE_SECONDARY_V01 = 2,
+	WLFW_HOST_BUILD_TYPE_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_qmi_param_value_v01 {
+	WLFW_QMI_PARAM_VALUE_MIN_VAL_V01 = INT_MIN,
+	QMI_PARAM_INVALID_V01 = 0,
+	QMI_PARAM_ENABLE_V01 = 1,
+	QMI_PARAM_DISABLE_V01 = 2,
+	WLFW_QMI_PARAM_VALUE_MAX_VAL_V01 = INT_MAX,
+};
+
 #define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00)
 #define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01)
 #define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02)
@@ -302,9 +329,11 @@ struct wlfw_ind_register_req_msg_v01 {
 	u8 respond_get_info_enable;
 	u8 m3_dump_upload_req_enable_valid;
 	u8 m3_dump_upload_req_enable;
+	u8 wfc_call_twt_config_enable_valid;
+	u8 wfc_call_twt_config_enable;
 };
 
-#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 74
+#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 78
 extern struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[];
 
 struct wlfw_ind_register_resp_msg_v01 {
@@ -698,9 +727,11 @@ struct wlfw_host_cap_req_msg_v01 {
 	u8 ddr_range_valid;
 	struct wlfw_host_ddr_range_s_v01
 		ddr_range[QMI_WLFW_MAX_HOST_DDR_RANGE_SIZE_V01];
+	u8 host_build_type_valid;
+	enum wlfw_host_build_type_v01 host_build_type;
 };
 
-#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 312
+#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 319
 extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
 
 struct wlfw_host_cap_resp_msg_v01 {
@@ -909,9 +940,11 @@ struct wlfw_qdss_trace_mode_req_msg_v01 {
 	enum wlfw_qdss_trace_mode_enum_v01 mode;
 	u8 option_valid;
 	u64 option;
+	u8 hw_trc_disable_override_valid;
+	enum wlfw_qmi_param_value_v01 hw_trc_disable_override;
 };
 
-#define WLFW_QDSS_TRACE_MODE_REQ_MSG_V01_MAX_MSG_LEN 18
+#define WLFW_QDSS_TRACE_MODE_REQ_MSG_V01_MAX_MSG_LEN 25
 extern struct qmi_elem_info wlfw_qdss_trace_mode_req_msg_v01_ei[];
 
 struct wlfw_qdss_trace_mode_resp_msg_v01 {
@@ -979,9 +1012,21 @@ extern struct qmi_elem_info wlfw_antenna_grant_resp_msg_v01_ei[];
 struct wlfw_wfc_call_status_req_msg_v01 {
 	u32 wfc_call_status_len;
 	u8 wfc_call_status[QMI_WLFW_MAX_WFC_CALL_STATUS_DATA_SIZE_V01];
+	u8 wfc_call_active_valid;
+	u8 wfc_call_active;
+	u8 all_wfc_calls_held_valid;
+	u8 all_wfc_calls_held;
+	u8 is_wfc_emergency_valid;
+	u8 is_wfc_emergency;
+	u8 twt_ims_start_valid;
+	u64 twt_ims_start;
+	u8 twt_ims_int_valid;
+	u16 twt_ims_int;
+	u8 media_quality_valid;
+	enum wlfw_wfc_media_quality_v01 media_quality;
 };
 
-#define WLFW_WFC_CALL_STATUS_REQ_MSG_V01_MAX_MSG_LEN 261
+#define WLFW_WFC_CALL_STATUS_REQ_MSG_V01_MAX_MSG_LEN 296
 extern struct qmi_elem_info wlfw_wfc_call_status_req_msg_v01_ei[];
 
 struct wlfw_wfc_call_status_resp_msg_v01 {
@@ -1092,4 +1137,22 @@ struct wlfw_exit_power_save_resp_msg_v01 {
 #define WLFW_EXIT_POWER_SAVE_RESP_MSG_V01_MAX_MSG_LEN 7
 extern struct qmi_elem_info wlfw_exit_power_save_resp_msg_v01_ei[];
 
+struct wlfw_wfc_call_twt_config_ind_msg_v01 {
+	u8 twt_sta_start_valid;
+	u64 twt_sta_start;
+	u8 twt_sta_int_valid;
+	u16 twt_sta_int;
+	u8 twt_sta_upo_valid;
+	u16 twt_sta_upo;
+	u8 twt_sta_sp_valid;
+	u16 twt_sta_sp;
+	u8 twt_sta_dl_valid;
+	u16 twt_sta_dl;
+	u8 twt_sta_config_changed_valid;
+	u8 twt_sta_config_changed;
+};
+
+#define WLFW_WFC_CALL_TWT_CONFIG_IND_MSG_V01_MAX_MSG_LEN 35
+extern struct qmi_elem_info wlfw_wfc_call_twt_config_ind_msg_v01_ei[];
+
 #endif
diff --git a/drivers/pinctrl/qcom/pinctrl-lagoon.c b/drivers/pinctrl/qcom/pinctrl-lagoon.c
index d133a1a5..02f2403 100644
--- a/drivers/pinctrl/qcom/pinctrl-lagoon.c
+++ b/drivers/pinctrl/qcom/pinctrl-lagoon.c
@@ -1614,6 +1614,10 @@ static const struct msm_pingroup lagoon_groups[] = {
 	[163] = UFS_RESET(ufs_reset, 0x1ae000),
 };
 
+static const int lagoon_reserved_gpios[] = {
+	13, 14, 15, 16, 45, 46, 56, 57, -1
+};
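+/* -1 terminates the list; msm_gpio_init_valid_mask() masks these pins out */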
+
 static const struct msm_pinctrl_soc_data lagoon_pinctrl = {
 	.pins = lagoon_pins,
 	.npins = ARRAY_SIZE(lagoon_pins),
@@ -1621,6 +1625,7 @@ static const struct msm_pinctrl_soc_data lagoon_pinctrl = {
 	.nfunctions = ARRAY_SIZE(lagoon_functions),
 	.groups = lagoon_groups,
 	.ngroups = ARRAY_SIZE(lagoon_groups),
+	.reserved_gpios = lagoon_reserved_gpios,
 	.ngpios = 156,
 };
 
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 0a8ca53..80a3709 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -988,8 +988,23 @@ static int msm_gpio_init_valid_mask(struct gpio_chip *chip,
 	int ret;
 	unsigned int len, i;
 	unsigned int max_gpios = pctrl->soc->ngpios;
+	const int *reserved = pctrl->soc->reserved_gpios;
 	u16 *tmp;
 
+	/* Driver provided reserved list overrides */
+	if (reserved) {
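+		/* Assume every GPIO is valid, then clear each reserved entry */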
+		bitmap_fill(chip->valid_mask, max_gpios);
+		for (i = 0; reserved[i] >= 0; i++) {
+			if (i >= max_gpios || reserved[i] >= max_gpios) {
+				dev_err(pctrl->dev,
+					"invalid list of reserved GPIOs\n");
+				return -EINVAL;
+			}
+			clear_bit(reserved[i], chip->valid_mask);
+		}
+		return 0;
+	}
+
 	/* The number of GPIOs in the ACPI tables */
 	len = ret = device_property_read_u16_array(pctrl->dev, "gpios", NULL, 0);
 	if (ret < 0)
@@ -1019,6 +1034,9 @@ static int msm_gpio_init_valid_mask(struct gpio_chip *chip,
 
 static bool msm_gpio_needs_valid_mask(struct msm_pinctrl *pctrl)
 {
+	if (pctrl->soc->reserved_gpios)
+		return true;
+
 	return device_property_read_u16_array(pctrl->dev, "gpios", NULL, 0) > 0;
 }
 
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 55e73e7..20005f6 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -138,6 +138,7 @@ struct msm_pinctrl_soc_data {
 	bool pull_no_keeper;
 	struct pinctrl_qup *qup_regs;
 	unsigned int nqup_regs;
+	const int *reserved_gpios;
 };
 
 int msm_pinctrl_probe(struct platform_device *pdev,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 78ec187b..071920a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -58,8 +58,8 @@
 
 #define IPA_SUSPEND_BUSY_TIMEOUT (msecs_to_jiffies(10))
 
-#define DEFAULT_MPM_RING_SIZE_UL 6
-#define DEFAULT_MPM_RING_SIZE_DL 16
+#define DEFAULT_MPM_RING_SIZE_UL 64
+#define DEFAULT_MPM_RING_SIZE_DL 64
 #define DEFAULT_MPM_TETH_AGGR_SIZE 24
 #define DEFAULT_MPM_UC_THRESH_SIZE 4
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 871b3d9..06103be 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -1466,6 +1466,9 @@ int ipa3_release_gsi_channel(u32 clnt_hdl)
 	if (!ep->keep_ipa_awake)
 		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
 
+	/* Set the disconnect-in-progress flag so the client cb is not called. */
+	atomic_set(&ep->disconnect_in_progress, 1);
+
 	gsi_res = gsi_dealloc_channel(ep->gsi_chan_hdl);
 	if (gsi_res != GSI_STATUS_SUCCESS) {
 		IPAERR("Error deallocating channel: %d\n", gsi_res);
@@ -1792,9 +1795,7 @@ int ipa3_clear_endpoint_delay(u32 clnt_hdl)
 	/* Set disconnect in progress flag so further flow control events are
 	 * not honored.
 	 */
-	spin_lock(&ipa3_ctx->disconnect_lock);
-	ep->disconnect_in_progress = true;
-	spin_unlock(&ipa3_ctx->disconnect_lock);
+	atomic_set(&ep->disconnect_in_progress, 1);
 
 	/* If flow is disabled at this point, restore the ep state.*/
 	ep_ctrl.ipa_ep_delay = false;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index bcf58a7..d5017fb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -1502,9 +1502,9 @@ static ssize_t ipa3_read_ntn(struct file *file, char __user *ubuf,
 		size_t count, loff_t *ppos)
 {
 #define TX_STATS(y) \
-	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+	stats.tx_ch_stats[0].y
 #define RX_STATS(y) \
-	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+	stats.rx_ch_stats[0].y
 
 	struct Ipa3HwStatsNTNInfoData_t stats;
 	int nbytes;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 25cf988..41be7c4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -3265,11 +3265,8 @@ void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
 	metadata = status.metadata;
 	ucp = status.ucp;
 	ep = &ipa3_ctx->ep[src_pipe];
-	if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes ||
-		!ep->valid ||
-		!ep->client_notify)) {
-		IPAERR_RL("drop pipe=%d ep_valid=%d client_notify=%pK\n",
-		  src_pipe, ep->valid, ep->client_notify);
+	if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes)) {
+		IPAERR_RL("drop pipe=%d\n", src_pipe);
 		dev_kfree_skb_any(rx_skb);
 		return;
 	}
@@ -3291,7 +3288,12 @@ void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
 			metadata, *(u32 *)rx_skb->cb);
 	IPADBG_LOW("ucp: %d\n", *(u8 *)(rx_skb->cb + 4));
 
-	ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
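+	/* Deliver only if the EP is valid, has a cb and is not disconnecting */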
+	if (likely((!atomic_read(&ep->disconnect_in_progress)) &&
+				ep->valid && ep->client_notify))
+		ep->client_notify(ep->priv, IPA_RECEIVE,
+				(unsigned long)(rx_skb));
+	else
+		dev_kfree_skb_any(rx_skb);
 }
 
 static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index dbb6684..4db044f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -935,7 +935,7 @@ struct ipa3_ep_context {
 	struct ipa3_wlan_stats wstats;
 	u32 uc_offload_state;
 	u32 gsi_offload_state;
-	bool disconnect_in_progress;
+	atomic_t disconnect_in_progress;
 	u32 qmi_request_sent;
 	u32 eot_in_poll_err;
 	bool ep_delay_set;
@@ -3230,5 +3230,5 @@ int ipa3_uc_send_enable_flow_control(uint16_t gsi_chid,
 int ipa3_uc_send_disable_flow_control(void);
 int ipa3_uc_send_update_flow_control(uint32_t bitmask,
 	uint8_t  add_delete);
-int ipa3_qmi_reg_dereg_for_bw(bool bw_reg);
+int ipa3_qmi_reg_dereg_for_bw(bool bw_reg, int bw_reg_dereg_type);
 #endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
index f2cdd9b..c20fb9e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
@@ -72,6 +72,15 @@
 #define IPA_MPM_FLOW_CTRL_DELETE 0
 #define IPA_MPM_NUM_OF_INIT_CMD_DESC 2
 #define IPA_UC_FC_DB_ADDR 0x1EC2088
+#define IPA_MAX_BW_REG_DEREG_CACHE 20
+
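+/* Tags recording which code path did a QMI BW (de)registration, for debug */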
+enum bw_reg_dereg_type {
+	BW_VOTE_WAN_NOTIFY = 0,
+	BW_UNVOTE_WAN_NOTIFY = 1,
+	BW_VOTE_PROBE_CB = 2,
+	BW_VOTE_XDCI_ENABLE = 3,
+	BW_UNVOTE_XDCI_DISABLE = 4,
+};
 
 enum mhip_re_type {
 	MHIP_RE_XFER = 0x2,
@@ -387,6 +396,11 @@ struct ipa_mpm_mhi_driver {
 	enum ipa_mpm_remote_state remote_state;
 };
 
+struct bw_cache {
+	int bw_reg_dereg_type;
+	int ref_count;
+};
+
 struct ipa_mpm_context {
 	struct ipa_mpm_dev_info dev_info;
 	struct ipa_mpm_mhi_driver md[IPA_MPM_MAX_MHIP_CHAN];
@@ -399,6 +413,8 @@ struct ipa_mpm_context {
 	atomic_t adpl_over_odl_available;
 	atomic_t active_teth_count;
 	atomic_t voted_before;
+	struct bw_cache bw_reg_dereg_cache[IPA_MAX_BW_REG_DEREG_CACHE];
+	int cache_index;
 	struct device *parent_pdev;
 	struct ipa_smmu_cb_ctx carved_smmu_cb;
 	struct device *mhi_parent_dev;
@@ -716,7 +732,6 @@ static void ipa_mpm_smmu_unmap(dma_addr_t carved_iova, int sz, int dir,
 
 	if (carved_iova <= 0) {
 		IPA_MPM_ERR("carved_iova is zero/negative\n");
-		WARN_ON(1);
 		return;
 	}
 
@@ -1554,9 +1569,8 @@ static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
 		if ((atomic_read(
 			&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt)
 								== 0)) {
-			IPA_MPM_DBG("probe_id %d PCIE clock already devoted\n",
+			IPA_MPM_ERR("probe_id %d PCIE clock already de-voted\n",
 				probe_id);
-			WARN_ON(1);
 			*is_acted = true;
 			return 0;
 		}
@@ -1592,9 +1606,8 @@ static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote,
 		if ((atomic_read
 			(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt)
 								== 0)) {
-			IPA_MPM_DBG("probe_id %d IPA clock count < 0\n",
+			IPA_MPM_ERR("probe_id %d IPA clock count < 0\n",
 				probe_id);
-			WARN_ON(1);
 			return;
 		}
 		IPA_ACTIVE_CLIENTS_DEC_SPECIAL(ipa_mpm_mhip_chan_str[probe_id]);
@@ -1940,7 +1953,8 @@ int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
 			ipa_mpm_ctx->md[probe_id].teth_state =
 						IPA_MPM_TETH_CONNECTED;
 			/* Register for BW indication from Q6 */
-			if (!ipa3_qmi_reg_dereg_for_bw(true))
+			if (!ipa3_qmi_reg_dereg_for_bw(true,
+				BW_VOTE_WAN_NOTIFY))
 				IPA_MPM_ERR(
 					"Failed rgstring for QMIBW Ind, might be SSR");
 			break;
@@ -1994,7 +2008,8 @@ int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
 
 		/* De-register for BW indication from Q6*/
 		if (atomic_read(&ipa_mpm_ctx->active_teth_count) >= 1) {
-			if (!ipa3_qmi_reg_dereg_for_bw(false))
+			if (!ipa3_qmi_reg_dereg_for_bw(false,
+				BW_UNVOTE_WAN_NOTIFY))
 				IPA_MPM_DBG(
 					"Failed De-rgstrng QMI BW Indctn,might be SSR");
 		} else {
@@ -2347,7 +2362,6 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 	ret = mhi_prepare_for_transfer(ipa_mpm_ctx->md[probe_id].mhi_dev);
 	if (ret) {
 		IPA_MPM_ERR("mhi_prepare_for_transfer failed %d\n", ret);
-		WARN_ON(1);
 		/*
 		 * WA to handle prepare_for_tx failures.
 		 * Though prepare for transfer fails, indicate success
@@ -2550,9 +2564,12 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 			pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
 			ipa3_xdci_ep_delay_rm(pipe_idx);
 			/* Register for BW indication from Q6*/
-			if (!ipa3_qmi_reg_dereg_for_bw(true))
-				IPA_MPM_DBG(
-					"QMI BW reg Req failed,might be SSR");
+			if (ipa_mpm_ctx->md[probe_id].teth_state ==
+				IPA_MPM_TETH_CONNECTED)
+				if (!ipa3_qmi_reg_dereg_for_bw(true,
+					BW_VOTE_PROBE_CB))
+					IPA_MPM_DBG(
+						"QMI BW reg Req failed, might be SSR");
 		}
 		break;
 	default:
@@ -2910,7 +2927,7 @@ int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot xdci_teth_prot)
 	case MHIP_STATUS_NO_OP:
 		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
 		/* Register for BW indication from Q6*/
-		if (!ipa3_qmi_reg_dereg_for_bw(true))
+		if (!ipa3_qmi_reg_dereg_for_bw(true, BW_VOTE_XDCI_ENABLE))
 			IPA_MPM_DBG("Fail regst QMI BW Indctn,might be SSR");
 
 		pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
@@ -3055,7 +3072,8 @@ int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
 		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
 		/* De-register for BW indication from Q6*/
 		if (atomic_read(&ipa_mpm_ctx->active_teth_count) >= 1) {
-			if (!ipa3_qmi_reg_dereg_for_bw(false))
+			if (!ipa3_qmi_reg_dereg_for_bw(false,
+				BW_UNVOTE_XDCI_DISABLE))
 				IPA_MPM_DBG(
 					"Failed De-rgstrng QMI BW Indctn,might be SSR");
 		} else {
@@ -3186,6 +3204,8 @@ static int ipa_mpm_probe(struct platform_device *pdev)
 		mutex_init(&ipa_mpm_ctx->md[i].mutex);
 		mutex_init(&ipa_mpm_ctx->md[i].mhi_mutex);
 	}
+	mutex_init(&ipa_mpm_ctx->mutex);
+	ipa_mpm_ctx->cache_index = 0;
 
 	ipa_mpm_ctx->dev_info.pdev = pdev;
 	ipa_mpm_ctx->dev_info.dev = &pdev->dev;
@@ -3484,10 +3504,21 @@ int ipa3_mpm_enable_adpl_over_odl(bool enable)
 	return ret;
 }
 
-int ipa3_qmi_reg_dereg_for_bw(bool bw_reg)
+int ipa3_qmi_reg_dereg_for_bw(bool bw_reg, int bw_reg_dereg_type)
 {
 	int rt;
 
+	mutex_lock(&ipa_mpm_ctx->mutex);
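+	/* Log the caller and the current teth refcount in a debug ring buffer */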
+	ipa_mpm_ctx->bw_reg_dereg_cache[
+		ipa_mpm_ctx->cache_index].bw_reg_dereg_type =
+		bw_reg_dereg_type;
+	ipa_mpm_ctx->bw_reg_dereg_cache[
+		ipa_mpm_ctx->cache_index].ref_count =
+		atomic_read(&ipa_mpm_ctx->active_teth_count);
+	ipa_mpm_ctx->cache_index =
+		(ipa_mpm_ctx->cache_index + 1) % IPA_MAX_BW_REG_DEREG_CACHE;
+	mutex_unlock(&ipa_mpm_ctx->mutex);
+
 	if (bw_reg) {
 		atomic_inc(&ipa_mpm_ctx->active_teth_count);
 		if (atomic_read(&ipa_mpm_ctx->active_teth_count) == 1) {
@@ -3502,7 +3533,10 @@ int ipa3_qmi_reg_dereg_for_bw(bool bw_reg)
 				atomic_set(&ipa_mpm_ctx->voted_before, 0);
 				return false;
 			}
-			IPA_MPM_DBG("QMI BW regst success");
+			IPA_MPM_DBG("QMI BW regst success from %d",
+				ipa_mpm_ctx->bw_reg_dereg_cache[
+					(ipa_mpm_ctx->cache_index +
+					IPA_MAX_BW_REG_DEREG_CACHE - 1) %
+					IPA_MAX_BW_REG_DEREG_CACHE].bw_reg_dereg_type);
 		} else {
 			IPA_MPM_DBG("bw_change to %d no-op, teth_count = %d",
 				bw_reg,
@@ -3521,7 +3555,10 @@ int ipa3_qmi_reg_dereg_for_bw(bool bw_reg)
 				IPA_MPM_ERR("QMI BW de-regst fail, rt= %d", rt);
 				return false;
 			}
-			IPA_MPM_DBG("QMI BW De-regst success");
+			IPA_MPM_DBG("QMI BW De-regst success %d",
+				ipa_mpm_ctx->bw_reg_dereg_cache[
+					(ipa_mpm_ctx->cache_index +
+					IPA_MAX_BW_REG_DEREG_CACHE - 1) %
+					IPA_MAX_BW_REG_DEREG_CACHE].bw_reg_dereg_type);
 		} else {
 			IPA_MPM_DBG("bw_change to %d no-op, teth_count = %d",
 				bw_reg,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
index b8616b8..461f792 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  */
 
 #include "ipa_i.h"
@@ -550,6 +550,9 @@ int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
 		return -EFAULT;
 	}
 
+	atomic_set(&ep_ul->disconnect_in_progress, 1);
+	atomic_set(&ep_dl->disconnect_in_progress, 1);
+
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
 		cmd.size = sizeof(*cmd_data_v4_0);
 	else
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 2854c1e..69bd9df 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -2195,6 +2195,7 @@ int ipa3_enable_gsi_wdi_pipe(u32 clnt_hdl)
 	struct ipa3_ep_context *ep;
 	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
 	int ipa_ep_idx;
+	struct ipa_ep_cfg_holb holb_cfg;
 
 	IPADBG("ep=%d\n", clnt_hdl);
 
@@ -2215,6 +2216,18 @@ int ipa3_enable_gsi_wdi_pipe(u32 clnt_hdl)
 	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 	ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
 
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		memset(&holb_cfg, 0, sizeof(holb_cfg));
+		holb_cfg.en = IPA_HOLB_TMR_EN;
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5)
+			holb_cfg.tmr_val = IPA_HOLB_TMR_VAL;
+		else
+			holb_cfg.tmr_val = IPA_HOLB_TMR_VAL_4_5;
+
+		result = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
+	}
+
 	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
 	ep->gsi_offload_state |= IPA_WDI_ENABLED;
 	IPADBG("client (ep: %d) enabled\n", clnt_hdl);
diff --git a/drivers/power/supply/qcom/qpnp-smblite.c b/drivers/power/supply/qcom/qpnp-smblite.c
index f0d5ece..0d81fa1 100644
--- a/drivers/power/supply/qcom/qpnp-smblite.c
+++ b/drivers/power/supply/qcom/qpnp-smblite.c
@@ -992,7 +992,8 @@ static int smblite_configure_typec(struct smb_charger *chg)
 	}
 
 	rc = smblite_lib_masked_write(chg, TYPE_C_MODE_CFG_REG,
-					EN_SNK_ONLY_BIT, 0);
+					EN_TRY_SNK_BIT | EN_SNK_ONLY_BIT,
+					EN_TRY_SNK_BIT);
 	if (rc < 0) {
 		dev_err(chg->dev,
 			"Couldn't configure TYPE_C_MODE_CFG_REG rc=%d\n",
diff --git a/drivers/power/supply/qcom/smb1398-charger.c b/drivers/power/supply/qcom/smb1398-charger.c
index 2c2e339..8b2ce9c 100644
--- a/drivers/power/supply/qcom/smb1398-charger.c
+++ b/drivers/power/supply/qcom/smb1398-charger.c
@@ -19,6 +19,8 @@
 #include <linux/iio/consumer.h>
 
 /* Status register definition */
+#define PERPH0_REVISION4		0x2603
+
 #define INPUT_STATUS_REG		0x2609
 #define INPUT_USB_IN			BIT(1)
 #define INPUT_WLS_IN			BIT(0)
@@ -173,6 +175,10 @@
 #define PERPH0_CFG_SDCDC_REG		0x267A
 #define EN_WIN_UV_BIT			BIT(7)
 
+#define PERPH0_SSUPPLY_CFG0_REG		0x2682
+#define EN_HV_OV_OPTION2_BIT		BIT(7)
+#define EN_MV_OV_OPTION2_BIT		BIT(5)
+
 #define SSUPLY_TEMP_CTRL_REG		0x2683
 #define SEL_OUT_TEMP_MAX_MASK		GENMASK(7, 5)
 #define SEL_OUT_TEMP_MAX_SHFT		5
@@ -187,6 +193,10 @@
 #define DIV2_ILIM_STS			BIT(5)
 #define DIV2_CFLY_SS_DONE_STS		BIT(1)
 
+#define PERPH1_LOCK_SPARE_REG		0x27C3
+#define CFG_LOCK_SPARE1_MASK		GENMASK(7, 6)
+#define CFG_LOCK_SPARE1_SHIFT		6
+
 /* available voters */
 #define ILIM_VOTER			"ILIM_VOTER"
 #define TAPER_VOTER			"TAPER_VOTER"
@@ -228,6 +238,13 @@ enum isns_mode {
 	ISNS_MODE_STANDBY,
 };
 
+enum ovp {
+	OVP_17P7V = 0,
+	OVP_14V,
+	OVP_22P2V,
+	OVP_7P3,
+};
+
 enum {
 	/* Perph0 IRQs */
 	CFLY_HARD_FAULT_LATCH_IRQ,
@@ -917,6 +934,24 @@ static int div2_cp_master_get_prop_suspended(struct smb1398_chip *chip,
 	return 0;
 }
 
+#define DEFAULT_HVDCP3_MIN_ICL_UA 1000000
+static int smb1398_div2_cp_get_min_icl(struct smb1398_chip *chip)
+{
+	union power_supply_propval pval;
+	int rc;
+
+	/* Use max(dt_min_icl, 1A) for HVDCP3 */
+	if (chip->usb_psy) {
+		rc = power_supply_get_property(chip->usb_psy,
+			POWER_SUPPLY_PROP_REAL_TYPE, &pval);
+		if (rc >= 0 && (pval.intval == POWER_SUPPLY_TYPE_USB_HVDCP_3))
+			return max(chip->div2_cp_min_ilim_ua,
+				DEFAULT_HVDCP3_MIN_ICL_UA);
+	}
+
+	return chip->div2_cp_min_ilim_ua;
+}
+
 static int div2_cp_master_get_prop(struct power_supply *psy,
 				enum power_supply_property prop,
 				union power_supply_propval *val)
@@ -1017,7 +1052,7 @@ static int div2_cp_master_get_prop(struct power_supply *psy,
 		val->intval = chip->pl_output_mode;
 		break;
 	case POWER_SUPPLY_PROP_MIN_ICL:
-		val->intval = chip->div2_cp_min_ilim_ua;
+		val->intval = smb1398_div2_cp_get_min_icl(chip);
 		break;
 	default:
 		rc = -EINVAL;
@@ -1316,7 +1351,7 @@ static int smb1398_div2_cp_ilim_vote_cb(struct votable *votable,
 {
 	struct smb1398_chip *chip = (struct smb1398_chip *)data;
 	union power_supply_propval pval = {0};
-	int rc = 0, max_ilim_ua;
+	int rc = 0, max_ilim_ua, min_ilim_ua;
 	bool slave_dis, split_ilim = false;
 
 	if (!is_psy_voter_available(chip) || chip->in_suspend)
@@ -1325,19 +1360,21 @@ static int smb1398_div2_cp_ilim_vote_cb(struct votable *votable,
 	if (!client)
 		return -EINVAL;
 
+	min_ilim_ua = smb1398_div2_cp_get_min_icl(chip);
+
 	ilim_ua = (ilim_ua * DIV2_ILIM_CFG_PCT) / 100;
 
 	max_ilim_ua = is_cps_available(chip) ?
 		DIV2_MAX_ILIM_DUAL_CP_UA : DIV2_MAX_ILIM_UA;
 	ilim_ua = min(ilim_ua, max_ilim_ua);
-	if (ilim_ua < chip->div2_cp_min_ilim_ua) {
+	if (ilim_ua < min_ilim_ua) {
 		dev_dbg(chip->dev, "ilim %duA is too low to config CP charging\n",
 				ilim_ua);
 		vote(chip->div2_cp_disable_votable, ILIM_VOTER, true, 0);
 	} else {
 		if (is_cps_available(chip)) {
 			split_ilim = true;
-			slave_dis = ilim_ua < (2 * chip->div2_cp_min_ilim_ua);
+			slave_dis = ilim_ua < (2 * min_ilim_ua);
 			vote(chip->div2_cp_slave_disable_votable, ILIM_VOTER,
 					slave_dis, 0);
 			slave_dis = !!get_effective_result(
@@ -1508,7 +1545,7 @@ static int smb1398_get_irq_index_byname(const char *irq_name)
 {
 	int i;
 
-	for (i = 0; i < NUM_IRQS; i++) {
+	for (i = 0; i < ARRAY_SIZE(smb_irqs); i++) {
 		if (smb_irqs[i].name != NULL)
 			if (strcmp(smb_irqs[i].name, irq_name) == 0)
 				return i;
@@ -1764,7 +1801,7 @@ static void smb1398_taper_work(struct work_struct *work)
 	struct smb1398_chip *chip = container_of(work,
 			struct smb1398_chip, taper_work);
 	union power_supply_propval pval = {0};
-	int rc, fcc_ua, fv_uv, stepper_ua, main_fcc_ua;
+	int rc, fcc_ua, fv_uv, stepper_ua, main_fcc_ua = 0, min_ilim_ua;
 	bool slave_en;
 
 	if (!is_psy_voter_available(chip))
@@ -1776,6 +1813,8 @@ static void smb1398_taper_work(struct work_struct *work)
 	if (chip->fcc_main_votable)
 		main_fcc_ua = get_effective_result(chip->fcc_main_votable);
 
+	min_ilim_ua = smb1398_div2_cp_get_min_icl(chip);
+
 	chip->taper_entry_fv = get_effective_result(chip->fv_votable);
 	while (true) {
 		rc = power_supply_get_property(chip->batt_psy,
@@ -1811,7 +1850,7 @@ static void smb1398_taper_work(struct work_struct *work)
 			 * If total FCC is less than the minimum ILIM to
 			 * keep CP master and slave online, disable CP.
 			 */
-			if (fcc_ua < (chip->div2_cp_min_ilim_ua * 2)) {
+			if (fcc_ua < (min_ilim_ua * 2)) {
 				vote(chip->div2_cp_disable_votable,
 						TAPER_VOTER, true, 0);
 				/*
@@ -1860,10 +1899,53 @@ static void smb1398_taper_work(struct work_struct *work)
 	chip->taper_work_running = false;
 }
 
+static int smb1398_update_ovp(struct smb1398_chip *chip)
+{
+	int rc = 0;
+	u8 reg = 0;
+
+	rc = smb1398_read(chip, PERPH0_REVISION4, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev,
+			"Couldn't read PERPH0_REVISION4 rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Ignore for REV2 and below */
+	if (reg <= 2)
+		return 0;
+
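+	/* On rev3+ parts, select the OV option-2 comparators and a 14V OVP level */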
+	rc = smb1398_masked_write(chip, PERPH0_SSUPPLY_CFG0_REG,
+			EN_HV_OV_OPTION2_BIT | EN_MV_OV_OPTION2_BIT,
+			EN_HV_OV_OPTION2_BIT);
+	if (rc < 0) {
+		dev_err(chip->dev,
+			"Couldn't set PERPH0_SSUPPLY_CFG0_REG rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb1398_masked_write(chip, PERPH1_LOCK_SPARE_REG,
+				CFG_LOCK_SPARE1_MASK,
+				OVP_14V << CFG_LOCK_SPARE1_SHIFT);
+	if (rc < 0) {
+		dev_err(chip->dev,
+			"Couldn't set PERPH1_LOCK_SPARE_REG rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
 static int smb1398_div2_cp_hw_init(struct smb1398_chip *chip)
 {
 	int rc = 0;
 
+	rc = smb1398_update_ovp(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't update OVP threshold rc=%d\n", rc);
+		return rc;
+	}
+
 	/* Configure window (Vin/2 - Vout) OV level to 500mV */
 	rc = smb1398_masked_write(chip, DIV2_PROTECTION_REG,
 			DIV2_WIN_OV_SEL_MASK, WIN_OV_500_MV);
@@ -2184,6 +2266,12 @@ static int smb1398_div2_cp_slave_probe(struct smb1398_chip *chip)
 	int rc;
 	u8 status;
 
+	rc = smb1398_update_ovp(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't update OVP threshold rc=%d\n", rc);
+		return rc;
+	}
+
 	rc = smb1398_read(chip, MODE_STATUS_REG, &status);
 	if (rc < 0) {
 		dev_err(chip->dev, "Couldn't read slave MODE_STATUS_REG, rc=%d\n",
diff --git a/drivers/power/supply/qcom/smblite-lib.c b/drivers/power/supply/qcom/smblite-lib.c
index 5c127f0..06f5f0c 100644
--- a/drivers/power/supply/qcom/smblite-lib.c
+++ b/drivers/power/supply/qcom/smblite-lib.c
@@ -2072,14 +2072,116 @@ irqreturn_t smblite_icl_change_irq_handler(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static int smblite_lib_role_switch_failure(struct smb_charger *chg)
+{
+	int rc = 0;
+	union power_supply_propval pval = {0, };
+
+	rc = smblite_lib_get_prop_usb_present(chg, &pval);
+	if (rc < 0) {
+		smblite_lib_err(chg, "Couldn't get usb presence status rc=%d\n",
+					rc);
+		return rc;
+	}
+
+	/*
+	 * When the role switch fails, notify the current
+	 * charger state to the usb driver.
+	 */
+	if (pval.intval) {
+		smblite_lib_dbg(chg, PR_MISC, "Role reversal failed, notifying device mode to usb driver.\n");
+		smblite_lib_notify_device_mode(chg, true);
+	}
+
+	return rc;
+}
+
+static int typec_partner_register(struct smb_charger *chg)
+{
+	int typec_mode, rc = 0;
+
+	mutex_lock(&chg->typec_lock);
+
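+	/* Without a port there is nothing to do; keep the partner across a PR swap */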
+	if (!chg->typec_port || chg->pr_swap_in_progress)
+		goto unlock;
+
+	if (!chg->typec_partner) {
+		if (chg->sink_src_mode == AUDIO_ACCESS_MODE)
+			chg->typec_partner_desc.accessory =
+					TYPEC_ACCESSORY_AUDIO;
+		else
+			chg->typec_partner_desc.accessory =
+					TYPEC_ACCESSORY_NONE;
+
+		chg->typec_partner = typec_register_partner(chg->typec_port,
+				&chg->typec_partner_desc);
+		if (IS_ERR(chg->typec_partner)) {
+			rc = PTR_ERR(chg->typec_partner);
+			pr_err("Couldn't register typec_partner rc=%d\n",
+								rc);
+			goto unlock;
+		}
+	}
+
+	if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
+		goto unlock;
+
+	typec_mode = smblite_lib_get_prop_typec_mode(chg);
+
+	if (typec_mode >= POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
+			|| typec_mode == POWER_SUPPLY_TYPEC_NONE) {
+		if (chg->typec_role_swap_failed) {
+			rc = smblite_lib_role_switch_failure(chg);
+			if (rc < 0)
+				smblite_lib_err(chg, "Failed to role switch rc=%d\n",
+								rc);
+			chg->typec_role_swap_failed = false;
+		}
+
+		typec_set_data_role(chg->typec_port, TYPEC_DEVICE);
+		typec_set_pwr_role(chg->typec_port, TYPEC_SINK);
+	} else {
+		typec_set_data_role(chg->typec_port, TYPEC_HOST);
+		typec_set_pwr_role(chg->typec_port, TYPEC_SOURCE);
+	}
+
+unlock:
+	mutex_unlock(&chg->typec_lock);
+	return rc;
+}
+
+static void typec_partner_unregister(struct smb_charger *chg)
+{
+	mutex_lock(&chg->typec_lock);
+
+	if (!chg->typec_port)
+		goto unlock;
+
+	if (chg->typec_partner && !chg->pr_swap_in_progress) {
+		smblite_lib_dbg(chg, PR_MISC, "Un-registering typeC partner\n");
+		typec_unregister_partner(chg->typec_partner);
+		chg->typec_partner = NULL;
+	}
+
+unlock:
+	mutex_unlock(&chg->typec_lock);
+}
+
 static void smblite_lib_micro_usb_plugin(struct smb_charger *chg,
 					bool vbus_rising)
 {
+	int rc = 0;
+
 	if (vbus_rising) {
 		smblite_lib_notify_device_mode(chg, true);
+		rc = typec_partner_register(chg);
+		if (rc < 0)
+			smblite_lib_err(chg, "Couldn't register partner rc=%d\n",
+					rc);
 	} else {
 		smblite_lib_notify_device_mode(chg, false);
 		smblite_lib_uusb_removal(chg);
+		typec_partner_unregister(chg);
 	}
 }
 
@@ -2263,52 +2365,6 @@ static void typec_ra_ra_insertion(struct smb_charger *chg)
 	vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
 }
 
-static int typec_partner_register(struct smb_charger *chg)
-{
-	int typec_mode, rc = 0;
-
-	if (!chg->typec_port)
-		return 0;
-
-	if (chg->typec_partner && chg->pr_swap_in_progress)
-		return 0;
-
-	if (chg->sink_src_mode == AUDIO_ACCESS_MODE)
-		chg->typec_partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
-	else
-		chg->typec_partner_desc.accessory = TYPEC_ACCESSORY_NONE;
-
-	chg->typec_partner = typec_register_partner(chg->typec_port,
-			&chg->typec_partner_desc);
-	if (IS_ERR(chg->typec_partner)) {
-		rc = PTR_ERR(chg->typec_partner);
-		pr_err("Couldn't to register typec_partner rc=%d\n", rc);
-		return rc;
-	}
-
-	typec_mode = smblite_lib_get_prop_typec_mode(chg);
-
-	if (typec_mode >= POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
-			|| typec_mode == POWER_SUPPLY_TYPEC_NONE) {
-		typec_set_data_role(chg->typec_port, TYPEC_DEVICE);
-		typec_set_pwr_role(chg->typec_port, TYPEC_SINK);
-	} else {
-		typec_set_data_role(chg->typec_port, TYPEC_HOST);
-		typec_set_pwr_role(chg->typec_port, TYPEC_SOURCE);
-	}
-
-	return rc;
-}
-
-static void typec_partner_unregister(struct smb_charger *chg)
-{
-	if (chg->typec_partner && !chg->pr_swap_in_progress) {
-		smblite_lib_dbg(chg, PR_MISC, "Un-registering typeC partner\n");
-		typec_unregister_partner(chg->typec_partner);
-		chg->typec_partner = NULL;
-	}
-}
-
 static const char * const dr_mode_text[] = {
 	"ufp", "dfp", "none"
 };
@@ -2401,30 +2457,6 @@ int smblite_lib_typec_port_type_set(const struct typec_capability *cap,
 	return rc;
 }
 
-static int smblite_lib_role_switch_failure(struct smb_charger *chg, int mode)
-{
-	int rc = 0;
-	union power_supply_propval pval = {0, };
-
-	rc = smblite_lib_get_prop_usb_present(chg, &pval);
-	if (rc < 0) {
-		smblite_lib_err(chg, "Couldn't get usb presence status rc=%d\n",
-					rc);
-		return rc;
-	}
-
-	/*
-	 * When role switch fails notify the
-	 * current charger state to usb driver.
-	 */
-	if (pval.intval && mode == TYPEC_PORT_SRC) {
-		smblite_lib_dbg(chg, PR_MISC, "Role reversal failed, notifying device mode to usb driver.\n");
-		smblite_lib_notify_device_mode(chg, true);
-	}
-
-	return rc;
-}
-
 static void smblite_lib_typec_role_check_work(struct work_struct *work)
 {
 	struct smb_charger *chg = container_of(work, struct smb_charger,
@@ -2460,16 +2492,13 @@ static void smblite_lib_typec_role_check_work(struct work_struct *work)
 			|| chg->typec_mode == POWER_SUPPLY_TYPEC_NONE) {
 			smblite_lib_dbg(chg, PR_MISC, "Role reversal not latched to DFP in %d msecs. Resetting to DRP mode\n",
 						ROLE_REVERSAL_DELAY_MS);
+			chg->pr_swap_in_progress = false;
+			chg->typec_role_swap_failed = true;
 			rc = smblite_lib_force_dr_mode(chg,
 							TYPEC_PORT_DRP);
 			if (rc < 0)
 				smblite_lib_err(chg, "Couldn't to set DRP mode, rc=%d\n",
 							rc);
-			rc = smblite_lib_role_switch_failure(chg,
-							TYPEC_PORT_SRC);
-			if (rc < 0)
-				smblite_lib_err(chg, "Couldn't to role switch rc=%d\n",
-							rc);
 		} else {
 			chg->power_role = POWER_SUPPLY_TYPEC_PR_SOURCE;
 			typec_set_pwr_role(chg->typec_port, TYPEC_SOURCE);
@@ -2491,8 +2520,6 @@ static void typec_sink_removal(struct smb_charger *chg)
 {
 	if (chg->otg_present)
 		smblite_lib_notify_usb_host(chg, false);
-
-	typec_partner_unregister(chg);
 }
 
 static void typec_src_removal(struct smb_charger *chg)
@@ -2530,7 +2557,6 @@ static void typec_src_removal(struct smb_charger *chg)
 
 	smblite_lib_notify_device_mode(chg, false);
 
-	typec_partner_unregister(chg);
 	chg->typec_legacy = false;
 }
 
@@ -2596,6 +2622,7 @@ irqreturn_t smblite_typec_state_change_irq_handler(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+#define TYPEC_DETACH_DETECT_DELAY_MS 2000
 irqreturn_t smblite_typec_attach_detach_irq_handler(int irq, void *data)
 {
 	struct smb_irq_data *irq_data = data;
@@ -2663,8 +2690,25 @@ irqreturn_t smblite_typec_attach_detach_irq_handler(int irq, void *data)
 		 * mode configuration is reset properly.
 		 */
 
-		if (chg->typec_port && !chg->pr_swap_in_progress)
+		if (chg->typec_port && !chg->pr_swap_in_progress) {
+			/*
+			 * Schedule the work to differentiate an actual cable
+			 * removal from the detach interrupt seen during a
+			 * role swap; unregister the partner only on actual
+			 * cable removal.
+			 */
+			cancel_delayed_work(&chg->pr_swap_detach_work);
+			vote(chg->awake_votable, DETACH_DETECT_VOTER, true, 0);
+			schedule_delayed_work(&chg->pr_swap_detach_work,
+				msecs_to_jiffies(TYPEC_DETACH_DETECT_DELAY_MS));
 			smblite_lib_force_dr_mode(chg, TYPEC_PORT_DRP);
+
+			/*
+			 * Clear the flag so a cable removal during a failed
+			 * role swap is handled as a normal detach.
+			 */
+			chg->typec_role_swap_failed = false;
+		}
 	}
 
 	rc = smblite_lib_masked_write(chg, USB_CMD_PULLDOWN_REG,
@@ -2814,6 +2858,28 @@ irqreturn_t smblite_usb_id_irq_handler(int irq, void *data)
 /***************
  * Work Queues *
  ***************/
+static void smblite_lib_pr_swap_detach_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						pr_swap_detach_work.work);
+	int rc;
+	u8 stat;
+
+	rc = smblite_lib_read(chg, TYPE_C_STATE_MACHINE_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblite_lib_err(chg, "Couldn't read STATE_MACHINE_STS rc=%d\n",
+								rc);
+		goto out;
+	}
+	smblite_lib_dbg(chg, PR_REGISTER, "STATE_MACHINE_STS %#x\n", stat);
+
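+	/* Still detached after the delay: a real removal, not a role-swap blip */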
+	if (!(stat & TYPEC_ATTACH_DETACH_STATE_BIT))
+		typec_partner_unregister(chg);
+
+out:
+	vote(chg->awake_votable, DETACH_DETECT_VOTER, false, 0);
+}
+
 static void bms_update_work(struct work_struct *work)
 {
 	struct smb_charger *chg = container_of(work, struct smb_charger,
@@ -3212,6 +3278,8 @@ int smblite_lib_init(struct smb_charger *chg)
 					smblite_lib_thermal_regulation_work);
 	INIT_DELAYED_WORK(&chg->role_reversal_check,
 					smblite_lib_typec_role_check_work);
+	INIT_DELAYED_WORK(&chg->pr_swap_detach_work,
+					smblite_lib_pr_swap_detach_work);
 	chg->fake_capacity = -EINVAL;
 	chg->fake_batt_status = -EINVAL;
 	chg->sink_src_mode = UNATTACHED_MODE;
@@ -3270,6 +3338,7 @@ int smblite_lib_deinit(struct smb_charger *chg)
 		cancel_delayed_work_sync(&chg->bb_removal_work);
 		cancel_delayed_work_sync(&chg->thermal_regulation_work);
 		cancel_delayed_work_sync(&chg->role_reversal_check);
+		cancel_delayed_work_sync(&chg->pr_swap_detach_work);
 		power_supply_unreg_notifier(&chg->nb);
 		smblite_lib_destroy_votables(chg);
 		qcom_step_chg_deinit();
diff --git a/drivers/power/supply/qcom/smblite-lib.h b/drivers/power/supply/qcom/smblite-lib.h
index 0d6e8c3..a598464 100644
--- a/drivers/power/supply/qcom/smblite-lib.h
+++ b/drivers/power/supply/qcom/smblite-lib.h
@@ -275,6 +275,7 @@ struct smb_charger {
 	struct delayed_work	bb_removal_work;
 	struct delayed_work	thermal_regulation_work;
 	struct delayed_work	role_reversal_check;
+	struct delayed_work	pr_swap_detach_work;
 
 	struct charger_param	chg_param;
 
@@ -313,6 +314,7 @@ struct smb_charger {
 	bool			ldo_mode;
 	int			usb_id_gpio;
 	int			usb_id_irq;
+	bool			typec_role_swap_failed;
 
 	/* workaround flag */
 	u32			wa_flags;
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 56b7e0e..a335300 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -974,6 +974,14 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
 		/* Drop the message */
 		goto advance_rx;
 	}
+
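+	/* No endpoint callback registered yet; bail out and let the caller retry */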
+	if (!channel->ept.cb) {
+		dev_err(glink->dev,
+			"Callback not available on channel %s\n",
+			channel->name);
+		return -EAGAIN;
+	}
+
 	CH_INFO(channel, "chunk_size:%d left_size:%d\n", chunk_size, left_size);
 
 	if (glink->intentless) {
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index 60169a3..162a77a 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -25,6 +25,7 @@
 #define UFS_VENDOR_SAMSUNG     0x1CE
 #define UFS_VENDOR_SKHYNIX     0x1AD
 #define UFS_VENDOR_WDC         0x145
+#define UFS_VENDOR_MICRON      0x12C
 
 /**
  * ufs_dev_fix - ufs device quirk info
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9504ec1..6b9d697 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -438,6 +438,8 @@ static struct ufs_dev_fix ufs_fixups[] = {
 		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
 	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
 		UFS_DEVICE_QUIRK_NO_LINK_OFF),
+	UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
+		UFS_DEVICE_QUIRK_NO_LINK_OFF),
 	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
 		UFS_DEVICE_QUIRK_PA_TACTIVATE),
 	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
@@ -6506,8 +6508,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 			/* Mark completed command as NULL in LRB */
 			lrbp->cmd = NULL;
 			hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
-			__ufshcd_release(hba, false);
-			__ufshcd_hibern8_release(hba, false);
 			if (cmd->request) {
 				/*
 				 * As we are accessing the "request" structure,
@@ -6519,6 +6519,17 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 			}
 
 			clear_bit_unlock(index, &hba->lrb_in_use);
+			/*
+			 * __ufshcd_release and __ufshcd_hibern8_release are
+			 * called after clear_bit_unlock so that they see the
+			 * updated state of the lrb_in_use flag; on the last
+			 * transfer request completion the gate and hibernate
+			 * work functions then run to gate the clock and put
+			 * the link into hibern8 state.
+			 */
+			__ufshcd_release(hba, false);
+			__ufshcd_hibern8_release(hba, false);
 
 			/* Do not touch lrbp after scsi done */
 			cmd->scsi_done(cmd);
@@ -6574,7 +6585,6 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
 			update_req_stats(hba, lrbp);
 			/* Mark completed command as NULL in LRB */
 			lrbp->cmd = NULL;
-			ufshcd_release_all(hba);
 			if (cmd->request) {
 				/*
 				 * As we are accessing the "request" structure,
@@ -6585,6 +6595,17 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
 					true);
 			}
 			clear_bit_unlock(index, &hba->lrb_in_use);
+
+			/*
+			 * ufshcd_release_all is called after clear_bit_unlock
+			 * so that it sees the updated state of the lrb_in_use
+			 * flag; on the last aborted request completion the
+			 * gate and hibernate work functions then run to gate
+			 * the clock and put the link into hibern8 state.
+			 */
+			ufshcd_release_all(hba);
+
 			/* Do not touch lrbp after scsi done */
 			cmd->scsi_done(cmd);
 		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
@@ -8506,8 +8527,9 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
 	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
 
 
-	/* Enable WB only for UFS-3.1 OR if desc len >= 0x59 */
+	/* Enable WB only for UFS-3.1 or UFS-2.2 OR if desc len >= 0x59 */
 	if ((dev_desc->wspecversion >= 0x310) ||
+	    (dev_desc->wspecversion == 0x220) ||
 	    (dev_desc->wmanufacturerid == UFS_VENDOR_TOSHIBA &&
 	     dev_desc->wspecversion >= 0x300 &&
 	     hba->desc_size.dev_desc >= 0x59)) {
@@ -9060,21 +9082,27 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 			goto out;
 	}
 
-	/**
-	 * UFS3.0 and newer devices use Vcc and Vccq(1.2V)
-	 * while UFS2.1 devices use Vcc and Vccq2(1.8V) power
-	 * supplies. If the system allows turning off the regulators
-	 * during power collapse event, turn off the regulators
-	 * during system suspend events. This will cause the UFS
-	 * device to re-initialize upon system resume events.
+	/*
+	 * On the 4.19 kernel the handling of the Vccq regulator changed: it
+	 * now relies on the sys_suspend_pwr_off regulator DT flag instead of
+	 * the spm level. The updated logic does not honor the specified spm
+	 * level or the device-specific quirks for the desired link state.
+	 * The change below fixes that without disturbing the present logic.
 	 */
-	if ((hba->dev_info.w_spec_version >= 0x300 && hba->vreg_info.vccq &&
-		hba->vreg_info.vccq->sys_suspend_pwr_off) ||
-		(hba->dev_info.w_spec_version < 0x300 &&
-		hba->vreg_info.vccq2->sys_suspend_pwr_off))
-		hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+	if (hba->spm_lvl == ufs_get_desired_pm_lvl_for_dev_link_state(
 				UFS_POWERDOWN_PWR_MODE,
-				UIC_LINK_OFF_STATE);
+				UIC_LINK_OFF_STATE)) {
+		if ((hba->dev_info.w_spec_version >= 0x300 &&
+		     hba->vreg_info.vccq &&
+		     !hba->vreg_info.vccq->sys_suspend_pwr_off))
+			hba->vreg_info.vccq->sys_suspend_pwr_off = true;
+
+		if ((hba->dev_info.w_spec_version < 0x300 &&
+		     !hba->vreg_info.vccq2->sys_suspend_pwr_off))
+			hba->vreg_info.vccq2->sys_suspend_pwr_off = true;
+	}
 
 	/* UFS device is also active now */
 	ufshcd_set_ufs_dev_active(hba);
@@ -10278,10 +10306,18 @@ static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
 
 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
 {
+	int ret = 0;
+	struct ufs_vreg_info *info = &hba->vreg_info;
+
 	if (ufshcd_is_link_off(hba) ||
 	    (ufshcd_is_link_hibern8(hba)
 	     && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
-		ufshcd_setup_hba_vreg(hba, true);
+		ret = ufshcd_setup_hba_vreg(hba, true);
+
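+	/* If vdd_hba failed to re-enable, the host is unusable; crash deliberately */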
+	if (ret && (info->vdd_hba->enabled == false)) {
+		dev_err(hba->dev, "vdd_hba is not enabled\n");
+		BUG_ON(1);
+	}
 }
 
 /**
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 68bcdd7..e0be5b3 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -589,6 +589,14 @@
 	  deadlocks. It does not run during the bootup process, so it will
 	  not catch any early lockups.
 
+config QCOM_INITIAL_LOGBUF
+	bool "QCOM save initial log_buf"
+	depends on QCOM_WATCHDOG_V2
+	help
+	  This enables keeping a copy of the initial log_buf taken 100 seconds
+	  after bootup. It can help in debugging issues that are a
+	  manifestation of failures during initial bootup.
+
 config QCOM_FORCE_WDOG_BITE_ON_PANIC
 	bool "QCOM force watchdog bite"
 	depends on QCOM_WATCHDOG_V2
diff --git a/drivers/soc/qcom/cx_ipeak.c b/drivers/soc/qcom/cx_ipeak.c
index 6693f31..9952316 100644
--- a/drivers/soc/qcom/cx_ipeak.c
+++ b/drivers/soc/qcom/cx_ipeak.c
@@ -7,7 +7,6 @@
 #include <linux/io.h>
 #include <linux/iopoll.h>
 #include <linux/printk.h>
-#include <linux/spinlock.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/slab.h>
@@ -44,30 +43,34 @@ struct cx_ipeak_core_ops {
 	struct cx_ipeak_client* (*register_client)(int client_id);
 };
 
-static struct cx_ipeak_device {
-	spinlock_t vote_lock;
-	void __iomem *tcsr_vptr;
-	struct cx_ipeak_core_ops *core_ops;
-	u32 victims_count;
-	u32 victims_reg_count;
-	u32 danger_intr_num;
-	u32 safe_intr_num;
-} device_ipeak;
-
-struct cx_ipeak_client {
-	int vote_count;
-	unsigned int offset;
-	int client_id;
-	struct cx_ipeak_device *dev;
-};
-
 static struct cx_ipeak_victims {
-	int client_id;
-	int victim_id;
+	u32 client_id;
+	u32 victim_id;
 	u32 freq_limit;
 	void *data;
 	cx_ipeak_victim_fn victim_cb;
-} victims_ipeak[CXIP_VICTIMS];
+	struct cx_ipeak_client *client;
+} victim_list[CXIP_VICTIMS];
+
+static struct cx_ipeak_device {
+	struct platform_device *pdev;
+	struct mutex vote_lock;
+	struct mutex throttle_lock;
+	void __iomem *tcsr_vptr;
+	struct cx_ipeak_core_ops *core_ops;
+	u32 victims_count;
+	int danger_intr_num;
+	int safe_intr_num;
+} device_ipeak;
+
+struct cx_ipeak_client {
+	u32 vote_count;
+	unsigned int offset;
+	u32 client_id;
+	bool danger_assert;
+	struct cx_ipeak_device *dev;
+};
 
 /**
  * cx_ipeak_register() - allocate client structure and fill device private and
@@ -181,16 +184,15 @@ int cx_ipeak_victim_register(struct cx_ipeak_client *client,
 	if (!victim_cb)
 		return -EINVAL;
 
-	while (i < device_ipeak.victims_count) {
-		if (client->client_id == victims_ipeak[i].client_id) {
-			victims_ipeak[i].victim_cb = victim_cb;
-			victims_ipeak[i].data = data;
-			device_ipeak.victims_reg_count++;
-			break;
+	for (i = 0; i < device_ipeak.victims_count; i++)
+		if (client->client_id == victim_list[i].client_id) {
+			victim_list[i].victim_cb = victim_cb;
+			victim_list[i].data = data;
+			victim_list[i].client = client;
+			return 0;
 		}
-		i++;
-	}
-	return 0;
+
+	return -ENOENT;
 }
 EXPORT_SYMBOL(cx_ipeak_victim_register);
 
@@ -204,15 +206,12 @@ void cx_ipeak_victim_unregister(struct cx_ipeak_client *client)
 {
 	int i = 0;
 
-	while (i < device_ipeak.victims_count) {
-		if (client->client_id == victims_ipeak[i].client_id) {
-			victims_ipeak[i].victim_cb = NULL;
-			victims_ipeak[i].data = NULL;
-			device_ipeak.victims_reg_count--;
-			break;
+	for (i = 0; i < device_ipeak.victims_count; i++)
+		if (client->client_id == victim_list[i].client_id) {
+			victim_list[i].victim_cb = NULL;
+			victim_list[i].data = NULL;
+			victim_list[i].client = NULL;
 		}
-		i++;
-	}
 }
 EXPORT_SYMBOL(cx_ipeak_victim_unregister);
 
@@ -243,7 +242,7 @@ static int cx_ipeak_update_v1(struct cx_ipeak_client *client, bool vote)
 	unsigned int reg_val;
 	int ret = 0;
 
-	spin_lock(&client->dev->vote_lock);
+	mutex_lock(&client->dev->vote_lock);
 
 	if (vote) {
 		if (client->vote_count == 0) {
@@ -282,27 +281,36 @@ static int cx_ipeak_update_v1(struct cx_ipeak_client *client, bool vote)
 	}
 
 done:
-	spin_unlock(&client->dev->vote_lock);
+	mutex_unlock(&client->dev->vote_lock);
 	return ret;
 }
 
 static int cx_ipeak_update_v2(struct cx_ipeak_client *client, bool vote)
 {
-	unsigned int reg_val;
+	u32 reg_val;
 	int ret = 0;
 
-	spin_lock(&client->dev->vote_lock);
+	mutex_lock(&client->dev->vote_lock);
 
 	if (vote) {
 		if (client->vote_count == 0) {
 			writel_relaxed(BIT(0),
-				       client->dev->tcsr_vptr +
-				       client->offset);
+					client->dev->tcsr_vptr +
+					client->offset);
 
-			ret = readl_poll_timeout(client->dev->tcsr_vptr +
-						 TCSR_CXIP_LM_DANGER_OFFSET,
-						 reg_val, !reg_val, 0,
-						 CXIP_POLL_TIMEOUT_US);
+			ret = readl_poll_timeout(
+					client->dev->tcsr_vptr +
+					TCSR_CXIP_LM_DANGER_OFFSET,
+					reg_val, !reg_val ||
+					client->danger_assert,
+					0, CXIP_POLL_TIMEOUT_US);
+			/*
+			 * If poll exits due to danger assert condition return
+			 * error to client to avoid voting.
+			 */
+			if (client->danger_assert)
+				ret = -ETIMEDOUT;
+
 			if (ret) {
 				writel_relaxed(0,
 					       client->dev->tcsr_vptr +
@@ -325,57 +333,86 @@ static int cx_ipeak_update_v2(struct cx_ipeak_client *client, bool vote)
 	}
 
 done:
-	spin_unlock(&client->dev->vote_lock);
+	mutex_unlock(&client->dev->vote_lock);
 	return ret;
 }
 
-static irqreturn_t cx_ipeak_irq_handler(int irq, void *data)
+static irqreturn_t cx_ipeak_irq_soft_handler(int irq, void *data)
 {
 	int i;
 	irqreturn_t ret = IRQ_NONE;
 
-	for (i = 0; i < device_ipeak.victims_reg_count; i++) {
-		cx_ipeak_victim_fn victim_cb = victims_ipeak[i].victim_cb;
+	mutex_lock(&device_ipeak.throttle_lock);
+
+	for (i = 0; i < device_ipeak.victims_count; i++) {
+		cx_ipeak_victim_fn victim_cb = victim_list[i].victim_cb;
+		struct cx_ipeak_client *victim_client = victim_list[i].client;
+
+		if (!victim_cb || !victim_client)
+			continue;
 
 		if (irq == device_ipeak.danger_intr_num) {
-		/*
-		 * To set frequency limit at victim client
-		 * side in danger interrupt case
-		 */
-			victim_cb(victims_ipeak[i].data,
-				victims_ipeak[i].freq_limit);
+
+			victim_client->danger_assert = true;
+
+			/*
+			 * To set frequency limit at victim client
+			 * side in danger interrupt case
+			 */
+
+			ret = victim_cb(victim_list[i].data,
+					victim_list[i].freq_limit);
+
+			if (ret) {
+				dev_err(&device_ipeak.pdev->dev,
+					"Unable to throttle client:%d freq:%d\n",
+					victim_list[i].client_id,
+					victim_list[i].freq_limit);
+				victim_client->danger_assert = false;
+				ret = IRQ_HANDLED;
+				goto done;
+			}
+
 			writel_relaxed(1, (device_ipeak.tcsr_vptr +
 						CXIP_VICTIM_OFFSET +
-						((victims_ipeak[i].victim_id)*
-							 CXIP_CLIENT_OFFSET)));
+						((victim_list[i].victim_id)*
+						 CXIP_CLIENT_OFFSET)));
+
 			ret = IRQ_HANDLED;
 		} else if (irq == device_ipeak.safe_intr_num) {
-		/*
-		 * To remove frequency limit at victim client
-		 * side in safe interrupt case
-		 */
-			victim_cb(victims_ipeak[i].data, 0);
+			victim_client->danger_assert = false;
+			/*
+			 * To remove frequency limit at victim client
+			 * side in safe interrupt case
+			 */
+			ret = victim_cb(victim_list[i].data, 0);
+
+			if (ret)
+				dev_err(&device_ipeak.pdev->dev, "Unable to remove freq limit client:%d\n",
+						victim_list[i].client_id);
+
 			writel_relaxed(0, (device_ipeak.tcsr_vptr +
 						CXIP_VICTIM_OFFSET +
-						((victims_ipeak[i].victim_id)*
+						((victim_list[i].victim_id)*
 						 CXIP_CLIENT_OFFSET)));
 			ret = IRQ_HANDLED;
 		}
 	}
-
+done:
+	mutex_unlock(&device_ipeak.throttle_lock);
 	return ret;
 }
 
 int cx_ipeak_request_irq(struct platform_device *pdev, const  char *name,
-		irq_handler_t handler, void *data)
+		irq_handler_t handler, irq_handler_t thread_fn, void *data)
 {
 	int ret, num = platform_get_irq_byname(pdev, name);
 
 	if (num < 0)
 		return num;
 
-	ret = devm_request_irq(&pdev->dev, num, handler, IRQF_TRIGGER_RISING,
-				name, data);
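+	/* A threaded (oneshot) handler lets the victim callbacks sleep on mutexes */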
+	ret = devm_request_threaded_irq(&pdev->dev, num, handler, thread_fn,
+			IRQF_ONESHOT | IRQF_TRIGGER_RISING, name, data);
 
 	if (ret)
 		dev_err(&pdev->dev, "Unable to get interrupt %s: %d\n",
@@ -409,7 +446,6 @@ struct cx_ipeak_core_ops core_ops_v2 = {
 static int cx_ipeak_probe(struct platform_device *pdev)
 {
 	struct resource *res;
-	int status = -EINVAL;
 	int i, ret, count;
 	u32 victim_en;
 
@@ -440,21 +476,21 @@ static int cx_ipeak_probe(struct platform_device *pdev)
 		for (i = 0; i < (count/VICTIM_ENTRIES); i++) {
 			ret = of_property_read_u32_index(pdev->dev.of_node,
 					"victims_table", i*VICTIM_ENTRIES,
-					&victims_ipeak[i].client_id);
+					&victim_list[i].client_id);
 
 			if (ret)
 				return ret;
 
 			ret = of_property_read_u32_index(pdev->dev.of_node,
 					"victims_table", (i*VICTIM_ENTRIES) + 1,
-					&victims_ipeak[i].victim_id);
+					&victim_list[i].victim_id);
 
 			if (ret)
 				return ret;
 
 			ret = of_property_read_u32_index(pdev->dev.of_node,
 					"victims_table", (i*VICTIM_ENTRIES) + 2,
-					&victims_ipeak[i].freq_limit);
+					&victim_list[i].freq_limit);
 
 			if (ret)
 				return ret;
@@ -462,24 +498,25 @@ static int cx_ipeak_probe(struct platform_device *pdev)
 			device_ipeak.victims_count++;
 		}
 
-		status = cx_ipeak_request_irq(pdev, "cx_ipeak_danger",
-				cx_ipeak_irq_handler, NULL);
+		device_ipeak.danger_intr_num = cx_ipeak_request_irq(pdev,
+				"cx_ipeak_danger", NULL,
+				cx_ipeak_irq_soft_handler, NULL);
 
-		if (status < 0)
-			return status;
+		if (device_ipeak.danger_intr_num < 0)
+			return device_ipeak.danger_intr_num;
 
-		device_ipeak.danger_intr_num = status;
+		device_ipeak.safe_intr_num = cx_ipeak_request_irq(pdev,
+				"cx_ipeak_safe", NULL,
+				cx_ipeak_irq_soft_handler, NULL);
 
-		status = cx_ipeak_request_irq(pdev, "cx_ipeak_safe",
-				cx_ipeak_irq_handler, NULL);
+		if (device_ipeak.safe_intr_num < 0)
+			return device_ipeak.safe_intr_num;
 
-		if (status < 0)
-			return status;
-
-		device_ipeak.safe_intr_num = status;
 	}
 
-	spin_lock_init(&device_ipeak.vote_lock);
+	device_ipeak.pdev = pdev;
+	mutex_init(&device_ipeak.vote_lock);
+	mutex_init(&device_ipeak.throttle_lock);
 	return 0;
 }
 
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index ada4be8..d8e2fd3 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -1580,7 +1580,8 @@ static ssize_t dcc_sram_read(struct file *file, char __user *data,
 	if (drvdata->ram_size <= *ppos)
 		return 0;
 
-	if ((*ppos + len) > drvdata->ram_size)
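+	/* (*ppos + len) < len catches unsigned wraparound of the sum */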
+	if ((*ppos + len) < len
+		|| (*ppos + len) > drvdata->ram_size)
 		len = (drvdata->ram_size - *ppos);
 
 	buf = kzalloc(len, GFP_KERNEL);
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index addf18e..4dcfc83 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -844,6 +844,8 @@ static int icnss_driver_event_server_arrive(void *data)
 
 	set_bit(ICNSS_WLFW_EXISTS, &penv->state);
 	clear_bit(ICNSS_FW_DOWN, &penv->state);
+	clear_bit(ICNSS_FW_READY, &penv->state);
+
 	icnss_ignore_fw_timeout(false);
 
 	if (test_bit(ICNSS_WLFW_CONNECTED, &penv->state)) {
@@ -1460,12 +1462,11 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
 	    notif->crashed == CRASH_STATUS_ERR_FATAL) {
 		icnss_pr_info("Collecting msa0 segment dump\n");
 		icnss_msa0_ramdump(priv);
-
-		return NOTIFY_OK;
+		goto out;
 	}
 
 	if (code != SUBSYS_BEFORE_SHUTDOWN)
-		return NOTIFY_OK;
+		goto out;
 
 	priv->is_ssr = true;
 
@@ -1481,7 +1482,7 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
 						 ICNSS_UEVENT_FW_DOWN,
 						 &fw_down_data);
 		}
-		return NOTIFY_OK;
+		goto out;
 	}
 
 	icnss_pr_info("Modem went down, state: 0x%lx, crashed: %d\n",
@@ -1498,8 +1499,10 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
 
 	event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
 
-	if (event_data == NULL)
+	if (event_data == NULL) {
+		icnss_pr_vdbg("Exit %s, event_data is NULL\n", __func__);
 		return notifier_from_errno(-ENOMEM);
+	}
 
 	event_data->crashed = notif->crashed;
 
@@ -1511,7 +1514,8 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
 	}
 	icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
 				ICNSS_EVENT_SYNC, event_data);
-
+out:
+	icnss_pr_vdbg("Exit %s, state: 0x%lx\n", __func__, priv->state);
 	return NOTIFY_OK;
 }
 
@@ -2799,6 +2803,9 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv)
 			continue;
 		case ICNSS_PDR:
 			seq_puts(s, "PDR TRIGGERED");
+			continue;
+		case ICNSS_DEL_SERVER:
+			seq_puts(s, "DEL SERVER");
 		}
 
 		seq_printf(s, "UNKNOWN-%d", i);
diff --git a/drivers/soc/qcom/icnss2/debug.c b/drivers/soc/qcom/icnss2/debug.c
index 8731a7b..3771a6b 100644
--- a/drivers/soc/qcom/icnss2/debug.c
+++ b/drivers/soc/qcom/icnss2/debug.c
@@ -404,6 +404,9 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv)
 			continue;
 		case ICNSS_PDR:
 			seq_puts(s, "PDR TRIGGERED");
+			continue;
+		case ICNSS_DEL_SERVER:
+			seq_puts(s, "DEL SERVER");
 		}
 
 		seq_printf(s, "UNKNOWN-%d", i);
diff --git a/drivers/soc/qcom/icnss2/main.c b/drivers/soc/qcom/icnss2/main.c
index d32c795..c034ead 100644
--- a/drivers/soc/qcom/icnss2/main.c
+++ b/drivers/soc/qcom/icnss2/main.c
@@ -610,7 +610,7 @@ static int icnss_driver_event_server_arrive(struct icnss_priv *priv,
 			goto err_power_on;
 		}
 
-		icnss_pr_dbg("MEM_BASE pa: %pa, va: 0x%pK\n",
+		icnss_pr_dbg("Non-Secured Bar Address pa: %pa, va: 0x%pK\n",
 			     &priv->mem_base_pa,
 			     priv->mem_base_va);
 
diff --git a/drivers/soc/qcom/icnss2/main.h b/drivers/soc/qcom/icnss2/main.h
index 44efede..7f1e37d 100644
--- a/drivers/soc/qcom/icnss2/main.h
+++ b/drivers/soc/qcom/icnss2/main.h
@@ -109,6 +109,7 @@ enum icnss_driver_state {
 	ICNSS_MODE_ON,
 	ICNSS_BLOCK_SHUTDOWN,
 	ICNSS_PDR,
+	ICNSS_DEL_SERVER,
 };
 
 struct ce_irq_list {
diff --git a/drivers/soc/qcom/icnss2/power.c b/drivers/soc/qcom/icnss2/power.c
index 8ed9f98..3844c1a 100644
--- a/drivers/soc/qcom/icnss2/power.c
+++ b/drivers/soc/qcom/icnss2/power.c
@@ -12,22 +12,10 @@
 #include "qmi.h"
 #include "debug.h"
 
-static struct icnss_vreg_cfg icnss_vreg_list[] = {
-	{"vdd-wlan-core", 1300000, 1300000, 0, 0, 0, false},
-	{"vdd-wlan-io", 1800000, 1800000, 0, 0, 0, false},
-	{"vdd-wlan-xtal-aon", 0, 0, 0, 0, 0, false},
-	{"vdd-wlan-xtal", 1800000, 1800000, 0, 2, 0, false},
-	{"vdd-wlan", 0, 0, 0, 0, 0, false},
-	{"vdd-wlan-ctrl1", 0, 0, 0, 0, 0, false},
-	{"vdd-wlan-ctrl2", 0, 0, 0, 0, 0, false},
-	{"vdd-wlan-sp2t", 2700000, 2700000, 0, 0, 0, false},
-	{"wlan-ant-switch", 1800000, 1800000, 0, 0, 0, false},
-	{"wlan-soc-swreg", 1200000, 1200000, 0, 0, 0, false},
-	{"vdd-wlan-aon", 950000, 950000, 0, 0, 0, false},
-	{"vdd-wlan-dig", 950000, 952000, 0, 0, 0, false},
-	{"vdd-wlan-rfa1", 1900000, 1900000, 0, 0, 0, false},
-	{"vdd-wlan-rfa2", 1350000, 1350000, 0, 0, 0, false},
-	{"vdd-wlan-en", 0, 0, 0, 10, 0, false},
+static struct icnss_vreg_cfg icnss_wcn6750_vreg_list[] = {
+	{"vdd-cx-mx", 824000, 952000, 0, 0, 0, false},
+	{"vdd-1.8-xo", 1872000, 1872000, 0, 0, 0, false},
+	{"vdd-1.3-rfa", 1256000, 1352000, 0, 0, 0, false},
 };
 
 static struct icnss_vreg_cfg icnss_adrestea_vreg_list[] = {
@@ -46,7 +34,7 @@ static struct icnss_clk_cfg icnss_adrestea_clk_list[] = {
 	{"cxo_ref_clk_pin", 0, 0},
 };
 
-#define ICNSS_VREG_LIST_SIZE		ARRAY_SIZE(icnss_vreg_list)
+#define ICNSS_VREG_LIST_SIZE		ARRAY_SIZE(icnss_wcn6750_vreg_list)
 #define ICNSS_VREG_ADRESTEA_LIST_SIZE	ARRAY_SIZE(icnss_adrestea_vreg_list)
 #define ICNSS_CLK_LIST_SIZE		ARRAY_SIZE(icnss_clk_list)
 #define ICNSS_CLK_ADRESTEA_LIST_SIZE	ARRAY_SIZE(icnss_adrestea_clk_list)
@@ -286,7 +274,7 @@ static struct icnss_vreg_cfg *get_vreg_list(u32 *vreg_list_size,
 	switch (device_id) {
 	case WCN6750_DEVICE_ID:
 		*vreg_list_size = ICNSS_VREG_LIST_SIZE;
-		return icnss_vreg_list;
+		return icnss_wcn6750_vreg_list;
 
 	case ADRASTEA_DEVICE_ID:
 		*vreg_list_size = ICNSS_VREG_ADRESTEA_LIST_SIZE;
diff --git a/drivers/soc/qcom/icnss2/qmi.c b/drivers/soc/qcom/icnss2/qmi.c
index 6c8d8f1..6223d73 100644
--- a/drivers/soc/qcom/icnss2/qmi.c
+++ b/drivers/soc/qcom/icnss2/qmi.c
@@ -609,7 +609,8 @@ int wlfw_cap_send_sync_msg(struct icnss_priv *priv)
 	if (!priv)
 		return -ENODEV;
 
-	icnss_pr_dbg("Sending capability message, state: 0x%lx\n", priv->state);
+	icnss_pr_dbg("Sending target capability message, state: 0x%lx\n",
+		     priv->state);
 
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (!req)
@@ -1296,6 +1297,8 @@ int wlfw_athdiag_write_send_sync_msg(struct icnss_priv *priv,
 			resp->resp.result, resp->resp.error);
 		ret = -resp->resp.result;
 		goto out;
+	} else {
+		ret = 0;
 	}
 
 out:
@@ -1943,6 +1946,8 @@ int icnss_clear_server(struct icnss_priv *priv)
 
 	icnss_unregister_fw_service(priv);
 
+	clear_bit(ICNSS_DEL_SERVER, &priv->state);
+
 	ret =  icnss_register_fw_service(priv);
 	if (ret < 0) {
 		icnss_pr_err("WLFW server registration failed\n");
@@ -1959,6 +1964,12 @@ static int wlfw_new_server(struct qmi_handle *qmi,
 		container_of(qmi, struct icnss_priv, qmi);
 	struct icnss_event_server_arrive_data *event_data;
 
+	if (priv && test_bit(ICNSS_DEL_SERVER, &priv->state)) {
+		icnss_pr_info("WLFW server delete in progress, Ignore server arrive: 0x%lx\n",
+			      priv->state);
+		return 0;
+	}
+
 	icnss_pr_dbg("WLFW server arrive: node %u port %u\n",
 		     service->node, service->port);
 
@@ -1980,9 +1991,16 @@ static void wlfw_del_server(struct qmi_handle *qmi,
 {
 	struct icnss_priv *priv = container_of(qmi, struct icnss_priv, qmi);
 
+	if (priv && test_bit(ICNSS_DEL_SERVER, &priv->state)) {
+		icnss_pr_info("WLFW server delete in progress, Ignore server delete: 0x%lx\n",
+			      priv->state);
+		return;
+	}
+
 	icnss_pr_dbg("WLFW server delete\n");
 
 	if (priv) {
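+		/*
+		 * Mark server teardown in progress; further del_server
+		 * notifications are ignored until icnss_clear_server()
+		 * clears this bit after re-registering the service.
+		 */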
+		set_bit(ICNSS_DEL_SERVER, &priv->state);
 		set_bit(ICNSS_FW_DOWN, &priv->state);
 		icnss_ignore_fw_timeout(true);
 	}
@@ -2189,6 +2207,18 @@ int icnss_send_vbatt_update(struct icnss_priv *priv, uint64_t voltage_uv)
 	return ret;
 }
 
+#ifdef CONFIG_ICNSS2_DEBUG
+static inline u32 icnss_get_host_build_type(void)
+{
+	return QMI_HOST_BUILD_TYPE_PRIMARY_V01;
+}
+#else
+static inline u32 icnss_get_host_build_type(void)
+{
+	return QMI_HOST_BUILD_TYPE_SECONDARY_V01;
+}
+#endif
+
 int wlfw_host_cap_send_sync(struct icnss_priv *priv)
 {
 	struct wlfw_host_cap_req_msg_v01 *req;
@@ -2224,6 +2254,9 @@ int wlfw_host_cap_send_sync(struct icnss_priv *priv)
 	req->cal_done = priv->cal_done;
 	icnss_pr_dbg("Calibration done is %d\n", priv->cal_done);
 
+	req->host_build_type_valid = 1;
+	req->host_build_type = icnss_get_host_build_type();
+
 	ret = qmi_txn_init(&priv->qmi, &txn,
 			   wlfw_host_cap_resp_msg_v01_ei, resp);
 	if (ret < 0) {
diff --git a/drivers/soc/qcom/icnss_private.h b/drivers/soc/qcom/icnss_private.h
index e864a0b..45ad71a 100644
--- a/drivers/soc/qcom/icnss_private.h
+++ b/drivers/soc/qcom/icnss_private.h
@@ -157,6 +157,7 @@ enum icnss_driver_state {
 	ICNSS_MODE_ON,
 	ICNSS_BLOCK_SHUTDOWN,
 	ICNSS_PDR,
+	ICNSS_DEL_SERVER,
 };
 
 struct ce_irq_list {
diff --git a/drivers/soc/qcom/icnss_qmi.c b/drivers/soc/qcom/icnss_qmi.c
index 3141600..1f73ae4 100644
--- a/drivers/soc/qcom/icnss_qmi.c
+++ b/drivers/soc/qcom/icnss_qmi.c
@@ -1265,6 +1265,8 @@ int icnss_clear_server(struct icnss_priv *priv)
 
 	icnss_unregister_fw_service(priv);
 
+	clear_bit(ICNSS_DEL_SERVER, &priv->state);
+
 	ret =  icnss_register_fw_service(priv);
 	if (ret < 0) {
 		icnss_pr_err("WLFW server registration failed\n");
@@ -1277,8 +1279,15 @@ int icnss_clear_server(struct icnss_priv *priv)
 static int wlfw_new_server(struct qmi_handle *qmi,
 			   struct qmi_service *service)
 {
+	struct icnss_priv *priv = container_of(qmi, struct icnss_priv, qmi);
 	struct icnss_event_server_arrive_data *event_data;
 
+	if (priv && test_bit(ICNSS_DEL_SERVER, &priv->state)) {
+		icnss_pr_info("WLFW server delete in progress, Ignore server arrive: 0x%lx\n",
+			      priv->state);
+		return 0;
+	}
+
 	icnss_pr_dbg("WLFW server arrive: node %u port %u\n",
 		     service->node, service->port);
 
@@ -1300,9 +1309,16 @@ static void wlfw_del_server(struct qmi_handle *qmi,
 {
 	struct icnss_priv *priv = container_of(qmi, struct icnss_priv, qmi);
 
+	if (priv && test_bit(ICNSS_DEL_SERVER, &priv->state)) {
+		icnss_pr_info("WLFW server delete in progress, Ignore server delete: 0x%lx\n",
+			      priv->state);
+		return;
+	}
+
 	icnss_pr_dbg("WLFW server delete\n");
 
 	if (priv) {
+		set_bit(ICNSS_DEL_SERVER, &priv->state);
 		set_bit(ICNSS_FW_DOWN, &priv->state);
 		icnss_ignore_fw_timeout(true);
 	}
diff --git a/drivers/soc/qcom/minidump_log.c b/drivers/soc/qcom/minidump_log.c
index 87b72ed..c0ef1be 100644
--- a/drivers/soc/qcom/minidump_log.c
+++ b/drivers/soc/qcom/minidump_log.c
@@ -11,6 +11,7 @@
 #include <linux/thread_info.h>
 #include <soc/qcom/minidump.h>
 #include <asm/sections.h>
+#include <asm/stacktrace.h>
 #include <linux/mm.h>
 #include <linux/sched/task.h>
 #include <linux/vmalloc.h>
@@ -161,10 +162,47 @@ void dump_stack_minidump(u64 sp)
 		pr_err("Failed to add current task %d in Minidump\n", cpu);
 }
 
+#ifdef CONFIG_ARM64
+static void register_irq_stack(void)
+{
+	int cpu;
+	unsigned int i;
+	int irq_stack_pages_count;
+	u64 irq_stack_base;
+	struct md_region irq_sp_entry;
+	u64 sp;
+
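+	/*
+	 * With CONFIG_VMAP_STACK the IRQ stack is vmalloc'd and may be
+	 * physically non-contiguous, so register it with minidump page
+	 * by page; otherwise one linear region per CPU suffices.
+	 */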
+	for_each_possible_cpu(cpu) {
+		irq_stack_base = (u64)per_cpu(irq_stack_ptr, cpu);
+		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
+			irq_stack_pages_count = IRQ_STACK_SIZE / PAGE_SIZE;
+			sp = irq_stack_base & ~(PAGE_SIZE - 1);
+			for (i = 0; i < irq_stack_pages_count; i++) {
+				scnprintf(irq_sp_entry.name,
+					  sizeof(irq_sp_entry.name),
+					  "KISTACK%d_%d", cpu, i);
+				register_stack_entry(&irq_sp_entry, sp,
+						     PAGE_SIZE, cpu);
+				sp += PAGE_SIZE;
+			}
+		} else {
+			sp = irq_stack_base;
+			scnprintf(irq_sp_entry.name, sizeof(irq_sp_entry.name),
+				  "KISTACK%d", cpu);
+			register_stack_entry(&irq_sp_entry, sp, IRQ_STACK_SIZE,
+					     cpu);
+		}
+	}
+}
+#else
+static inline void register_irq_stack(void) {}
+#endif
+
 static int __init msm_minidump_log_init(void)
 {
 	register_kernel_sections();
+	register_irq_stack();
 	register_log_buf();
 	return 0;
 }
-late_initcall(msm_minidump_log_init);
+subsys_initcall(msm_minidump_log_init);
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index c56cc90..5c55fb3 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -414,6 +414,33 @@ static int pil_do_minidump(struct pil_desc *desc, void *ramdump_dev)
 }
 
 /**
+ * print_aux_minidump_tocs() - Print the ToC for an auxiliary minidump entry
+ * @desc: PIL descriptor for the subsystem for which minidump is collected
+ *
+ * Prints out the table of contents (ToC) for all of the auxiliary
+ * minidump entries for a subsystem.
+ */
+static void print_aux_minidump_tocs(struct pil_desc *desc)
+{
+	int i;
+	struct md_ss_toc *toc;
+
+	for (i = 0; i < desc->num_aux_minidump_ids; i++) {
+		toc = desc->aux_minidump[i];
+		pr_debug("Minidump : md_aux_toc->toc_init 0x%x\n",
+			 (unsigned int)toc->md_ss_toc_init);
+		pr_debug("Minidump : md_aux_toc->enable_status 0x%x\n",
+			 (unsigned int)toc->md_ss_enable_status);
+		pr_debug("Minidump : md_aux_toc->encryption_status 0x%x\n",
+			 (unsigned int)toc->encryption_status);
+		pr_debug("Minidump : md_aux_toc->ss_region_count 0x%x\n",
+			 (unsigned int)toc->ss_region_count);
+		pr_debug("Minidump : md_aux_toc->smem_regions_baseptr 0x%x\n",
+			 (unsigned int)toc->md_ss_smem_regions_baseptr);
+	}
+}
+
+/**
  * pil_do_ramdump() - Ramdump an image
  * @desc: descriptor from pil_desc_init()
  * @ramdump_dev: ramdump device returned from create_ramdump_device()
@@ -441,6 +468,9 @@ int pil_do_ramdump(struct pil_desc *desc,
 		pr_debug("Minidump : md_ss_toc->md_ss_smem_regions_baseptr is 0x%x\n",
 			(unsigned int)
 			desc->minidump_ss->md_ss_smem_regions_baseptr);
+
+		print_aux_minidump_tocs(desc);
+
 		/**
 		 * Collect minidump if SS ToC is valid and segment table
 		 * is initialized in memory and encryption status is set.
diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c
index 92af4dcb6..2edd9af 100644
--- a/drivers/soc/qcom/qdss_bridge.c
+++ b/drivers/soc/qcom/qdss_bridge.c
@@ -450,7 +450,7 @@ static void usb_notifier(void *priv, unsigned int event,
 	struct qdss_bridge_drvdata *drvdata = priv;
 
 	if (!drvdata || drvdata->mode != MHI_TRANSFER_TYPE_USB
-			|| drvdata->opened == DISABLE) {
+			|| drvdata->opened != ENABLE) {
 		pr_err_ratelimited("%s can't be called in invalid status.\n",
 				__func__);
 		return;
@@ -530,6 +530,7 @@ static void qdss_bridge_open_work_fn(struct work_struct *work)
 
 	return;
 err:
+	drvdata->opened = DISABLE;
 	mhi_unprepare_from_transfer(drvdata->mhi_dev);
 	mhi_ch_close(drvdata);
 err_open:
diff --git a/drivers/soc/qcom/rpmh_master_stat.c b/drivers/soc/qcom/rpmh_master_stat.c
index 5403a0c..d4418ba 100644
--- a/drivers/soc/qcom/rpmh_master_stat.c
+++ b/drivers/soc/qcom/rpmh_master_stat.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME
@@ -126,7 +126,6 @@ static ssize_t msm_rpmh_master_stats_show(struct kobject *kobj,
 {
 	ssize_t length;
 	int i = 0;
-	size_t size = 0;
 	struct msm_rpmh_master_stats *record = NULL;
 
 	mutex_lock(&rpmh_stats_mutex);
@@ -141,7 +140,7 @@ static ssize_t msm_rpmh_master_stats_show(struct kobject *kobj,
 	for (i = 0; i < ARRAY_SIZE(rpmh_masters); i++) {
 		record = (struct msm_rpmh_master_stats *) qcom_smem_get(
 					rpmh_masters[i].pid,
-					rpmh_masters[i].smem_id, &size);
+					rpmh_masters[i].smem_id, NULL);
 		if (!IS_ERR_OR_NULL(record) && (PAGE_SIZE - length > 0))
 			length += msm_rpmh_master_stats_print_data(
 					buf + length, PAGE_SIZE - length,
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
index 7b8485e..301d06f 100644
--- a/drivers/soc/qcom/watchdog_v2.c
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -7,6 +7,9 @@
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/delay.h>
+#include <linux/sort.h>
+#include <linux/kernel_stat.h>
+#include <linux/irq_cpustat.h>
 #include <linux/slab.h>
 #include <linux/jiffies.h>
 #include <linux/kthread.h>
@@ -28,6 +31,10 @@
 #include <linux/sched/clock.h>
 #include <linux/cpumask.h>
 #include <uapi/linux/sched/types.h>
+#ifdef CONFIG_QCOM_INITIAL_LOGBUF
+#include <linux/kallsyms.h>
+#include <linux/math64.h>
+#endif
 
 #define MODULE_NAME "msm_watchdog"
 #define WDT0_ACCSCSSNBARK_INT 0
@@ -47,11 +54,30 @@
 #define SCM_SET_REGSAVE_CMD	0x2
 #define SCM_SVC_SEC_WDOG_DIS	0x7
 #define MAX_CPU_CTX_SIZE	2048
+#define NR_TOP_HITTERS		10
+#define COMPARE_RET		-1
+
+typedef int (*compare_t) (const void *lhs, const void *rhs);
+
+#ifdef CONFIG_QCOM_INITIAL_LOGBUF
+#define LOGBUF_TIMEOUT		100000U
+
+static struct delayed_work log_buf_work;
+static char *init_log_buf;
+static unsigned int *log_buf_size;
+static dma_addr_t log_buf_paddr;
+#endif
 
 static struct msm_watchdog_data *wdog_data;
 
 static int cpu_idle_pc_state[NR_CPUS];
 
+struct irq_info {
+	unsigned int irq;
+	unsigned int total_count;
+	unsigned int irq_counter[NR_CPUS];
+};
+
 /*
  * user_pet_enable:
  *	Require userspace to write to a sysfs file every pet_time milliseconds.
@@ -92,6 +118,11 @@ struct msm_watchdog_data {
 	unsigned long long thread_start;
 	unsigned long long ping_start[NR_CPUS];
 	unsigned long long ping_end[NR_CPUS];
+	struct work_struct irq_counts_work;
+	struct irq_info irq_counts[NR_TOP_HITTERS];
+	struct irq_info ipi_counts[NR_IPI];
+	unsigned int tot_irq_count[NR_CPUS];
+	atomic_t irq_counts_running;
 };
 
 /*
@@ -404,6 +435,183 @@ static void pet_task_wakeup(struct timer_list *t)
 	wake_up(&wdog_dd->pet_complete);
 }
 
+static int cmp_irq_info_fn(const void *a, const void *b)
+{
+	struct irq_info *lhs = (struct irq_info *)a;
+	struct irq_info *rhs = (struct irq_info *)b;
+
+	if (lhs->total_count < rhs->total_count)
+		return 1;
+
+	if (lhs->total_count > rhs->total_count)
+		return COMPARE_RET;
+
+	return 0;
+}
+
+static void swap_irq_info_fn(void *a, void *b, int size)
+{
+	struct irq_info temp;
+	struct irq_info *lhs = (struct irq_info *)a;
+	struct irq_info *rhs = (struct irq_info *)b;
+
+	temp = *lhs;
+	*lhs = *rhs;
+	*rhs = temp;
+}
+
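+/*
+ * Binary search over a table kept sorted in descending order of
+ * total_count (see cmp_irq_info_fn). Returns the entry closest to
+ * @key so the caller can decide whether it displaces a top hitter.
+ */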
+static struct irq_info *search(struct irq_info *key, struct irq_info *base,
+			       size_t num, compare_t cmp)
+{
+	struct irq_info *pivot;
+	int result;
+
+	while (num > 0) {
+		pivot = base + (num >> 1);
+		result = cmp(key, pivot);
+
+		if (result == 0)
+			goto out;
+
+		if (result > 0) {
+			base = pivot + 1;
+			num--;
+		}
+
+		if (num)
+			num >>= 1;
+	}
+
+out:
+	return pivot;
+}
+
+static void print_irq_stat(struct msm_watchdog_data *wdog_dd)
+{
+	int index;
+	int cpu;
+	struct irq_info *info;
+
+	pr_info("(virq:irq_count)- ");
+	for (index = 0; index < NR_TOP_HITTERS; index++) {
+		info = &wdog_dd->irq_counts[index];
+		pr_cont("%u:%u ", info->irq, info->total_count);
+	}
+	pr_cont("\n");
+
+	pr_info("(cpu:irq_count)- ");
+	for_each_possible_cpu(cpu)
+		pr_cont("%u:%u ", cpu, wdog_dd->tot_irq_count[cpu]);
+	pr_cont("\n");
+
+	pr_info("(ipi:irq_count)- ");
+	for (index = 0; index < NR_IPI; index++) {
+		info = &wdog_dd->ipi_counts[index];
+		pr_cont("%u:%u ", info->irq, info->total_count);
+	}
+	pr_cont("\n");
+}
+
+static void compute_irq_stat(struct work_struct *work)
+{
+	unsigned int count;
+	int index = 0, cpu, irq;
+	struct irq_desc *desc;
+	struct irq_info *pos;
+	struct irq_info *start;
+	struct irq_info key = {0};
+	unsigned int running;
+	struct msm_watchdog_data *wdog_dd = container_of(work,
+					    struct msm_watchdog_data,
+					    irq_counts_work);
+
+	size_t arr_size = ARRAY_SIZE(wdog_dd->irq_counts);
+
+	/*
+	 * Avoid parallel execution from the bark handler and the queued
+	 * irq_counts_work.
+	 */
+	running = atomic_xchg(&wdog_dd->irq_counts_running, 1);
+	if (running)
+		return;
+
+	/* per irq counts */
+	rcu_read_lock();
+	for_each_irq_nr(irq) {
+		desc = irq_to_desc(irq);
+		if (!desc)
+			continue;
+
+		count = kstat_irqs_usr(irq);
+		if (!count)
+			continue;
+
+		if (index < arr_size) {
+			wdog_dd->irq_counts[index].irq = irq;
+			wdog_dd->irq_counts[index].total_count = count;
+			for_each_possible_cpu(cpu)
+				wdog_dd->irq_counts[index].irq_counter[cpu] =
+					*per_cpu_ptr(desc->kstat_irqs, cpu);
+
+			index++;
+			if (index == arr_size)
+				sort(wdog_dd->irq_counts, arr_size,
+				     sizeof(*pos), cmp_irq_info_fn,
+				     swap_irq_info_fn);
+
+			continue;
+		}
+
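+		/*
+		 * Table is full: binary-search the descending top-hitters
+		 * table for this count and, if it beats an existing entry,
+		 * shift the smaller entries down and insert it in place.
+		 */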
+		key.total_count = count;
+		start = wdog_dd->irq_counts + (arr_size - 1);
+		pos = search(&key, wdog_dd->irq_counts,
+			     arr_size, cmp_irq_info_fn);
+		pr_debug("*pos:%u key:%u\n",
+				pos->total_count, key.total_count);
+		if (pos->total_count >= key.total_count) {
+			if (pos < start)
+				pos++;
+			else
+				pos = NULL;
+		}
+
+		pr_debug("count :%u irq:%u\n", count, irq);
+		if (pos && pos < start) {
+			start--;
+			for (; start >= pos ; start--)
+				*(start + 1) = *start;
+		}
+
+		if (pos) {
+			pos->irq = irq;
+			pos->total_count = count;
+			for_each_possible_cpu(cpu)
+				pos->irq_counter[cpu] =
+					*per_cpu_ptr(desc->kstat_irqs, cpu);
+		}
+	}
+	rcu_read_unlock();
+
+	/* per cpu total irq counts */
+	for_each_possible_cpu(cpu)
+		wdog_dd->tot_irq_count[cpu] = kstat_cpu_irqs_sum(cpu);
+
+	/* per IPI counts */
+	for (index = 0; index < NR_IPI; index++) {
+		wdog_dd->ipi_counts[index].total_count = 0;
+		wdog_dd->ipi_counts[index].irq = index;
+		for_each_possible_cpu(cpu) {
+			wdog_dd->ipi_counts[index].irq_counter[cpu] =
+				__IRQ_STAT(cpu, ipi_irqs[index]);
+			wdog_dd->ipi_counts[index].total_count +=
+				wdog_dd->ipi_counts[index].irq_counter[cpu];
+		}
+	}
+
+	print_irq_stat(wdog_dd);
+	atomic_xchg(&wdog_dd->irq_counts_running, 0);
+}
+
 static __ref int watchdog_kthread(void *arg)
 {
 	struct msm_watchdog_data *wdog_dd =
@@ -442,6 +650,7 @@ static __ref int watchdog_kthread(void *arg)
 		 * Could have been changed on other cpu
 		 */
 		mod_timer(&wdog_dd->pet_timer, jiffies + delay_time);
+		queue_work(system_unbound_wq, &wdog_dd->irq_counts_work);
 	}
 	return 0;
 }
@@ -470,6 +679,17 @@ static struct notifier_block wdog_cpu_pm_nb = {
 	.notifier_call = wdog_cpu_pm_notify,
 };
 
+#ifdef CONFIG_QCOM_INITIAL_LOGBUF
+static void log_buf_remove(void)
+{
+	flush_delayed_work(&log_buf_work);
+	if (init_log_buf)
+		dma_free_coherent(wdog_data->dev, *log_buf_size,
+				  init_log_buf, log_buf_paddr);
+}
+#else
+static void log_buf_remove(void) { }
+#endif
+
 static int msm_watchdog_remove(struct platform_device *pdev)
 {
 	struct msm_watchdog_data *wdog_dd =
@@ -489,6 +709,8 @@ static int msm_watchdog_remove(struct platform_device *pdev)
 	dev_info(wdog_dd->dev, "MSM Watchdog Exit - Deactivated\n");
 	del_timer_sync(&wdog_dd->pet_timer);
 	kthread_stop(wdog_dd->watchdog_task);
+	flush_work(&wdog_dd->irq_counts_work);
+	log_buf_remove();
 	kfree(wdog_dd);
 	return 0;
 }
@@ -497,6 +719,8 @@ void msm_trigger_wdog_bite(void)
 {
 	if (!wdog_data)
 		return;
+
+	compute_irq_stat(&wdog_data->irq_counts_work);
 	pr_info("Causing a watchdog bite!");
 	__raw_writel(1, wdog_data->base + WDT0_BITE_TIME);
 	/* Make sure bite time is written before we reset */
@@ -568,6 +792,65 @@ static int init_watchdog_sysfs(struct msm_watchdog_data *wdog_dd)
 	return error;
 }
 
+#ifdef CONFIG_QCOM_INITIAL_LOGBUF
+static void minidump_reg_init_log_buf(void)
+{
+	struct md_region md_entry;
+
+	/* Register init_log_buf info to minidump table */
+	strlcpy(md_entry.name, "KBOOT_LOG", sizeof(md_entry.name));
+	md_entry.virt_addr = (uintptr_t)init_log_buf;
+	md_entry.phys_addr = log_buf_paddr;
+	md_entry.size = *log_buf_size;
+	md_entry.id = MINIDUMP_DEFAULT_ID;
+	if (msm_minidump_add_region(&md_entry))
+		pr_err("Failed to add init_log_buf in Minidump\n");
+}
+
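+/*
+ * Deferred snapshot of the early kernel log: resolve log_buf and
+ * log_buf_len through kallsyms, copy the contents into a DMA-coherent
+ * buffer and expose it to minidump as the KBOOT_LOG region.
+ */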
+static void log_buf_work_fn(struct work_struct *work)
+{
+	char **addr = NULL;
+
+	addr = (char **)kallsyms_lookup_name("log_buf");
+	if (!addr) {
+		dev_err(wdog_data->dev, "log_buf symbol not found\n");
+		goto out;
+	}
+
+	log_buf_size = (unsigned int *)kallsyms_lookup_name("log_buf_len");
+	if (!log_buf_size) {
+		dev_err(wdog_data->dev, "log_buf_len symbol not found\n");
+		goto out;
+	}
+
+	init_log_buf = dma_alloc_coherent(wdog_data->dev, *log_buf_size,
+					  &log_buf_paddr, GFP_KERNEL);
+	if (!init_log_buf) {
+		dev_err(wdog_data->dev, "log_buf dma_alloc_coherent failed\n");
+		goto out;
+	}
+
+	minidump_reg_init_log_buf();
+	memcpy(init_log_buf, *addr, (size_t)(*log_buf_size));
+	pr_info("boot log copy done\n");
+out:
+	return;
+}
+
+static void log_buf_init(void)
+{
+	/* Keep millisecond granularity */
+	unsigned int curr_time_msec = div_u64(sched_clock(), NSEC_PER_MSEC);
+	/* Clamp to zero in case probe runs later than LOGBUF_TIMEOUT */
+	unsigned int timeout_msec = (curr_time_msec < LOGBUF_TIMEOUT) ?
+				    LOGBUF_TIMEOUT - curr_time_msec : 0;
+
+	INIT_DELAYED_WORK(&log_buf_work, log_buf_work_fn);
+	queue_delayed_work(system_unbound_wq, &log_buf_work,
+			   msecs_to_jiffies(timeout_msec));
+}
+#else
+static void log_buf_init(void) { }
+#endif
+
 static void init_watchdog_data(struct msm_watchdog_data *wdog_dd)
 {
 	unsigned long delay_time;
@@ -606,6 +889,9 @@ static void init_watchdog_data(struct msm_watchdog_data *wdog_dd)
 			return;
 		}
 	}
+
+	INIT_WORK(&wdog_dd->irq_counts_work, compute_irq_stat);
+	atomic_set(&wdog_dd->irq_counts_running, 0);
 	delay_time = msecs_to_jiffies(wdog_dd->pet_time);
 	wdog_dd->min_slack_ticks = UINT_MAX;
 	wdog_dd->min_slack_ns = ULLONG_MAX;
@@ -755,6 +1041,8 @@ static int msm_watchdog_probe(struct platform_device *pdev)
 	}
 	init_watchdog_data(wdog_dd);
 
+	log_buf_init();
+
 	/* Add wdog info to minidump table */
 	strlcpy(md_entry.name, "KWDOGDATA", sizeof(md_entry.name));
 	md_entry.virt_addr = (uintptr_t)wdog_dd;
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 63556d1..89f27f4 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -94,6 +94,16 @@
 #define MAX_TX_SG		(3)
 #define NUM_SPI_XFER		(8)
 
+/* SPI sampling registers */
+#define SE_GENI_CGC_CTRL	(0x28)
+#define SE_GENI_CFG_SEQ_START	(0x84)
+#define SE_GENI_CFG_REG108	(0x2B0)
+#define SE_GENI_CFG_REG109	(0x2B4)
+#define CPOL_CTRL_SHFT	1
+#define RX_IO_POS_FF_EN_SEL_SHFT	4
+#define RX_IO_EN2CORE_EN_DELAY_SHFT	8
+#define RX_SI_EN2IO_DELAY_SHFT 12
+
 struct gsi_desc_cb {
 	struct spi_master *spi;
 	struct spi_transfer *xfer;
@@ -152,6 +162,8 @@ struct spi_geni_master {
 	bool shared_ee; /* Dual EE use case */
 	bool dis_autosuspend;
 	bool cmd_done;
+	bool set_miso_sampling;
+	u32 miso_sampling_ctrl_val;
 };
 
 static struct spi_master *get_spi_master(struct device *dev)
@@ -799,6 +811,7 @@ static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
 	int ret = 0, count = 0;
 	u32 max_speed = spi->cur_msg->spi->max_speed_hz;
 	struct se_geni_rsc *rsc = &mas->spi_rsc;
+	u32 cpol, cpha, cfg_reg108, cfg_reg109, cfg_seq_start;
 
 	/* Adjust the IB based on the max speed of the slave.*/
 	rsc->ib = max_speed * DEFAULT_BUS_WIDTH;
@@ -861,6 +874,7 @@ static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
 				dev_info(mas->dev, "Failed to get rx DMA ch %ld\n",
 							PTR_ERR(mas->rx));
 				dma_release_channel(mas->tx);
+				goto setup_ipc;
 			}
 			mas->gsi = devm_kzalloc(mas->dev,
 				(sizeof(struct spi_geni_gsi) * NUM_SPI_XFER),
@@ -918,6 +932,54 @@ static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
 				"%s:Major:%d Minor:%d step:%dos%d\n",
 			__func__, major, minor, step, mas->oversampling);
 		}
+
+		if (!mas->set_miso_sampling)
+			goto shared_se;
+
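+		/*
+		 * Program the MISO sampling point based on the SE HW
+		 * version: 1.0 programs RX_SI_EN2IO_DELAY, cores older
+		 * than 2.5 clear RX_IO_EN2CORE_EN_DELAY, and newer cores
+		 * take the DT-provided miso_sampling_ctrl_val.
+		 */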
+		cpol = geni_read_reg(mas->base, SE_SPI_CPOL);
+		cpha = geni_read_reg(mas->base, SE_SPI_CPHA);
+		cfg_reg108 = geni_read_reg(mas->base, SE_GENI_CFG_REG108);
+		cfg_reg109 = geni_read_reg(mas->base, SE_GENI_CFG_REG109);
+		/* clear CPOL bit */
+		cfg_reg108 &= ~(1 << CPOL_CTRL_SHFT);
+
+		if (major == 1 && minor == 0) {
+			/* Write 1 to RX_SI_EN2IO_DELAY reg */
+			cfg_reg108 &= ~(0x7 << RX_SI_EN2IO_DELAY_SHFT);
+			cfg_reg108 |= (1 << RX_SI_EN2IO_DELAY_SHFT);
+			/* Write 0 to RX_IO_POS_FF_EN_SEL reg */
+			cfg_reg108 &= ~(1 << RX_IO_POS_FF_EN_SEL_SHFT);
+		} else if ((major < 2) || (major == 2 && minor < 5)) {
+			/* Write 0 to RX_IO_EN2CORE_EN_DELAY reg */
+			cfg_reg108 &= ~(0x7 << RX_IO_EN2CORE_EN_DELAY_SHFT);
+		} else {
+			/*
+			 * Write miso_sampling_ctrl_set to
+			 * RX_IO_EN2CORE_EN_DELAY reg
+			 */
+			cfg_reg108 &= ~(0x7 << RX_IO_EN2CORE_EN_DELAY_SHFT);
+			cfg_reg108 |= (mas->miso_sampling_ctrl_val <<
+					RX_IO_EN2CORE_EN_DELAY_SHFT);
+		}
+
+		geni_write_reg(cfg_reg108, mas->base, SE_GENI_CFG_REG108);
+
+		if (cpol == 0 && cpha == 0)
+			cfg_reg109 = 1;
+		else if (cpol == 1 && cpha == 0)
+			cfg_reg109 = 0;
+		geni_write_reg(cfg_reg109, mas->base,
+					SE_GENI_CFG_REG109);
+		if (!(major == 1 && minor == 0))
+			geni_write_reg(1, mas->base, SE_GENI_CFG_SEQ_START);
+		cfg_reg108 = geni_read_reg(mas->base, SE_GENI_CFG_REG108);
+		cfg_reg109 = geni_read_reg(mas->base, SE_GENI_CFG_REG109);
+		cfg_seq_start = geni_read_reg(mas->base, SE_GENI_CFG_SEQ_START);
+
+		GENI_SE_DBG(mas->ipc, false, mas->dev,
+			"%s cfg108: 0x%x cfg109: 0x%x cfg_seq_start: 0x%x\n",
+			__func__, cfg_reg108, cfg_reg109, cfg_seq_start);
+shared_se:
 		mas->shared_se =
 			(geni_read_reg(mas->base, GENI_IF_FIFO_DISABLE_RO) &
 							FIFO_IF_DISABLE);
@@ -1149,6 +1211,12 @@ static int spi_geni_transfer_one(struct spi_master *spi,
 		return -EINVAL;
 	}
 
+	/* Check for zero length transfer */
+	if (xfer->len < 1) {
+		dev_err(mas->dev, "Zero length transfer\n");
+		return -EINVAL;
+	}
+
 	if (mas->cur_xfer_mode != GSI_DMA) {
 		reinit_completion(&mas->xfer_done);
 		setup_fifo_xfer(xfer, mas, slv->mode, spi);
@@ -1538,6 +1606,15 @@ static int spi_geni_probe(struct platform_device *pdev)
 		of_property_read_bool(pdev->dev.of_node,
 				"qcom,shared_ee");
 
+	geni_mas->set_miso_sampling = of_property_read_bool(pdev->dev.of_node,
+				"qcom,set-miso-sampling");
+	if (geni_mas->set_miso_sampling) {
+		if (!of_property_read_u32(pdev->dev.of_node,
+				"qcom,miso-sampling-ctrl-val",
+				&geni_mas->miso_sampling_ctrl_val))
+			dev_info(&pdev->dev, "MISO_SAMPLING_SET: %d\n",
+				geni_mas->miso_sampling_ctrl_val);
+	}
 	geni_mas->phys_addr = res->start;
 	geni_mas->size = resource_size(res);
 	geni_mas->base = devm_ioremap(&pdev->dev, res->start,
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 8556510..2f74de5 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -200,13 +200,15 @@ static int of_thermal_set_trips(struct thermal_zone_device *tz,
 		return -EINVAL;
 
 	mutex_lock(&data->senps->lock);
+	if (data->mode == THERMAL_DEVICE_DISABLED)
+		goto set_trips_exit;
 	of_thermal_aggregate_trip_types(tz, GENMASK(THERMAL_TRIP_CRITICAL, 0),
 					&low, &high);
 	data->senps->trip_low = low;
 	data->senps->trip_high = high;
 	ret = data->senps->ops->set_trips(data->senps->sensor_data,
 					  low, high);
-
+set_trips_exit:
 	mutex_unlock(&data->senps->lock);
 	return ret;
 }
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index 2471627..cc78d37 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -102,6 +102,17 @@
 
 	  If you want this support, you should say Y here.
 
+config MSM_BCL_PERIPHERAL_CTL
+	bool "BCL driver to control the PMIC BCL peripheral"
+	depends on SPMI && THERMAL_OF
+	help
+	  Say Y here to enable this BCL PMIC peripheral driver. This driver
+	  provides routines to configure and monitor the BCL
+	  PMIC peripheral. This driver registers the battery current and
+	  voltage sensors with the thermal core framework and can take
+	  threshold input and notify the thermal core when the threshold is
+	  reached.
+
 config QTI_CPU_ISOLATE_COOLING_DEVICE
 	bool "QTI CPU Isolate cooling devices"
 	depends on THERMAL_OF
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index 71785f2..8a0b619 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -9,6 +9,7 @@
 obj-$(CONFIG_QTI_THERMAL_LIMITS_DCVS) += msm_lmh_dcvs.o lmh_dbg.o
 obj-$(CONFIG_QTI_AOP_REG_COOLING_DEVICE) += regulator_aop_cdev.o
 obj-$(CONFIG_REGULATOR_COOLING_DEVICE) += regulator_cdev.o
+obj-$(CONFIG_MSM_BCL_PERIPHERAL_CTL) += bcl_peripheral.o
 obj-$(CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE) += cpu_isolate.o
 obj-$(CONFIG_QTI_LMH_CPU_VDD_COOLING_DEVICE) += lmh_cpu_vdd_cdev.o
 obj-$(CONFIG_QTI_LIMITS_ISENSE_CDSP) += msm_isense_cdsp.o
diff --git a/drivers/thermal/qcom/adc-tm.c b/drivers/thermal/qcom/adc-tm.c
index 195d2e9..b6359dd 100644
--- a/drivers/thermal/qcom/adc-tm.c
+++ b/drivers/thermal/qcom/adc-tm.c
@@ -196,6 +196,10 @@ static const struct of_device_id adc_tm_match_table[] = {
 		.data = &data_adc_tm5,
 	},
 	{
+		.compatible = "qcom,adc-tm-rev2",
+		.data = &data_adc_tm_rev2,
+	},
+	{
 		.compatible = "qcom,adc-tm5-iio",
 		.data = &data_adc_tm5,
 	},
diff --git a/drivers/thermal/qcom/adc-tm.h b/drivers/thermal/qcom/adc-tm.h
index b616aff..b26af66 100644
--- a/drivers/thermal/qcom/adc-tm.h
+++ b/drivers/thermal/qcom/adc-tm.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __QCOM_ADC_TM_H__
@@ -164,6 +164,7 @@ struct adc_tm_data {
 
 extern const struct adc_tm_data data_adc_tm5;
 extern const struct adc_tm_data data_adc_tm7;
+extern const struct adc_tm_data data_adc_tm_rev2;
 
 /**
  * Channel index for the corresponding index to adc_tm_channel_select
diff --git a/drivers/thermal/qcom/adc-tm5.c b/drivers/thermal/qcom/adc-tm5.c
index 654689e..c66f6c0 100644
--- a/drivers/thermal/qcom/adc-tm5.c
+++ b/drivers/thermal/qcom/adc-tm5.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -1140,3 +1140,11 @@ const struct adc_tm_data data_adc_tm5 = {
 	.hw_settle = (unsigned int []) {15, 100, 200, 300, 400, 500, 600, 700,
 					1, 2, 4, 8, 16, 32, 64, 128},
 };
+
+const struct adc_tm_data data_adc_tm_rev2 = {
+	.ops			= &ops_adc_tm5,
+	.full_scale_code_volt	= 0x4000,
+	.decimation = (unsigned int []) {256, 512, 1024},
+	.hw_settle = (unsigned int []) {0, 100, 200, 300, 400, 500, 600, 700,
+					800, 900, 1, 2, 4, 6, 8, 10},
+};
diff --git a/drivers/thermal/qcom/bcl_peripheral.c b/drivers/thermal/qcom/bcl_peripheral.c
new file mode 100644
index 0000000..a9cf4f5
--- /dev/null
+++ b/drivers/thermal/qcom/bcl_peripheral.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
+#include <linux/thermal.h>
+
+#include "../thermal_core.h"
+
+#define BCL_DRIVER_NAME       "bcl_peripheral"
+#define BCL_VBAT_INT          "bcl-low-vbat"
+#define BCL_VLOW_VBAT_INT     "bcl-very-low-vbat"
+#define BCL_CLOW_VBAT_INT     "bcl-crit-low-vbat"
+#define BCL_IBAT_INT          "bcl-high-ibat"
+#define BCL_VHIGH_IBAT_INT    "bcl-very-high-ibat"
+#define BCL_MONITOR_EN        0x46
+#define BCL_VBAT_MIN          0x5C
+#define BCL_IBAT_MAX          0x5D
+#define BCL_MAX_MIN_CLR       0x48
+#define BCL_IBAT_MAX_CLR      3
+#define BCL_VBAT_MIN_CLR      2
+#define BCL_VBAT_ADC_LOW      0x72
+#define BCL_VBAT_COMP_LOW     0x75
+#define BCL_VBAT_COMP_TLOW    0x76
+#define BCL_IBAT_HIGH         0x78
+#define BCL_IBAT_TOO_HIGH     0x79
+#define BCL_LMH_CFG           0xA3
+#define BCL_CFG               0x6A
+#define LMH_INT_POL_HIGH      0x12
+#define LMH_INT_EN            0x15
+#define BCL_VBAT_SCALING      39000
+#define BCL_IBAT_SCALING      80
+#define BCL_LMH_CFG_VAL       0x3
+#define BCL_CFG_VAL           0x81
+#define LMH_INT_VAL           0x7
+#define BCL_READ_RETRY_LIMIT  3
+#define VAL_CP_REG_BUF_LEN    3
+#define VAL_REG_BUF_OFFSET    0
+#define VAL_CP_REG_BUF_OFFSET 2
+#define BCL_STD_VBAT_NR       9
+#define BCL_VBAT_NO_READING   127
+
+enum bcl_dev_type {
+	BCL_HIGH_IBAT,
+	BCL_VHIGH_IBAT,
+	BCL_LOW_VBAT,
+	BCL_VLOW_VBAT,
+	BCL_CLOW_VBAT,
+	BCL_SOC_MONITOR,
+	BCL_TYPE_MAX,
+};
+
+struct bcl_peripheral_data {
+	int                     irq_num;
+	long                    trip_temp;
+	int                     trip_val;
+	int                     last_val;
+	struct mutex            state_trans_lock;
+	bool			irq_enabled;
+	struct thermal_zone_of_device_ops ops;
+	struct thermal_zone_device *tz_dev;
+};
+
+struct bcl_device {
+	struct regmap			*regmap;
+	uint16_t			fg_bcl_addr;
+	uint16_t			fg_lmh_addr;
+	struct notifier_block		psy_nb;
+	struct work_struct		soc_eval_work;
+	struct bcl_peripheral_data	param[BCL_TYPE_MAX];
+};
+
+static struct bcl_device *bcl_perph;
+static int vbat_low[BCL_STD_VBAT_NR] = {
+		2400, 2500, 2600, 2700, 2800, 2900,
+		3000, 3100, 3200};
+
+static int bcl_read_multi_register(int16_t reg_offset, uint8_t *data, int len)
+{
+	int  ret = 0;
+
+	if (!bcl_perph) {
+		pr_err("BCL device not initialized\n");
+		return -EINVAL;
+	}
+	ret = regmap_bulk_read(bcl_perph->regmap,
+			       (bcl_perph->fg_bcl_addr + reg_offset),
+			       data, len);
+	if (ret < 0) {
+		pr_err("Error reading register %d. err:%d\n", reg_offset, ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int bcl_write_general_register(int16_t reg_offset,
+					uint16_t base, uint8_t data)
+{
+	int  ret = 0;
+	uint8_t *write_buf = &data;
+
+	if (!bcl_perph) {
+		pr_err("BCL device not initialized\n");
+		return -EINVAL;
+	}
+	ret = regmap_write(bcl_perph->regmap, (base + reg_offset), *write_buf);
+	if (ret < 0) {
+		pr_err("Error writing register %d. err:%d\n", reg_offset, ret);
+		return ret;
+	}
+	pr_debug("wrote 0x%02x to 0x%04x\n", data, base + reg_offset);
+
+	return ret;
+}
+
+static int bcl_write_register(int16_t reg_offset, uint8_t data)
+{
+	return bcl_write_general_register(reg_offset,
+			bcl_perph->fg_bcl_addr, data);
+}
+
+static void convert_vbat_to_adc_val(int *val)
+{
+	*val = (*val * 1000) / BCL_VBAT_SCALING;
+}
+
+static void convert_adc_to_vbat_val(int *val)
+{
+	*val = *val * BCL_VBAT_SCALING / 1000;
+}
+
+static void convert_ibat_to_adc_val(int *val)
+{
+	*val = *val / BCL_IBAT_SCALING;
+}
+
+static void convert_adc_to_ibat_val(int *val)
+{
+	*val = *val * BCL_IBAT_SCALING;
+}
+
+static int bcl_set_ibat(void *data, int low, int high)
+{
+	int ret = 0, ibat_ua, thresh_value;
+	int8_t val = 0;
+	int16_t addr;
+	struct bcl_peripheral_data *bat_data =
+		(struct bcl_peripheral_data *)data;
+
+	thresh_value = high;
+	if (bat_data->trip_temp == thresh_value)
+		return 0;
+
+	mutex_lock(&bat_data->state_trans_lock);
+	if (bat_data->irq_num && bat_data->irq_enabled) {
+		disable_irq_nosync(bat_data->irq_num);
+		bat_data->irq_enabled = false;
+	}
+	if (thresh_value == INT_MAX) {
+		bat_data->trip_temp = thresh_value;
+		goto set_trip_exit;
+	}
+
+	ibat_ua = thresh_value;
+	convert_ibat_to_adc_val(&thresh_value);
+	val = (int8_t)thresh_value;
+	if (&bcl_perph->param[BCL_HIGH_IBAT] == bat_data) {
+		addr = BCL_IBAT_HIGH;
+		pr_debug("ibat high threshold:%d mA ADC:0x%02x\n",
+				ibat_ua, val);
+	} else if (&bcl_perph->param[BCL_VHIGH_IBAT] == bat_data) {
+		addr = BCL_IBAT_TOO_HIGH;
+		pr_debug("ibat too high threshold:%d mA ADC:0x%02x\n",
+				ibat_ua, val);
+	} else {
+		goto set_trip_exit;
+	}
+	ret = bcl_write_register(addr, val);
+	if (ret) {
+		pr_err("Error accessing BCL peripheral. err:%d\n", ret);
+		goto set_trip_exit;
+	}
+	bat_data->trip_temp = ibat_ua;
+
+	if (bat_data->irq_num && !bat_data->irq_enabled) {
+		enable_irq(bat_data->irq_num);
+		bat_data->irq_enabled = true;
+	}
+
+set_trip_exit:
+	mutex_unlock(&bat_data->state_trans_lock);
+
+	return ret;
+}
+
+static int bcl_set_vbat(void *data, int low, int high)
+{
+	int ret = 0, vbat_uv, vbat_idx, thresh_value;
+	int8_t val = 0;
+	struct bcl_peripheral_data *bat_data =
+		(struct bcl_peripheral_data *)data;
+	uint16_t addr;
+
+	thresh_value = low;
+	if (bat_data->trip_temp == thresh_value)
+		return 0;
+
+	mutex_lock(&bat_data->state_trans_lock);
+
+	if (bat_data->irq_num && bat_data->irq_enabled) {
+		disable_irq_nosync(bat_data->irq_num);
+		bat_data->irq_enabled = false;
+	}
+	if (thresh_value == INT_MIN) {
+		bat_data->trip_temp = thresh_value;
+		goto set_trip_exit;
+	}
+	vbat_uv = thresh_value;
+	convert_vbat_to_adc_val(&thresh_value);
+	val = (int8_t)thresh_value;
+	/*
+	 * Very low and critical low trips support only the standard
+	 * trip thresholds.
+	 */
+	if (&bcl_perph->param[BCL_LOW_VBAT] == bat_data) {
+		addr = BCL_VBAT_ADC_LOW;
+		pr_debug("vbat low threshold:%d mv ADC:0x%02x\n",
+				vbat_uv, val);
+	} else if (&bcl_perph->param[BCL_VLOW_VBAT] == bat_data) {
+		/*
+		 * Scan the standard voltage table, sorted in ascending order
+		 * and find the closest threshold that is lower or equal to
+		 * the requested value. Passive trip supports thresholds
+		 * indexed from 1...BCL_STD_VBAT_NR in the voltage table.
+		 */
+		for (vbat_idx = 2; vbat_idx < BCL_STD_VBAT_NR;
+			vbat_idx++) {
+			if (vbat_uv >= vbat_low[vbat_idx])
+				continue;
+			break;
+		}
+		addr = BCL_VBAT_COMP_LOW;
+		val = vbat_idx - 2;
+		vbat_uv = vbat_low[vbat_idx - 1];
+		pr_debug("vbat too low threshold:%d mv ADC:0x%02x\n",
+				vbat_uv, val);
+	} else if (&bcl_perph->param[BCL_CLOW_VBAT] == bat_data) {
+		/* Hot trip supports thresholds indexed from
+		 * 0...BCL_STD_VBAT_NR-1 in the voltage table.
+		 */
+		for (vbat_idx = 1; vbat_idx < (BCL_STD_VBAT_NR - 1);
+			vbat_idx++) {
+			if (vbat_uv >= vbat_low[vbat_idx])
+				continue;
+			break;
+		}
+		addr = BCL_VBAT_COMP_TLOW;
+		val = vbat_idx - 1;
+		vbat_uv = vbat_low[vbat_idx - 1];
+		pr_debug("vbat critic low threshold:%d mv ADC:0x%02x\n",
+				vbat_uv, val);
+	} else {
+		goto set_trip_exit;
+	}
+
+	ret = bcl_write_register(addr, val);
+	if (ret) {
+		pr_err("Error accessing BCL peripheral. err:%d\n", ret);
+		goto set_trip_exit;
+	}
+	bat_data->trip_temp = vbat_uv;
+	if (bat_data->irq_num && !bat_data->irq_enabled) {
+		enable_irq(bat_data->irq_num);
+		bat_data->irq_enabled = true;
+	}
+
+set_trip_exit:
+	mutex_unlock(&bat_data->state_trans_lock);
+	return ret;
+}
+
+static int bcl_clear_vbat_min(void)
+{
+	int ret  = 0;
+
+	ret = bcl_write_register(BCL_MAX_MIN_CLR,
+			BIT(BCL_VBAT_MIN_CLR));
+	if (ret)
+		pr_err("Error in clearing vbat min reg. err:%d\n", ret);
+
+	return ret;
+}
+
+static int bcl_clear_ibat_max(void)
+{
+	int ret  = 0;
+
+	ret = bcl_write_register(BCL_MAX_MIN_CLR,
+			BIT(BCL_IBAT_MAX_CLR));
+	if (ret)
+		pr_err("Error in clearing ibat max reg. err:%d\n", ret);
+
+	return ret;
+}
+
+static int bcl_read_ibat(void *data, int *adc_value)
+{
+	int ret = 0, timeout = 0;
+	int8_t val[VAL_CP_REG_BUF_LEN] = {0};
+	struct bcl_peripheral_data *bat_data =
+		(struct bcl_peripheral_data *)data;
+
+	*adc_value = (int)val[VAL_REG_BUF_OFFSET];
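+	/*
+	 * The value register is paired with a copy register; re-read
+	 * until both agree (up to BCL_READ_RETRY_LIMIT) so a torn
+	 * reading is never consumed.
+	 */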
+	do {
+		ret = bcl_read_multi_register(BCL_IBAT_MAX, val,
+					VAL_CP_REG_BUF_LEN);
+		if (ret) {
+			pr_err("BCL register read error. err:%d\n", ret);
+			goto bcl_read_exit;
+		}
+	} while (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]
+		&& timeout++ < BCL_READ_RETRY_LIMIT);
+	if (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]) {
+		ret = -ENODEV;
+		*adc_value = bat_data->last_val;
+		goto bcl_read_exit;
+	}
+	*adc_value = (int)val[VAL_REG_BUF_OFFSET];
+	if (*adc_value == 0) {
+		/*
+		 * The sensor can sometimes read 0 on consecutive
+		 * reads.
+		 */
+		*adc_value = bat_data->last_val;
+	} else {
+		convert_adc_to_ibat_val(adc_value);
+		bat_data->last_val = *adc_value;
+	}
+	pr_debug("ibat:%d mA\n", bat_data->last_val);
+
+bcl_read_exit:
+	return ret;
+}
+
+static int bcl_read_ibat_and_clear(void *data, int *adc_value)
+{
+	int ret = 0;
+
+	ret = bcl_read_ibat(data, adc_value);
+	if (ret)
+		return ret;
+	return bcl_clear_ibat_max();
+}
+
+static int bcl_read_vbat(void *data, int *adc_value)
+{
+	int ret = 0, timeout = 0;
+	int8_t val[VAL_CP_REG_BUF_LEN] = {0};
+	struct bcl_peripheral_data *bat_data =
+		(struct bcl_peripheral_data *)data;
+
+	*adc_value = (int)val[VAL_REG_BUF_OFFSET];
+	do {
+		ret = bcl_read_multi_register(BCL_VBAT_MIN, val,
+			VAL_CP_REG_BUF_LEN);
+		if (ret) {
+			pr_err("BCL register read error. err:%d\n", ret);
+			goto bcl_read_exit;
+		}
+	} while (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]
+		&& timeout++ < BCL_READ_RETRY_LIMIT);
+	if (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]) {
+		ret = -ENODEV;
+		goto bcl_read_exit;
+	}
+	*adc_value = (int)val[VAL_REG_BUF_OFFSET];
+	if (*adc_value == BCL_VBAT_NO_READING) {
+		*adc_value = bat_data->last_val;
+	} else {
+		convert_adc_to_vbat_val(adc_value);
+		bat_data->last_val = *adc_value;
+	}
+	pr_debug("vbat:%d mv\n", bat_data->last_val);
+
+bcl_read_exit:
+	return ret;
+}
+
+static int bcl_read_vbat_and_clear(void *data, int *adc_value)
+{
+	int ret;
+
+	ret = bcl_read_vbat(data, adc_value);
+	if (ret)
+		return ret;
+	return bcl_clear_vbat_min();
+}
+
+static irqreturn_t bcl_handle_ibat(int irq, void *data)
+{
+	struct bcl_peripheral_data *perph_data =
+		(struct bcl_peripheral_data *)data;
+
+	mutex_lock(&perph_data->state_trans_lock);
+	if (!perph_data->irq_enabled) {
+		WARN_ON(1);
+		disable_irq_nosync(irq);
+		perph_data->irq_enabled = false;
+		goto exit_intr;
+	}
+	mutex_unlock(&perph_data->state_trans_lock);
+	of_thermal_handle_trip(perph_data->tz_dev);
+
+	return IRQ_HANDLED;
+
+exit_intr:
+	mutex_unlock(&perph_data->state_trans_lock);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bcl_handle_vbat(int irq, void *data)
+{
+	struct bcl_peripheral_data *perph_data =
+		(struct bcl_peripheral_data *)data;
+
+	mutex_lock(&perph_data->state_trans_lock);
+	if (!perph_data->irq_enabled) {
+		WARN_ON(1);
+		disable_irq_nosync(irq);
+		perph_data->irq_enabled = false;
+		goto exit_intr;
+	}
+	mutex_unlock(&perph_data->state_trans_lock);
+	of_thermal_handle_trip(perph_data->tz_dev);
+
+	return IRQ_HANDLED;
+
+exit_intr:
+	mutex_unlock(&perph_data->state_trans_lock);
+	return IRQ_HANDLED;
+}
+
+static int bcl_get_devicetree_data(struct platform_device *pdev)
+{
+	int ret = 0;
+	const __be32 *prop = NULL;
+	struct device_node *dev_node = pdev->dev.of_node;
+
+	prop = of_get_address(dev_node, 0, NULL, NULL);
+	if (prop) {
+		bcl_perph->fg_bcl_addr = be32_to_cpu(*prop);
+		pr_debug("fg_user_adc@%04x\n", bcl_perph->fg_bcl_addr);
+	} else {
+		dev_err(&pdev->dev, "No fg_user_adc registers found\n");
+		return -ENODEV;
+	}
+
+	prop = of_get_address(dev_node, 1, NULL, NULL);
+	if (prop) {
+		bcl_perph->fg_lmh_addr = be32_to_cpu(*prop);
+		pr_debug("fg_lmh@%04x\n", bcl_perph->fg_lmh_addr);
+	} else {
+		dev_err(&pdev->dev, "No fg_lmh registers found\n");
+		return -ENODEV;
+	}
+
+	return ret;
+}
+
+static int bcl_set_soc(void *data, int low, int high)
+{
+	struct bcl_peripheral_data *bat_data =
+		(struct bcl_peripheral_data *)data;
+
+	if (low == bat_data->trip_temp)
+		return 0;
+
+	mutex_lock(&bat_data->state_trans_lock);
+	pr_debug("low soc threshold:%d\n", low);
+	bat_data->trip_temp = low;
+	if (low == INT_MIN) {
+		bat_data->irq_enabled = false;
+		goto unlock_and_exit;
+	}
+	bat_data->irq_enabled = true;
+	schedule_work(&bcl_perph->soc_eval_work);
+
+unlock_and_exit:
+	mutex_unlock(&bat_data->state_trans_lock);
+	return 0;
+}
+
+static int bcl_read_soc(void *data, int *val)
+{
+	static struct power_supply *batt_psy;
+	union power_supply_propval ret = {0,};
+	int err = 0;
+
+	*val = 100;
+	if (!batt_psy)
+		batt_psy = power_supply_get_by_name("battery");
+	if (batt_psy) {
+		err = power_supply_get_property(batt_psy,
+				POWER_SUPPLY_PROP_CAPACITY, &ret);
+		if (err) {
+			pr_err("battery percentage read error:%d\n",
+				err);
+			return err;
+		}
+		*val = ret.intval;
+	}
+	pr_debug("soc:%d\n", *val);
+
+	return err;
+}
+
+static void bcl_evaluate_soc(struct work_struct *work)
+{
+	int battery_percentage;
+	struct bcl_peripheral_data *perph_data =
+		&bcl_perph->param[BCL_SOC_MONITOR];
+
+	if (bcl_read_soc((void *)perph_data, &battery_percentage))
+		return;
+
+	mutex_lock(&perph_data->state_trans_lock);
+	if (!perph_data->irq_enabled)
+		goto eval_exit;
+	if (battery_percentage > perph_data->trip_temp)
+		goto eval_exit;
+
+	perph_data->trip_val = battery_percentage;
+	mutex_unlock(&perph_data->state_trans_lock);
+	of_thermal_handle_trip(perph_data->tz_dev);
+
+	return;
+eval_exit:
+	mutex_unlock(&perph_data->state_trans_lock);
+}
+
+static int battery_supply_callback(struct notifier_block *nb,
+			unsigned long event, void *data)
+{
+	struct power_supply *psy = data;
+
+	if (strcmp(psy->desc->name, "battery"))
+		return NOTIFY_OK;
+	schedule_work(&bcl_perph->soc_eval_work);
+
+	return NOTIFY_OK;
+}
+
+static void bcl_fetch_trip(struct platform_device *pdev, const char *int_name,
+		struct bcl_peripheral_data *data,
+		irqreturn_t (*handle)(int, void *))
+{
+	int ret = 0, irq_num = 0;
+
+	/*
+	 * Allow flexibility for the HLOS to set the trip temperature for
+	 * Allow the HLOS to set trip temperatures for all thresholds,
+	 * but register an interrupt handler for only one vbat and one
+	 * ibat interrupt. LMH-DCVSh handles and mitigates the remaining
+	 * ibat/vbat interrupts.
+	if (!handle) {
+		mutex_lock(&data->state_trans_lock);
+		data->irq_num = 0;
+		data->irq_enabled = false;
+		mutex_unlock(&data->state_trans_lock);
+		return;
+	}
+
+	irq_num = platform_get_irq_byname(pdev, int_name);
+	if (irq_num > 0) {
+		mutex_lock(&data->state_trans_lock);
+		ret = devm_request_threaded_irq(&pdev->dev,
+				irq_num, NULL, handle,
+				IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+				int_name, data);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Error requesting trip irq. err:%d\n",
+				ret);
+			mutex_unlock(&data->state_trans_lock);
+			return;
+		}
+		disable_irq_nosync(irq_num);
+		data->irq_num = irq_num;
+		data->irq_enabled = false;
+		mutex_unlock(&data->state_trans_lock);
+	}
+}
+
+static void bcl_probe_soc(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct bcl_peripheral_data *soc_data;
+
+	soc_data = &bcl_perph->param[BCL_SOC_MONITOR];
+	mutex_init(&soc_data->state_trans_lock);
+	soc_data->ops.get_temp = bcl_read_soc;
+	soc_data->ops.set_trips = bcl_set_soc;
+	INIT_WORK(&bcl_perph->soc_eval_work, bcl_evaluate_soc);
+	bcl_perph->psy_nb.notifier_call = battery_supply_callback;
+	ret = power_supply_reg_notifier(&bcl_perph->psy_nb);
+	if (ret < 0) {
+		pr_err("Unable to register soc notifier. err:%d\n", ret);
+		return;
+	}
+	soc_data->tz_dev = thermal_zone_of_sensor_register(&pdev->dev,
+				BCL_SOC_MONITOR, soc_data, &soc_data->ops);
+	if (IS_ERR(soc_data->tz_dev)) {
+		pr_err("soc register failed. err:%ld\n",
+				PTR_ERR(soc_data->tz_dev));
+		return;
+	}
+	thermal_zone_device_update(soc_data->tz_dev, THERMAL_DEVICE_UP);
+	schedule_work(&bcl_perph->soc_eval_work);
+}
+
+static void bcl_vbat_init(struct platform_device *pdev,
+		struct bcl_peripheral_data *vbat, enum bcl_dev_type type)
+{
+	mutex_init(&vbat->state_trans_lock);
+	switch (type) {
+	case BCL_LOW_VBAT:
+		bcl_fetch_trip(pdev, BCL_VBAT_INT, vbat, bcl_handle_vbat);
+		break;
+	case BCL_VLOW_VBAT:
+		bcl_fetch_trip(pdev, BCL_VLOW_VBAT_INT, vbat, NULL);
+		break;
+	case BCL_CLOW_VBAT:
+		bcl_fetch_trip(pdev, BCL_CLOW_VBAT_INT, vbat, NULL);
+		break;
+	default:
+		return;
+	}
+	vbat->ops.get_temp = bcl_read_vbat_and_clear;
+	vbat->ops.set_trips = bcl_set_vbat;
+	vbat->tz_dev = thermal_zone_of_sensor_register(&pdev->dev,
+				type, vbat, &vbat->ops);
+	if (IS_ERR(vbat->tz_dev)) {
+		pr_err("vbat register failed. err:%ld\n",
+				PTR_ERR(vbat->tz_dev));
+		return;
+	}
+	thermal_zone_device_update(vbat->tz_dev, THERMAL_DEVICE_UP);
+}
+
+static void bcl_probe_vbat(struct platform_device *pdev)
+{
+	bcl_vbat_init(pdev, &bcl_perph->param[BCL_LOW_VBAT], BCL_LOW_VBAT);
+	bcl_vbat_init(pdev, &bcl_perph->param[BCL_VLOW_VBAT], BCL_VLOW_VBAT);
+	bcl_vbat_init(pdev, &bcl_perph->param[BCL_CLOW_VBAT], BCL_CLOW_VBAT);
+}
+
+static void bcl_ibat_init(struct platform_device *pdev,
+		struct bcl_peripheral_data *ibat, enum bcl_dev_type type)
+{
+	mutex_init(&ibat->state_trans_lock);
+	if (type == BCL_HIGH_IBAT)
+		bcl_fetch_trip(pdev, BCL_IBAT_INT, ibat, bcl_handle_ibat);
+	else
+		bcl_fetch_trip(pdev, BCL_VHIGH_IBAT_INT, ibat, NULL);
+	ibat->ops.get_temp = bcl_read_ibat_and_clear;
+	ibat->ops.set_trips = bcl_set_ibat;
+	ibat->tz_dev = thermal_zone_of_sensor_register(&pdev->dev,
+				type, ibat, &ibat->ops);
+	if (IS_ERR(ibat->tz_dev)) {
+		pr_err("ibat register failed. err:%ld\n",
+				PTR_ERR(ibat->tz_dev));
+		return;
+	}
+	thermal_zone_device_update(ibat->tz_dev, THERMAL_DEVICE_UP);
+}
+
+static void bcl_probe_ibat(struct platform_device *pdev)
+{
+	bcl_ibat_init(pdev, &bcl_perph->param[BCL_HIGH_IBAT], BCL_HIGH_IBAT);
+	bcl_ibat_init(pdev, &bcl_perph->param[BCL_VHIGH_IBAT], BCL_VHIGH_IBAT);
+}
+
+static void bcl_configure_lmh_peripheral(void)
+{
+	bcl_write_register(BCL_LMH_CFG, BCL_LMH_CFG_VAL);
+	bcl_write_register(BCL_CFG, BCL_CFG_VAL);
+	bcl_write_general_register(LMH_INT_POL_HIGH,
+			bcl_perph->fg_lmh_addr, LMH_INT_VAL);
+	bcl_write_general_register(LMH_INT_EN,
+			bcl_perph->fg_lmh_addr, LMH_INT_VAL);
+}
+
+static int bcl_remove(struct platform_device *pdev)
+{
+	int i = 0;
+
+	for (; i < BCL_TYPE_MAX; i++) {
+		if (!bcl_perph->param[i].tz_dev)
+			continue;
+		if (i == BCL_SOC_MONITOR) {
+			power_supply_unreg_notifier(&bcl_perph->psy_nb);
+			flush_work(&bcl_perph->soc_eval_work);
+		}
+		thermal_zone_of_sensor_unregister(&pdev->dev,
+				bcl_perph->param[i].tz_dev);
+	}
+	bcl_perph = NULL;
+
+	return 0;
+}
+
+static int bcl_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	bcl_perph = devm_kzalloc(&pdev->dev, sizeof(*bcl_perph), GFP_KERNEL);
+	if (!bcl_perph)
+		return -ENOMEM;
+
+	bcl_perph->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!bcl_perph->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	bcl_get_devicetree_data(pdev);
+	bcl_probe_ibat(pdev);
+	bcl_probe_vbat(pdev);
+	bcl_probe_soc(pdev);
+	bcl_configure_lmh_peripheral();
+
+	dev_set_drvdata(&pdev->dev, bcl_perph);
+	ret = bcl_write_register(BCL_MONITOR_EN, BIT(7));
+	if (ret) {
+		pr_err("Error accessing BCL peripheral. err:%d\n", ret);
+		goto bcl_probe_exit;
+	}
+
+	return 0;
+
+bcl_probe_exit:
+	bcl_remove(pdev);
+	return ret;
+}
+
+static const struct of_device_id bcl_match[] = {
+	{
+		.compatible = "qcom,msm-bcl-lmh",
+	},
+	{},
+};
+
+static struct platform_driver bcl_driver = {
+	.probe  = bcl_probe,
+	.remove = bcl_remove,
+	.driver = {
+		.name           = BCL_DRIVER_NAME,
+		.owner          = THIS_MODULE,
+		.of_match_table = bcl_match,
+	},
+};
+
+builtin_platform_driver(bcl_driver);
diff --git a/drivers/thermal/qcom/msm_lmh_dcvs.c b/drivers/thermal/qcom/msm_lmh_dcvs.c
index 95155ac..7c9e53c 100644
--- a/drivers/thermal/qcom/msm_lmh_dcvs.c
+++ b/drivers/thermal/qcom/msm_lmh_dcvs.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
@@ -35,9 +35,16 @@
 #include <trace/events/lmh.h>
 
 #define LIMITS_DCVSH			0x10
+#define LIMITS_PROFILE_CHANGE		0x01
 #define LIMITS_NODE_DCVS		0x44435653
 
 #define LIMITS_SUB_FN_THERMAL		0x54484D4C
+#define LIMITS_SUB_FN_CRNT		0x43524E54
+#define LIMITS_SUB_FN_REL		0x52454C00
+#define LIMITS_SUB_FN_BCL		0x42434C00
+
+#define LIMITS_ALGO_MODE_ENABLE		0x454E424C
+
 #define LIMITS_HI_THRESHOLD		0x48494748
 #define LIMITS_LOW_THRESHOLD		0x4C4F5700
 #define LIMITS_ARM_THRESHOLD		0x41524D00
@@ -337,6 +344,23 @@ static struct limits_dcvs_hw *get_dcvsh_hw_from_cpu(int cpu)
 	return NULL;
 }
 
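+/*
+ * Ask the secure world, via SCM, to switch the LMH profile; used when
+ * bringing up legacy LMH hardware after its algorithms are enabled.
+ */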
+static int enable_lmh(void)
+{
+	int ret = 0;
+	struct scm_desc desc_arg;
+
+	desc_arg.args[0] = 1;
+	desc_arg.arginfo = SCM_ARGS(1, SCM_VAL);
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, LIMITS_PROFILE_CHANGE),
+			&desc_arg);
+	if (ret) {
+		pr_err("Error switching LMH profile to 1. err:%d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
 static int lmh_set_max_limit(int cpu, u32 freq)
 {
 	struct limits_dcvs_hw *hw = get_dcvsh_hw_from_cpu(cpu);
@@ -591,6 +615,45 @@ static int limits_dcvs_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
+	/* Check whether legacy LMH HW enablement is needed */
+	if (of_property_read_bool(dn, "qcom,legacy-lmh-enable")) {
+		/* Enable the thermal algorithm early */
+		ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_THERMAL,
+			 LIMITS_ALGO_MODE_ENABLE, 1, 0, 0);
+		if (ret) {
+			pr_err("Unable to enable THERM algo for cluster%d\n",
+				affinity);
+			return ret;
+		}
+		/* Enable the LMH outer loop algorithm */
+		ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_CRNT,
+			 LIMITS_ALGO_MODE_ENABLE, 1, 0, 0);
+		if (ret) {
+			pr_err("Unable to enable CRNT algo for cluster%d\n",
+				affinity);
+			return ret;
+		}
+		/* Enable the Reliability algorithm */
+		ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_REL,
+			 LIMITS_ALGO_MODE_ENABLE, 1, 0, 0);
+		if (ret) {
+			pr_err("Unable to enable REL algo for cluster%d\n",
+				affinity);
+			return ret;
+		}
+		/* Enable the BCL algorithm */
+		ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_BCL,
+			 LIMITS_ALGO_MODE_ENABLE, 1, 0, 0);
+		if (ret) {
+			pr_err("Unable to enable BCL algo for cluster%d\n",
+				affinity);
+			return ret;
+		}
+		ret = enable_lmh();
+		if (ret)
+			return ret;
+	}
+
 	no_cdev_register = of_property_read_bool(dn,
 				"qcom,no-cooling-device-register");
 
@@ -631,11 +694,13 @@ static int limits_dcvs_probe(struct platform_device *pdev)
 			return PTR_ERR(tzdev);
 	}
 
-	hw->min_freq_reg = devm_ioremap(&pdev->dev, min_reg, 0x4);
-	if (!hw->min_freq_reg) {
-		pr_err("min frequency enable register remap failed\n");
-		ret = -ENOMEM;
-		goto unregister_sensor;
+	if (!no_cdev_register) {
+		hw->min_freq_reg = devm_ioremap(&pdev->dev, min_reg, 0x4);
+		if (!hw->min_freq_reg) {
+			pr_err("min frequency enable register remap failed\n");
+			ret = -ENOMEM;
+			goto unregister_sensor;
+		}
 	}
 
 	mutex_init(&hw->access_lock);
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index eb06819..040666e 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -172,8 +172,7 @@ struct msm_geni_serial_port {
 			unsigned int rx_fifo_wc,
 			unsigned int rx_last_byte_valid,
 			unsigned int rx_last,
-			bool drop_rx,
-			unsigned long *flags);
+			bool drop_rx);
 	struct device *wrapper_dev;
 	struct se_geni_rsc serial_rsc;
 	dma_addr_t tx_dma;
@@ -213,12 +212,12 @@ static int handle_rx_console(struct uart_port *uport,
 			unsigned int rx_fifo_wc,
 			unsigned int rx_last_byte_valid,
 			unsigned int rx_last,
-			bool drop_rx, unsigned long *flags);
+			bool drop_rx);
 static int handle_rx_hs(struct uart_port *uport,
 			unsigned int rx_fifo_wc,
 			unsigned int rx_last_byte_valid,
 			unsigned int rx_last,
-			bool drop_rx, unsigned long *flags);
+			bool drop_rx);
 static unsigned int msm_geni_serial_tx_empty(struct uart_port *port);
 static int msm_geni_serial_power_on(struct uart_port *uport);
 static void msm_geni_serial_power_off(struct uart_port *uport);
@@ -238,7 +237,7 @@ static int uart_line_id;
 static struct msm_geni_serial_port msm_geni_console_port;
 static struct msm_geni_serial_port msm_geni_serial_ports[GENI_UART_NR_PORTS];
 static void msm_geni_serial_handle_isr(struct uart_port *uport,
-				unsigned long *flags);
+				unsigned long *flags, bool is_irq_masked);
 
 /*
  * The below API is required to check if uport->lock (spinlock)
@@ -351,7 +350,6 @@ bool geni_wait_for_cmd_done(struct uart_port *uport, bool is_irq_masked)
 {
 	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
 	unsigned long timeout = POLL_ITERATIONS;
-	unsigned long ret;
 	unsigned long flags = 0;
 
 	/*
@@ -366,13 +364,13 @@ bool geni_wait_for_cmd_done(struct uart_port *uport, bool is_irq_masked)
 		 */
 		if (msm_port->m_cmd) {
 			while (!msm_port->m_cmd_done && timeout > 0) {
-				msm_geni_serial_handle_isr(uport, &flags);
+				msm_geni_serial_handle_isr(uport, &flags, true);
 				timeout--;
 				udelay(100);
 			}
 		} else if (msm_port->s_cmd) {
 			while (!msm_port->s_cmd_done && timeout > 0) {
-				msm_geni_serial_handle_isr(uport, &flags);
+				msm_geni_serial_handle_isr(uport, &flags, true);
 				timeout--;
 				udelay(100);
 			}
@@ -380,16 +378,16 @@ bool geni_wait_for_cmd_done(struct uart_port *uport, bool is_irq_masked)
 	} else {
 		/* Waiting for 10 milli second for interrupt to be fired */
 		if (msm_port->m_cmd)
-			ret = wait_for_completion_timeout
+			timeout = wait_for_completion_timeout
 					(&msm_port->m_cmd_timeout,
 				msecs_to_jiffies(POLL_WAIT_TIMEOUT_MSEC));
 		else if (msm_port->s_cmd)
-			ret = wait_for_completion_timeout
+			timeout = wait_for_completion_timeout
 					(&msm_port->s_cmd_timeout,
 				msecs_to_jiffies(POLL_WAIT_TIMEOUT_MSEC));
 	}
 
-	return ret ? 0 : 1;
+	return timeout ? 0 : 1;
 }
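
The switch from the never-assigned ret to timeout above fixes a read of an uninitialized variable on the polled path; both wait strategies now feed the same variable. The final mapping relies on wait_for_completion_timeout() returning 0 on timeout and the remaining jiffies otherwise, as in this sketch (done stands in for the port's completion):

unsigned long left;

left = wait_for_completion_timeout(&done,
			msecs_to_jiffies(POLL_WAIT_TIMEOUT_MSEC));
return left ? 0 : 1;	/* 0 = command completed, 1 = timed out */
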
 
 static void msm_geni_serial_config_port(struct uart_port *uport, int cfg_flags)
@@ -1033,13 +1031,12 @@ static int handle_rx_console(struct uart_port *uport,
 			unsigned int rx_fifo_wc,
 			unsigned int rx_last_byte_valid,
 			unsigned int rx_last,
-			bool drop_rx, unsigned long *flags)
+			bool drop_rx)
 {
 	int i, c;
 	unsigned char *rx_char;
 	struct tty_port *tport;
 	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
-	bool locked;
 
 	tport = &uport->state->port;
 	for (i = 0; i < rx_fifo_wc; i++) {
@@ -1065,23 +1062,6 @@ static int handle_rx_console(struct uart_port *uport,
 				tty_insert_flip_char(tport, rx_char[c], flag);
 		}
 	}
-	if (!drop_rx) {
-		/*
-		 * Driver acquiring port->lock in isr function and calling
-		 * tty_flip_buffer_push() which in turn will wait for
-		 * another lock from framework __queue_work function.
-		 * release the port lock before calling tty_flip_buffer_push()
-		 * to avoid deadlock scenarios.
-		 */
-		locked = msm_geni_serial_spinlocked(uport);
-		if (locked) {
-			spin_unlock_irqrestore(&uport->lock, *flags);
-			tty_flip_buffer_push(tport);
-			spin_lock_irqsave(&uport->lock, *flags);
-		} else {
-			tty_flip_buffer_push(tport);
-		}
-	}
 	return 0;
 }
 #else
@@ -1089,7 +1069,7 @@ static int handle_rx_console(struct uart_port *uport,
 			unsigned int rx_fifo_wc,
 			unsigned int rx_last_byte_valid,
 			unsigned int rx_last,
-			bool drop_rx, unsigned long *flags)
+			bool drop_rx)
 {
 	return -EPERM;
 }
@@ -1520,7 +1500,7 @@ static void stop_rx_sequencer(struct uart_port *uport)
 			    "%s cancel failed is_rx_active:%d 0x%x\n",
 			    __func__, is_rx_active, geni_status);
 		if (uart_console(uport) && !is_rx_active) {
-			msm_geni_serial_handle_isr(uport, &flags);
+			msm_geni_serial_handle_isr(uport, &flags, true);
 			goto exit_rx_seq;
 		}
 		port->s_cmd_done = false;
@@ -1568,7 +1548,7 @@ static int handle_rx_hs(struct uart_port *uport,
 			unsigned int rx_fifo_wc,
 			unsigned int rx_last_byte_valid,
 			unsigned int rx_last,
-			bool drop_rx, unsigned long *flags)
+			bool drop_rx)
 {
 	unsigned char *rx_char;
 	struct tty_port *tport;
@@ -1600,8 +1580,7 @@ static int handle_rx_hs(struct uart_port *uport,
 	return ret;
 }
 
-static int msm_geni_serial_handle_rx(struct uart_port *uport, bool drop_rx,
-				     unsigned long *flags)
+static int msm_geni_serial_handle_rx(struct uart_port *uport, bool drop_rx)
 {
 	int ret = 0;
 	unsigned int rx_fifo_status;
@@ -1620,7 +1599,7 @@ static int msm_geni_serial_handle_rx(struct uart_port *uport, bool drop_rx,
 	rx_last = rx_fifo_status & RX_LAST;
 	if (rx_fifo_wc)
 		ret = port->handle_rx(uport, rx_fifo_wc, rx_last_byte_valid,
-						rx_last, drop_rx, flags);
+						rx_last, drop_rx);
 	return ret;
 }
 
@@ -1803,7 +1782,8 @@ static int msm_geni_serial_handle_dma_tx(struct uart_port *uport)
 }
 
 static void msm_geni_serial_handle_isr(struct uart_port *uport,
-				       unsigned long *flags)
+				       unsigned long *flags,
+				       bool is_irq_masked)
 {
 	unsigned int m_irq_status;
 	unsigned int s_irq_status;
@@ -1889,8 +1869,16 @@ static void msm_geni_serial_handle_isr(struct uart_port *uport,
 		}
 
 		if (s_irq_status & (S_RX_FIFO_WATERMARK_EN |
-							S_RX_FIFO_LAST_EN))
-			msm_geni_serial_handle_rx(uport, drop_rx, flags);
+							S_RX_FIFO_LAST_EN)) {
+			msm_geni_serial_handle_rx(uport, drop_rx);
+			if (!drop_rx && !is_irq_masked) {
+				spin_unlock_irqrestore(&uport->lock, *flags);
+				tty_flip_buffer_push(tport);
+				spin_lock_irqsave(&uport->lock, *flags);
+			} else if (!drop_rx) {
+				tty_flip_buffer_push(tport);
+			}
+		}
 	} else {
 		dma_tx_status = geni_read_reg_nolog(uport->membase,
 							SE_DMA_TX_IRQ_STAT);
@@ -1984,7 +1972,7 @@ static irqreturn_t msm_geni_serial_isr(int isr, void *dev)
 	unsigned long flags;
 
 	spin_lock_irqsave(&uport->lock, flags);
-	msm_geni_serial_handle_isr(uport, &flags);
+	msm_geni_serial_handle_isr(uport, &flags, false);
 	spin_unlock_irqrestore(&uport->lock, flags);
 	return IRQ_HANDLED;
 }
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 3a5c417..ea90c04 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -1569,6 +1569,11 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
 		ret = gsi_get_xfer_index(ep);
 		break;
 	case GSI_EP_OP_STORE_DBL_INFO:
+		if (!dwc->pullups_connected) {
+			dbg_log_string("No Pullup\n");
+			return -ESHUTDOWN;
+		}
+
 		request = (struct usb_gsi_request *)op_data;
 		gsi_store_ringbase_dbl_info(ep, request);
 		break;
@@ -3316,8 +3321,12 @@ static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
 		mdwc->vbus_active = event;
 	}
 
+	/*
+	 * Drive a pulse on DP to ensure proper CDP detection, but
+	 * only when the vbus connect event is a valid one.
+	 */
 	if (get_psy_type(mdwc) == POWER_SUPPLY_TYPE_USB_CDP &&
-			mdwc->vbus_active) {
+			mdwc->vbus_active && !mdwc->check_eud_state) {
 		dev_dbg(mdwc->dev, "Connected to CDP, pull DP up\n");
 		usb_phy_drive_dp_pulse(mdwc->hs_phy, DP_PULSE_WIDTH_MSEC);
 	}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 32a7bd8..3778784 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -753,12 +753,13 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
 		reg |= DWC3_DALEPENA_EP(dep->number);
 		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
 
+		dep->trb_dequeue = 0;
+		dep->trb_enqueue = 0;
+
 		if (usb_endpoint_xfer_control(desc))
 			goto out;
 
 		/* Initialize the TRB ring */
-		dep->trb_dequeue = 0;
-		dep->trb_enqueue = 0;
 		memset(dep->trb_pool, 0,
 		       sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
 
@@ -2516,6 +2517,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
 
 	/* begin to receive SETUP packets */
 	dwc->ep0state = EP0_SETUP_PHASE;
+	dwc->ep0_bounced = false;
 	dwc->link_state = DWC3_LINK_STATE_SS_DIS;
 	dwc3_ep0_out_start(dwc);
 
@@ -3377,6 +3379,10 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
 			dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
 		else
 			dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
+
+		dwc->eps[0]->trb_enqueue = 0;
+		dwc->eps[1]->trb_enqueue = 0;
+
 		dwc3_ep0_stall_and_restart(dwc);
 	}
 
diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c
index 616558e..57a67c1 100644
--- a/drivers/usb/gadget/function/f_cdev.c
+++ b/drivers/usb/gadget/function/f_cdev.c
@@ -139,6 +139,7 @@ struct f_cdev_opts {
 	struct f_cdev *port;
 	char *func_name;
 	u8 port_num;
+	u8 proto;
 };
 
 static int major, minors;
@@ -161,8 +162,8 @@ static struct usb_interface_descriptor cser_interface_desc = {
 	/* .bInterfaceNumber = DYNAMIC */
 	.bNumEndpoints =	3,
 	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
-	.bInterfaceSubClass =	0,
-	.bInterfaceProtocol =	0,
+	.bInterfaceSubClass =	USB_SUBCLASS_VENDOR_SPEC,
+	/* .bInterfaceProtocol = DYNAMIC */
 	/* .iInterface = DYNAMIC */
 };
 
@@ -778,6 +779,8 @@ static int usb_cser_bind(struct usb_configuration *c, struct usb_function *f)
 	struct f_cdev *port = func_to_port(f);
 	int status;
 	struct usb_ep *ep;
+	struct f_cdev_opts *opts =
+			container_of(f->fi, struct f_cdev_opts, func_inst);
 
 	if (cser_string_defs[0].id == 0) {
 		status = usb_string_id(c->cdev);
@@ -791,6 +794,7 @@ static int usb_cser_bind(struct usb_configuration *c, struct usb_function *f)
 		goto fail;
 	port->port_usb.data_id = status;
 	cser_interface_desc.bInterfaceNumber = status;
+	cser_interface_desc.bInterfaceProtocol = opts->proto;
 
 	status = -ENODEV;
 	ep = usb_ep_autoconfig(cdev->gadget, &cser_fs_in_desc);
@@ -2005,6 +2009,9 @@ static int cser_set_inst_name(struct usb_function_instance *f, const char *name)
 		port->port_usb.send_modem_ctrl_bits = dun_cser_send_ctrl_bits;
 		port->port_usb.disconnect = dun_cser_disconnect;
 		port->port_usb.send_break = dun_cser_send_break;
+		opts->proto = 0x40;
+	} else {
+		opts->proto = 0x60;
 	}
 
 	return 0;
diff --git a/drivers/usb/gadget/function/f_diag.c b/drivers/usb/gadget/function/f_diag.c
index 10f165a..ffd427f 100644
--- a/drivers/usb/gadget/function/f_diag.c
+++ b/drivers/usb/gadget/function/f_diag.c
@@ -3,7 +3,7 @@
  * Diag Function Device - Route ARM9 and ARM11 DIAG messages
  * between HOST and DEVICE.
  * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2008-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2020, The Linux Foundation. All rights reserved.
  * Author: Brian Swetland <swetland@google.com>
  */
 #include <linux/init.h>
@@ -60,8 +60,8 @@ static struct usb_interface_descriptor intf_desc = {
 	.bLength            =	sizeof(intf_desc),
 	.bDescriptorType    =	USB_DT_INTERFACE,
 	.bNumEndpoints      =	2,
-	.bInterfaceClass    =	0xFF,
-	.bInterfaceSubClass =	0xFF,
+	.bInterfaceClass    =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	USB_SUBCLASS_VENDOR_SPEC,
 	.bInterfaceProtocol =	0x30,
 };
 
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 7ed9cf7..511d46d 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3449,8 +3449,10 @@ static int ffs_func_set_alt(struct usb_function *f,
 			return intf;
 	}
 
-	if (ffs->func)
+	if (ffs->func) {
 		ffs_func_eps_disable(ffs->func);
+		ffs->func = NULL;
+	}
 
 	if (ffs->state == FFS_DEACTIVATED) {
 		ffs->state = FFS_CLOSING;
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index ae698bf..4aaee1d 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _F_GSI_H
@@ -344,8 +344,8 @@ static struct usb_interface_descriptor rmnet_gsi_interface_desc = {
 	.bDescriptorType =	USB_DT_INTERFACE,
 	.bNumEndpoints =	3,
 	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
-	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
-	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	USB_SUBCLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =	0x50,
 	/* .iInterface = DYNAMIC */
 };
 
@@ -1355,9 +1355,9 @@ static struct usb_interface_descriptor qdss_gsi_data_intf_desc = {
 	.bDescriptorType    =	USB_DT_INTERFACE,
 	.bAlternateSetting  =   0,
 	.bNumEndpoints      =	1,
-	.bInterfaceClass    =	0xff,
-	.bInterfaceSubClass =	0xff,
-	.bInterfaceProtocol =	0xff,
+	.bInterfaceClass    =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	USB_SUBCLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =	0x80,
 };
 
 static struct usb_endpoint_descriptor qdss_gsi_fs_data_desc = {
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index 19a6511..80ff811 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -1128,18 +1128,15 @@ static long mtp_send_receive_ioctl(struct file *fp, unsigned int code,
 	 * in kernel context, which is necessary for vfs_read and
 	 * vfs_write to use our buffers in the kernel address space.
 	 */
-	dev->xfer_result = 0;
-	if (dev->xfer_file_length) {
-		queue_work(dev->wq, work);
-		/* wait for operation to complete */
-		flush_workqueue(dev->wq);
-
-		/* read the result */
-		smp_rmb();
-	}
-	ret = dev->xfer_result;
+	queue_work(dev->wq, work);
+	/* wait for operation to complete */
+	flush_workqueue(dev->wq);
 	fput(filp);
 
+	/* read the result */
+	smp_rmb();
+	ret = dev->xfer_result;
+
 fail:
 	spin_lock_irq(&dev->lock);
 	if (dev->state == STATE_CANCELED)
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index e5c179b..505ac43 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -22,9 +22,9 @@ static struct usb_interface_descriptor qdss_data_intf_desc = {
 	.bDescriptorType    =	USB_DT_INTERFACE,
 	.bAlternateSetting  =   0,
 	.bNumEndpoints      =	1,
-	.bInterfaceClass    =	0xff,
-	.bInterfaceSubClass =	0xff,
-	.bInterfaceProtocol =	0xff,
+	.bInterfaceClass    =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	USB_SUBCLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =	0x70,
 };
 
 static struct usb_endpoint_descriptor qdss_hs_data_desc = {
@@ -56,9 +56,9 @@ static struct usb_interface_descriptor qdss_ctrl_intf_desc = {
 	.bDescriptorType    =	USB_DT_INTERFACE,
 	.bAlternateSetting  =   0,
 	.bNumEndpoints      =	2,
-	.bInterfaceClass    =	0xff,
-	.bInterfaceSubClass =	0xff,
-	.bInterfaceProtocol =	0xff,
+	.bInterfaceClass    =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	USB_SUBCLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =	0x70,
 };
 
 static struct usb_endpoint_descriptor qdss_hs_ctrl_in_desc = {
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index c860f30..0f63755 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -46,7 +46,7 @@ static struct usb_interface_descriptor gser_interface_desc = {
 	.bNumEndpoints =	2,
 	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
 	.bInterfaceSubClass =	0,
-	.bInterfaceProtocol =	0,
+	.bInterfaceProtocol =	0x40,
 	/* .iInterface = DYNAMIC */
 };
 
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index a86bac4..6692263 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -248,7 +248,7 @@ static void *usbpd_ipc_log;
 #define PD_MAX_DATA_OBJ		7
 
 #define PD_SRC_CAP_EXT_DB_LEN	24
-#define PD_STATUS_DB_LEN	5
+#define PD_STATUS_DB_LEN	6
 #define PD_BATTERY_CAP_DB_LEN	9
 
 #define PD_MAX_EXT_MSG_LEN		260
@@ -1377,10 +1377,12 @@ int usbpd_send_svdm(struct usbpd *pd, u16 svid, u8 cmd,
 		enum usbpd_svdm_cmd_type cmd_type, int obj_pos,
 		const u32 *vdos, int num_vdos)
 {
-	u32 svdm_hdr = SVDM_HDR(svid, 0, obj_pos, cmd_type, cmd);
+	u32 svdm_hdr = SVDM_HDR(svid, pd->spec_rev == USBPD_REV_30 ? 1 : 0,
+			obj_pos, cmd_type, cmd);
 
-	usbpd_dbg(&pd->dev, "VDM tx: svid:%x cmd:%x cmd_type:%x svdm_hdr:%x\n",
-			svid, cmd, cmd_type, svdm_hdr);
+	usbpd_dbg(&pd->dev, "VDM tx: svid:%04x ver:%d obj_pos:%d cmd:%x cmd_type:%x svdm_hdr:%x\n",
+			svid, pd->spec_rev == USBPD_REV_30 ? 1 : 0, obj_pos,
+			cmd, cmd_type, svdm_hdr);
 
 	return usbpd_send_vdm(pd, svdm_hdr, vdos, num_vdos);
 }
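
Setting the header version to 1 for PD revision 3.0 selects Structured VDM version 2.0 (field value 0 is SVDM 1.0). The ternary is equivalent to this illustrative helper, which is not part of the patch:

static inline u32 svdm_version_field(u32 spec_rev)
{
	/* USBPD_REV_30 negotiates SVDM 2.0, encoded as 1 in the header */
	return spec_rev == USBPD_REV_30 ? 1 : 0;
}
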
@@ -1547,7 +1549,7 @@ static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg)
 	ktime_t recvd_time = ktime_get();
 
 	usbpd_dbg(&pd->dev,
-			"VDM rx: svid:%x cmd:%x cmd_type:%x vdm_hdr:%x has_dp: %s\n",
+			"VDM rx: svid:%04x cmd:%x cmd_type:%x vdm_hdr:%x has_dp: %s\n",
 			svid, cmd, cmd_type, vdm_hdr,
 			pd->has_dp ? "true" : "false");
 
@@ -1574,11 +1576,9 @@ static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg)
 		return;
 	}
 
-	if (SVDM_HDR_VER(vdm_hdr) > 1) {
-		usbpd_dbg(&pd->dev, "Discarding SVDM with incorrect version:%d\n",
+	if (SVDM_HDR_VER(vdm_hdr) > 1)
+		usbpd_dbg(&pd->dev, "Received SVDM with unsupported version:%d\n",
 				SVDM_HDR_VER(vdm_hdr));
-		return;
-	}
 
 	if (cmd_type != SVDM_CMD_TYPE_INITIATOR &&
 			pd->current_state != PE_SRC_STARTUP_WAIT_FOR_VDM_RESP)
@@ -2984,13 +2984,15 @@ static bool handle_ext_snk_ready(struct usbpd *pd, struct rx_msg *rx_msg)
 		complete(&pd->is_ready);
 		break;
 	case MSG_STATUS:
-		if (rx_msg->data_len != PD_STATUS_DB_LEN) {
-			usbpd_err(&pd->dev, "Invalid status db\n");
-			break;
-		}
+		if (rx_msg->data_len > PD_STATUS_DB_LEN)
+			usbpd_err(&pd->dev, "Invalid status db length:%d\n",
+					rx_msg->data_len);
+
+		memset(&pd->status_db, 0, sizeof(pd->status_db));
 		memcpy(&pd->status_db, rx_msg->payload,
-			sizeof(pd->status_db));
+			min((size_t)rx_msg->data_len, sizeof(pd->status_db)));
 		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
+		complete(&pd->is_ready);
 		break;
 	case MSG_BATTERY_CAPABILITIES:
 		if (rx_msg->data_len != PD_BATTERY_CAP_DB_LEN) {
@@ -3985,9 +3987,9 @@ static int usbpd_uevent(struct device *dev, struct kobj_uevent_env *env)
 				"explicit" : "implicit");
 	add_uevent_var(env, "ALT_MODE=%d", pd->vdm_state == MODE_ENTERED);
 
-	add_uevent_var(env, "SDB=%02x %02x %02x %02x %02x", pd->status_db[0],
-			pd->status_db[1], pd->status_db[2], pd->status_db[3],
-			pd->status_db[4]);
+	add_uevent_var(env, "SDB=%02x %02x %02x %02x %02x %02x",
+			pd->status_db[0], pd->status_db[1], pd->status_db[2],
+			pd->status_db[3], pd->status_db[4], pd->status_db[5]);
 
 	return 0;
 }
diff --git a/drivers/usb/phy/phy-msm-snps-hs.c b/drivers/usb/phy/phy-msm-snps-hs.c
index 818c5a0..ea0a061 100644
--- a/drivers/usb/phy/phy-msm-snps-hs.c
+++ b/drivers/usb/phy/phy-msm-snps-hs.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -280,6 +280,12 @@ static int msm_hsphy_enable_power(struct msm_hsphy *phy, bool on)
 	if (ret)
 		dev_err(phy->phy.dev, "Unable unconfig VDD:%d\n",
 								ret);
+	/*
+	 * Return from here based on power_enabled. If it is not set,
+	 * return -EINVAL since either set_voltage or regulator_enable
+	 * failed.
+	 */
+	if (!phy->power_enabled)
+		return -EINVAL;
 err_vdd:
 	phy->power_enabled = false;
 	dev_dbg(phy->phy.dev, "HSUSB PHY's regulators are turned OFF.\n");
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 2330ea0..a2e2e52 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -1216,19 +1216,21 @@ static int block_operations(struct f2fs_sb_info *sbi)
 		goto retry_flush_quotas;
 	}
 
+retry_flush_nodes:
 	down_write(&sbi->node_write);
 
 	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
 		up_write(&sbi->node_write);
-		up_write(&sbi->node_change);
-		f2fs_unlock_all(sbi);
 		atomic_inc(&sbi->wb_sync_req[NODE]);
 		err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
 		atomic_dec(&sbi->wb_sync_req[NODE]);
-		if (err)
+		if (err) {
+			up_write(&sbi->node_change);
+			f2fs_unlock_all(sbi);
 			goto out;
+		}
 		cond_resched();
-		goto retry_flush_quotas;
+		goto retry_flush_nodes;
 	}
 
 	/*
diff --git a/include/dt-bindings/iio/qcom,spmi-vadc.h b/include/dt-bindings/iio/qcom,spmi-vadc.h
index 5cd49dd..53a0da2 100644
--- a/include/dt-bindings/iio/qcom,spmi-vadc.h
+++ b/include/dt-bindings/iio/qcom,spmi-vadc.h
@@ -159,6 +159,7 @@
 #define ADC_GPIO6_PU1				0x37
 #define ADC_GPIO7_PU1				0x38
 #define ADC_SBUx_PU1				0x39
+#define ANA_IN					0x1d
 
 /* 100k pull-up2 */
 #define ADC_BAT_THERM_PU2			0x4a
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
index 4aff0c9..046cc11 100644
--- a/include/dt-bindings/msm/msm-bus-ids.h
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -609,6 +609,14 @@
 #define	MSM_BUS_SLAVE_TLMM_NORTH 731
 #define	MSM_BUS_SLAVE_TLMM_WEST 732
 #define	MSM_BUS_SLAVE_SKL 733
+#define	MSM_BUS_SLAVE_LPASS_TCM 734
+#define	MSM_BUS_SLAVE_TLMM_CENTER 736
+#define	MSM_BUS_MSS_NAV_CE_MPU_CFG 737
+#define	MSM_BUS_SLAVE_A2NOC_THROTTLE_CFG 738
+#define	MSM_BUS_SLAVE_CDSP 739
+#define	MSM_BUS_SLAVE_CDSP_SMMU_CFG 740
+#define	MSM_BUS_SLAVE_LPASS_MPU_CFG 741
+#define	MSM_BUS_SLAVE_CSI_PHY_CFG 742
 #define	MSM_BUS_SLAVE_SERVICE_A1NOC 744
 #define	MSM_BUS_SLAVE_ANOC_PCIE_SNOC 745
 #define	MSM_BUS_SLAVE_SERVICE_A2NOC 746
@@ -701,6 +709,7 @@
 #define	MSM_BUS_SLAVE_AHB2PHY_2 836
 #define	MSM_BUS_SLAVE_HWKM 837
 #define	MSM_BUS_SLAVE_PKA_WRAPPER 838
+#define	MSM_BUS_SLAVE_LAST 839
 
 #define	MSM_BUS_SLAVE_EBI_CH0_DISPLAY 20512
 #define	MSM_BUS_SLAVE_LLCC_DISPLAY 20513
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index dcabab1..87f1402 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -62,6 +62,7 @@
 #define DIAG_IOCTL_QUERY_MD_PID	41
 #define DIAG_IOCTL_QUERY_PD_FEATUREMASK	42
 #define DIAG_IOCTL_PASSTHRU_CONTROL	43
+#define DIAG_IOCTL_MDM_HDLC_TOGGLE	44
 
 /* PC Tools IDs */
 #define APQ8060_TOOLS_ID	4062
diff --git a/include/linux/dma-mapping-fast.h b/include/linux/dma-mapping-fast.h
index aca7592..6d24110 100644
--- a/include/linux/dma-mapping-fast.h
+++ b/include/linux/dma-mapping-fast.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __LINUX_DMA_MAPPING_FAST_H
@@ -23,9 +23,11 @@ struct dma_fast_smmu_mapping {
 	size_t		 num_4k_pages;
 
 	unsigned int	bitmap_size;
+	/* bitmap has 1s marked only for valid mappings */
 	unsigned long	*bitmap;
+	/* clean_bitmap has 1s marked for both valid and stale TLB mappings */
+	unsigned long	*clean_bitmap;
 	unsigned long	next_start;
-	unsigned long	upcoming_stale_bit;
 	bool		have_stale_tlbs;
 
 	dma_addr_t	pgtbl_dma_handle;
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 8c48bfa..5c105c9 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -166,6 +166,7 @@ void put_iova_domain(struct iova_domain *iovad);
 struct iova *split_and_remove_iova(struct iova_domain *iovad,
 	struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
 void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
+void free_global_cached_iovas(struct iova_domain *iovad);
 #else
 static inline int iova_cache_get(void)
 {
@@ -273,6 +274,11 @@ static inline void free_cpu_cached_iovas(unsigned int cpu,
 					 struct iova_domain *iovad)
 {
 }
+
+static inline void free_global_cached_iovas(struct iova_domain *iovad)
+{
+}
+
 #endif
 
 #endif
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 51a5ec4..46305f4 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -222,6 +222,7 @@ struct reg_write_info {
  * @rddm_size: RAM dump size that host should allocate for debugging purpose
  * @sbl_size: SBL image size
  * @seg_len: BHIe vector size
+ * @img_pre_alloc: allocate rddm and fbc image buffers one time
  * @fbc_image: Points to firmware image buffer
  * @rddm_image: Points to RAM dump buffer
  * @max_chan: Maximum number of channels controller support
@@ -294,6 +295,8 @@ struct mhi_controller {
 	size_t seg_len;
 	u32 session_id;
 	u32 sequence_id;
+
+	bool img_pre_alloc;
 	struct image_info *fbc_image;
 	struct image_info *rddm_image;
 
@@ -607,6 +610,30 @@ void mhi_device_get(struct mhi_device *mhi_dev, int vote);
 int mhi_device_get_sync(struct mhi_device *mhi_dev, int vote);
 
 /**
+ * mhi_device_get_sync_atomic - Asserts device_wake and moves device to M0
+ * @mhi_dev: Device associated with the channels
+ * @timeout_us: timeout, in microseconds
+ *
+ * The device_wake is asserted to keep device in M0 or bring it to M0.
+ * If device is not in M0 state, then this function will wait for device to
+ * move to M0, until @timeout_us elapses.
+ * However, if device's M1 state-change event races with this function
+ * then there is a possibility of device moving from M0 to M2 and back
+ * to M0. That can't be avoided as host must transition device from M1 to M2
+ * as per the spec.
+ * Clients can ignore that transition after this function returns as the device
+ * is expected to immediately move from M2 to M0 as wake is asserted and
+ * wouldn't enter low power state.
+ *
+ * Returns:
+ * 0 if operation was successful (however, M0 -> M2 -> M0 is possible later) as
+ * mentioned above.
+ * -ETIMEDOUT if device failed to move to M0 before @timeout_us elapsed
+ * -EIO if the MHI state is one of the ERROR states.
+ */
+int mhi_device_get_sync_atomic(struct mhi_device *mhi_dev, int timeout_us);
+
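
A hypothetical call pattern based on the kernel-doc above; the vote type passed to the put side is an assumption about the controller's vote enum:

int ret = mhi_device_get_sync_atomic(mhi_dev, 1000);

if (ret) {
	/* -ETIMEDOUT: no M0 within 1000 us; -EIO: MHI is in an error state */
	return ret;
}
/* ... device is in M0 with device_wake asserted ... */
mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);	/* assumed vote flag */
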
+/**
  * mhi_device_put - re-enable low power modes
  * @mhi_dev: Device associated with the channels
  * @vote: vote to remove
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 83b667f..58952fa 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1225,6 +1225,11 @@ int perf_event_max_stack_handler(struct ctl_table *table, int write,
 #define PERF_SECURITY_KERNEL		2
 #define PERF_SECURITY_TRACEPOINT	3
 
+static inline bool perf_paranoid_any(void)
+{
+	return sysctl_perf_event_paranoid > 2;
+}
+
 static inline int perf_is_paranoid(void)
 {
 	return sysctl_perf_event_paranoid > -1;
diff --git a/include/net/cnss2.h b/include/net/cnss2.h
index eb37edc..80863ba 100644
--- a/include/net/cnss2.h
+++ b/include/net/cnss2.h
@@ -21,7 +21,8 @@ enum cnss_bus_width_type {
 	CNSS_BUS_WIDTH_LOW,
 	CNSS_BUS_WIDTH_MEDIUM,
 	CNSS_BUS_WIDTH_HIGH,
-	CNSS_BUS_WIDTH_VERY_HIGH
+	CNSS_BUS_WIDTH_VERY_HIGH,
+	CNSS_BUS_WIDTH_LOW_LATENCY
 };
 
 enum cnss_platform_cap_flag {
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index d2a016e..35d7c88 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -39,6 +39,7 @@ enum {
 	ND_OPT_DNSSL = 31,		/* RFC6106 */
 	ND_OPT_6CO = 34,		/* RFC6775 */
 	ND_OPT_CAPTIVE_PORTAL = 37,	/* RFC7710 */
+	ND_OPT_PREF64 = 38,		/* RFC-ietf-6man-ra-pref64-09 */
 	__ND_OPT_MAX
 };
 
diff --git a/include/soc/qcom/cx_ipeak.h b/include/soc/qcom/cx_ipeak.h
index eed850a..f52d953 100644
--- a/include/soc/qcom/cx_ipeak.h
+++ b/include/soc/qcom/cx_ipeak.h
@@ -6,7 +6,7 @@
 #ifndef __SOC_COM_CX_IPEAK_H
 #define __SOC_COM_CX_IPEAK_H
 
-typedef void (*cx_ipeak_victim_fn)(void *data, u32 freq_limit);
+typedef int (*cx_ipeak_victim_fn)(void *data, u32 freq_limit);
 
 struct device_node;
 struct cx_ipeak_client;
diff --git a/include/trace/events/udp.h b/include/trace/events/udp.h
index 336fe27..f20cb14 100644
--- a/include/trace/events/udp.h
+++ b/include/trace/events/udp.h
@@ -27,6 +27,54 @@ TRACE_EVENT(udp_fail_queue_rcv_skb,
 	TP_printk("rc=%d port=%hu", __entry->rc, __entry->lport)
 );
 
+TRACE_EVENT(udpv4_fail_rcv_buf_errors,
+
+	TP_PROTO(struct sk_buff *skb),
+
+	TP_ARGS(skb),
+
+	TP_STRUCT__entry(
+		__field(void *, saddr)
+		__field(void *, daddr)
+		__field(__be16, sport)
+		__field(__be16, dport)
+	),
+
+	TP_fast_assign(
+		__entry->saddr = &ip_hdr(skb)->saddr;
+		__entry->daddr = &ip_hdr(skb)->daddr;
+		__entry->sport = ntohs(udp_hdr(skb)->source);
+		__entry->dport = ntohs(udp_hdr(skb)->dest);
+	),
+
+	TP_printk("src %pI4:%u dst %pI4:%u", __entry->saddr,
+		  __entry->sport, __entry->daddr, __entry->dport)
+);
+
+TRACE_EVENT(udpv6_fail_rcv_buf_errors,
+
+	TP_PROTO(struct sk_buff *skb),
+
+	TP_ARGS(skb),
+
+	TP_STRUCT__entry(
+		__field(void *, saddr)
+		__field(void *, daddr)
+		__field(__be16, sport)
+		__field(__be16, dport)
+	),
+
+	TP_fast_assign(
+		__entry->saddr = &ipv6_hdr(skb)->saddr;
+		__entry->daddr = &ipv6_hdr(skb)->daddr;
+		__entry->sport = ntohs(udp_hdr(skb)->source);
+		__entry->dport = ntohs(udp_hdr(skb)->dest);
+	),
+
+	TP_printk("src %pI6:%u dst %pI6:%u", __entry->saddr,
+		  __entry->sport, __entry->daddr, __entry->dport)
+);
+
 #endif /* _TRACE_UDP_H */
 
 /* This part must be outside protection */
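
One caveat with the two events above: they record pointers into the skb headers and let %pI4/%pI6 dereference them when the ring buffer is eventually printed, which can outlive the skb. A more conservative variant (an assumption, not what this patch does) copies the IPv4 addresses by value:

	TP_STRUCT__entry(
		__field(__be32, saddr)
		__field(__be32, daddr)
		__field(__u16, sport)
		__field(__u16, dport)
	),

	TP_fast_assign(
		__entry->saddr = ip_hdr(skb)->saddr;
		__entry->daddr = ip_hdr(skb)->daddr;
		__entry->sport = ntohs(udp_hdr(skb)->source);
		__entry->dport = ntohs(udp_hdr(skb)->dest);
	),

	TP_printk("src %pI4:%u dst %pI4:%u", &__entry->saddr,
		  __entry->sport, &__entry->daddr, __entry->dport)
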
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 8a9adc6..214bc1b 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -215,7 +215,11 @@ struct drm_msm_gem_cpu_fini {
  */
 struct drm_msm_gem_submit_reloc {
 	__u32 submit_offset;  /* in, offset from submit_bo */
+#ifdef __cplusplus
+	__u32 or_val;
+#else
 	__u32 or;             /* in, value OR'd with result */
+#endif
 	__s32 shift;          /* in, amount of left shift (can be negative) */
 	__u32 reloc_idx;      /* in, index of reloc_bo buffer */
 	__u64 reloc_offset;   /* in, offset from start of reloc_bo */
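
The __cplusplus guard is needed because or is a reserved alternative token for || in C++ (like and and not), so "__u32 or;" fails to parse when this UAPI header is included from C++ userspace. A minimal demonstration, compilable as plain C:

#include <stdio.h>

struct reloc_demo {
#ifdef __cplusplus
	unsigned int or_val;	/* 'or' is an operator token in C++ */
#else
	unsigned int or;	/* a legal identifier in C */
#endif
};

int main(void)
{
	struct reloc_demo r = { .or = 1 };

	printf("%u\n", r.or);
	return 0;
}
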
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 238ad0f..fb93388 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -425,8 +425,13 @@ static cpumask_var_t perf_online_mask;
  *   0 - disallow raw tracepoint access for unpriv
  *   1 - disallow cpu events for unpriv
  *   2 - disallow kernel profiling for unpriv
+ *   3 - disallow all unpriv perf event use
  */
+#ifdef CONFIG_SECURITY_PERF_EVENTS_RESTRICT
+int sysctl_perf_event_paranoid __read_mostly = 3;
+#else
 int sysctl_perf_event_paranoid __read_mostly = 2;
+#endif
 
 /* Minimum for 512 kiB + 1 user control page */
 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
@@ -10870,6 +10875,9 @@ SYSCALL_DEFINE5(perf_event_open,
 	if (flags & ~PERF_FLAG_ALL)
 		return -EINVAL;
 
+	if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
 	/* Do we allow access to perf_event_open(2) ? */
 	err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
 	if (err)
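
With the check above, a paranoid level of 3 rejects unprivileged openers before any attribute validation. A standalone userspace probe of the behavior (illustrative; run without CAP_SYS_ADMIN and expect EACCES when kernel.perf_event_paranoid is 3):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;

	/* self-monitoring counter: allowed at paranoid <= 2, EACCES at 3 */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		printf("perf_event_open: %s\n", strerror(errno));
	return 0;
}
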
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e688b32..0044877 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2486,7 +2486,8 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
 	struct rq_flags rf;
 
 #if defined(CONFIG_SMP)
-	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
+	if ((sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) ||
+			walt_want_remote_wakeup()) {
 		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
 		ttwu_queue_remote(p, cpu, wake_flags);
 		return;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fe4c420..40f2d69 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6890,7 +6890,6 @@ enum fastpaths {
 	NONE = 0,
 	SYNC_WAKEUP,
 	PREV_CPU_FASTPATH,
-	MANY_WAKEUP,
 };
 
 static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
@@ -7686,8 +7685,13 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
 	int delta = 0;
 	int task_boost = per_task_boost(p);
 	int boosted = (schedtune_task_boost(p) > 0) || (task_boost > 0);
-	int start_cpu = get_start_cpu(p);
+	int start_cpu;
 
+	if (is_many_wakeup(sibling_count_hint) && prev_cpu != cpu &&
+			cpumask_test_cpu(prev_cpu, &p->cpus_allowed))
+		return prev_cpu;
+
+	start_cpu = get_start_cpu(p);
 	if (start_cpu < 0)
 		goto eas_not_ready;
 
@@ -7714,13 +7718,6 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
 		goto done;
 	}
 
-	if (is_many_wakeup(sibling_count_hint) && prev_cpu != cpu &&
-				bias_to_this_cpu(p, prev_cpu, start_cpu)) {
-		best_energy_cpu = prev_cpu;
-		fbt_env.fastpath = MANY_WAKEUP;
-		goto done;
-	}
-
 	rcu_read_lock();
 	pd = rcu_dereference(rd->pd);
 	if (!pd)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b156320..701b7b7 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2740,6 +2740,12 @@ enum sched_boost_policy {
 
 #ifdef CONFIG_SCHED_WALT
 
+#define WALT_MANY_WAKEUP_DEFAULT 1000
+static inline bool walt_want_remote_wakeup(void)
+{
+	return sysctl_sched_many_wakeup_threshold < WALT_MANY_WAKEUP_DEFAULT;
+}
+
 static inline int cluster_first_cpu(struct sched_cluster *cluster)
 {
 	return cpumask_first(&cluster->cpus);
@@ -3188,6 +3194,10 @@ static inline unsigned int power_cost(int cpu, u64 demand)
 #endif
 
 static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
+static inline bool walt_want_remote_wakeup(void)
+{
+	return false;
+}
 #endif	/* CONFIG_SCHED_WALT */
 
 struct sched_avg_stats {
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 3ef27fc..11f0cfb 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -1026,7 +1026,7 @@ unsigned int max_possible_efficiency = 1;
 unsigned int min_possible_efficiency = UINT_MAX;
 
 unsigned int sysctl_sched_conservative_pl;
-unsigned int sysctl_sched_many_wakeup_threshold = 1000;
+unsigned int sysctl_sched_many_wakeup_threshold = WALT_MANY_WAKEUP_DEFAULT;
 
 #define INC_STEP 8
 #define DEC_STEP 2
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d60ffab1..d3b8b3c 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1919,9 +1919,11 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		int is_udplite = IS_UDPLITE(sk);
 
 		/* Note that an ENOMEM error is charged twice */
-		if (rc == -ENOMEM)
+		if (rc == -ENOMEM) {
 			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
 					is_udplite);
+			trace_udpv4_fail_rcv_buf_errors(skb);
+		}
 		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 		kfree_skb(skb);
 		trace_udp_fail_queue_rcv_skb(rc, sk);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 45d8d08..171c527 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -198,6 +198,7 @@ static inline int ndisc_is_useropt(const struct net_device *dev,
 	return opt->nd_opt_type == ND_OPT_RDNSS ||
 		opt->nd_opt_type == ND_OPT_DNSSL ||
 		opt->nd_opt_type == ND_OPT_CAPTIVE_PORTAL ||
+		opt->nd_opt_type == ND_OPT_PREF64 ||
 		ndisc_ops_is_useropt(dev, opt->nd_opt_type);
 }
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 5c5e752..c3060a3 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -54,6 +54,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <trace/events/skb.h>
+#include <trace/events/udp.h>
 #include "udp_impl.h"
 
 static bool udp6_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
@@ -538,9 +539,11 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		int is_udplite = IS_UDPLITE(sk);
 
 		/* Note that an ENOMEM error is charged twice */
-		if (rc == -ENOMEM)
+		if (rc == -ENOMEM) {
 			UDP6_INC_STATS(sock_net(sk),
 					 UDP_MIB_RCVBUFERRORS, is_udplite);
+			trace_udpv6_fail_rcv_buf_errors(skb);
+		}
 		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 		kfree_skb(skb);
 		return -1;
diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c
index ab21357f..e532b7a 100644
--- a/net/qrtr/mhi.c
+++ b/net/qrtr/mhi.c
@@ -171,12 +171,11 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
 	INIT_LIST_HEAD(&qdev->ul_pkts);
 	spin_lock_init(&qdev->ul_lock);
 
+	dev_set_drvdata(&mhi_dev->dev, qdev);
 	rc = qrtr_endpoint_register(&qdev->ep, net_id, rt);
 	if (rc)
 		return rc;
 
-	dev_set_drvdata(&mhi_dev->dev, qdev);
-
 	dev_dbg(qdev->dev, "QTI MHI QRTR driver probed\n");
 
 	return 0;
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 5871058..59dd4ae 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -160,6 +160,7 @@ static struct work_struct qrtr_backup_work;
  * @kworker: worker thread for recv work
  * @task: task to run the worker thread
  * @read_data: scheduled work for recv work
+ * @say_hello: scheduled work for initiating hello
  * @ws: wakeupsource avoid system suspend
  * @ilc: ipc logging context reference
  */
@@ -170,6 +171,7 @@ struct qrtr_node {
 	unsigned int nid;
 	unsigned int net_id;
 	atomic_t hello_sent;
+	atomic_t hello_rcvd;
 
 	struct radix_tree_root qrtr_tx_flow;
 	struct wait_queue_head resume_tx;
@@ -181,6 +183,7 @@ struct qrtr_node {
 	struct kthread_worker kworker;
 	struct task_struct *task;
 	struct kthread_work read_data;
+	struct kthread_work say_hello;
 
 	struct wakeup_source *ws;
 
@@ -529,6 +532,10 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
 		kfree_skb(skb);
 		return rc;
 	}
+	if (atomic_read(&node->hello_sent) && type == QRTR_TYPE_HELLO) {
+		kfree_skb(skb);
+		return 0;
+	}
 
 	/* If sk is null, this is a forwarded packet and should not wait */
 	if (!skb->sk) {
@@ -966,6 +973,30 @@ static void qrtr_fwd_pkt(struct sk_buff *skb, struct qrtr_cb *cb)
 	qrtr_node_enqueue(node, skb, cb->type, &from, &to, 0);
 	qrtr_node_release(node);
 }
+
+static void qrtr_sock_queue_skb(struct qrtr_node *node, struct sk_buff *skb,
+				struct qrtr_sock *ipc)
+{
+	struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb;
+	int rc;
+
+	/* Don't queue HELLO if control port already received */
+	if (cb->type == QRTR_TYPE_HELLO) {
+		if (atomic_read(&node->hello_rcvd)) {
+			kfree_skb(skb);
+			return;
+		}
+		atomic_inc(&node->hello_rcvd);
+	}
+
+	rc = sock_queue_rcv_skb(&ipc->sk, skb);
+	if (rc) {
+		pr_err("%s: qrtr pkt dropped flow[%d] rc[%d]\n",
+		       __func__, cb->confirm_rx, rc);
+		kfree_skb(skb);
+	}
+}
+
 /* Handle and route a received packet.
  *
  * This will auto-reply with resume-tx packet as necessary.
@@ -1008,18 +1039,40 @@ static void qrtr_node_rx_work(struct kthread_work *work)
 			if (!ipc) {
 				kfree_skb(skb);
 			} else {
-				if (sock_queue_rcv_skb(&ipc->sk, skb)) {
-					pr_err("%s qrtr pkt dropped flow[%d]\n",
-					       __func__, cb->confirm_rx);
-					kfree_skb(skb);
-				}
-
+				qrtr_sock_queue_skb(node, skb, ipc);
 				qrtr_port_put(ipc);
 			}
 		}
 	}
 }
 
+static void qrtr_hello_work(struct kthread_work *work)
+{
+	struct sockaddr_qrtr from = {AF_QIPCRTR, 0, QRTR_PORT_CTRL};
+	struct sockaddr_qrtr to = {AF_QIPCRTR, 0, QRTR_PORT_CTRL};
+	struct qrtr_ctrl_pkt *pkt;
+	struct qrtr_node *node;
+	struct qrtr_sock *ctrl;
+	struct sk_buff *skb;
+
+	ctrl = qrtr_port_lookup(QRTR_PORT_CTRL);
+	if (!ctrl)
+		return;
+
+	skb = qrtr_alloc_ctrl_packet(&pkt);
+	if (!skb) {
+		qrtr_port_put(ctrl);
+		return;
+	}
+
+	node = container_of(work, struct qrtr_node, say_hello);
+	pkt->cmd = cpu_to_le32(QRTR_TYPE_HELLO);
+	from.sq_node = qrtr_local_nid;
+	to.sq_node = node->nid;
+	qrtr_node_enqueue(node, skb, QRTR_TYPE_HELLO, &from, &to, 0);
+	qrtr_port_put(ctrl);
+}
+
 /**
  * qrtr_endpoint_register() - register a new endpoint
  * @ep: endpoint to register
@@ -1048,8 +1101,10 @@ int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int net_id,
 	node->nid = QRTR_EP_NID_AUTO;
 	node->ep = ep;
 	atomic_set(&node->hello_sent, 0);
+	atomic_set(&node->hello_rcvd, 0);
 
 	kthread_init_work(&node->read_data, qrtr_node_rx_work);
+	kthread_init_work(&node->say_hello, qrtr_hello_work);
 	kthread_init_worker(&node->kworker);
 	node->task = kthread_run(kthread_worker_fn, &node->kworker, "qrtr_rx");
 	if (IS_ERR(node->task)) {
@@ -1071,6 +1126,7 @@ int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int net_id,
 	up_write(&qrtr_node_lock);
 	ep->node = node;
 
+	kthread_queue_work(&node->kworker, &node->say_hello);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(qrtr_endpoint_register);
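
The hello handshake rides on the per-endpoint kthread worker that already serializes rx processing; condensed to its pattern (a sketch using the names from the surrounding code):

kthread_init_work(&node->read_data, qrtr_node_rx_work);
kthread_init_work(&node->say_hello, qrtr_hello_work);
kthread_init_worker(&node->kworker);
node->task = kthread_run(kthread_worker_fn, &node->kworker, "qrtr_rx");
/* ... endpoint is added to qrtr_all_epts ... */
kthread_queue_work(&node->kworker, &node->say_hello);
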
@@ -1352,6 +1408,17 @@ static int __qrtr_bind(struct socket *sock,
 		qrtr_reset_ports();
 	mutex_unlock(&qrtr_port_lock);
 
+	if (port == QRTR_PORT_CTRL) {
+		struct qrtr_node *node;
+
+		down_write(&qrtr_node_lock);
+		list_for_each_entry(node, &qrtr_all_epts, item) {
+			atomic_set(&node->hello_sent, 0);
+			atomic_set(&node->hello_rcvd, 0);
+		}
+		up_write(&qrtr_node_lock);
+	}
+
 	/* unbind previous, if any */
 	if (!zapped)
 		qrtr_port_remove(ipc);
@@ -1451,7 +1518,7 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
 
 	down_read(&qrtr_node_lock);
 	list_for_each_entry(node, &qrtr_all_epts, item) {
-		if (node->nid == QRTR_EP_NID_AUTO)
+		if (node->nid == QRTR_EP_NID_AUTO && type != QRTR_TYPE_HELLO)
 			continue;
 		skbn = skb_clone(skb, GFP_KERNEL);
 		if (!skbn)
diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh
index 67d1314..b5115c6 100755
--- a/scripts/kconfig/merge_config.sh
+++ b/scripts/kconfig/merge_config.sh
@@ -151,7 +151,7 @@
 # Use the merged file as the starting point for:
 # alldefconfig: Fills in any missing symbols with Kconfig default
 # allnoconfig: Fills in any missing symbols with # CONFIG_* is not set
-make KCONFIG_ALLCONFIG=$TMP_FILE $OUTPUT_ARG $ALLTARGET
+make $MAKE_ARGS KCONFIG_ALLCONFIG=$TMP_FILE $OUTPUT_ARG $ALLTARGET
 
 
 # Check all specified config values took (might have missed-dependency issues)
diff --git a/security/Kconfig b/security/Kconfig
index bfb5302..e483bbc 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -18,6 +18,15 @@
 
 	  If you are unsure how to answer this question, answer N.
 
+config SECURITY_PERF_EVENTS_RESTRICT
+	bool "Restrict unprivileged use of performance events"
+	depends on PERF_EVENTS
+	help
+	  If you say Y here, the kernel.perf_event_paranoid sysctl
+	  will be set to 3 by default, and no unprivileged use of the
+	  perf_event_open syscall will be permitted unless it is
+	  changed.
+
 config SECURITY
 	bool "Enable different security models"
 	depends on SYSFS
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index b8d1a90..3679936 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -755,6 +755,28 @@ struct snd_soc_component *soc_find_component(
 EXPORT_SYMBOL(soc_find_component);
 
 /**
+ * soc_find_component_locked - soc_find_component with client lock acquired
+ *
+ * @of_node: of_node of the component to query.
+ * @name: name of the component to query.
+ *
+ * Function to find out whether a component is already registered with the
+ * ASoC core.
+ *
+ * Returns the component handle on success, else NULL.
+ */
+struct snd_soc_component *soc_find_component_locked(
+	const struct device_node *of_node, const char *name)
+{
+	struct snd_soc_component *component = NULL;
+
+	mutex_lock(&client_mutex);
+	component = soc_find_component(of_node, name);
+	mutex_unlock(&client_mutex);
+	return component;
+}
+EXPORT_SYMBOL(soc_find_component_locked);
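
A hypothetical caller, for illustration; the wrapper spares call sites from taking client_mutex themselves:

struct snd_soc_component *comp;

comp = soc_find_component_locked(of_node, NULL);
if (!comp)
	return -EPROBE_DEFER;	/* not registered yet; one common policy */
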
+
+/**
  * snd_soc_find_dai - Find a registered DAI
  *
  * @dlc: name of the DAI or the DAI driver and optional component info to match